From ae66079240624a5d3d1b431429895c9ec0aaf468 Mon Sep 17 00:00:00 2001 From: Kevin Ushey Date: Fri, 13 Mar 2020 20:55:44 -0700 Subject: [PATCH 01/13] update to TBB 2020 Update 1 --- inst/include/index.html | 2 +- inst/include/serial/tbb/parallel_for.h | 43 +- inst/include/serial/tbb/tbb_annotate.h | 6 +- inst/include/tbb/aggregator.h | 12 +- inst/include/tbb/aligned_space.h | 25 +- inst/include/tbb/atomic.h | 48 +- inst/include/tbb/blocked_range.h | 29 +- inst/include/tbb/blocked_range2d.h | 28 +- inst/include/tbb/blocked_range3d.h | 37 +- inst/include/tbb/blocked_rangeNd.h | 150 + inst/include/tbb/cache_aligned_allocator.h | 86 +- inst/include/tbb/combinable.h | 13 +- inst/include/tbb/compat/condition_variable | 103 +- inst/include/tbb/compat/ppl.h | 19 +- inst/include/tbb/compat/thread | 21 +- inst/include/tbb/compat/tuple | 35 +- inst/include/tbb/concurrent_hash_map.h | 390 +- inst/include/tbb/concurrent_lru_cache.h | 85 +- inst/include/tbb/concurrent_map.h | 389 + inst/include/tbb/concurrent_priority_queue.h | 82 +- inst/include/tbb/concurrent_queue.h | 35 +- inst/include/tbb/concurrent_set.h | 304 + inst/include/tbb/concurrent_unordered_map.h | 249 +- inst/include/tbb/concurrent_unordered_set.h | 240 +- inst/include/tbb/concurrent_vector.h | 84 +- inst/include/tbb/critical_section.h | 22 +- inst/include/tbb/enumerable_thread_specific.h | 64 +- inst/include/tbb/flow_graph.h | 2387 +- inst/include/tbb/flow_graph_abstractions.h | 14 +- inst/include/tbb/flow_graph_opencl_node.h | 718 +- inst/include/tbb/gfx_factory.h | 359 - inst/include/tbb/global_control.h | 18 +- inst/include/tbb/index.html | 2 +- inst/include/tbb/info.h | 52 + inst/include/tbb/internal/_aggregator_impl.h | 8 +- .../tbb/internal/_concurrent_queue_impl.h | 43 +- .../tbb/internal/_concurrent_unordered_impl.h | 270 +- .../tbb/internal/_flow_graph_async_msg_impl.h | 120 +- .../tbb/internal/_flow_graph_body_impl.h | 157 +- .../tbb/internal/_flow_graph_cache_impl.h | 86 +- 
inst/include/tbb/internal/_flow_graph_impl.h | 1156 +- .../tbb/internal/_flow_graph_indexer_impl.h | 74 +- .../internal/_flow_graph_item_buffer_impl.h | 8 +- .../tbb/internal/_flow_graph_join_impl.h | 145 +- .../tbb/internal/_flow_graph_node_impl.h | 481 +- .../tbb/internal/_flow_graph_streaming_node.h | 9 +- .../internal/_flow_graph_tagged_buffer_impl.h | 6 +- .../tbb/internal/_flow_graph_trace_impl.h | 216 +- .../tbb/internal/_flow_graph_types_impl.h | 22 +- inst/include/tbb/internal/_mutex_padding.h | 6 +- inst/include/tbb/internal/_range_iterator.h | 6 +- .../tbb/internal/_tbb_hash_compare_impl.h | 16 +- inst/include/tbb/internal/_tbb_strings.h | 22 +- inst/include/tbb/internal/_tbb_trace_impl.h | 13 +- inst/include/tbb/internal/_tbb_windef.h | 10 +- inst/include/tbb/internal/_template_helpers.h | 136 +- .../tbb/internal/_x86_eliding_mutex_impl.h | 6 +- .../tbb/internal/_x86_rtm_rw_mutex_impl.h | 16 +- inst/include/tbb/iterators.h | 326 + .../tbb/machine/{gcc_armv7.h => gcc_arm.h} | 37 +- inst/include/tbb/machine/gcc_generic.h | 95 +- inst/include/tbb/machine/gcc_ia32_common.h | 9 +- inst/include/tbb/machine/gcc_itsx.h | 6 +- inst/include/tbb/machine/ibm_aix51.h | 6 +- inst/include/tbb/machine/icc_generic.h | 8 +- inst/include/tbb/machine/linux_common.h | 37 +- inst/include/tbb/machine/linux_ia32.h | 6 +- inst/include/tbb/machine/linux_ia64.h | 6 +- inst/include/tbb/machine/linux_intel64.h | 6 +- inst/include/tbb/machine/mac_ppc.h | 6 +- inst/include/tbb/machine/macos_common.h | 6 +- inst/include/tbb/machine/mic_common.h | 6 +- inst/include/tbb/machine/msvc_armv7.h | 6 +- inst/include/tbb/machine/msvc_ia32_common.h | 11 +- inst/include/tbb/machine/sunos_sparc.h | 6 +- inst/include/tbb/machine/windows_api.h | 6 +- inst/include/tbb/machine/windows_ia32.h | 10 +- inst/include/tbb/machine/windows_intel64.h | 10 +- inst/include/tbb/machine/xbox360_ppc.h | 119 - inst/include/tbb/memory_pool.h | 6 +- inst/include/tbb/mutex.h | 21 +- inst/include/tbb/null_mutex.h | 6 
+- inst/include/tbb/null_rw_mutex.h | 6 +- inst/include/tbb/parallel_do.h | 14 +- inst/include/tbb/parallel_for.h | 32 +- inst/include/tbb/parallel_for_each.h | 6 +- inst/include/tbb/parallel_invoke.h | 37 +- inst/include/tbb/parallel_reduce.h | 172 +- inst/include/tbb/parallel_scan.h | 80 +- inst/include/tbb/parallel_sort.h | 31 +- inst/include/tbb/parallel_while.h | 12 +- inst/include/tbb/partitioner.h | 174 +- inst/include/tbb/pipeline.h | 123 +- inst/include/tbb/queuing_mutex.h | 24 +- inst/include/tbb/queuing_rw_mutex.h | 25 +- inst/include/tbb/reader_writer_lock.h | 22 +- inst/include/tbb/recursive_mutex.h | 22 +- inst/include/tbb/runtime_loader.h | 21 +- inst/include/tbb/scalable_allocator.h | 65 +- inst/include/tbb/spin_mutex.h | 12 +- inst/include/tbb/spin_rw_mutex.h | 19 +- inst/include/tbb/task.h | 180 +- inst/include/tbb/task_arena.h | 274 +- inst/include/tbb/task_group.h | 179 +- inst/include/tbb/task_scheduler_init.h | 21 +- inst/include/tbb/task_scheduler_observer.h | 30 +- inst/include/tbb/tbb.h | 28 +- inst/include/tbb/tbb_allocator.h | 19 +- inst/include/tbb/tbb_config.h | 340 +- inst/include/tbb/tbb_disable_exceptions.h | 6 +- inst/include/tbb/tbb_exception.h | 99 +- inst/include/tbb/tbb_machine.h | 24 +- inst/include/tbb/tbb_profiling.h | 109 +- inst/include/tbb/tbb_stddef.h | 73 +- inst/include/tbb/tbb_thread.h | 39 +- inst/include/tbb/tbbmalloc_proxy.h | 9 +- inst/include/tbb/tick_count.h | 6 +- src/tbb/.gitattributes | 45 + src/tbb/.gitignore | 88 + src/tbb/CHANGES | 439 +- src/tbb/Doxyfile | 1325 ++ src/tbb/{COPYING => LICENSE} | 0 src/tbb/Makefile | 29 +- src/tbb/README.md | 33 + src/tbb/build/.gitignore | 1 - src/tbb/build/AIX.gcc.inc | 6 +- src/tbb/build/AIX.inc | 8 +- src/tbb/build/BSD.clang.inc | 106 + src/tbb/build/BSD.inc | 70 + src/tbb/build/FreeBSD.clang.inc | 99 +- src/tbb/build/FreeBSD.gcc.inc | 8 +- src/tbb/build/FreeBSD.inc | 63 +- src/tbb/build/Makefile.rml | 13 +- src/tbb/build/Makefile.tbb | 8 +- 
src/tbb/build/Makefile.tbbbind | 69 + src/tbb/build/Makefile.tbbmalloc | 46 +- src/tbb/build/Makefile.tbbproxy | 6 +- src/tbb/build/Makefile.test | 21 +- src/tbb/build/OpenBSD.clang.inc | 15 + src/tbb/build/OpenBSD.inc | 15 + src/tbb/build/SunOS.gcc.inc | 17 +- src/tbb/build/SunOS.inc | 8 +- src/tbb/build/SunOS.suncc.inc | 9 +- src/tbb/build/android.clang.inc | 20 +- src/tbb/build/android.gcc.inc | 6 +- src/tbb/build/android.icc.inc | 6 +- src/tbb/build/android.inc | 6 +- src/tbb/build/android.linux.inc | 6 +- src/tbb/build/android.linux.launcher.sh | 6 +- src/tbb/build/android.macos.inc | 6 +- src/tbb/build/android.windows.inc | 6 +- src/tbb/build/big_iron.inc | 6 +- src/tbb/build/build.py | 204 + src/tbb/build/common.inc | 18 +- src/tbb/build/common_rules.inc | 21 +- src/tbb/build/detect.js | 26 +- src/tbb/build/generate_tbbvars.bat | 6 +- src/tbb/build/generate_tbbvars.sh | 6 +- src/tbb/build/index.html | 17 +- src/tbb/build/ios.clang.inc | 10 +- src/tbb/build/ios.macos.inc | 8 +- src/tbb/build/linux.clang.inc | 24 +- src/tbb/build/linux.gcc.inc | 39 +- src/tbb/build/linux.icc.inc | 21 +- src/tbb/build/linux.inc | 15 +- src/tbb/build/linux.pathcc.inc | 10 +- src/tbb/build/linux.xl.inc | 10 +- src/tbb/build/macos.clang.inc | 31 +- src/tbb/build/macos.gcc.inc | 32 +- src/tbb/build/macos.icc.inc | 21 +- src/tbb/build/macos.inc | 10 +- src/tbb/build/mic.icc.inc | 10 +- src/tbb/build/mic.linux.inc | 6 +- src/tbb/build/mic.linux.launcher.sh | 6 +- src/tbb/build/mic.offload.inc | 6 +- src/tbb/build/test_launcher.bat | 6 +- src/tbb/build/test_launcher.sh | 6 +- src/tbb/build/version_info_aix.sh | 6 +- src/tbb/build/version_info_android.sh | 6 +- src/tbb/build/version_info_linux.sh | 6 +- src/tbb/build/version_info_macos.sh | 8 +- src/tbb/build/version_info_sunos.sh | 6 +- src/tbb/build/version_info_windows.js | 7 +- src/tbb/build/vs2013/index.html | 2 +- src/tbb/build/vs2013/tbb.vcxproj | 17 +- src/tbb/build/vs2013/tbbmalloc.vcxproj | 17 +- 
src/tbb/build/vs2013/tbbmalloc_proxy.vcxproj | 16 +- src/tbb/build/windows.cl.inc | 26 +- src/tbb/build/windows.gcc.inc | 18 +- src/tbb/build/windows.icl.inc | 22 +- src/tbb/build/windows.inc | 60 +- src/tbb/cmake/README.rst | 361 + src/tbb/cmake/TBBBuild.cmake | 197 + src/tbb/cmake/TBBGet.cmake | 294 + src/tbb/cmake/TBBInstallConfig.cmake | 124 + src/tbb/cmake/TBBMakeConfig.cmake | 164 + src/tbb/cmake/tbb_config_generator.cmake | 41 + src/tbb/cmake/tbb_config_installer.cmake | 48 + src/tbb/cmake/templates/TBBConfig.cmake.in | 96 + .../templates/TBBConfigInternal.cmake.in | 98 + .../cmake/templates/TBBConfigVersion.cmake.in | 24 + src/tbb/doc/Release_Notes.txt | 132 + .../copyright_brand_disclaimer_doxygen.txt | 9 + src/tbb/examples/GettingStarted/index.html | 344 + .../GettingStarted/sub_string_finder/Makefile | 73 + .../sub_string_finder/Makefile.windows | 55 + .../msvs/sub_string_finder.sln | 48 + .../msvs/sub_string_finder.vcxproj | 198 + .../msvs/sub_string_finder_extended.vcxproj | 198 + .../msvs/sub_string_finder_pretty.vcxproj | 198 + .../sub_string_finder/readme.html | 416 + .../sub_string_finder/sub_string_finder.cpp | 74 + .../sub_string_finder_extended.cpp | 162 + .../sub_string_finder_pretty.cpp | 94 + .../project.pbxproj | 502 + src/tbb/examples/Makefile | 157 + src/tbb/examples/common/copy_libraries.bat | 84 + src/tbb/examples/common/examples-common.inc | 34 + src/tbb/examples/common/gui/Makefile.gmake | 86 + src/tbb/examples/common/gui/Makefile.win | 70 + src/tbb/examples/common/gui/convideo.cpp | 132 + src/tbb/examples/common/gui/d2dvideo.cpp | 197 + src/tbb/examples/common/gui/dxcheck.bat | 24 + src/tbb/examples/common/gui/gdivideo.cpp | 142 + src/tbb/examples/common/gui/macvideo.cpp | 160 + src/tbb/examples/common/gui/video.h | 237 + src/tbb/examples/common/gui/winvideo.h | 279 + .../common/gui/xcode/tbbExample/OpenGLView.h | 56 + .../common/gui/xcode/tbbExample/OpenGLView.m | 143 + .../common/gui/xcode/tbbExample/PkgInfo | 1 + 
.../tbbExample/en.lproj/InfoPlist.strings | 2 + .../xcode/tbbExample/en.lproj/MainMenu.nib | Bin 0 -> 3312 bytes .../xcode/tbbExample/en.lproj/MainMenu.xib | 48 + .../gui/xcode/tbbExample/iOS.storyboard | 28 + .../common/gui/xcode/tbbExample/main.m | 47 + .../gui/xcode/tbbExample/tbbAppDelegate.h | 47 + .../gui/xcode/tbbExample/tbbAppDelegate.m | 62 + .../tbbExample/tbbExample-Info.ios.plist | 44 + .../xcode/tbbExample/tbbExample-Info.plist | 45 + .../xcode/tbbExample/tbbExample-Prefix.pch | 32 + src/tbb/examples/common/gui/xvideo.cpp | 382 + src/tbb/examples/common/index.html | 399 + src/tbb/examples/common/toolset.props | 9 + src/tbb/examples/common/utility/fast_random.h | 78 + .../common/utility/get_default_num_threads.h | 32 + src/tbb/examples/common/utility/utility.h | 523 + .../count_strings/Makefile | 58 + .../count_strings/Makefile.windows | 47 + .../count_strings/count_strings.cpp | 232 + .../count_strings/msvs/count_strings.sln | 28 + .../count_strings/msvs/count_strings.vcxproj | 201 + .../count_strings/readme.html | 401 + .../count_strings.xcodeproj/project.pbxproj | 310 + .../examples/concurrent_hash_map/index.html | 344 + .../concurrent_priority_queue/index.html | 344 + .../shortpath/Makefile | 56 + .../shortpath/Makefile.windows | 46 + .../shortpath/msvs/shortpath.sln | 28 + .../shortpath/msvs/shortpath.vcxproj | 201 + .../shortpath/readme.html | 427 + .../shortpath/shortpath.cpp | 359 + .../xcode/shortpath.xcodeproj/project.pbxproj | 310 + src/tbb/examples/graph/binpack/Makefile | 60 + .../examples/graph/binpack/Makefile.windows | 48 + src/tbb/examples/graph/binpack/binpack.cpp | 289 + .../examples/graph/binpack/msvs/binpack.sln | 28 + .../graph/binpack/msvs/binpack.vcxproj | 201 + src/tbb/examples/graph/binpack/readme.html | 415 + .../xcode/binpack.xcodeproj/project.pbxproj | 310 + src/tbb/examples/graph/cholesky/Makefile | 55 + .../examples/graph/cholesky/Makefile.windows | 48 + src/tbb/examples/graph/cholesky/cholesky.cpp | 713 + 
src/tbb/examples/graph/cholesky/init.cpp | 134 + .../examples/graph/cholesky/msvs/cholesky.sln | 28 + .../graph/cholesky/msvs/cholesky.vcxproj | 206 + src/tbb/examples/graph/cholesky/readme.html | 415 + .../xcode/cholesky.xcodeproj/project.pbxproj | 318 + .../xcshareddata/xcschemes/Cholesky.xcscheme | 101 + .../graph/dining_philosophers/Makefile | 57 + .../dining_philosophers/Makefile.windows | 44 + .../dining_philosophers.cpp | 303 + .../msvs/dining_philosophers.sln | 28 + .../msvs/dining_philosophers.vcxproj | 201 + .../graph/dining_philosophers/readme.html | 383 + .../project.pbxproj | 316 + src/tbb/examples/graph/fgbzip2/Makefile | 56 + .../examples/graph/fgbzip2/Makefile.windows | 53 + src/tbb/examples/graph/fgbzip2/blocksort.cpp | 1142 + src/tbb/examples/graph/fgbzip2/bzlib.cpp | 1620 ++ src/tbb/examples/graph/fgbzip2/bzlib.h | 330 + .../examples/graph/fgbzip2/bzlib_private.h | 557 + src/tbb/examples/graph/fgbzip2/compress.cpp | 720 + src/tbb/examples/graph/fgbzip2/crctable.cpp | 152 + src/tbb/examples/graph/fgbzip2/decompress.cpp | 694 + src/tbb/examples/graph/fgbzip2/fgbzip2.cpp | 497 + src/tbb/examples/graph/fgbzip2/huffman.cpp | 253 + .../examples/graph/fgbzip2/msvs/fgbzip2.sln | 28 + .../graph/fgbzip2/msvs/fgbzip2.vcxproj | 216 + src/tbb/examples/graph/fgbzip2/randtable.cpp | 132 + src/tbb/examples/graph/fgbzip2/readme.html | 466 + .../xcode/fgbzip2.xcodeproj/project.pbxproj | 337 + src/tbb/examples/graph/index.html | 356 + src/tbb/examples/graph/logic_sim/D_latch.h | 56 + src/tbb/examples/graph/logic_sim/Makefile | 60 + .../examples/graph/logic_sim/Makefile.windows | 52 + src/tbb/examples/graph/logic_sim/basics.h | 554 + .../examples/graph/logic_sim/four_bit_adder.h | 58 + .../graph/logic_sim/msvs/logic_sim.sln | 28 + .../graph/logic_sim/msvs/logic_sim.vcxproj | 201 + .../examples/graph/logic_sim/one_bit_adder.h | 99 + src/tbb/examples/graph/logic_sim/readme.html | 406 + src/tbb/examples/graph/logic_sim/test_all.cpp | 637 + 
.../examples/graph/logic_sim/two_bit_adder.h | 55 + src/tbb/examples/graph/som/Makefile | 62 + src/tbb/examples/graph/som/Makefile.windows | 44 + src/tbb/examples/graph/som/msvs/som.sln | 28 + src/tbb/examples/graph/som/msvs/som.vcxproj | 217 + src/tbb/examples/graph/som/readme.html | 391 + src/tbb/examples/graph/som/som.cpp | 213 + src/tbb/examples/graph/som/som.h | 157 + src/tbb/examples/graph/som/som_graph.cpp | 429 + .../som/xcode/som.xcodeproj/project.pbxproj | 314 + src/tbb/examples/graph/stereo/Makefile | 70 + .../examples/graph/stereo/Makefile.windows | 55 + src/tbb/examples/graph/stereo/imageEffects.cl | 52 + src/tbb/examples/graph/stereo/lodepng.cpp | 6223 ++++++ src/tbb/examples/graph/stereo/lodepng.h | 1774 ++ src/tbb/examples/graph/stereo/msvs/stereo.sln | 28 + .../examples/graph/stereo/msvs/stereo.vcxproj | 163 + src/tbb/examples/graph/stereo/readme.html | 445 + src/tbb/examples/graph/stereo/stereo.cpp | 437 + src/tbb/examples/graph/stereo/utils.h | 104 + .../xcode/stereo.xcodeproj/project.pbxproj | 324 + src/tbb/examples/index.html | 581 + src/tbb/examples/parallel_do/index.html | 344 + .../parallel_do/parallel_preorder/Graph.cpp | 108 + .../parallel_do/parallel_preorder/Graph.h | 79 + .../parallel_do/parallel_preorder/Makefile | 62 + .../parallel_preorder/Makefile.windows | 50 + .../parallel_do/parallel_preorder/Matrix.h | 56 + .../parallel_do/parallel_preorder/main.cpp | 90 + .../msvs/parallel_preorder.sln | 28 + .../msvs/parallel_preorder.vcxproj | 207 + .../parallel_preorder/parallel_preorder.cpp | 52 + .../parallel_do/parallel_preorder/readme.html | 444 + .../project.pbxproj | 276 + .../parallel_for/game_of_life/Makefile | 52 + .../game_of_life/Makefile.windows | 42 + .../game_of_life/msvs/Game_of_life.sln | 40 + .../game_of_life/msvs/Game_of_life.vcxproj | 451 + .../parallel_for/game_of_life/msvs/app.ico | Bin 0 -> 1078 bytes .../parallel_for/game_of_life/msvs/app.rc | 63 + .../parallel_for/game_of_life/msvs/resource.h | 19 + 
.../parallel_for/game_of_life/readme.html | 392 + .../game_of_life/src/AssemblyInfo.cpp | 54 + .../parallel_for/game_of_life/src/Board.h | 103 + .../game_of_life/src/Evolution.cpp | 242 + .../parallel_for/game_of_life/src/Evolution.h | 191 + .../parallel_for/game_of_life/src/Form1.h | 302 + .../game_of_life/src/Game_of_life.cpp | 227 + .../game_of_life/src/Update_state.cpp | 398 + .../game_of_life.xcodeproj/project.pbxproj | 270 + src/tbb/examples/parallel_for/index.html | 350 + .../parallel_for/polygon_overlay/Makefile | 89 + .../polygon_overlay/Makefile.windows | 56 + .../polygon_overlay/msvs/polygon_overlay.sln | 40 + .../polygon_overlay/msvs/pover.rc | 61 + .../polygon_overlay/msvs/pover.vcxproj | 448 + .../polygon_overlay/msvs/resource.h | 30 + .../parallel_for/polygon_overlay/polymain.cpp | 615 + .../parallel_for/polygon_overlay/polymain.h | 41 + .../parallel_for/polygon_overlay/polyover.cpp | 664 + .../parallel_for/polygon_overlay/polyover.h | 36 + .../polygon_overlay/pover_global.h | 89 + .../polygon_overlay/pover_video.cpp | 171 + .../polygon_overlay/pover_video.h | 60 + .../parallel_for/polygon_overlay/readme.html | 476 + .../parallel_for/polygon_overlay/rpolygon.h | 151 + .../parallel_for/polygon_overlay/speedup.gif | Bin 0 -> 7162 bytes .../polygon_overlay.xcodeproj/project.pbxproj | 573 + .../xcschemes/tbbExample.ios.xcscheme | 91 + .../xcschemes/tbbExample.xcscheme | 99 + .../examples/parallel_for/seismic/Makefile | 96 + .../parallel_for/seismic/Makefile.windows | 63 + .../examples/parallel_for/seismic/main.cpp | 134 + .../seismic/msvs/SeismicSimulation.ico | Bin 0 -> 23558 bytes .../seismic/msvs/SeismicSimulation.rc | 145 + .../seismic/msvs/SeismicSimulation.vcxproj | 451 + .../parallel_for/seismic/msvs/resource.h | 48 + .../parallel_for/seismic/msvs/seismic.sln | 40 + .../parallel_for/seismic/msvs/small.ico | Bin 0 -> 23558 bytes .../examples/parallel_for/seismic/readme.html | 471 + .../parallel_for/seismic/seismic_video.cpp | 144 + 
.../parallel_for/seismic/seismic_video.h | 50 + .../parallel_for/seismic/universe.cpp | 217 + .../examples/parallel_for/seismic/universe.h | 114 + .../xcode/seismic.xcodeproj/project.pbxproj | 586 + .../xcschemes/tbbExample.ios.xcscheme | 119 + .../xcschemes/tbbExample.xcscheme | 99 + .../examples/parallel_for/tachyon/Makefile | 184 + .../parallel_for/tachyon/Makefile.windows | 147 + .../parallel_for/tachyon/dat/820spheres.dat | 1671 ++ .../parallel_for/tachyon/dat/balls.dat | 14804 +++++++++++++ .../parallel_for/tachyon/dat/balls3.dat | 14804 +++++++++++++ .../parallel_for/tachyon/dat/lattice.dat | 18012 ++++++++++++++++ .../parallel_for/tachyon/dat/model2.dat | 104 + .../parallel_for/tachyon/dat/teapot.dat | 9279 ++++++++ .../parallel_for/tachyon/dat/trypsin4pti.dat | 538 + .../parallel_for/tachyon/msvs/gui.ico | Bin 0 -> 23558 bytes .../examples/parallel_for/tachyon/msvs/gui.rc | 90 + .../parallel_for/tachyon/msvs/resource.h | 24 + .../parallel_for/tachyon/msvs/small.ico | Bin 0 -> 23558 bytes .../tachyon/msvs/tachyon.serial.vcxproj | 393 + .../parallel_for/tachyon/msvs/tachyon.sln | 94 + .../tachyon/msvs/tachyon.tbb.vcxproj | 445 + .../tachyon/msvs/tachyon.tbb1d.vcxproj | 445 + .../parallel_for/tachyon/msvs/tachyon.vcxproj | 455 + .../parallel_for/tachyon/msvs/uwp/App.xaml | 15 + .../tachyon/msvs/uwp/App.xaml.cpp | 50 + .../parallel_for/tachyon/msvs/uwp/App.xaml.h | 37 + .../tachyon/msvs/uwp/Assets/Logo.png | Bin 0 -> 35141 bytes .../tachyon/msvs/uwp/Assets/SmallLogo.png | Bin 0 -> 2552 bytes .../tachyon/msvs/uwp/Assets/SplashScreen.png | Bin 0 -> 144360 bytes .../tachyon/msvs/uwp/Assets/StoreLogo.png | Bin 0 -> 6321 bytes .../msvs/uwp/Common/StandardStyles.xaml | 1007 + .../tachyon/msvs/uwp/DirectXBase.cpp | 409 + .../tachyon/msvs/uwp/DirectXBase.h | 88 + .../tachyon/msvs/uwp/DirectXPage.xaml | 31 + .../tachyon/msvs/uwp/DirectXPage.xaml.cpp | 128 + .../tachyon/msvs/uwp/DirectXPage.xaml.h | 44 + .../tachyon/msvs/uwp/Package.appxmanifest | 49 + 
.../msvs/uwp/copy_libraries_and_assets.bat | 70 + .../parallel_for/tachyon/msvs/uwp/pch.h | 25 + .../tachyon/msvs/uwp/tbbTachyon.sln | 32 + .../tachyon/msvs/uwp/tbbTachyon.vcxproj | 245 + .../msvs/uwp/tbbTachyon.vcxproj.filters | 217 + .../tachyon/msvs/uwp/tbbTachyonRenderer.cpp | 185 + .../tachyon/msvs/uwp/tbbTachyonRenderer.h | 42 + .../examples/parallel_for/tachyon/readme.html | 572 + .../examples/parallel_for/tachyon/src/api.cpp | 414 + .../examples/parallel_for/tachyon/src/api.h | 204 + .../parallel_for/tachyon/src/apigeom.cpp | 264 + .../parallel_for/tachyon/src/apitrigeom.cpp | 222 + .../parallel_for/tachyon/src/apitrigeom.h | 56 + .../parallel_for/tachyon/src/bndbox.cpp | 178 + .../parallel_for/tachyon/src/bndbox.h | 70 + .../examples/parallel_for/tachyon/src/box.cpp | 164 + .../examples/parallel_for/tachyon/src/box.h | 65 + .../parallel_for/tachyon/src/camera.cpp | 105 + .../parallel_for/tachyon/src/camera.h | 52 + .../parallel_for/tachyon/src/coordsys.cpp | 92 + .../parallel_for/tachyon/src/coordsys.h | 56 + .../parallel_for/tachyon/src/cylinder.cpp | 267 + .../parallel_for/tachyon/src/cylinder.h | 74 + .../parallel_for/tachyon/src/extvol.cpp | 307 + .../parallel_for/tachyon/src/extvol.h | 69 + .../parallel_for/tachyon/src/global.cpp | 70 + .../parallel_for/tachyon/src/global.h | 64 + .../parallel_for/tachyon/src/grid.cpp | 675 + .../examples/parallel_for/tachyon/src/grid.h | 116 + .../parallel_for/tachyon/src/imageio.cpp | 143 + .../parallel_for/tachyon/src/imageio.h | 61 + .../parallel_for/tachyon/src/imap.cpp | 164 + .../examples/parallel_for/tachyon/src/imap.h | 57 + .../parallel_for/tachyon/src/intersect.cpp | 173 + .../parallel_for/tachyon/src/intersect.h | 63 + .../parallel_for/tachyon/src/jpeg.cpp | 125 + .../examples/parallel_for/tachyon/src/jpeg.h | 52 + .../parallel_for/tachyon/src/light.cpp | 124 + .../examples/parallel_for/tachyon/src/light.h | 67 + .../parallel_for/tachyon/src/machine.h | 62 + .../parallel_for/tachyon/src/macros.h | 75 + 
.../parallel_for/tachyon/src/main.cpp | 381 + .../parallel_for/tachyon/src/objbound.cpp | 332 + .../parallel_for/tachyon/src/objbound.h | 62 + .../parallel_for/tachyon/src/parse.cpp | 859 + .../examples/parallel_for/tachyon/src/parse.h | 107 + .../parallel_for/tachyon/src/plane.cpp | 101 + .../examples/parallel_for/tachyon/src/plane.h | 68 + .../examples/parallel_for/tachyon/src/ppm.cpp | 125 + .../examples/parallel_for/tachyon/src/ppm.h | 56 + .../parallel_for/tachyon/src/pthread.cpp | 152 + .../parallel_for/tachyon/src/pthread_w.h | 105 + .../parallel_for/tachyon/src/quadric.cpp | 166 + .../parallel_for/tachyon/src/quadric.h | 71 + .../parallel_for/tachyon/src/render.cpp | 87 + .../parallel_for/tachyon/src/render.h | 53 + .../parallel_for/tachyon/src/ring.cpp | 127 + .../examples/parallel_for/tachyon/src/ring.h | 69 + .../parallel_for/tachyon/src/shade.cpp | 254 + .../examples/parallel_for/tachyon/src/shade.h | 58 + .../parallel_for/tachyon/src/sphere.cpp | 129 + .../parallel_for/tachyon/src/sphere.h | 69 + .../tachyon/src/tachyon_video.cpp | 116 + .../parallel_for/tachyon/src/tachyon_video.h | 58 + .../parallel_for/tachyon/src/texture.cpp | 381 + .../parallel_for/tachyon/src/texture.h | 65 + .../parallel_for/tachyon/src/tgafile.cpp | 233 + .../parallel_for/tachyon/src/tgafile.h | 57 + .../examples/parallel_for/tachyon/src/trace.h | 94 + .../parallel_for/tachyon/src/trace.serial.cpp | 173 + .../parallel_for/tachyon/src/trace.simple.cpp | 178 + .../parallel_for/tachyon/src/trace.tbb.cpp | 266 + .../parallel_for/tachyon/src/trace.tbb1d.cpp | 202 + .../parallel_for/tachyon/src/trace_rest.cpp | 134 + .../parallel_for/tachyon/src/triangle.cpp | 241 + .../parallel_for/tachyon/src/triangle.h | 90 + .../examples/parallel_for/tachyon/src/types.h | 222 + .../examples/parallel_for/tachyon/src/ui.cpp | 111 + .../examples/parallel_for/tachyon/src/ui.h | 64 + .../parallel_for/tachyon/src/util.cpp | 171 + .../examples/parallel_for/tachyon/src/util.h | 84 + 
.../parallel_for/tachyon/src/vector.cpp | 130 + .../parallel_for/tachyon/src/vector.h | 64 + .../examples/parallel_for/tachyon/src/vol.cpp | 298 + .../examples/parallel_for/tachyon/src/vol.h | 59 + .../xcode/tachyon.xcodeproj/project.pbxproj | 1295 ++ .../xcschemes/tachyon.serial.xcscheme | 104 + .../xcschemes/tachyon.tbb.ios.xcscheme | 97 + .../xcschemes/tachyon.tbb.xcscheme | 105 + .../xcschemes/tachyon.tbb1d.xcscheme | 105 + .../parallel_reduce/convex_hull/Makefile | 63 + .../convex_hull/Makefile.windows | 52 + .../parallel_reduce/convex_hull/convex_hull.h | 184 + .../convex_hull/convex_hull_bench.cpp | 631 + .../convex_hull/convex_hull_sample.cpp | 293 + .../convex_hull/msvs/convex_hull.sln | 38 + .../msvs/convex_hull_benchmark.vcxproj | 206 + .../msvs/convex_hull_sample.vcxproj | 203 + .../parallel_reduce/convex_hull/readme.html | 400 + .../convex_hull.xcodeproj/project.pbxproj | 350 + src/tbb/examples/parallel_reduce/index.html | 346 + .../examples/parallel_reduce/primes/Makefile | 67 + .../parallel_reduce/primes/Makefile.windows | 48 + .../examples/parallel_reduce/primes/main.cpp | 106 + .../parallel_reduce/primes/msvs/primes.sln | 28 + .../primes/msvs/primes.vcxproj | 205 + .../parallel_reduce/primes/primes.cpp | 304 + .../examples/parallel_reduce/primes/primes.h | 40 + .../parallel_reduce/primes/readme.html | 407 + .../xcode/primes.xcodeproj/project.pbxproj | 268 + src/tbb/examples/pipeline/index.html | 344 + src/tbb/examples/pipeline/square/Makefile | 60 + .../examples/pipeline/square/Makefile.windows | 46 + .../examples/pipeline/square/gen_input.cpp | 58 + .../examples/pipeline/square/msvs/square.sln | 28 + .../pipeline/square/msvs/square.vcxproj | 202 + src/tbb/examples/pipeline/square/readme.html | 417 + src/tbb/examples/pipeline/square/square.cpp | 266 + .../xcode/square.xcodeproj/project.pbxproj | 266 + src/tbb/examples/task/index.html | 344 + src/tbb/examples/task/tree_sum/Makefile | 58 + .../examples/task/tree_sum/Makefile.windows | 48 + 
.../tree_sum/OptimizedParallelSumTree.cpp | 65 + .../examples/task/tree_sum/SerialSumTree.cpp | 26 + .../task/tree_sum/SimpleParallelSumTree.cpp | 58 + src/tbb/examples/task/tree_sum/TreeMaker.h | 112 + src/tbb/examples/task/tree_sum/common.h | 32 + src/tbb/examples/task/tree_sum/main.cpp | 105 + .../examples/task/tree_sum/msvs/tree_sum.sln | 28 + .../task/tree_sum/msvs/tree_sum.vcxproj | 208 + src/tbb/examples/task/tree_sum/readme.html | 423 + .../xcode/tree_sum.xcodeproj/project.pbxproj | 278 + src/tbb/examples/task_arena/fractal/Makefile | 86 + .../task_arena/fractal/Makefile.windows | 62 + .../examples/task_arena/fractal/fractal.cpp | 244 + src/tbb/examples/task_arena/fractal/fractal.h | 164 + .../task_arena/fractal/fractal_video.h | 84 + src/tbb/examples/task_arena/fractal/main.cpp | 87 + .../task_arena/fractal/msvs/fractal.sln | 40 + .../task_arena/fractal/msvs/fractal.vcxproj | 450 + .../examples/task_arena/fractal/msvs/gui.ico | Bin 0 -> 23558 bytes .../examples/task_arena/fractal/msvs/gui.rc | 90 + .../task_arena/fractal/msvs/resource.h | 24 + .../task_arena/fractal/msvs/small.ico | Bin 0 -> 23558 bytes .../examples/task_arena/fractal/readme.html | 445 + .../xcode/fractal.xcodeproj/project.pbxproj | 613 + .../xcschemes/tbbExample.ios.xcscheme | 91 + .../xcschemes/tbbExample.xcscheme | 93 + src/tbb/examples/task_arena/index.html | 343 + src/tbb/examples/task_group/index.html | 343 + src/tbb/examples/task_group/sudoku/Makefile | 70 + .../task_group/sudoku/Makefile.windows | 46 + src/tbb/examples/task_group/sudoku/input1 | 19 + src/tbb/examples/task_group/sudoku/input2 | 18 + src/tbb/examples/task_group/sudoku/input3 | 19 + src/tbb/examples/task_group/sudoku/input4 | 19 + .../task_group/sudoku/msvs/sudoku.sln | 28 + .../task_group/sudoku/msvs/sudoku.vcxproj | 201 + .../examples/task_group/sudoku/readme.html | 417 + src/tbb/examples/task_group/sudoku/sudoku.cpp | 316 + .../xcode/sudoku.xcodeproj/project.pbxproj | 262 + .../test_all/fibonacci/CMakeLists.txt | 
31 + .../examples/test_all/fibonacci/Fibonacci.cpp | 624 + src/tbb/examples/test_all/fibonacci/Makefile | 51 + .../test_all/fibonacci/Makefile.windows | 41 + .../test_all/fibonacci/msvs/fibonacci.sln | 28 + .../test_all/fibonacci/msvs/fibonacci.vcxproj | 201 + .../examples/test_all/fibonacci/readme.html | 402 + .../xcode/fibonacci.xcodeproj/project.pbxproj | 268 + src/tbb/examples/test_all/index.html | 344 + src/tbb/include/index.html | 2 +- src/tbb/include/serial/tbb/parallel_for.h | 43 +- src/tbb/include/serial/tbb/tbb_annotate.h | 6 +- src/tbb/include/tbb/aggregator.h | 12 +- src/tbb/include/tbb/aligned_space.h | 21 +- src/tbb/include/tbb/atomic.h | 48 +- src/tbb/include/tbb/blocked_range.h | 20 +- src/tbb/include/tbb/blocked_range2d.h | 8 +- src/tbb/include/tbb/blocked_range3d.h | 8 +- src/tbb/include/tbb/blocked_rangeNd.h | 150 + src/tbb/include/tbb/cache_aligned_allocator.h | 86 +- src/tbb/include/tbb/combinable.h | 13 +- src/tbb/include/tbb/compat/condition_variable | 103 +- src/tbb/include/tbb/compat/ppl.h | 19 +- src/tbb/include/tbb/compat/thread | 21 +- src/tbb/include/tbb/compat/tuple | 35 +- src/tbb/include/tbb/concurrent_hash_map.h | 379 +- src/tbb/include/tbb/concurrent_lru_cache.h | 13 +- src/tbb/include/tbb/concurrent_map.h | 389 + .../include/tbb/concurrent_priority_queue.h | 82 +- src/tbb/include/tbb/concurrent_queue.h | 35 +- src/tbb/include/tbb/concurrent_set.h | 304 + .../include/tbb/concurrent_unordered_map.h | 249 +- .../include/tbb/concurrent_unordered_set.h | 240 +- src/tbb/include/tbb/concurrent_vector.h | 75 +- src/tbb/include/tbb/critical_section.h | 22 +- .../include/tbb/enumerable_thread_specific.h | 64 +- src/tbb/include/tbb/flow_graph.h | 1853 +- src/tbb/include/tbb/flow_graph_abstractions.h | 12 +- src/tbb/include/tbb/flow_graph_opencl_node.h | 94 +- src/tbb/include/tbb/gfx_factory.h | 359 - src/tbb/include/tbb/global_control.h | 18 +- src/tbb/include/tbb/index.html | 2 +- src/tbb/include/tbb/info.h | 52 + 
.../include/tbb/internal/_aggregator_impl.h | 8 +- .../tbb/internal/_concurrent_queue_impl.h | 32 +- .../tbb/internal/_concurrent_unordered_impl.h | 256 +- .../tbb/internal/_flow_graph_async_msg_impl.h | 6 +- .../tbb/internal/_flow_graph_body_impl.h | 157 +- .../tbb/internal/_flow_graph_cache_impl.h | 86 +- .../include/tbb/internal/_flow_graph_impl.h | 248 +- .../tbb/internal/_flow_graph_indexer_impl.h | 42 +- .../internal/_flow_graph_item_buffer_impl.h | 8 +- .../tbb/internal/_flow_graph_join_impl.h | 99 +- .../tbb/internal/_flow_graph_node_impl.h | 430 +- .../tbb/internal/_flow_graph_streaming_node.h | 9 +- .../internal/_flow_graph_tagged_buffer_impl.h | 6 +- .../tbb/internal/_flow_graph_trace_impl.h | 150 +- .../tbb/internal/_flow_graph_types_impl.h | 22 +- src/tbb/include/tbb/internal/_mutex_padding.h | 6 +- .../include/tbb/internal/_range_iterator.h | 6 +- .../tbb/internal/_tbb_hash_compare_impl.h | 16 +- src/tbb/include/tbb/internal/_tbb_strings.h | 24 +- .../include/tbb/internal/_tbb_trace_impl.h | 13 +- src/tbb/include/tbb/internal/_tbb_windef.h | 10 +- .../include/tbb/internal/_template_helpers.h | 136 +- .../tbb/internal/_x86_eliding_mutex_impl.h | 6 +- .../tbb/internal/_x86_rtm_rw_mutex_impl.h | 16 +- src/tbb/include/tbb/iterators.h | 326 + .../tbb/machine/{gcc_armv7.h => gcc_arm.h} | 37 +- src/tbb/include/tbb/machine/gcc_generic.h | 95 +- src/tbb/include/tbb/machine/gcc_ia32_common.h | 9 +- src/tbb/include/tbb/machine/gcc_itsx.h | 6 +- src/tbb/include/tbb/machine/ibm_aix51.h | 6 +- src/tbb/include/tbb/machine/icc_generic.h | 8 +- src/tbb/include/tbb/machine/linux_common.h | 37 +- src/tbb/include/tbb/machine/linux_ia32.h | 6 +- src/tbb/include/tbb/machine/linux_ia64.h | 6 +- src/tbb/include/tbb/machine/linux_intel64.h | 6 +- src/tbb/include/tbb/machine/mac_ppc.h | 6 +- src/tbb/include/tbb/machine/macos_common.h | 6 +- src/tbb/include/tbb/machine/mic_common.h | 6 +- src/tbb/include/tbb/machine/msvc_armv7.h | 6 +- .../include/tbb/machine/msvc_ia32_common.h 
| 11 +- src/tbb/include/tbb/machine/sunos_sparc.h | 6 +- src/tbb/include/tbb/machine/windows_api.h | 6 +- src/tbb/include/tbb/machine/windows_ia32.h | 10 +- src/tbb/include/tbb/machine/windows_intel64.h | 10 +- src/tbb/include/tbb/memory_pool.h | 6 +- src/tbb/include/tbb/mutex.h | 21 +- src/tbb/include/tbb/null_mutex.h | 6 +- src/tbb/include/tbb/null_rw_mutex.h | 6 +- src/tbb/include/tbb/parallel_do.h | 14 +- src/tbb/include/tbb/parallel_for.h | 34 +- src/tbb/include/tbb/parallel_for_each.h | 6 +- src/tbb/include/tbb/parallel_invoke.h | 37 +- src/tbb/include/tbb/parallel_reduce.h | 44 +- src/tbb/include/tbb/parallel_scan.h | 12 +- src/tbb/include/tbb/parallel_sort.h | 31 +- src/tbb/include/tbb/parallel_while.h | 12 +- src/tbb/include/tbb/partitioner.h | 94 +- src/tbb/include/tbb/pipeline.h | 123 +- src/tbb/include/tbb/queuing_mutex.h | 13 +- src/tbb/include/tbb/queuing_rw_mutex.h | 14 +- src/tbb/include/tbb/reader_writer_lock.h | 22 +- src/tbb/include/tbb/recursive_mutex.h | 22 +- src/tbb/include/tbb/runtime_loader.h | 21 +- src/tbb/include/tbb/scalable_allocator.h | 62 +- src/tbb/include/tbb/spin_mutex.h | 12 +- src/tbb/include/tbb/spin_rw_mutex.h | 19 +- src/tbb/include/tbb/task.h | 158 +- src/tbb/include/tbb/task_arena.h | 142 +- src/tbb/include/tbb/task_group.h | 155 +- src/tbb/include/tbb/task_scheduler_init.h | 21 +- src/tbb/include/tbb/task_scheduler_observer.h | 30 +- src/tbb/include/tbb/tbb.h | 28 +- src/tbb/include/tbb/tbb_allocator.h | 8 +- src/tbb/include/tbb/tbb_config.h | 267 +- src/tbb/include/tbb/tbb_disable_exceptions.h | 6 +- src/tbb/include/tbb/tbb_exception.h | 22 +- src/tbb/include/tbb/tbb_machine.h | 18 +- src/tbb/include/tbb/tbb_profiling.h | 109 +- src/tbb/include/tbb/tbb_stddef.h | 60 +- src/tbb/include/tbb/tbb_thread.h | 29 +- src/tbb/include/tbb/tbbmalloc_proxy.h | 9 +- src/tbb/include/tbb/tick_count.h | 6 +- src/tbb/index.html | 48 + src/tbb/jni/Android.mk | 62 + src/tbb/jni/Application.mk | 67 + src/tbb/python/Makefile | 45 + 
src/tbb/python/TBB.py | 24 + src/tbb/python/index.html | 84 + src/tbb/python/rml/Makefile | 151 + src/tbb/python/rml/ipc_server.cpp | 1115 + src/tbb/python/rml/ipc_utils.cpp | 140 + src/tbb/python/rml/ipc_utils.h | 30 + src/tbb/python/setup.py | 120 + src/tbb/python/tbb/__init__.py | 325 + src/tbb/python/tbb/__main__.py | 20 + src/tbb/python/tbb/api.i | 175 + src/tbb/python/tbb/pool.py | 631 + src/tbb/python/tbb/test.py | 195 + src/tbb/src/Makefile | 29 +- src/tbb/src/index.html | 2 +- src/tbb/src/old/concurrent_queue_v2.cpp | 10 +- src/tbb/src/old/concurrent_queue_v2.h | 36 +- src/tbb/src/old/concurrent_vector_v2.cpp | 8 +- src/tbb/src/old/concurrent_vector_v2.h | 6 +- src/tbb/src/old/spin_rw_mutex_v2.cpp | 6 +- src/tbb/src/old/spin_rw_mutex_v2.h | 6 +- src/tbb/src/old/task_v2.cpp | 6 +- src/tbb/src/old/test_concurrent_queue_v2.cpp | 6 +- src/tbb/src/old/test_concurrent_vector_v2.cpp | 6 +- src/tbb/src/old/test_mutex_v2.cpp | 6 +- .../old/test_task_scheduler_observer_v3.cpp | 14 +- .../src/perf/coarse_grained_raii_lru_cache.h | 6 +- src/tbb/src/perf/cpq_pdes.cpp | 6 +- src/tbb/src/perf/fibonacci_impl_tbb.cpp | 6 +- src/tbb/src/perf/harness_perf.h | 39 + src/tbb/src/perf/perf.cpp | 6 +- src/tbb/src/perf/perf.h | 6 +- src/tbb/src/perf/perf_sched.cpp | 6 +- src/tbb/src/perf/run_statistics.sh | 8 +- src/tbb/src/perf/statistics.cpp | 6 +- src/tbb/src/perf/statistics.h | 6 +- src/tbb/src/perf/statistics_xml.h | 6 +- src/tbb/src/perf/time_async_return.cpp | 222 + src/tbb/src/perf/time_cpq_throughput_test.cpp | 6 +- src/tbb/src/perf/time_fibonacci_cutoff.cpp | 6 +- src/tbb/src/perf/time_framework.h | 6 +- src/tbb/src/perf/time_hash_map.cpp | 6 +- src/tbb/src/perf/time_hash_map_fill.cpp | 6 +- src/tbb/src/perf/time_hash_map_fill.html | 4 +- src/tbb/src/perf/time_locked_work.cpp | 6 +- .../src/perf/time_lru_cache_throughput.cpp | 8 +- src/tbb/src/perf/time_parallel_for_each.cpp | 6 +- src/tbb/src/perf/time_resumable_tasks.cpp | 323 + src/tbb/src/perf/time_sandbox.h | 6 +- 
src/tbb/src/perf/time_split_node.cpp | 6 +- src/tbb/src/perf/time_vector.cpp | 6 +- src/tbb/src/rml/client/index.html | 2 +- src/tbb/src/rml/client/library_assert.h | 6 +- src/tbb/src/rml/client/omp_dynamic_link.cpp | 6 +- src/tbb/src/rml/client/omp_dynamic_link.h | 6 +- src/tbb/src/rml/client/rml_factory.h | 8 +- src/tbb/src/rml/client/rml_omp.cpp | 6 +- src/tbb/src/rml/client/rml_tbb.cpp | 6 +- src/tbb/src/rml/include/index.html | 2 +- src/tbb/src/rml/include/rml_base.h | 6 +- src/tbb/src/rml/include/rml_omp.h | 6 +- src/tbb/src/rml/include/rml_tbb.h | 6 +- src/tbb/src/rml/index.html | 2 +- src/tbb/src/rml/perfor/omp_nested.cpp | 6 +- src/tbb/src/rml/perfor/omp_simple.cpp | 6 +- src/tbb/src/rml/perfor/tbb_multi_omp.cpp | 6 +- src/tbb/src/rml/perfor/tbb_simple.cpp | 6 +- src/tbb/src/rml/perfor/thread_level.h | 6 +- src/tbb/src/rml/server/index.html | 2 +- src/tbb/src/rml/server/irml.rc | 8 +- src/tbb/src/rml/server/job_automaton.h | 6 +- src/tbb/src/rml/server/lin-rml-export.def | 6 +- src/tbb/src/rml/server/rml_server.cpp | 48 +- src/tbb/src/rml/server/thread_monitor.h | 46 +- src/tbb/src/rml/server/wait_counter.h | 6 +- src/tbb/src/rml/server/win32-rml-export.def | 6 +- src/tbb/src/rml/server/win64-rml-export.def | 6 +- src/tbb/src/rml/test/rml_omp_stub.cpp | 6 +- src/tbb/src/rml/test/test_job_automaton.cpp | 6 +- src/tbb/src/rml/test/test_rml_mixed.cpp | 6 +- src/tbb/src/rml/test/test_rml_omp.cpp | 6 +- src/tbb/src/rml/test/test_rml_omp_c_linkage.c | 6 +- src/tbb/src/rml/test/test_rml_tbb.cpp | 6 +- src/tbb/src/rml/test/test_server.h | 10 +- src/tbb/src/rml/test/test_thread_monitor.cpp | 6 +- src/tbb/src/tbb/arena.cpp | 296 +- src/tbb/src/tbb/arena.h | 217 +- src/tbb/src/tbb/cache_aligned_allocator.cpp | 22 +- src/tbb/src/tbb/cilk-tbb-interop.h | 6 +- src/tbb/src/tbb/co_context.h | 217 + src/tbb/src/tbb/concurrent_hash_map.cpp | 6 +- src/tbb/src/tbb/concurrent_monitor.cpp | 23 +- src/tbb/src/tbb/concurrent_monitor.h | 12 +- src/tbb/src/tbb/concurrent_queue.cpp 
| 10 +- src/tbb/src/tbb/concurrent_vector.cpp | 20 +- src/tbb/src/tbb/condition_variable.cpp | 18 +- src/tbb/src/tbb/critical_section.cpp | 6 +- src/tbb/src/tbb/custom_scheduler.h | 616 +- src/tbb/src/tbb/dynamic_link.cpp | 71 +- src/tbb/src/tbb/dynamic_link.h | 8 +- src/tbb/src/tbb/governor.cpp | 192 +- src/tbb/src/tbb/governor.h | 15 +- src/tbb/src/tbb/ia32-masm/atomic_support.asm | 6 +- src/tbb/src/tbb/ia32-masm/itsx.asm | 6 +- src/tbb/src/tbb/ia32-masm/lock_byte.asm | 6 +- src/tbb/src/tbb/ia64-gas/atomic_support.s | 6 +- src/tbb/src/tbb/ia64-gas/ia64_misc.s | 6 +- src/tbb/src/tbb/ia64-gas/lock_byte.s | 6 +- src/tbb/src/tbb/ia64-gas/log2.s | 6 +- src/tbb/src/tbb/ia64-gas/pause.s | 6 +- src/tbb/src/tbb/ibm_aix51/atomic_support.c | 6 +- src/tbb/src/tbb/index.html | 2 +- .../src/tbb/intel64-masm/atomic_support.asm | 6 +- src/tbb/src/tbb/intel64-masm/intel64_misc.asm | 6 +- src/tbb/src/tbb/intel64-masm/itsx.asm | 6 +- src/tbb/src/tbb/intrusive_list.h | 6 +- src/tbb/src/tbb/itt_notify.cpp | 11 +- src/tbb/src/tbb/itt_notify.h | 23 +- src/tbb/src/tbb/lin32-tbb-export.def | 6 +- src/tbb/src/tbb/lin32-tbb-export.lst | 20 +- src/tbb/src/tbb/lin32-tbbbind-export.def | 24 + src/tbb/src/tbb/lin64-tbb-export.def | 6 +- src/tbb/src/tbb/lin64-tbb-export.lst | 20 +- src/tbb/src/tbb/lin64-tbbbind-export.def | 24 + src/tbb/src/tbb/lin64ipf-tbb-export.def | 6 +- src/tbb/src/tbb/lin64ipf-tbb-export.lst | 20 +- src/tbb/src/tbb/mac32-tbb-export.def | 6 +- src/tbb/src/tbb/mac32-tbb-export.lst | 27 +- src/tbb/src/tbb/mac64-tbb-export.def | 6 +- src/tbb/src/tbb/mac64-tbb-export.lst | 27 +- src/tbb/src/tbb/mailbox.h | 6 +- src/tbb/src/tbb/market.cpp | 388 +- src/tbb/src/tbb/market.h | 45 +- src/tbb/src/tbb/mutex.cpp | 6 +- src/tbb/src/tbb/observer_proxy.cpp | 71 +- src/tbb/src/tbb/observer_proxy.h | 11 +- src/tbb/src/tbb/pipeline.cpp | 53 +- src/tbb/src/tbb/private_server.cpp | 22 +- src/tbb/src/tbb/queuing_mutex.cpp | 6 +- src/tbb/src/tbb/queuing_rw_mutex.cpp | 10 +- 
src/tbb/src/tbb/reader_writer_lock.cpp | 6 +- src/tbb/src/tbb/recursive_mutex.cpp | 6 +- src/tbb/src/tbb/scheduler.cpp | 200 +- src/tbb/src/tbb/scheduler.h | 349 +- src/tbb/src/tbb/scheduler_common.h | 23 +- src/tbb/src/tbb/scheduler_utility.h | 6 +- src/tbb/src/tbb/semaphore.cpp | 6 +- src/tbb/src/tbb/semaphore.h | 18 +- src/tbb/src/tbb/spin_mutex.cpp | 6 +- src/tbb/src/tbb/spin_rw_mutex.cpp | 6 +- src/tbb/src/tbb/task.cpp | 8 +- src/tbb/src/tbb/task_group_context.cpp | 14 +- src/tbb/src/tbb/task_stream.h | 6 +- src/tbb/src/tbb/task_stream_extended.h | 319 + src/tbb/src/tbb/tbb_assert_impl.h | 17 +- src/tbb/src/tbb/tbb_bind.cpp | 298 + src/tbb/src/tbb/tbb_environment.h | 85 + src/tbb/src/tbb/tbb_main.cpp | 110 +- src/tbb/src/tbb/tbb_main.h | 9 +- src/tbb/src/tbb/tbb_misc.cpp | 67 +- src/tbb/src/tbb/tbb_misc.h | 37 +- src/tbb/src/tbb/tbb_misc_ex.cpp | 8 +- src/tbb/src/tbb/tbb_resource.rc | 8 +- src/tbb/src/tbb/tbb_statistics.cpp | 6 +- src/tbb/src/tbb/tbb_statistics.h | 6 +- src/tbb/src/tbb/tbb_thread.cpp | 6 +- src/tbb/src/tbb/tbb_version.h | 6 +- src/tbb/src/tbb/tls.h | 6 +- src/tbb/src/tbb/tools_api/disable_warnings.h | 6 +- src/tbb/src/tbb/tools_api/ittnotify.h | 711 +- src/tbb/src/tbb/tools_api/ittnotify_config.h | 116 +- src/tbb/src/tbb/tools_api/ittnotify_static.c | 258 +- src/tbb/src/tbb/tools_api/ittnotify_static.h | 58 +- src/tbb/src/tbb/tools_api/ittnotify_types.h | 6 +- src/tbb/src/tbb/tools_api/legacy/ittnotify.h | 24 +- src/tbb/src/tbb/win32-tbb-export.def | 6 +- src/tbb/src/tbb/win32-tbb-export.lst | 20 +- src/tbb/src/tbb/win32-tbbbind-export.def | 22 + src/tbb/src/tbb/win64-gcc-tbb-export.def | 6 +- src/tbb/src/tbb/win64-gcc-tbb-export.lst | 25 +- src/tbb/src/tbb/win64-tbb-export.def | 6 +- src/tbb/src/tbb/win64-tbb-export.lst | 20 +- src/tbb/src/tbb/win64-tbbbind-export.def | 22 + src/tbb/src/tbb/winrt-tbb-export.lst | 6 +- src/tbb/src/tbb/x86_rtm_rw_mutex.cpp | 17 +- src/tbb/src/tbbmalloc/Customize.h | 91 +- src/tbb/src/tbbmalloc/MapMemory.h | 116 
+- src/tbb/src/tbbmalloc/Statistics.h | 6 +- src/tbb/src/tbbmalloc/Synchronize.h | 104 + src/tbb/src/tbbmalloc/TypeDefinitions.h | 6 +- src/tbb/src/tbbmalloc/backend.cpp | 425 +- src/tbb/src/tbbmalloc/backend.h | 385 + src/tbb/src/tbbmalloc/backref.cpp | 14 +- src/tbb/src/tbbmalloc/frontend.cpp | 612 +- src/tbb/src/tbbmalloc/index.html | 2 +- src/tbb/src/tbbmalloc/large_objects.cpp | 183 +- src/tbb/src/tbbmalloc/large_objects.h | 368 + src/tbb/src/tbbmalloc/lin32-proxy-export.def | 6 +- .../src/tbbmalloc/lin32-tbbmalloc-export.def | 7 +- src/tbb/src/tbbmalloc/lin64-proxy-export.def | 6 +- .../src/tbbmalloc/lin64-tbbmalloc-export.def | 7 +- .../src/tbbmalloc/lin64ipf-proxy-export.def | 6 +- .../tbbmalloc/lin64ipf-tbbmalloc-export.def | 10 +- .../src/tbbmalloc/mac32-tbbmalloc-export.def | 8 +- .../src/tbbmalloc/mac64-tbbmalloc-export.def | 8 +- src/tbb/src/tbbmalloc/proxy.cpp | 230 +- src/tbb/src/tbbmalloc/proxy.h | 6 +- src/tbb/src/tbbmalloc/proxy_overload_osx.h | 6 +- src/tbb/src/tbbmalloc/shared_utils.h | 74 +- .../tbbmalloc/tbb_function_replacement.cpp | 261 +- .../src/tbbmalloc/tbb_function_replacement.h | 14 +- src/tbb/src/tbbmalloc/tbbmalloc.cpp | 15 +- src/tbb/src/tbbmalloc/tbbmalloc.rc | 8 +- src/tbb/src/tbbmalloc/tbbmalloc_internal.h | 774 +- .../src/tbbmalloc/tbbmalloc_internal_api.h | 8 +- .../tbbmalloc/win32-gcc-tbbmalloc-export.def | 7 +- .../src/tbbmalloc/win32-tbbmalloc-export.def | 8 +- .../tbbmalloc/win64-gcc-tbbmalloc-export.def | 7 +- .../src/tbbmalloc/win64-tbbmalloc-export.def | 8 +- src/tbb/src/tbbproxy/tbbproxy-windows.asm | 6 +- src/tbb/src/tbbproxy/tbbproxy.cpp | 12 +- src/tbb/src/test/harness.h | 882 + src/tbb/src/test/harness_allocator.h | 869 + src/tbb/src/test/harness_allocator_overload.h | 35 + src/tbb/src/test/harness_assert.h | 37 + src/tbb/src/test/harness_bad_expr.h | 73 + src/tbb/src/test/harness_barrier.h | 136 + src/tbb/src/test/harness_checktype.h | 95 + src/tbb/src/test/harness_concurrency.h | 101 + 
.../src/test/harness_concurrency_tracker.h | 170 + src/tbb/src/test/harness_cpu.h | 116 + src/tbb/src/test/harness_defs.h | 220 + src/tbb/src/test/harness_dynamic_libs.h | 124 + src/tbb/src/test/harness_eh.h | 313 + src/tbb/src/test/harness_fp.h | 168 + src/tbb/src/test/harness_graph.h | 1236 ++ src/tbb/src/test/harness_inject_scheduler.h | 82 + src/tbb/src/test/harness_iterator.h | 160 + src/tbb/src/test/harness_m128.h | 125 + src/tbb/src/test/harness_memory.h | 141 + src/tbb/src/test/harness_mic.h | 42 + src/tbb/src/test/harness_preload.h | 43 + src/tbb/src/test/harness_report.h | 174 + src/tbb/src/test/harness_runtime_loader.h | 33 + src/tbb/src/test/harness_state_trackable.h | 143 + src/tbb/src/test/harness_task.h | 51 + src/tbb/src/test/harness_tbb_independence.h | 83 + .../src/test/harness_test_cases_framework.h | 236 + src/tbb/src/test/harness_tls.h | 75 + src/tbb/src/test/harness_tsx.h | 66 + src/tbb/src/test/test_ScalableAllocator.cpp | 223 + .../src/test/test_ScalableAllocator_STL.cpp | 50 + src/tbb/src/test/test_aggregator.cpp | 180 + src/tbb/src/test/test_aligned_space.cpp | 115 + src/tbb/src/test/test_allocator.h | 278 + src/tbb/src/test/test_allocator_STL.h | 147 + .../src/test/test_arena_constraints_hwloc.cpp | 382 + .../src/test/test_arena_constraints_stubs.cpp | 41 + src/tbb/src/test/test_assembly.cpp | 160 + src/tbb/src/test/test_async_msg.cpp | 599 + src/tbb/src/test/test_async_node.cpp | 839 + src/tbb/src/test/test_atomic.cpp | 1601 ++ src/tbb/src/test/test_blocked_range.cpp | 206 + src/tbb/src/test/test_blocked_range2d.cpp | 168 + src/tbb/src/test/test_blocked_range3d.cpp | 201 + src/tbb/src/test/test_blocked_rangeNd.cpp | 255 + src/tbb/src/test/test_broadcast_node.cpp | 385 + src/tbb/src/test/test_buffer_node.cpp | 483 + .../src/test/test_cache_aligned_allocator.cpp | 76 + .../test/test_cache_aligned_allocator_STL.cpp | 42 + src/tbb/src/test/test_cilk_common.h | 79 + src/tbb/src/test/test_cilk_dynamic_load.cpp | 152 + 
src/tbb/src/test/test_cilk_interop.cpp | 151 + src/tbb/src/test/test_combinable.cpp | 503 + src/tbb/src/test/test_composite_node.cpp | 586 + .../test/test_concurrent_associative_common.h | 1518 ++ src/tbb/src/test/test_concurrent_hash_map.cpp | 1713 ++ .../src/test/test_concurrent_lru_cache.cpp | 462 + src/tbb/src/test/test_concurrent_map.cpp | 265 + src/tbb/src/test/test_concurrent_monitor.cpp | 365 + .../src/test/test_concurrent_ordered_common.h | 375 + .../test/test_concurrent_priority_queue.cpp | 1216 ++ src/tbb/src/test/test_concurrent_queue.cpp | 1752 ++ .../test/test_concurrent_queue_whitebox.cpp | 97 + src/tbb/src/test/test_concurrent_set.cpp | 253 + .../test/test_concurrent_unordered_common.h | 293 + .../test/test_concurrent_unordered_map.cpp | 252 + .../test/test_concurrent_unordered_set.cpp | 272 + src/tbb/src/test/test_concurrent_vector.cpp | 1874 ++ src/tbb/src/test/test_condition_variable.h | 763 + .../src/test/test_container_move_support.h | 897 + src/tbb/src/test/test_continue_node.cpp | 568 + src/tbb/src/test/test_critical_section.cpp | 212 + src/tbb/src/test/test_dynamic_link.cpp | 80 + src/tbb/src/test/test_eh_algorithms.cpp | 1579 ++ src/tbb/src/test/test_eh_flow_graph.cpp | 2039 ++ src/tbb/src/test/test_eh_tasks.cpp | 787 + .../test/test_enumerable_thread_specific.cpp | 1380 ++ .../src/test/test_environment_whitebox.cpp | 241 + .../src/test/test_examples_common_utility.cpp | 598 + src/tbb/src/test/test_fast_random.cpp | 196 + src/tbb/src/test/test_flow_graph.cpp | 372 + .../src/test/test_flow_graph_priorities.cpp | 599 + src/tbb/src/test/test_flow_graph_whitebox.cpp | 707 + .../src/test/test_follows_and_precedes_api.h | 263 + src/tbb/src/test/test_fp.cpp | 381 + src/tbb/src/test/test_function_node.cpp | 673 + src/tbb/src/test/test_global_control.cpp | 793 + .../src/test/test_global_control_whitebox.cpp | 78 + src/tbb/src/test/test_halt.cpp | 109 + src/tbb/src/test/test_handle_perror.cpp | 54 + src/tbb/src/test/test_hw_concurrency.cpp | 52 + 
src/tbb/src/test/test_indexer_node.cpp | 980 + src/tbb/src/test/test_initializer_list.h | 172 + src/tbb/src/test/test_inits_loop.cpp | 90 + src/tbb/src/test/test_intrusive_list.cpp | 146 + src/tbb/src/test/test_iterators.cpp | 281 + src/tbb/src/test/test_ittnotify.cpp | 88 + src/tbb/src/test/test_join_node.cpp | 192 + src/tbb/src/test/test_join_node.h | 2150 ++ .../src/test/test_join_node_key_matching.cpp | 102 + .../test/test_join_node_msg_key_matching.cpp | 115 + src/tbb/src/test/test_lambda.cpp | 235 + src/tbb/src/test/test_limiter_node.cpp | 674 + src/tbb/src/test/test_malloc_atexit.cpp | 157 + src/tbb/src/test/test_malloc_compliance.cpp | 1121 + .../src/test/test_malloc_init_shutdown.cpp | 176 + src/tbb/src/test/test_malloc_lib_unload.cpp | 218 + src/tbb/src/test/test_malloc_new_handler.cpp | 81 + src/tbb/src/test/test_malloc_overload.cpp | 477 + .../src/test/test_malloc_overload_disable.cpp | 69 + src/tbb/src/test/test_malloc_pools.cpp | 883 + src/tbb/src/test/test_malloc_pure_c.c | 128 + src/tbb/src/test/test_malloc_regression.cpp | 186 + .../src/test/test_malloc_shutdown_hang.cpp | 125 + src/tbb/src/test/test_malloc_used_by_lib.cpp | 167 + src/tbb/src/test/test_malloc_whitebox.cpp | 1629 ++ src/tbb/src/test/test_model_plugin.cpp | 216 + src/tbb/src/test/test_multifunction_node.cpp | 753 + src/tbb/src/test/test_mutex.cpp | 711 + .../src/test/test_mutex_native_threads.cpp | 217 + src/tbb/src/test/test_opencl_kernel_32.spir | Bin 0 -> 1440 bytes src/tbb/src/test/test_opencl_kernel_64.spir | Bin 0 -> 1468 bytes src/tbb/src/test/test_opencl_node.cl | 185 + src/tbb/src/test/test_opencl_node.cpp | 911 + .../test_opencl_precompiled_kernel_gpu_32.ir | Bin 0 -> 4110 bytes .../test_opencl_precompiled_kernel_gpu_64.ir | Bin 0 -> 4186 bytes src/tbb/src/test/test_openmp.cpp | 246 + src/tbb/src/test/test_overwrite_node.cpp | 203 + src/tbb/src/test/test_parallel_do.cpp | 424 + src/tbb/src/test/test_parallel_for.cpp | 777 + src/tbb/src/test/test_parallel_for_each.cpp | 244 
+ .../test/test_parallel_for_vectorization.cpp | 71 + src/tbb/src/test/test_parallel_invoke.cpp | 317 + src/tbb/src/test/test_parallel_pipeline.cpp | 673 + src/tbb/src/test/test_parallel_reduce.cpp | 488 + src/tbb/src/test/test_parallel_scan.cpp | 459 + src/tbb/src/test/test_parallel_sort.cpp | 556 + src/tbb/src/test/test_parallel_while.cpp | 167 + src/tbb/src/test/test_partitioner.h | 607 + .../src/test/test_partitioner_whitebox.cpp | 147 + src/tbb/src/test/test_partitioner_whitebox.h | 468 + src/tbb/src/test/test_pipeline.cpp | 309 + src/tbb/src/test/test_pipeline_with_tbf.cpp | 528 + src/tbb/src/test/test_priority_queue_node.cpp | 401 + src/tbb/src/test/test_queue_node.cpp | 508 + src/tbb/src/test/test_range_based_for.h | 75 + src/tbb/src/test/test_reader_writer_lock.cpp | 234 + src/tbb/src/test/test_resumable_tasks.cpp | 431 + src/tbb/src/test/test_runtime_loader.cpp | 281 + .../src/test/test_rwm_upgrade_downgrade.cpp | 70 + src/tbb/src/test/test_semaphore.cpp | 311 + src/tbb/src/test/test_sequencer_node.cpp | 455 + src/tbb/src/test/test_source_node.cpp | 504 + src/tbb/src/test/test_split_node.cpp | 425 + src/tbb/src/test/test_static_assert.cpp | 85 + src/tbb/src/test/test_std_thread.cpp | 39 + src/tbb/src/test/test_streaming_node.cpp | 913 + src/tbb/src/test/test_tagged_msg.cpp | 259 + src/tbb/src/test/test_task.cpp | 1345 ++ src/tbb/src/test/test_task_arena.cpp | 1674 ++ src/tbb/src/test/test_task_assertions.cpp | 90 + src/tbb/src/test/test_task_auto_init.cpp | 198 + src/tbb/src/test/test_task_enqueue.cpp | 376 + src/tbb/src/test/test_task_group.cpp | 1115 + src/tbb/src/test/test_task_leaks.cpp | 268 + src/tbb/src/test/test_task_priority.cpp | 667 + src/tbb/src/test/test_task_scheduler_init.cpp | 367 + .../src/test/test_task_scheduler_observer.cpp | 344 + src/tbb/src/test/test_task_steal_limit.cpp | 75 + .../src/test/test_tbb_condition_variable.cpp | 25 + src/tbb/src/test/test_tbb_fork.cpp | 326 + src/tbb/src/test/test_tbb_header.cpp | 364 + 
src/tbb/src/test/test_tbb_thread.cpp | 29 + src/tbb/src/test/test_tbb_version.cpp | 284 + src/tbb/src/test/test_thread.h | 305 + src/tbb/src/test/test_tick_count.cpp | 199 + src/tbb/src/test/test_tuple.cpp | 200 + src/tbb/src/test/test_write_once_node.cpp | 212 + src/tbb/src/test/test_yield.cpp | 64 + src/tbb/third-party-programs.txt | 350 + tools/update-tbb.R | 23 + 1122 files changed, 240885 insertions(+), 11428 deletions(-) create mode 100644 inst/include/tbb/blocked_rangeNd.h create mode 100644 inst/include/tbb/concurrent_map.h create mode 100644 inst/include/tbb/concurrent_set.h delete mode 100644 inst/include/tbb/gfx_factory.h create mode 100644 inst/include/tbb/info.h create mode 100644 inst/include/tbb/iterators.h rename inst/include/tbb/machine/{gcc_armv7.h => gcc_arm.h} (94%) delete mode 100644 inst/include/tbb/machine/xbox360_ppc.h create mode 100644 src/tbb/.gitattributes create mode 100644 src/tbb/.gitignore create mode 100644 src/tbb/Doxyfile rename src/tbb/{COPYING => LICENSE} (100%) create mode 100644 src/tbb/README.md delete mode 100644 src/tbb/build/.gitignore create mode 100644 src/tbb/build/BSD.clang.inc create mode 100644 src/tbb/build/BSD.inc create mode 100644 src/tbb/build/Makefile.tbbbind create mode 100644 src/tbb/build/OpenBSD.clang.inc create mode 100644 src/tbb/build/OpenBSD.inc create mode 100644 src/tbb/build/build.py create mode 100644 src/tbb/cmake/README.rst create mode 100644 src/tbb/cmake/TBBBuild.cmake create mode 100644 src/tbb/cmake/TBBGet.cmake create mode 100644 src/tbb/cmake/TBBInstallConfig.cmake create mode 100644 src/tbb/cmake/TBBMakeConfig.cmake create mode 100644 src/tbb/cmake/tbb_config_generator.cmake create mode 100644 src/tbb/cmake/tbb_config_installer.cmake create mode 100644 src/tbb/cmake/templates/TBBConfig.cmake.in create mode 100644 src/tbb/cmake/templates/TBBConfigInternal.cmake.in create mode 100644 src/tbb/cmake/templates/TBBConfigVersion.cmake.in create mode 100644 src/tbb/doc/Release_Notes.txt create mode 
100644 src/tbb/doc/copyright_brand_disclaimer_doxygen.txt create mode 100644 src/tbb/examples/GettingStarted/index.html create mode 100644 src/tbb/examples/GettingStarted/sub_string_finder/Makefile create mode 100644 src/tbb/examples/GettingStarted/sub_string_finder/Makefile.windows create mode 100644 src/tbb/examples/GettingStarted/sub_string_finder/msvs/sub_string_finder.sln create mode 100644 src/tbb/examples/GettingStarted/sub_string_finder/msvs/sub_string_finder.vcxproj create mode 100644 src/tbb/examples/GettingStarted/sub_string_finder/msvs/sub_string_finder_extended.vcxproj create mode 100644 src/tbb/examples/GettingStarted/sub_string_finder/msvs/sub_string_finder_pretty.vcxproj create mode 100644 src/tbb/examples/GettingStarted/sub_string_finder/readme.html create mode 100644 src/tbb/examples/GettingStarted/sub_string_finder/sub_string_finder.cpp create mode 100644 src/tbb/examples/GettingStarted/sub_string_finder/sub_string_finder_extended.cpp create mode 100644 src/tbb/examples/GettingStarted/sub_string_finder/sub_string_finder_pretty.cpp create mode 100644 src/tbb/examples/GettingStarted/sub_string_finder/xcode/sub_string_finder.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/Makefile create mode 100644 src/tbb/examples/common/copy_libraries.bat create mode 100644 src/tbb/examples/common/examples-common.inc create mode 100644 src/tbb/examples/common/gui/Makefile.gmake create mode 100644 src/tbb/examples/common/gui/Makefile.win create mode 100644 src/tbb/examples/common/gui/convideo.cpp create mode 100644 src/tbb/examples/common/gui/d2dvideo.cpp create mode 100644 src/tbb/examples/common/gui/dxcheck.bat create mode 100644 src/tbb/examples/common/gui/gdivideo.cpp create mode 100644 src/tbb/examples/common/gui/macvideo.cpp create mode 100644 src/tbb/examples/common/gui/video.h create mode 100644 src/tbb/examples/common/gui/winvideo.h create mode 100644 src/tbb/examples/common/gui/xcode/tbbExample/OpenGLView.h create mode 100644 
src/tbb/examples/common/gui/xcode/tbbExample/OpenGLView.m create mode 100644 src/tbb/examples/common/gui/xcode/tbbExample/PkgInfo create mode 100644 src/tbb/examples/common/gui/xcode/tbbExample/en.lproj/InfoPlist.strings create mode 100644 src/tbb/examples/common/gui/xcode/tbbExample/en.lproj/MainMenu.nib create mode 100644 src/tbb/examples/common/gui/xcode/tbbExample/en.lproj/MainMenu.xib create mode 100644 src/tbb/examples/common/gui/xcode/tbbExample/iOS.storyboard create mode 100644 src/tbb/examples/common/gui/xcode/tbbExample/main.m create mode 100644 src/tbb/examples/common/gui/xcode/tbbExample/tbbAppDelegate.h create mode 100644 src/tbb/examples/common/gui/xcode/tbbExample/tbbAppDelegate.m create mode 100644 src/tbb/examples/common/gui/xcode/tbbExample/tbbExample-Info.ios.plist create mode 100644 src/tbb/examples/common/gui/xcode/tbbExample/tbbExample-Info.plist create mode 100644 src/tbb/examples/common/gui/xcode/tbbExample/tbbExample-Prefix.pch create mode 100644 src/tbb/examples/common/gui/xvideo.cpp create mode 100644 src/tbb/examples/common/index.html create mode 100644 src/tbb/examples/common/toolset.props create mode 100644 src/tbb/examples/common/utility/fast_random.h create mode 100644 src/tbb/examples/common/utility/get_default_num_threads.h create mode 100644 src/tbb/examples/common/utility/utility.h create mode 100644 src/tbb/examples/concurrent_hash_map/count_strings/Makefile create mode 100644 src/tbb/examples/concurrent_hash_map/count_strings/Makefile.windows create mode 100644 src/tbb/examples/concurrent_hash_map/count_strings/count_strings.cpp create mode 100644 src/tbb/examples/concurrent_hash_map/count_strings/msvs/count_strings.sln create mode 100644 src/tbb/examples/concurrent_hash_map/count_strings/msvs/count_strings.vcxproj create mode 100644 src/tbb/examples/concurrent_hash_map/count_strings/readme.html create mode 100644 src/tbb/examples/concurrent_hash_map/count_strings/xcode/count_strings.xcodeproj/project.pbxproj create mode 100644 
src/tbb/examples/concurrent_hash_map/index.html create mode 100644 src/tbb/examples/concurrent_priority_queue/index.html create mode 100644 src/tbb/examples/concurrent_priority_queue/shortpath/Makefile create mode 100644 src/tbb/examples/concurrent_priority_queue/shortpath/Makefile.windows create mode 100644 src/tbb/examples/concurrent_priority_queue/shortpath/msvs/shortpath.sln create mode 100644 src/tbb/examples/concurrent_priority_queue/shortpath/msvs/shortpath.vcxproj create mode 100644 src/tbb/examples/concurrent_priority_queue/shortpath/readme.html create mode 100644 src/tbb/examples/concurrent_priority_queue/shortpath/shortpath.cpp create mode 100644 src/tbb/examples/concurrent_priority_queue/shortpath/xcode/shortpath.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/graph/binpack/Makefile create mode 100644 src/tbb/examples/graph/binpack/Makefile.windows create mode 100644 src/tbb/examples/graph/binpack/binpack.cpp create mode 100644 src/tbb/examples/graph/binpack/msvs/binpack.sln create mode 100644 src/tbb/examples/graph/binpack/msvs/binpack.vcxproj create mode 100644 src/tbb/examples/graph/binpack/readme.html create mode 100644 src/tbb/examples/graph/binpack/xcode/binpack.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/graph/cholesky/Makefile create mode 100644 src/tbb/examples/graph/cholesky/Makefile.windows create mode 100644 src/tbb/examples/graph/cholesky/cholesky.cpp create mode 100644 src/tbb/examples/graph/cholesky/init.cpp create mode 100644 src/tbb/examples/graph/cholesky/msvs/cholesky.sln create mode 100644 src/tbb/examples/graph/cholesky/msvs/cholesky.vcxproj create mode 100644 src/tbb/examples/graph/cholesky/readme.html create mode 100644 src/tbb/examples/graph/cholesky/xcode/cholesky.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/graph/cholesky/xcode/cholesky.xcodeproj/xcshareddata/xcschemes/Cholesky.xcscheme create mode 100644 src/tbb/examples/graph/dining_philosophers/Makefile create mode 100644 
src/tbb/examples/graph/dining_philosophers/Makefile.windows create mode 100644 src/tbb/examples/graph/dining_philosophers/dining_philosophers.cpp create mode 100644 src/tbb/examples/graph/dining_philosophers/msvs/dining_philosophers.sln create mode 100644 src/tbb/examples/graph/dining_philosophers/msvs/dining_philosophers.vcxproj create mode 100644 src/tbb/examples/graph/dining_philosophers/readme.html create mode 100644 src/tbb/examples/graph/dining_philosophers/xcode/dining_philosophers.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/graph/fgbzip2/Makefile create mode 100644 src/tbb/examples/graph/fgbzip2/Makefile.windows create mode 100644 src/tbb/examples/graph/fgbzip2/blocksort.cpp create mode 100644 src/tbb/examples/graph/fgbzip2/bzlib.cpp create mode 100644 src/tbb/examples/graph/fgbzip2/bzlib.h create mode 100644 src/tbb/examples/graph/fgbzip2/bzlib_private.h create mode 100644 src/tbb/examples/graph/fgbzip2/compress.cpp create mode 100644 src/tbb/examples/graph/fgbzip2/crctable.cpp create mode 100644 src/tbb/examples/graph/fgbzip2/decompress.cpp create mode 100644 src/tbb/examples/graph/fgbzip2/fgbzip2.cpp create mode 100644 src/tbb/examples/graph/fgbzip2/huffman.cpp create mode 100644 src/tbb/examples/graph/fgbzip2/msvs/fgbzip2.sln create mode 100644 src/tbb/examples/graph/fgbzip2/msvs/fgbzip2.vcxproj create mode 100644 src/tbb/examples/graph/fgbzip2/randtable.cpp create mode 100644 src/tbb/examples/graph/fgbzip2/readme.html create mode 100644 src/tbb/examples/graph/fgbzip2/xcode/fgbzip2.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/graph/index.html create mode 100644 src/tbb/examples/graph/logic_sim/D_latch.h create mode 100644 src/tbb/examples/graph/logic_sim/Makefile create mode 100644 src/tbb/examples/graph/logic_sim/Makefile.windows create mode 100644 src/tbb/examples/graph/logic_sim/basics.h create mode 100644 src/tbb/examples/graph/logic_sim/four_bit_adder.h create mode 100644 
src/tbb/examples/graph/logic_sim/msvs/logic_sim.sln create mode 100644 src/tbb/examples/graph/logic_sim/msvs/logic_sim.vcxproj create mode 100644 src/tbb/examples/graph/logic_sim/one_bit_adder.h create mode 100644 src/tbb/examples/graph/logic_sim/readme.html create mode 100644 src/tbb/examples/graph/logic_sim/test_all.cpp create mode 100644 src/tbb/examples/graph/logic_sim/two_bit_adder.h create mode 100644 src/tbb/examples/graph/som/Makefile create mode 100644 src/tbb/examples/graph/som/Makefile.windows create mode 100644 src/tbb/examples/graph/som/msvs/som.sln create mode 100644 src/tbb/examples/graph/som/msvs/som.vcxproj create mode 100644 src/tbb/examples/graph/som/readme.html create mode 100644 src/tbb/examples/graph/som/som.cpp create mode 100644 src/tbb/examples/graph/som/som.h create mode 100644 src/tbb/examples/graph/som/som_graph.cpp create mode 100644 src/tbb/examples/graph/som/xcode/som.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/graph/stereo/Makefile create mode 100644 src/tbb/examples/graph/stereo/Makefile.windows create mode 100644 src/tbb/examples/graph/stereo/imageEffects.cl create mode 100644 src/tbb/examples/graph/stereo/lodepng.cpp create mode 100644 src/tbb/examples/graph/stereo/lodepng.h create mode 100644 src/tbb/examples/graph/stereo/msvs/stereo.sln create mode 100644 src/tbb/examples/graph/stereo/msvs/stereo.vcxproj create mode 100644 src/tbb/examples/graph/stereo/readme.html create mode 100644 src/tbb/examples/graph/stereo/stereo.cpp create mode 100644 src/tbb/examples/graph/stereo/utils.h create mode 100644 src/tbb/examples/graph/stereo/xcode/stereo.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/index.html create mode 100644 src/tbb/examples/parallel_do/index.html create mode 100644 src/tbb/examples/parallel_do/parallel_preorder/Graph.cpp create mode 100644 src/tbb/examples/parallel_do/parallel_preorder/Graph.h create mode 100644 src/tbb/examples/parallel_do/parallel_preorder/Makefile create mode 100644 
src/tbb/examples/parallel_do/parallel_preorder/Makefile.windows create mode 100644 src/tbb/examples/parallel_do/parallel_preorder/Matrix.h create mode 100644 src/tbb/examples/parallel_do/parallel_preorder/main.cpp create mode 100644 src/tbb/examples/parallel_do/parallel_preorder/msvs/parallel_preorder.sln create mode 100644 src/tbb/examples/parallel_do/parallel_preorder/msvs/parallel_preorder.vcxproj create mode 100644 src/tbb/examples/parallel_do/parallel_preorder/parallel_preorder.cpp create mode 100644 src/tbb/examples/parallel_do/parallel_preorder/readme.html create mode 100644 src/tbb/examples/parallel_do/parallel_preorder/xcode/parallel_preorder.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/parallel_for/game_of_life/Makefile create mode 100644 src/tbb/examples/parallel_for/game_of_life/Makefile.windows create mode 100644 src/tbb/examples/parallel_for/game_of_life/msvs/Game_of_life.sln create mode 100644 src/tbb/examples/parallel_for/game_of_life/msvs/Game_of_life.vcxproj create mode 100644 src/tbb/examples/parallel_for/game_of_life/msvs/app.ico create mode 100644 src/tbb/examples/parallel_for/game_of_life/msvs/app.rc create mode 100644 src/tbb/examples/parallel_for/game_of_life/msvs/resource.h create mode 100644 src/tbb/examples/parallel_for/game_of_life/readme.html create mode 100644 src/tbb/examples/parallel_for/game_of_life/src/AssemblyInfo.cpp create mode 100644 src/tbb/examples/parallel_for/game_of_life/src/Board.h create mode 100644 src/tbb/examples/parallel_for/game_of_life/src/Evolution.cpp create mode 100644 src/tbb/examples/parallel_for/game_of_life/src/Evolution.h create mode 100644 src/tbb/examples/parallel_for/game_of_life/src/Form1.h create mode 100644 src/tbb/examples/parallel_for/game_of_life/src/Game_of_life.cpp create mode 100644 src/tbb/examples/parallel_for/game_of_life/src/Update_state.cpp create mode 100644 src/tbb/examples/parallel_for/game_of_life/xcode/game_of_life.xcodeproj/project.pbxproj create mode 100644 
src/tbb/examples/parallel_for/index.html create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/Makefile create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/Makefile.windows create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/msvs/polygon_overlay.sln create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/msvs/pover.rc create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/msvs/pover.vcxproj create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/msvs/resource.h create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/polymain.cpp create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/polymain.h create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/polyover.cpp create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/polyover.h create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/pover_global.h create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/pover_video.cpp create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/pover_video.h create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/readme.html create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/rpolygon.h create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/speedup.gif create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/xcode/polygon_overlay.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/xcode/polygon_overlay.xcodeproj/xcshareddata/xcschemes/tbbExample.ios.xcscheme create mode 100644 src/tbb/examples/parallel_for/polygon_overlay/xcode/polygon_overlay.xcodeproj/xcshareddata/xcschemes/tbbExample.xcscheme create mode 100644 src/tbb/examples/parallel_for/seismic/Makefile create mode 100644 src/tbb/examples/parallel_for/seismic/Makefile.windows create mode 100644 src/tbb/examples/parallel_for/seismic/main.cpp create mode 100644 src/tbb/examples/parallel_for/seismic/msvs/SeismicSimulation.ico 
create mode 100644 src/tbb/examples/parallel_for/seismic/msvs/SeismicSimulation.rc create mode 100644 src/tbb/examples/parallel_for/seismic/msvs/SeismicSimulation.vcxproj create mode 100644 src/tbb/examples/parallel_for/seismic/msvs/resource.h create mode 100644 src/tbb/examples/parallel_for/seismic/msvs/seismic.sln create mode 100644 src/tbb/examples/parallel_for/seismic/msvs/small.ico create mode 100644 src/tbb/examples/parallel_for/seismic/readme.html create mode 100644 src/tbb/examples/parallel_for/seismic/seismic_video.cpp create mode 100644 src/tbb/examples/parallel_for/seismic/seismic_video.h create mode 100644 src/tbb/examples/parallel_for/seismic/universe.cpp create mode 100644 src/tbb/examples/parallel_for/seismic/universe.h create mode 100644 src/tbb/examples/parallel_for/seismic/xcode/seismic.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/parallel_for/seismic/xcode/seismic.xcodeproj/xcshareddata/xcschemes/tbbExample.ios.xcscheme create mode 100644 src/tbb/examples/parallel_for/seismic/xcode/seismic.xcodeproj/xcshareddata/xcschemes/tbbExample.xcscheme create mode 100644 src/tbb/examples/parallel_for/tachyon/Makefile create mode 100644 src/tbb/examples/parallel_for/tachyon/Makefile.windows create mode 100644 src/tbb/examples/parallel_for/tachyon/dat/820spheres.dat create mode 100644 src/tbb/examples/parallel_for/tachyon/dat/balls.dat create mode 100644 src/tbb/examples/parallel_for/tachyon/dat/balls3.dat create mode 100644 src/tbb/examples/parallel_for/tachyon/dat/lattice.dat create mode 100644 src/tbb/examples/parallel_for/tachyon/dat/model2.dat create mode 100644 src/tbb/examples/parallel_for/tachyon/dat/teapot.dat create mode 100644 src/tbb/examples/parallel_for/tachyon/dat/trypsin4pti.dat create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/gui.ico create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/gui.rc create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/resource.h create mode 100644 
src/tbb/examples/parallel_for/tachyon/msvs/small.ico create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/tachyon.serial.vcxproj create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/tachyon.sln create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/tachyon.tbb.vcxproj create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/tachyon.tbb1d.vcxproj create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/tachyon.vcxproj create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/App.xaml create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/App.xaml.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/App.xaml.h create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/Assets/Logo.png create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/Assets/SmallLogo.png create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/Assets/SplashScreen.png create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/Assets/StoreLogo.png create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/Common/StandardStyles.xaml create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/DirectXBase.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/DirectXBase.h create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/DirectXPage.xaml create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/DirectXPage.xaml.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/DirectXPage.xaml.h create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/Package.appxmanifest create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/copy_libraries_and_assets.bat create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/pch.h create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/tbbTachyon.sln create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/tbbTachyon.vcxproj create mode 100644 
src/tbb/examples/parallel_for/tachyon/msvs/uwp/tbbTachyon.vcxproj.filters create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/tbbTachyonRenderer.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/msvs/uwp/tbbTachyonRenderer.h create mode 100644 src/tbb/examples/parallel_for/tachyon/readme.html create mode 100644 src/tbb/examples/parallel_for/tachyon/src/api.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/api.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/apigeom.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/apitrigeom.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/apitrigeom.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/bndbox.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/bndbox.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/box.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/box.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/camera.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/camera.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/coordsys.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/coordsys.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/cylinder.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/cylinder.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/extvol.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/extvol.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/global.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/global.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/grid.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/grid.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/imageio.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/imageio.h create mode 100644 
src/tbb/examples/parallel_for/tachyon/src/imap.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/imap.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/intersect.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/intersect.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/jpeg.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/jpeg.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/light.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/light.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/machine.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/macros.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/main.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/objbound.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/objbound.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/parse.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/parse.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/plane.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/plane.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/ppm.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/ppm.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/pthread.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/pthread_w.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/quadric.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/quadric.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/render.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/render.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/ring.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/ring.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/shade.cpp create mode 100644 
src/tbb/examples/parallel_for/tachyon/src/shade.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/sphere.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/sphere.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/tachyon_video.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/tachyon_video.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/texture.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/texture.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/tgafile.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/tgafile.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/trace.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/trace.serial.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/trace.simple.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/trace.tbb.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/trace.tbb1d.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/trace_rest.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/triangle.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/triangle.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/types.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/ui.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/ui.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/util.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/util.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/vector.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/vector.h create mode 100644 src/tbb/examples/parallel_for/tachyon/src/vol.cpp create mode 100644 src/tbb/examples/parallel_for/tachyon/src/vol.h create mode 100644 src/tbb/examples/parallel_for/tachyon/xcode/tachyon.xcodeproj/project.pbxproj create mode 100644 
src/tbb/examples/parallel_for/tachyon/xcode/tachyon.xcodeproj/xcshareddata/xcschemes/tachyon.serial.xcscheme create mode 100644 src/tbb/examples/parallel_for/tachyon/xcode/tachyon.xcodeproj/xcshareddata/xcschemes/tachyon.tbb.ios.xcscheme create mode 100644 src/tbb/examples/parallel_for/tachyon/xcode/tachyon.xcodeproj/xcshareddata/xcschemes/tachyon.tbb.xcscheme create mode 100644 src/tbb/examples/parallel_for/tachyon/xcode/tachyon.xcodeproj/xcshareddata/xcschemes/tachyon.tbb1d.xcscheme create mode 100644 src/tbb/examples/parallel_reduce/convex_hull/Makefile create mode 100644 src/tbb/examples/parallel_reduce/convex_hull/Makefile.windows create mode 100644 src/tbb/examples/parallel_reduce/convex_hull/convex_hull.h create mode 100644 src/tbb/examples/parallel_reduce/convex_hull/convex_hull_bench.cpp create mode 100644 src/tbb/examples/parallel_reduce/convex_hull/convex_hull_sample.cpp create mode 100644 src/tbb/examples/parallel_reduce/convex_hull/msvs/convex_hull.sln create mode 100644 src/tbb/examples/parallel_reduce/convex_hull/msvs/convex_hull_benchmark.vcxproj create mode 100644 src/tbb/examples/parallel_reduce/convex_hull/msvs/convex_hull_sample.vcxproj create mode 100644 src/tbb/examples/parallel_reduce/convex_hull/readme.html create mode 100644 src/tbb/examples/parallel_reduce/convex_hull/xcode/convex_hull.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/parallel_reduce/index.html create mode 100644 src/tbb/examples/parallel_reduce/primes/Makefile create mode 100644 src/tbb/examples/parallel_reduce/primes/Makefile.windows create mode 100644 src/tbb/examples/parallel_reduce/primes/main.cpp create mode 100644 src/tbb/examples/parallel_reduce/primes/msvs/primes.sln create mode 100644 src/tbb/examples/parallel_reduce/primes/msvs/primes.vcxproj create mode 100644 src/tbb/examples/parallel_reduce/primes/primes.cpp create mode 100644 src/tbb/examples/parallel_reduce/primes/primes.h create mode 100644 src/tbb/examples/parallel_reduce/primes/readme.html 
create mode 100644 src/tbb/examples/parallel_reduce/primes/xcode/primes.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/pipeline/index.html create mode 100644 src/tbb/examples/pipeline/square/Makefile create mode 100644 src/tbb/examples/pipeline/square/Makefile.windows create mode 100644 src/tbb/examples/pipeline/square/gen_input.cpp create mode 100644 src/tbb/examples/pipeline/square/msvs/square.sln create mode 100644 src/tbb/examples/pipeline/square/msvs/square.vcxproj create mode 100644 src/tbb/examples/pipeline/square/readme.html create mode 100644 src/tbb/examples/pipeline/square/square.cpp create mode 100644 src/tbb/examples/pipeline/square/xcode/square.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/task/index.html create mode 100644 src/tbb/examples/task/tree_sum/Makefile create mode 100644 src/tbb/examples/task/tree_sum/Makefile.windows create mode 100644 src/tbb/examples/task/tree_sum/OptimizedParallelSumTree.cpp create mode 100644 src/tbb/examples/task/tree_sum/SerialSumTree.cpp create mode 100644 src/tbb/examples/task/tree_sum/SimpleParallelSumTree.cpp create mode 100644 src/tbb/examples/task/tree_sum/TreeMaker.h create mode 100644 src/tbb/examples/task/tree_sum/common.h create mode 100644 src/tbb/examples/task/tree_sum/main.cpp create mode 100644 src/tbb/examples/task/tree_sum/msvs/tree_sum.sln create mode 100644 src/tbb/examples/task/tree_sum/msvs/tree_sum.vcxproj create mode 100644 src/tbb/examples/task/tree_sum/readme.html create mode 100644 src/tbb/examples/task/tree_sum/xcode/tree_sum.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/task_arena/fractal/Makefile create mode 100644 src/tbb/examples/task_arena/fractal/Makefile.windows create mode 100644 src/tbb/examples/task_arena/fractal/fractal.cpp create mode 100644 src/tbb/examples/task_arena/fractal/fractal.h create mode 100644 src/tbb/examples/task_arena/fractal/fractal_video.h create mode 100644 src/tbb/examples/task_arena/fractal/main.cpp create mode 
100644 src/tbb/examples/task_arena/fractal/msvs/fractal.sln create mode 100644 src/tbb/examples/task_arena/fractal/msvs/fractal.vcxproj create mode 100644 src/tbb/examples/task_arena/fractal/msvs/gui.ico create mode 100644 src/tbb/examples/task_arena/fractal/msvs/gui.rc create mode 100644 src/tbb/examples/task_arena/fractal/msvs/resource.h create mode 100644 src/tbb/examples/task_arena/fractal/msvs/small.ico create mode 100644 src/tbb/examples/task_arena/fractal/readme.html create mode 100644 src/tbb/examples/task_arena/fractal/xcode/fractal.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/task_arena/fractal/xcode/fractal.xcodeproj/xcshareddata/xcschemes/tbbExample.ios.xcscheme create mode 100644 src/tbb/examples/task_arena/fractal/xcode/fractal.xcodeproj/xcshareddata/xcschemes/tbbExample.xcscheme create mode 100644 src/tbb/examples/task_arena/index.html create mode 100644 src/tbb/examples/task_group/index.html create mode 100644 src/tbb/examples/task_group/sudoku/Makefile create mode 100644 src/tbb/examples/task_group/sudoku/Makefile.windows create mode 100644 src/tbb/examples/task_group/sudoku/input1 create mode 100644 src/tbb/examples/task_group/sudoku/input2 create mode 100644 src/tbb/examples/task_group/sudoku/input3 create mode 100644 src/tbb/examples/task_group/sudoku/input4 create mode 100644 src/tbb/examples/task_group/sudoku/msvs/sudoku.sln create mode 100644 src/tbb/examples/task_group/sudoku/msvs/sudoku.vcxproj create mode 100644 src/tbb/examples/task_group/sudoku/readme.html create mode 100644 src/tbb/examples/task_group/sudoku/sudoku.cpp create mode 100644 src/tbb/examples/task_group/sudoku/xcode/sudoku.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/test_all/fibonacci/CMakeLists.txt create mode 100644 src/tbb/examples/test_all/fibonacci/Fibonacci.cpp create mode 100644 src/tbb/examples/test_all/fibonacci/Makefile create mode 100644 src/tbb/examples/test_all/fibonacci/Makefile.windows create mode 100644 
src/tbb/examples/test_all/fibonacci/msvs/fibonacci.sln create mode 100644 src/tbb/examples/test_all/fibonacci/msvs/fibonacci.vcxproj create mode 100644 src/tbb/examples/test_all/fibonacci/readme.html create mode 100644 src/tbb/examples/test_all/fibonacci/xcode/fibonacci.xcodeproj/project.pbxproj create mode 100644 src/tbb/examples/test_all/index.html create mode 100644 src/tbb/include/tbb/blocked_rangeNd.h create mode 100644 src/tbb/include/tbb/concurrent_map.h create mode 100644 src/tbb/include/tbb/concurrent_set.h delete mode 100644 src/tbb/include/tbb/gfx_factory.h create mode 100644 src/tbb/include/tbb/info.h create mode 100644 src/tbb/include/tbb/iterators.h rename src/tbb/include/tbb/machine/{gcc_armv7.h => gcc_arm.h} (94%) create mode 100644 src/tbb/index.html create mode 100644 src/tbb/jni/Android.mk create mode 100644 src/tbb/jni/Application.mk create mode 100644 src/tbb/python/Makefile create mode 100644 src/tbb/python/TBB.py create mode 100644 src/tbb/python/index.html create mode 100644 src/tbb/python/rml/Makefile create mode 100644 src/tbb/python/rml/ipc_server.cpp create mode 100644 src/tbb/python/rml/ipc_utils.cpp create mode 100644 src/tbb/python/rml/ipc_utils.h create mode 100644 src/tbb/python/setup.py create mode 100644 src/tbb/python/tbb/__init__.py create mode 100644 src/tbb/python/tbb/__main__.py create mode 100644 src/tbb/python/tbb/api.i create mode 100644 src/tbb/python/tbb/pool.py create mode 100644 src/tbb/python/tbb/test.py create mode 100644 src/tbb/src/perf/harness_perf.h create mode 100644 src/tbb/src/perf/time_async_return.cpp create mode 100644 src/tbb/src/perf/time_resumable_tasks.cpp create mode 100644 src/tbb/src/tbb/co_context.h create mode 100644 src/tbb/src/tbb/lin32-tbbbind-export.def create mode 100644 src/tbb/src/tbb/lin64-tbbbind-export.def create mode 100644 src/tbb/src/tbb/task_stream_extended.h create mode 100644 src/tbb/src/tbb/tbb_bind.cpp create mode 100644 src/tbb/src/tbb/tbb_environment.h create mode 100644 
src/tbb/src/tbb/win32-tbbbind-export.def create mode 100644 src/tbb/src/tbb/win64-tbbbind-export.def create mode 100644 src/tbb/src/tbbmalloc/Synchronize.h create mode 100644 src/tbb/src/tbbmalloc/backend.h create mode 100644 src/tbb/src/tbbmalloc/large_objects.h create mode 100644 src/tbb/src/test/harness.h create mode 100644 src/tbb/src/test/harness_allocator.h create mode 100644 src/tbb/src/test/harness_allocator_overload.h create mode 100644 src/tbb/src/test/harness_assert.h create mode 100644 src/tbb/src/test/harness_bad_expr.h create mode 100644 src/tbb/src/test/harness_barrier.h create mode 100644 src/tbb/src/test/harness_checktype.h create mode 100644 src/tbb/src/test/harness_concurrency.h create mode 100644 src/tbb/src/test/harness_concurrency_tracker.h create mode 100644 src/tbb/src/test/harness_cpu.h create mode 100644 src/tbb/src/test/harness_defs.h create mode 100644 src/tbb/src/test/harness_dynamic_libs.h create mode 100644 src/tbb/src/test/harness_eh.h create mode 100644 src/tbb/src/test/harness_fp.h create mode 100644 src/tbb/src/test/harness_graph.h create mode 100644 src/tbb/src/test/harness_inject_scheduler.h create mode 100644 src/tbb/src/test/harness_iterator.h create mode 100644 src/tbb/src/test/harness_m128.h create mode 100644 src/tbb/src/test/harness_memory.h create mode 100644 src/tbb/src/test/harness_mic.h create mode 100644 src/tbb/src/test/harness_preload.h create mode 100644 src/tbb/src/test/harness_report.h create mode 100644 src/tbb/src/test/harness_runtime_loader.h create mode 100644 src/tbb/src/test/harness_state_trackable.h create mode 100644 src/tbb/src/test/harness_task.h create mode 100644 src/tbb/src/test/harness_tbb_independence.h create mode 100644 src/tbb/src/test/harness_test_cases_framework.h create mode 100644 src/tbb/src/test/harness_tls.h create mode 100644 src/tbb/src/test/harness_tsx.h create mode 100644 src/tbb/src/test/test_ScalableAllocator.cpp create mode 100644 src/tbb/src/test/test_ScalableAllocator_STL.cpp 
create mode 100644 src/tbb/src/test/test_aggregator.cpp create mode 100644 src/tbb/src/test/test_aligned_space.cpp create mode 100644 src/tbb/src/test/test_allocator.h create mode 100644 src/tbb/src/test/test_allocator_STL.h create mode 100644 src/tbb/src/test/test_arena_constraints_hwloc.cpp create mode 100644 src/tbb/src/test/test_arena_constraints_stubs.cpp create mode 100644 src/tbb/src/test/test_assembly.cpp create mode 100644 src/tbb/src/test/test_async_msg.cpp create mode 100644 src/tbb/src/test/test_async_node.cpp create mode 100644 src/tbb/src/test/test_atomic.cpp create mode 100644 src/tbb/src/test/test_blocked_range.cpp create mode 100644 src/tbb/src/test/test_blocked_range2d.cpp create mode 100644 src/tbb/src/test/test_blocked_range3d.cpp create mode 100644 src/tbb/src/test/test_blocked_rangeNd.cpp create mode 100644 src/tbb/src/test/test_broadcast_node.cpp create mode 100644 src/tbb/src/test/test_buffer_node.cpp create mode 100644 src/tbb/src/test/test_cache_aligned_allocator.cpp create mode 100644 src/tbb/src/test/test_cache_aligned_allocator_STL.cpp create mode 100644 src/tbb/src/test/test_cilk_common.h create mode 100644 src/tbb/src/test/test_cilk_dynamic_load.cpp create mode 100644 src/tbb/src/test/test_cilk_interop.cpp create mode 100644 src/tbb/src/test/test_combinable.cpp create mode 100644 src/tbb/src/test/test_composite_node.cpp create mode 100644 src/tbb/src/test/test_concurrent_associative_common.h create mode 100644 src/tbb/src/test/test_concurrent_hash_map.cpp create mode 100644 src/tbb/src/test/test_concurrent_lru_cache.cpp create mode 100644 src/tbb/src/test/test_concurrent_map.cpp create mode 100644 src/tbb/src/test/test_concurrent_monitor.cpp create mode 100644 src/tbb/src/test/test_concurrent_ordered_common.h create mode 100644 src/tbb/src/test/test_concurrent_priority_queue.cpp create mode 100644 src/tbb/src/test/test_concurrent_queue.cpp create mode 100644 src/tbb/src/test/test_concurrent_queue_whitebox.cpp create mode 100644 
src/tbb/src/test/test_concurrent_set.cpp create mode 100644 src/tbb/src/test/test_concurrent_unordered_common.h create mode 100644 src/tbb/src/test/test_concurrent_unordered_map.cpp create mode 100644 src/tbb/src/test/test_concurrent_unordered_set.cpp create mode 100644 src/tbb/src/test/test_concurrent_vector.cpp create mode 100644 src/tbb/src/test/test_condition_variable.h create mode 100644 src/tbb/src/test/test_container_move_support.h create mode 100644 src/tbb/src/test/test_continue_node.cpp create mode 100644 src/tbb/src/test/test_critical_section.cpp create mode 100644 src/tbb/src/test/test_dynamic_link.cpp create mode 100644 src/tbb/src/test/test_eh_algorithms.cpp create mode 100644 src/tbb/src/test/test_eh_flow_graph.cpp create mode 100644 src/tbb/src/test/test_eh_tasks.cpp create mode 100644 src/tbb/src/test/test_enumerable_thread_specific.cpp create mode 100644 src/tbb/src/test/test_environment_whitebox.cpp create mode 100644 src/tbb/src/test/test_examples_common_utility.cpp create mode 100644 src/tbb/src/test/test_fast_random.cpp create mode 100644 src/tbb/src/test/test_flow_graph.cpp create mode 100644 src/tbb/src/test/test_flow_graph_priorities.cpp create mode 100644 src/tbb/src/test/test_flow_graph_whitebox.cpp create mode 100644 src/tbb/src/test/test_follows_and_precedes_api.h create mode 100644 src/tbb/src/test/test_fp.cpp create mode 100644 src/tbb/src/test/test_function_node.cpp create mode 100644 src/tbb/src/test/test_global_control.cpp create mode 100644 src/tbb/src/test/test_global_control_whitebox.cpp create mode 100644 src/tbb/src/test/test_halt.cpp create mode 100644 src/tbb/src/test/test_handle_perror.cpp create mode 100644 src/tbb/src/test/test_hw_concurrency.cpp create mode 100644 src/tbb/src/test/test_indexer_node.cpp create mode 100644 src/tbb/src/test/test_initializer_list.h create mode 100644 src/tbb/src/test/test_inits_loop.cpp create mode 100644 src/tbb/src/test/test_intrusive_list.cpp create mode 100644 
src/tbb/src/test/test_iterators.cpp create mode 100644 src/tbb/src/test/test_ittnotify.cpp create mode 100644 src/tbb/src/test/test_join_node.cpp create mode 100644 src/tbb/src/test/test_join_node.h create mode 100644 src/tbb/src/test/test_join_node_key_matching.cpp create mode 100644 src/tbb/src/test/test_join_node_msg_key_matching.cpp create mode 100644 src/tbb/src/test/test_lambda.cpp create mode 100644 src/tbb/src/test/test_limiter_node.cpp create mode 100644 src/tbb/src/test/test_malloc_atexit.cpp create mode 100644 src/tbb/src/test/test_malloc_compliance.cpp create mode 100644 src/tbb/src/test/test_malloc_init_shutdown.cpp create mode 100644 src/tbb/src/test/test_malloc_lib_unload.cpp create mode 100644 src/tbb/src/test/test_malloc_new_handler.cpp create mode 100644 src/tbb/src/test/test_malloc_overload.cpp create mode 100644 src/tbb/src/test/test_malloc_overload_disable.cpp create mode 100644 src/tbb/src/test/test_malloc_pools.cpp create mode 100644 src/tbb/src/test/test_malloc_pure_c.c create mode 100644 src/tbb/src/test/test_malloc_regression.cpp create mode 100644 src/tbb/src/test/test_malloc_shutdown_hang.cpp create mode 100644 src/tbb/src/test/test_malloc_used_by_lib.cpp create mode 100644 src/tbb/src/test/test_malloc_whitebox.cpp create mode 100644 src/tbb/src/test/test_model_plugin.cpp create mode 100644 src/tbb/src/test/test_multifunction_node.cpp create mode 100644 src/tbb/src/test/test_mutex.cpp create mode 100644 src/tbb/src/test/test_mutex_native_threads.cpp create mode 100644 src/tbb/src/test/test_opencl_kernel_32.spir create mode 100644 src/tbb/src/test/test_opencl_kernel_64.spir create mode 100644 src/tbb/src/test/test_opencl_node.cl create mode 100644 src/tbb/src/test/test_opencl_node.cpp create mode 100644 src/tbb/src/test/test_opencl_precompiled_kernel_gpu_32.ir create mode 100644 src/tbb/src/test/test_opencl_precompiled_kernel_gpu_64.ir create mode 100644 src/tbb/src/test/test_openmp.cpp create mode 100644 
src/tbb/src/test/test_overwrite_node.cpp create mode 100644 src/tbb/src/test/test_parallel_do.cpp create mode 100644 src/tbb/src/test/test_parallel_for.cpp create mode 100644 src/tbb/src/test/test_parallel_for_each.cpp create mode 100644 src/tbb/src/test/test_parallel_for_vectorization.cpp create mode 100644 src/tbb/src/test/test_parallel_invoke.cpp create mode 100644 src/tbb/src/test/test_parallel_pipeline.cpp create mode 100644 src/tbb/src/test/test_parallel_reduce.cpp create mode 100644 src/tbb/src/test/test_parallel_scan.cpp create mode 100644 src/tbb/src/test/test_parallel_sort.cpp create mode 100644 src/tbb/src/test/test_parallel_while.cpp create mode 100644 src/tbb/src/test/test_partitioner.h create mode 100644 src/tbb/src/test/test_partitioner_whitebox.cpp create mode 100644 src/tbb/src/test/test_partitioner_whitebox.h create mode 100644 src/tbb/src/test/test_pipeline.cpp create mode 100644 src/tbb/src/test/test_pipeline_with_tbf.cpp create mode 100644 src/tbb/src/test/test_priority_queue_node.cpp create mode 100644 src/tbb/src/test/test_queue_node.cpp create mode 100644 src/tbb/src/test/test_range_based_for.h create mode 100644 src/tbb/src/test/test_reader_writer_lock.cpp create mode 100644 src/tbb/src/test/test_resumable_tasks.cpp create mode 100644 src/tbb/src/test/test_runtime_loader.cpp create mode 100644 src/tbb/src/test/test_rwm_upgrade_downgrade.cpp create mode 100644 src/tbb/src/test/test_semaphore.cpp create mode 100644 src/tbb/src/test/test_sequencer_node.cpp create mode 100644 src/tbb/src/test/test_source_node.cpp create mode 100644 src/tbb/src/test/test_split_node.cpp create mode 100644 src/tbb/src/test/test_static_assert.cpp create mode 100644 src/tbb/src/test/test_std_thread.cpp create mode 100644 src/tbb/src/test/test_streaming_node.cpp create mode 100644 src/tbb/src/test/test_tagged_msg.cpp create mode 100644 src/tbb/src/test/test_task.cpp create mode 100644 src/tbb/src/test/test_task_arena.cpp create mode 100644 
src/tbb/src/test/test_task_assertions.cpp create mode 100644 src/tbb/src/test/test_task_auto_init.cpp create mode 100644 src/tbb/src/test/test_task_enqueue.cpp create mode 100644 src/tbb/src/test/test_task_group.cpp create mode 100644 src/tbb/src/test/test_task_leaks.cpp create mode 100644 src/tbb/src/test/test_task_priority.cpp create mode 100644 src/tbb/src/test/test_task_scheduler_init.cpp create mode 100644 src/tbb/src/test/test_task_scheduler_observer.cpp create mode 100644 src/tbb/src/test/test_task_steal_limit.cpp create mode 100644 src/tbb/src/test/test_tbb_condition_variable.cpp create mode 100644 src/tbb/src/test/test_tbb_fork.cpp create mode 100644 src/tbb/src/test/test_tbb_header.cpp create mode 100644 src/tbb/src/test/test_tbb_thread.cpp create mode 100644 src/tbb/src/test/test_tbb_version.cpp create mode 100644 src/tbb/src/test/test_thread.h create mode 100644 src/tbb/src/test/test_tick_count.cpp create mode 100644 src/tbb/src/test/test_tuple.cpp create mode 100644 src/tbb/src/test/test_write_once_node.cpp create mode 100644 src/tbb/src/test/test_yield.cpp create mode 100644 src/tbb/third-party-programs.txt create mode 100644 tools/update-tbb.R diff --git a/inst/include/index.html b/inst/include/index.html index b0962e01..c8698efb 100644 --- a/inst/include/index.html +++ b/inst/include/index.html @@ -15,7 +15,7 @@

Directories


Up to parent directory

-Copyright © 2005-2017 Intel Corporation. All Rights Reserved. +Copyright © 2005-2020 Intel Corporation. All Rights Reserved.

Intel is a registered trademark or trademark of Intel Corporation or its subsidiaries in the United States and other countries. diff --git a/inst/include/serial/tbb/parallel_for.h b/inst/include/serial/tbb/parallel_for.h index e5959c22..0a58453b 100644 --- a/inst/include/serial/tbb/parallel_for.h +++ b/inst/include/serial/tbb/parallel_for.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2018 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,11 +12,18 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +*/ +#include "../../tbb/internal/_deprecated_header_message_guard.h" +#if !defined(__TBB_show_deprecation_message_parallel_for_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_parallel_for_H +#pragma message("TBB Warning: serial/tbb/parallel_for.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif - -*/ +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif #ifndef __TBB_SERIAL_parallel_for_H #define __TBB_SERIAL_parallel_for_H @@ -98,35 +105,35 @@ void start_for< Range, Body, Partitioner >::execute() { //! Parallel iteration over range with default partitioner. /** @ingroup algorithms **/ template -void parallel_for( const Range& range, const Body& body ) { +__TBB_DEPRECATED_VERBOSE void parallel_for( const Range& range, const Body& body ) { serial::interface9::start_for::run(range,body,__TBB_DEFAULT_PARTITIONER()); } //! Parallel iteration over range with simple partitioner. 
/** @ingroup algorithms **/ template -void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner ) { +__TBB_DEPRECATED_VERBOSE void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner ) { serial::interface9::start_for::run(range,body,partitioner); } //! Parallel iteration over range with auto_partitioner. /** @ingroup algorithms **/ template -void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner ) { +__TBB_DEPRECATED_VERBOSE void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner ) { serial::interface9::start_for::run(range,body,partitioner); } //! Parallel iteration over range with static_partitioner. /** @ingroup algorithms **/ template -void parallel_for( const Range& range, const Body& body, const static_partitioner& partitioner ) { +__TBB_DEPRECATED_VERBOSE void parallel_for( const Range& range, const Body& body, const static_partitioner& partitioner ) { serial::interface9::start_for::run(range,body,partitioner); } //! Parallel iteration over range with affinity_partitioner. /** @ingroup algorithms **/ template -void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner ) { +__TBB_DEPRECATED_VERBOSE void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner ) { serial::interface9::start_for::run(range,body,partitioner); } @@ -154,53 +161,53 @@ void parallel_for_impl(Index first, Index last, Index step, const Function& f, P //! Parallel iteration over a range of integers with explicit step and default partitioner template -void parallel_for(Index first, Index last, Index step, const Function& f) { +__TBB_DEPRECATED_VERBOSE void parallel_for(Index first, Index last, Index step, const Function& f) { parallel_for_impl(first, last, step, f, auto_partitioner()); } //! 
Parallel iteration over a range of integers with explicit step and simple partitioner template -void parallel_for(Index first, Index last, Index step, const Function& f, const simple_partitioner& p) { +__TBB_DEPRECATED_VERBOSE void parallel_for(Index first, Index last, Index step, const Function& f, const simple_partitioner& p) { parallel_for_impl(first, last, step, f, p); } //! Parallel iteration over a range of integers with explicit step and auto partitioner template -void parallel_for(Index first, Index last, Index step, const Function& f, const auto_partitioner& p) { +__TBB_DEPRECATED_VERBOSE void parallel_for(Index first, Index last, Index step, const Function& f, const auto_partitioner& p) { parallel_for_impl(first, last, step, f, p); } //! Parallel iteration over a range of integers with explicit step and static partitioner template -void parallel_for(Index first, Index last, Index step, const Function& f, const static_partitioner& p) { +__TBB_DEPRECATED_VERBOSE void parallel_for(Index first, Index last, Index step, const Function& f, const static_partitioner& p) { parallel_for_impl(first, last, step, f, p); } //! Parallel iteration over a range of integers with explicit step and affinity partitioner template -void parallel_for(Index first, Index last, Index step, const Function& f, affinity_partitioner& p) { +__TBB_DEPRECATED_VERBOSE void parallel_for(Index first, Index last, Index step, const Function& f, affinity_partitioner& p) { parallel_for_impl(first, last, step, f, p); } //! Parallel iteration over a range of integers with default step and default partitioner template -void parallel_for(Index first, Index last, const Function& f) { +__TBB_DEPRECATED_VERBOSE void parallel_for(Index first, Index last, const Function& f) { parallel_for_impl(first, last, static_cast(1), f, auto_partitioner()); } //! 
Parallel iteration over a range of integers with default step and simple partitioner template -void parallel_for(Index first, Index last, const Function& f, const simple_partitioner& p) { +__TBB_DEPRECATED_VERBOSE void parallel_for(Index first, Index last, const Function& f, const simple_partitioner& p) { parallel_for_impl(first, last, static_cast(1), f, p); } //! Parallel iteration over a range of integers with default step and auto partitioner template - void parallel_for(Index first, Index last, const Function& f, const auto_partitioner& p) { +__TBB_DEPRECATED_VERBOSE void parallel_for(Index first, Index last, const Function& f, const auto_partitioner& p) { parallel_for_impl(first, last, static_cast(1), f, p); } //! Parallel iteration over a range of integers with default step and static partitioner template -void parallel_for(Index first, Index last, const Function& f, const static_partitioner& p) { +__TBB_DEPRECATED_VERBOSE void parallel_for(Index first, Index last, const Function& f, const static_partitioner& p) { parallel_for_impl(first, last, static_cast(1), f, p); } //! Parallel iteration over a range of integers with default step and affinity_partitioner template -void parallel_for(Index first, Index last, const Function& f, affinity_partitioner& p) { +__TBB_DEPRECATED_VERBOSE void parallel_for(Index first, Index last, const Function& f, affinity_partitioner& p) { parallel_for_impl(first, last, static_cast(1), f, p); } diff --git a/inst/include/serial/tbb/tbb_annotate.h b/inst/include/serial/tbb/tbb_annotate.h index c16defea..3e67e4c4 100644 --- a/inst/include/serial/tbb/tbb_annotate.h +++ b/inst/include/serial/tbb/tbb_annotate.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2018 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_annotate_H diff --git a/inst/include/tbb/aggregator.h b/inst/include/tbb/aggregator.h index 6aecbb74..786c52c8 100644 --- a/inst/include/tbb/aggregator.h +++ b/inst/include/tbb/aggregator.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB__aggregator_H #define __TBB__aggregator_H +#define __TBB_aggregator_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #if !TBB_PREVIEW_AGGREGATOR #error Set TBB_PREVIEW_AGGREGATOR before including aggregator.h #endif @@ -199,4 +198,7 @@ using interface6::aggregator_operation; } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_aggregator_H_include_area + #endif // __TBB__aggregator_H diff --git a/inst/include/tbb/aligned_space.h b/inst/include/tbb/aligned_space.h index 56fd85f3..ad8a3faf 100644 --- a/inst/include/tbb/aligned_space.h +++ b/inst/include/tbb/aligned_space.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,25 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+*/ +#include "internal/_deprecated_header_message_guard.h" +#if !defined(__TBB_show_deprecation_message_aligned_space_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_aligned_space_H +#pragma message("TBB Warning: tbb/aligned_space.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif - -*/ +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif #ifndef __TBB_aligned_space_H #define __TBB_aligned_space_H +#define __TBB_aligned_space_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "tbb_stddef.h" #include "tbb_machine.h" @@ -30,18 +40,21 @@ namespace tbb { /** The elements are not constructed or destroyed by this class. @ingroup memory_allocation */ template -class aligned_space { +class __TBB_DEPRECATED_VERBOSE_MSG("tbb::aligned_space is deprecated, use std::aligned_storage") aligned_space { private: typedef __TBB_TypeWithAlignmentAtLeastAsStrict(T) element_type; element_type array[(sizeof(T)*N+sizeof(element_type)-1)/sizeof(element_type)]; public: //! Pointer to beginning of array - T* begin() {return internal::punned_cast(this);} + T* begin() const {return internal::punned_cast(this);} //! Pointer to one past last element in array. - T* end() {return begin()+N;} + T* end() const {return begin()+N;} }; } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_aligned_space_H_include_area + #endif /* __TBB_aligned_space_H */ diff --git a/inst/include/tbb/atomic.h b/inst/include/tbb/atomic.h index 72ec534e..1557eb95 100644 --- a/inst/include/tbb/atomic.h +++ b/inst/include/tbb/atomic.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,15 +12,25 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +*/ +#include "internal/_deprecated_header_message_guard.h" +#if !defined(__TBB_show_deprecation_message_atomic_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_atomic_H +#pragma message("TBB Warning: tbb/atomic.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif - -*/ +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif #ifndef __TBB_atomic_H #define __TBB_atomic_H +#define __TBB_atomic_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include #if _MSC_VER @@ -404,10 +414,12 @@ struct atomic_impl_with_arithmetic: atomic_impl { /** See the Reference for details. @ingroup synchronization */ template -struct atomic: internal::atomic_impl { +struct __TBB_DEPRECATED_VERBOSE_MSG("tbb::atomic is deprecated, use std::atomic") +atomic: internal::atomic_impl { #if __TBB_ATOMIC_CTORS atomic() = default; constexpr atomic(T arg): internal::atomic_impl(arg) {} + constexpr atomic(const atomic& rhs): internal::atomic_impl(rhs) {} #endif T operator=( T rhs ) { // "this" required here in strict ISO C++ because store_with_release is a dependent name @@ -418,16 +430,20 @@ struct atomic: internal::atomic_impl { #if __TBB_ATOMIC_CTORS #define __TBB_DECL_ATOMIC(T) \ - template<> struct atomic: internal::atomic_impl_with_arithmetic { \ + template<> struct __TBB_DEPRECATED_VERBOSE_MSG("tbb::atomic is deprecated, use std::atomic") \ + atomic: internal::atomic_impl_with_arithmetic { \ atomic() = default; \ constexpr atomic(T arg): internal::atomic_impl_with_arithmetic(arg) {} \ + constexpr atomic(const atomic& rhs): \ + internal::atomic_impl_with_arithmetic(rhs) {} \ \ T operator=( T rhs ) {return store_with_release(rhs);} \ 
atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \ }; #else #define __TBB_DECL_ATOMIC(T) \ - template<> struct atomic: internal::atomic_impl_with_arithmetic { \ + template<> struct __TBB_DEPRECATED_VERBOSE_MSG("tbb::atomic is deprecated, use std::atomic") \ + atomic: internal::atomic_impl_with_arithmetic { \ T operator=( T rhs ) {return store_with_release(rhs);} \ atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \ }; @@ -451,15 +467,20 @@ __TBB_DECL_ATOMIC(unsigned long) type synonyms on the platform. Type U should be the wider variant of T from the perspective of /Wp64. */ #define __TBB_DECL_ATOMIC_ALT(T,U) \ - template<> struct atomic: internal::atomic_impl_with_arithmetic { \ + template<> struct __TBB_DEPRECATED_VERBOSE_MSG("tbb::atomic is deprecated, use std::atomic") \ + atomic: internal::atomic_impl_with_arithmetic { \ atomic() = default ; \ constexpr atomic(T arg): internal::atomic_impl_with_arithmetic(arg) {} \ + constexpr atomic(const atomic& rhs): \ + internal::atomic_impl_with_arithmetic(rhs) {} \ + \ T operator=( U rhs ) {return store_with_release(T(rhs));} \ atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \ }; #else #define __TBB_DECL_ATOMIC_ALT(T,U) \ - template<> struct atomic: internal::atomic_impl_with_arithmetic { \ + template<> struct __TBB_DEPRECATED_VERBOSE_MSG("tbb::atomic is deprecated, use std::atomic") \ + atomic: internal::atomic_impl_with_arithmetic { \ T operator=( U rhs ) {return store_with_release(T(rhs));} \ atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \ }; @@ -482,10 +503,12 @@ __TBB_DECL_ATOMIC(wchar_t) #endif /* _MSC_VER||!defined(_NATIVE_WCHAR_T_DEFINED) */ //! Specialization for atomic with arithmetic and operator->. 
-template struct atomic: internal::atomic_impl_with_arithmetic { +template struct __TBB_DEPRECATED_VERBOSE_MSG("tbb::atomic is deprecated, use std::atomic") +atomic: internal::atomic_impl_with_arithmetic { #if __TBB_ATOMIC_CTORS atomic() = default ; constexpr atomic(T* arg): internal::atomic_impl_with_arithmetic(arg) {} + constexpr atomic(const atomic& rhs): internal::atomic_impl_with_arithmetic(rhs) {} #endif T* operator=( T* rhs ) { // "this" required here in strict ISO C++ because store_with_release is a dependent name @@ -500,10 +523,12 @@ template struct atomic: internal::atomic_impl_with_arithmetic, for sake of not allowing arithmetic or operator->. -template<> struct atomic: internal::atomic_impl { +template<> struct __TBB_DEPRECATED_VERBOSE_MSG("tbb::atomic is deprecated, use std::atomic") +atomic: internal::atomic_impl { #if __TBB_ATOMIC_CTORS atomic() = default ; constexpr atomic(void* arg): internal::atomic_impl(arg) {} + constexpr atomic(const atomic& rhs): internal::atomic_impl(rhs) {} #endif void* operator=( void* rhs ) { // "this" required here in strict ISO C++ because store_with_release is a dependent name @@ -555,4 +580,7 @@ inline atomic& as_atomic( T& t ) { #pragma warning (pop) #endif // warnings are restored +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_atomic_H_include_area + #endif /* __TBB_atomic_H */ diff --git a/inst/include/tbb/blocked_range.h b/inst/include/tbb/blocked_range.h index 9f24cd2b..b77e7e0a 100644 --- a/inst/include/tbb/blocked_range.h +++ b/inst/include/tbb/blocked_range.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_blocked_range_H @@ -25,6 +21,15 @@ namespace tbb { +namespace internal { + +// blocked_rangeNd_impl forward declaration in tbb::internal namespace to +// name it as a friend for a tbb::blocked_range. +template +class blocked_rangeNd_impl; + +} // namespace internal + /** \page range_req Requirements on range concept Class \c R implementing the concept of range must define: - \code R::R( const R& ); \endcode Copy constructor @@ -47,9 +52,11 @@ class blocked_range { //! Type for size of a range typedef std::size_t size_type; - //! Construct range with default-constructed values for begin and end. +#if __TBB_DEPRECATED_BLOCKED_RANGE_DEFAULT_CTOR + //! Construct range with default-constructed values for begin, end, and grainsize. /** Requires that Value have a default constructor. */ - blocked_range() : my_end(), my_begin() {} + blocked_range() : my_end(), my_begin(), my_grainsize() {} +#endif //! Construct range over half-open interval [begin,end), with the given grainsize. blocked_range( Value begin_, Value end_, size_type grainsize_=1 ) : @@ -115,13 +122,12 @@ class blocked_range { #endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */ private: - /** NOTE: my_end MUST be declared before my_begin, otherwise the forking constructor will break. */ + /** NOTE: my_end MUST be declared before my_begin, otherwise the splitting constructor will break. */ Value my_end; Value my_begin; size_type my_grainsize; - //! Auxiliary function used by forking constructor. - /** Using this function lets us not require that Value support assignment or default construction. */ + //! Auxiliary function used by the splitting constructor. 
static Value do_split( blocked_range& r, split ) { __TBB_ASSERT( r.is_divisible(), "cannot split blocked_range that is not divisible" ); @@ -152,6 +158,9 @@ class blocked_range { template friend class blocked_range3d; + + template + friend class internal::blocked_rangeNd_impl; }; } // namespace tbb diff --git a/inst/include/tbb/blocked_range2d.h b/inst/include/tbb/blocked_range2d.h index f1b9f35d..2498e046 100644 --- a/inst/include/tbb/blocked_range2d.h +++ b/inst/include/tbb/blocked_range2d.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_blocked_range2d_H @@ -45,19 +41,17 @@ class blocked_range2d { ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize ) : my_rows(row_begin,row_end,row_grainsize), my_cols(col_begin,col_end,col_grainsize) - { - } + {} blocked_range2d( RowValue row_begin, RowValue row_end, ColValue col_begin, ColValue col_end ) : my_rows(row_begin,row_end), my_cols(col_begin,col_end) - { - } + {} //! True if range is empty bool empty() const { - // Yes, it is a logical OR here, not AND. + // Range is empty if at least one dimension is empty. return my_rows.empty() || my_cols.empty(); } @@ -86,6 +80,14 @@ class blocked_range2d { } #endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */ + //! The rows of the iteration space + const row_range_type& rows() const {return my_rows;} + + //! 
The columns of the iteration space + const col_range_type& cols() const {return my_cols;} + +private: + template void do_split( blocked_range2d& r, Split& split_obj ) { @@ -95,12 +97,6 @@ class blocked_range2d { my_rows.my_begin = row_range_type::do_split(r.my_rows, split_obj); } } - - //! The rows of the iteration space - const row_range_type& rows() const {return my_rows;} - - //! The columns of the iteration space - const col_range_type& cols() const {return my_cols;} }; } // namespace tbb diff --git a/inst/include/tbb/blocked_range3d.h b/inst/include/tbb/blocked_range3d.h index c62565ee..15f93130 100644 --- a/inst/include/tbb/blocked_range3d.h +++ b/inst/include/tbb/blocked_range3d.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_blocked_range3d_H @@ -49,8 +45,7 @@ class blocked_range3d { my_pages(page_begin,page_end), my_rows(row_begin,row_end), my_cols(col_begin,col_end) - { - } + {} blocked_range3d( PageValue page_begin, PageValue page_end, typename page_range_type::size_type page_grainsize, RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize, @@ -58,12 +53,11 @@ class blocked_range3d { my_pages(page_begin,page_end,page_grainsize), my_rows(row_begin,row_end,row_grainsize), my_cols(col_begin,col_end,col_grainsize) - { - } + {} //! True if range is empty bool empty() const { - // Yes, it is a logical OR here, not AND. + // Range is empty if at least one dimension is empty. 
return my_pages.empty() || my_rows.empty() || my_cols.empty(); } @@ -94,6 +88,17 @@ class blocked_range3d { } #endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */ + //! The pages of the iteration space + const page_range_type& pages() const {return my_pages;} + + //! The rows of the iteration space + const row_range_type& rows() const {return my_rows;} + + //! The columns of the iteration space + const col_range_type& cols() const {return my_cols;} + +private: + template void do_split( blocked_range3d& r, Split& split_obj) { @@ -103,7 +108,7 @@ class blocked_range3d { } else { my_rows.my_begin = row_range_type::do_split(r.my_rows, split_obj); } - } else { + } else { if ( my_pages.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_pages.grainsize()) ) { my_cols.my_begin = col_range_type::do_split(r.my_cols, split_obj); } else { @@ -111,16 +116,6 @@ class blocked_range3d { } } } - - //! The pages of the iteration space - const page_range_type& pages() const {return my_pages;} - - //! The rows of the iteration space - const row_range_type& rows() const {return my_rows;} - - //! The columns of the iteration space - const col_range_type& cols() const {return my_cols;} - }; } // namespace tbb diff --git a/inst/include/tbb/blocked_rangeNd.h b/inst/include/tbb/blocked_rangeNd.h new file mode 100644 index 00000000..922c77c6 --- /dev/null +++ b/inst/include/tbb/blocked_rangeNd.h @@ -0,0 +1,150 @@ +/* + Copyright (c) 2017-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_blocked_rangeNd_H +#define __TBB_blocked_rangeNd_H + +#if ! TBB_PREVIEW_BLOCKED_RANGE_ND + #error Set TBB_PREVIEW_BLOCKED_RANGE_ND to include blocked_rangeNd.h +#endif + +#include "tbb_config.h" + +// tbb::blocked_rangeNd requires C++11 support +#if __TBB_CPP11_PRESENT && __TBB_CPP11_ARRAY_PRESENT && __TBB_CPP11_TEMPLATE_ALIASES_PRESENT + +#include "internal/_template_helpers.h" // index_sequence, make_index_sequence + +#include +#include // std::any_of +#include // std::is_same, std::enable_if + +#include "tbb/blocked_range.h" + +namespace tbb { +namespace internal { + +/* + The blocked_rangeNd_impl uses make_index_sequence to automatically generate a ctor with + exactly N arguments of the type tbb::blocked_range. Such ctor provides an opportunity + to use braced-init-list parameters to initialize each dimension. + Use of parameters, whose representation is a braced-init-list, but they're not + std::initializer_list or a reference to one, produces a non-deduced context + within template argument deduction. + + NOTE: blocked_rangeNd must be exactly a templated alias to the blocked_rangeNd_impl + (and not e.g. a derived class), otherwise it would need to declare its own ctor + facing the same problem that the impl class solves. +*/ + +template> +class blocked_rangeNd_impl; + +template +class blocked_rangeNd_impl> { +public: + //! Type of a value. + using value_type = Value; + +private: + + //! Helper type to construct range with N tbb::blocked_range objects. + template + using dim_type_helper = tbb::blocked_range; + +public: + blocked_rangeNd_impl() = delete; + + //! Constructs N-dimensional range over N half-open intervals each represented as tbb::blocked_range. + blocked_rangeNd_impl(const dim_type_helper&... args) : my_dims{ {args...} } {} + + //! Dimensionality of a range. + static constexpr unsigned int ndims() { return N; } + + //! 
Range in certain dimension. + const tbb::blocked_range& dim(unsigned int dimension) const { + __TBB_ASSERT(dimension < N, "out of bound"); + return my_dims[dimension]; + } + + //------------------------------------------------------------------------ + // Methods that implement Range concept + //------------------------------------------------------------------------ + + //! True if at least one dimension is empty. + bool empty() const { + return std::any_of(my_dims.begin(), my_dims.end(), [](const tbb::blocked_range& d) { + return d.empty(); + }); + } + + //! True if at least one dimension is divisible. + bool is_divisible() const { + return std::any_of(my_dims.begin(), my_dims.end(), [](const tbb::blocked_range& d) { + return d.is_divisible(); + }); + } + +#if __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES + //! Static field to support proportional split. + static const bool is_splittable_in_proportion = true; + + blocked_rangeNd_impl(blocked_rangeNd_impl& r, proportional_split proportion) : my_dims(r.my_dims) { + do_split(r, proportion); + } +#endif + + blocked_rangeNd_impl(blocked_rangeNd_impl& r, split proportion) : my_dims(r.my_dims) { + do_split(r, proportion); + } + +private: + __TBB_STATIC_ASSERT(N != 0, "zero dimensional blocked_rangeNd can't be constructed"); + + //! Ranges in each dimension. 
+ std::array, N> my_dims; + + template + void do_split(blocked_rangeNd_impl& r, split_type proportion) { + __TBB_STATIC_ASSERT((is_same_type::value + || is_same_type::value), + "type of split object is incorrect"); + __TBB_ASSERT(r.is_divisible(), "can't split not divisible range"); + + auto my_it = std::max_element(my_dims.begin(), my_dims.end(), [](const tbb::blocked_range& first, const tbb::blocked_range& second) { + return (first.size() * second.grainsize() < second.size() * first.grainsize()); + }); + + auto r_it = r.my_dims.begin() + (my_it - my_dims.begin()); + + my_it->my_begin = tbb::blocked_range::do_split(*r_it, proportion); + + // (!(my_it->my_begin < r_it->my_end) && !(r_it->my_end < my_it->my_begin)) equals to + // (my_it->my_begin == r_it->my_end), but we can't use operator== due to Value concept + __TBB_ASSERT(!(my_it->my_begin < r_it->my_end) && !(r_it->my_end < my_it->my_begin), + "blocked_range has been split incorrectly"); + } +}; + +} // namespace internal + +template +using blocked_rangeNd = internal::blocked_rangeNd_impl; + +} // namespace tbb + +#endif /* __TBB_CPP11_PRESENT && __TBB_CPP11_ARRAY_PRESENT && __TBB_CPP11_TEMPLATE_ALIASES_PRESENT */ +#endif /* __TBB_blocked_rangeNd_H */ diff --git a/inst/include/tbb/cache_aligned_allocator.h b/inst/include/tbb/cache_aligned_allocator.h index d435e785..5b4897c4 100644 --- a/inst/include/tbb/cache_aligned_allocator.h +++ b/inst/include/tbb/cache_aligned_allocator.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_cache_aligned_allocator_H @@ -24,7 +20,11 @@ #include #include "tbb_stddef.h" #if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - #include // std::forward +#include // std::forward +#endif + +#if __TBB_CPP17_MEMORY_RESOURCE_PRESENT +#include #endif namespace tbb { @@ -69,7 +69,6 @@ class cache_aligned_allocator { template struct rebind { typedef cache_aligned_allocator other; }; - cache_aligned_allocator() throw() {} cache_aligned_allocator( const cache_aligned_allocator& ) throw() {} template cache_aligned_allocator(const cache_aligned_allocator&) throw() {} @@ -132,6 +131,79 @@ inline bool operator==( const cache_aligned_allocator&, const cache_aligned_a template inline bool operator!=( const cache_aligned_allocator&, const cache_aligned_allocator& ) {return false;} +#if __TBB_CPP17_MEMORY_RESOURCE_PRESENT + +//! C++17 memory resource wrapper to ensure cache line size alignment +class cache_aligned_resource : public std::pmr::memory_resource { +public: + cache_aligned_resource() : cache_aligned_resource(std::pmr::get_default_resource()) {} + explicit cache_aligned_resource(std::pmr::memory_resource* upstream) : m_upstream(upstream) {} + + std::pmr::memory_resource* upstream_resource() const { + return m_upstream; + } + +private: + //! We don't know what memory resource set. 
Use padding to guarantee alignment + void* do_allocate(size_t bytes, size_t alignment) override { + size_t cache_line_alignment = correct_alignment(alignment); + uintptr_t base = (uintptr_t)m_upstream->allocate(correct_size(bytes) + cache_line_alignment); + __TBB_ASSERT(base != 0, "Upstream resource returned NULL."); +#if _MSC_VER && !defined(__INTEL_COMPILER) + // unary minus operator applied to unsigned type, result still unsigned + #pragma warning(push) + #pragma warning(disable: 4146 4706) +#endif + // Round up to the next cache line (align the base address) + uintptr_t result = (base + cache_line_alignment) & -cache_line_alignment; +#if _MSC_VER && !defined(__INTEL_COMPILER) + #pragma warning(pop) +#endif + // Record where block actually starts. + ((uintptr_t*)result)[-1] = base; + return (void*)result; + } + + void do_deallocate(void* ptr, size_t bytes, size_t alignment) override { + if (ptr) { + // Recover where block actually starts + uintptr_t base = ((uintptr_t*)ptr)[-1]; + m_upstream->deallocate((void*)base, correct_size(bytes) + correct_alignment(alignment)); + } + } + + bool do_is_equal(const std::pmr::memory_resource& other) const noexcept override { + if (this == &other) { return true; } +#if __TBB_USE_OPTIONAL_RTTI + const cache_aligned_resource* other_res = dynamic_cast(&other); + return other_res && (this->upstream_resource() == other_res->upstream_resource()); +#else + return false; +#endif + } + + size_t correct_alignment(size_t alignment) { + __TBB_ASSERT(tbb::internal::is_power_of_two(alignment), "Alignment is not a power of 2"); +#if __TBB_CPP17_HW_INTERFERENCE_SIZE_PRESENT + size_t cache_line_size = std::hardware_destructive_interference_size; +#else + size_t cache_line_size = internal::NFS_GetLineSize(); +#endif + return alignment < cache_line_size ? cache_line_size : alignment; + } + + size_t correct_size(size_t bytes) { + // To handle the case, when small size requested. There could be not + // enough space to store the original pointer. 
+ return bytes < sizeof(uintptr_t) ? sizeof(uintptr_t) : bytes; + } + + std::pmr::memory_resource* m_upstream; +}; + +#endif /* __TBB_CPP17_MEMORY_RESOURCE_PRESENT */ + } // namespace tbb #endif /* __TBB_cache_aligned_allocator_H */ + diff --git a/inst/include/tbb/combinable.h b/inst/include/tbb/combinable.h index 0063dbb4..aa8d24b1 100644 --- a/inst/include/tbb/combinable.h +++ b/inst/include/tbb/combinable.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_combinable_H #define __TBB_combinable_H +#define __TBB_combinable_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "enumerable_thread_specific.h" #include "cache_aligned_allocator.h" @@ -82,4 +81,8 @@ namespace tbb { }; } // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_combinable_H_include_area + #endif /* __TBB_combinable_H */ diff --git a/inst/include/tbb/compat/condition_variable b/inst/include/tbb/compat/condition_variable index 43edfc03..e9545b8b 100644 --- a/inst/include/tbb/compat/condition_variable +++ b/inst/include/tbb/compat/condition_variable @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,21 +12,31 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+*/ +#include "../internal/_deprecated_header_message_guard.h" +#if !defined(__TBB_show_deprecation_message_condition_variable_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_condition_variable_H +#pragma message("TBB Warning: tbb/compat/condition_variable is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif - -*/ +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif #ifndef __TBB_condition_variable_H #define __TBB_condition_variable_H +#define __TBB_condition_variable_H_include_area +#include "../internal/_warning_suppress_enable_notice.h" + #if _WIN32||_WIN64 #include "../machine/windows_api.h" -namespace tbb { +namespace tbb { namespace interface5 { -namespace internal { +namespace internal { struct condition_variable_using_event { //! Event for blocking waiting threads. @@ -69,17 +79,17 @@ namespace interface5 { // C++0x standard working draft 30.4.3 // Lock tag types -struct defer_lock_t { }; //! do not acquire ownership of the mutex -struct try_to_lock_t { }; //! try to acquire ownership of the mutex without blocking -struct adopt_lock_t { }; //! assume the calling thread has already -const defer_lock_t defer_lock = {}; -const try_to_lock_t try_to_lock = {}; -const adopt_lock_t adopt_lock = {}; +struct __TBB_DEPRECATED_VERBOSE defer_lock_t { }; //! do not acquire ownership of the mutex +struct __TBB_DEPRECATED_VERBOSE try_to_lock_t { }; //! try to acquire ownership of the mutex without blocking +struct __TBB_DEPRECATED_VERBOSE adopt_lock_t { }; //! assume the calling thread has already +__TBB_DEPRECATED_VERBOSE const defer_lock_t defer_lock = {}; +__TBB_DEPRECATED_VERBOSE const try_to_lock_t try_to_lock = {}; +__TBB_DEPRECATED_VERBOSE const adopt_lock_t adopt_lock = {}; // C++0x standard working draft 30.4.3.1 -//! lock_guard +//! 
lock_guard template -class lock_guard : tbb::internal::no_copy { +class __TBB_DEPRECATED_VERBOSE lock_guard : tbb::internal::no_copy { public: //! mutex type typedef M mutex_type; @@ -88,7 +98,7 @@ public: /** precondition: If mutex_type is not a recursive mutex, the calling thread does not own the mutex m. */ explicit lock_guard(mutex_type& m) : pm(m) {m.lock();} - + //! Adopt_lock constructor /** precondition: the calling thread owns the mutex m. */ lock_guard(mutex_type& m, adopt_lock_t) : pm(m) {} @@ -100,9 +110,9 @@ private: }; // C++0x standard working draft 30.4.3.2 -//! unique_lock +//! unique_lock template -class unique_lock : tbb::internal::no_copy { +class __TBB_DEPRECATED_VERBOSE unique_lock : tbb::internal::no_copy { friend class condition_variable; public: typedef M mutex_type; @@ -136,7 +146,7 @@ public: unique_lock(mutex_type& m, adopt_lock_t) : pm(&m), owns(true) {} //! Timed unique_lock acquisition. - /** To avoid requiring support for namespace chrono, this method deviates from the working draft in that + /** To avoid requiring support for namespace chrono, this method deviates from the working draft in that it uses tbb::tick_count::interval_t to specify the time duration. */ unique_lock(mutex_type& m, const tick_count::interval_t &i) : pm(&m) {owns = try_lock_for( i );} @@ -169,14 +179,14 @@ public: if( !owns ) { pm->lock(); owns = true; - } else + } else throw_exception_v4( tbb::internal::eid_possible_deadlock ); - } else + } else throw_exception_v4( tbb::internal::eid_operation_not_permitted ); __TBB_ASSERT( owns, NULL ); } - //! Try to lock the mutex. + //! Try to lock the mutex. /** If successful, note that this lock owns it. Otherwise, set it false. */ bool try_lock() { if( pm ) { @@ -184,17 +194,17 @@ public: owns = pm->try_lock(); else throw_exception_v4( tbb::internal::eid_possible_deadlock ); - } else + } else throw_exception_v4( tbb::internal::eid_operation_not_permitted ); return owns; } - - //! Try to lock the mutex. + + //! 
Try to lock the mutex. bool try_lock_for( const tick_count::interval_t &i ); //! Unlock the mutex /** And note that this lock no longer owns it. */ - void unlock() { + void unlock() { if( owns ) { pm->unlock(); owns = false; @@ -212,10 +222,10 @@ public: //! Release control over the mutex. mutex_type* release() { - mutex_type* o_pm = pm; - pm = NULL; - owns = false; - return o_pm; + mutex_type* o_pm = pm; + pm = NULL; + owns = false; + return o_pm; } // 30.4.3.2.4 observers @@ -235,12 +245,12 @@ private: }; template -bool unique_lock::try_lock_for( const tick_count::interval_t &i) -{ +__TBB_DEPRECATED_VERBOSE bool unique_lock::try_lock_for( const tick_count::interval_t &i) +{ const int unique_lock_tick = 100; /* microseconds; 0.1 milliseconds */ // the smallest wait-time is 0.1 milliseconds. bool res = pm->try_lock(); - int duration_in_micro; + int duration_in_micro; if( !res && (duration_in_micro=int(i.seconds()*1e6))>unique_lock_tick ) { tick_count::interval_t i_100( double(unique_lock_tick)/1e6 /* seconds */); // 100 microseconds = 0.1*10E-3 do { @@ -252,7 +262,7 @@ bool unique_lock::try_lock_for( const tick_count::interval_t &i) return (owns=res); } -//! Swap the two unique locks that have the mutexes of same type +//! Swap the two unique locks that have the mutexes of same type template void swap(unique_lock& x, unique_lock& y) { x.swap( y ); } @@ -277,24 +287,24 @@ typedef pthread_cond_t condvar_impl_t; //! cv_status /** C++0x standard working draft 30.5 */ -enum cv_status { no_timeout, timeout }; +enum cv_status { no_timeout, timeout }; //! condition variable -/** C++0x standard working draft 30.5.1 +/** C++0x standard working draft 30.5.1 @ingroup synchronization */ -class condition_variable : tbb::internal::no_copy { +class __TBB_DEPRECATED_VERBOSE condition_variable : tbb::internal::no_copy { public: //! 
Constructor - condition_variable() { + condition_variable() { #if _WIN32||_WIN64 - internal_initialize_condition_variable( my_cv ); + internal_initialize_condition_variable( my_cv ); #else pthread_cond_init( &my_cv, NULL ); #endif } //! Destructor - ~condition_variable() { + ~condition_variable() { //precondition: There shall be no thread blocked on *this. #if _WIN32||_WIN64 internal_destroy_condition_variable( my_cv ); @@ -304,18 +314,18 @@ public: } //! Notify one thread and wake it up - void notify_one() { + void notify_one() { #if _WIN32||_WIN64 - internal_condition_variable_notify_one( my_cv ); + internal_condition_variable_notify_one( my_cv ); #else pthread_cond_signal( &my_cv ); #endif } - //! Notify all threads - void notify_all() { + //! Notify all threads + void notify_all() { #if _WIN32||_WIN64 - internal_condition_variable_notify_all( my_cv ); + internal_condition_variable_notify_all( my_cv ); #else pthread_cond_broadcast( &my_cv ); #endif @@ -449,7 +459,7 @@ inline cv_status condition_variable::wait_for( unique_lock& lock, const t __TBB_DEFINE_PROFILING_SET_NAME(interface5::condition_variable) -} // namespace tbb +} // namespace tbb #if TBB_IMPLEMENT_CPP0X @@ -469,8 +479,11 @@ using tbb::interface5::cv_status; using tbb::interface5::timeout; using tbb::interface5::no_timeout; -} // namespace std +} // namespace std #endif /* TBB_IMPLEMENT_CPP0X */ +#include "../internal/_warning_suppress_disable_notice.h" +#undef __TBB_condition_variable_H_include_area + #endif /* __TBB_condition_variable_H */ diff --git a/inst/include/tbb/compat/ppl.h b/inst/include/tbb/compat/ppl.h index 840dfb22..f441b038 100644 --- a/inst/include/tbb/compat/ppl.h +++ b/inst/include/tbb/compat/ppl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,15 +12,25 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +*/ +#include "../internal/_deprecated_header_message_guard.h" +#if !defined(__TBB_show_deprecation_message_ppl_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_ppl_H +#pragma message("TBB Warning: tbb/compat/ppl.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif - -*/ +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif #ifndef __TBB_compat_ppl_H #define __TBB_compat_ppl_H +#define __TBB_ppl_H_include_area +#include "../internal/_warning_suppress_enable_notice.h" + #include "../task_group.h" #include "../parallel_invoke.h" #include "../parallel_for_each.h" @@ -59,4 +69,7 @@ namespace Concurrency { } // namespace Concurrency +#include "../internal/_warning_suppress_disable_notice.h" +#undef __TBB_ppl_H_include_area + #endif /* __TBB_compat_ppl_H */ diff --git a/inst/include/tbb/compat/thread b/inst/include/tbb/compat/thread index 0edd9289..9e5e09d9 100644 --- a/inst/include/tbb/compat/thread +++ b/inst/include/tbb/compat/thread @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,25 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +*/ +#include "../internal/_deprecated_header_message_guard.h" +#if !defined(__TBB_show_deprecation_message_thread_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_thread_H +#pragma message("TBB Warning: tbb/compat/thread is deprecated. 
For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif - -*/ +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif #ifndef __TBB_thread_H #define __TBB_thread_H +#define __TBB_thread_H_include_area +#include "../internal/_warning_suppress_enable_notice.h" + #include "../tbb_config.h" #if TBB_IMPLEMENT_CPP0X @@ -35,7 +45,7 @@ namespace this_thread { using tbb::this_tbb_thread::get_id; using tbb::this_tbb_thread::yield; - inline void sleep_for(const tbb::tick_count::interval_t& rel_time) { + __TBB_DEPRECATED_VERBOSE inline void sleep_for(const tbb::tick_count::interval_t& rel_time) { tbb::internal::thread_sleep_v3( rel_time ); } } @@ -50,6 +60,9 @@ namespace this_thread { #endif /* TBB_IMPLEMENT_CPP0X */ +#include "../internal/_warning_suppress_disable_notice.h" +#undef __TBB_thread_H_include_area + #else /* __TBB_thread_H */ #if __TBB_COMPAT_THREAD_RECURSION_PROTECTOR diff --git a/inst/include/tbb/compat/tuple b/inst/include/tbb/compat/tuple index 5767c49e..59fb7ac0 100644 --- a/inst/include/tbb/compat/tuple +++ b/inst/include/tbb/compat/tuple @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,25 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +*/ +#include "../internal/_deprecated_header_message_guard.h" +#if !defined(__TBB_show_deprecation_message_tuple_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_tuple_H +#pragma message("TBB Warning: tbb/compat/tuple is deprecated. 
For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif - -*/ +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif #ifndef __TBB_tuple_H #define __TBB_tuple_H +#define __TBB_tuple_H_include_area +#include "../internal/_warning_suppress_enable_notice.h" + #include #include "../tbb_stddef.h" @@ -214,7 +224,7 @@ struct cons{ typedef __HT head_type; typedef __TT tail_type; - head_type head; + head_type head; tail_type tail; static const int length = 1 + tail_type::length; @@ -276,12 +286,12 @@ struct cons{ template -struct cons<__HT,null_type> { +struct cons<__HT,null_type> { typedef __HT head_type; typedef null_type tail_type; - head_type head; + head_type head; static const int length = 1; @@ -350,7 +360,7 @@ inline const __T wrap_dcons(__T*) { return __T(); } // tuple definition template -class tuple : public internal::tuple_traits<__T0, __T1, __T2, __T3, __T4 __TBB_T_PACK >::U { +class __TBB_DEPRECATED_VERBOSE tuple : public internal::tuple_traits<__T0, __T1, __T2, __T3, __T4 __TBB_T_PACK >::U { // friends template friend class tuple_size; template friend struct tuple_element; @@ -366,7 +376,7 @@ class tuple : public internal::tuple_traits<__T0, __T1, __T2, __T3, __T4 __TBB_T typedef typename internal::tuple_traits<__T0,__T1,__T2,__T3, __T4 __TBB_T_PACK >::U my_cons; public: - tuple(const __T0& t0=internal::wrap_dcons((__T0*)NULL) + __TBB_DEPRECATED_VERBOSE tuple(const __T0& t0=internal::wrap_dcons((__T0*)NULL) ,const __T1& t1=internal::wrap_dcons((__T1*)NULL) ,const __T2& t2=internal::wrap_dcons((__T2*)NULL) ,const __T3& t3=internal::wrap_dcons((__T3*)NULL) @@ -425,7 +435,7 @@ public: // empty tuple template<> -class tuple : public null_type { +class __TBB_DEPRECATED_VERBOSE tuple : public null_type { }; // helper classes @@ -437,7 +447,7 @@ public: }; template <> -class tuple_size > { +class tuple_size > { public: static const size_t value = 0; }; @@ -484,5 +494,8 @@ namespace 
tbb { #undef __TBB_CONST_REF_T_PARAM_PACK #undef __TBB_T_PARAM_LIST_PACK #undef __TBB_CONST_NULL_REF_PACK - + +#include "../internal/_warning_suppress_disable_notice.h" +#undef __TBB_tuple_H_include_area + #endif /* __TBB_tuple_H */ diff --git a/inst/include/tbb/concurrent_hash_map.h b/inst/include/tbb/concurrent_hash_map.h index 8497c838..80bad97b 100644 --- a/inst/include/tbb/concurrent_hash_map.h +++ b/inst/include/tbb/concurrent_hash_map.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,39 +12,29 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_concurrent_hash_map_H #define __TBB_concurrent_hash_map_H -#include "tbb_stddef.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif +#define __TBB_concurrent_hash_map_H_include_area +#include "internal/_warning_suppress_enable_notice.h" +#include "tbb_stddef.h" #include #include // Need std::pair #include // Need std::memset #include __TBB_STD_SWAP_HEADER -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "cache_aligned_allocator.h" #include "tbb_allocator.h" #include "spin_rw_mutex.h" #include "atomic.h" #include "tbb_exception.h" #include "tbb_profiling.h" +#include "aligned_space.h" #include "internal/_tbb_hash_compare_impl.h" +#include "internal/_template_helpers.h" +#include "internal/_allocator_traits.h" #if __TBB_INITIALIZER_LISTS_PRESENT #include #endif @@ -54,12 +44,17 @@ #if __TBB_STATISTICS #include #endif +#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT 
&& __TBB_CPP11_TUPLE_PRESENT +// Definition of __TBB_CPP11_RVALUE_REF_PRESENT includes __TBB_CPP11_TUPLE_PRESENT +// for most of platforms, tuple present macro was added for logical correctness +#include +#endif namespace tbb { namespace interface5 { - template, typename A = tbb_allocator > > + template, typename A = tbb_allocator > > class concurrent_hash_map; //! @cond INTERNAL @@ -130,9 +125,10 @@ namespace interface5 { #endif //! Constructor hash_map_base() { - std::memset( (void*) this, 0, pointers_per_table*sizeof(segment_ptr_t) // 32*4=128 or 64*8=512 - + sizeof(my_size) + sizeof(my_mask) // 4+4 or 8+8 - + embedded_buckets*sizeof(bucket) ); // n*8 or n*16 + std::memset(my_table, 0, sizeof(my_table)); + my_mask = 0; + my_size = 0; + std::memset(my_embedded_segment, 0, sizeof(my_embedded_segment)); for( size_type i = 0; i < embedded_block; i++ ) // fill the table my_table[i] = my_embedded_segment + segment_base(i); my_mask = embedded_buckets - 1; @@ -166,7 +162,7 @@ namespace interface5 { //! Initialize buckets static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) { - if( is_initial ) std::memset((void*) ptr, 0, sz*sizeof(bucket) ); + if( is_initial ) std::memset( static_cast(ptr), 0, sz*sizeof(bucket) ); else for(size_type i = 0; i < sz; i++, ptr++) { *reinterpret_cast(&ptr->mutex) = 0; ptr->node_list = rehash_req; @@ -190,22 +186,25 @@ namespace interface5 { }; //! 
Enable segment - void enable_segment( segment_index_t k, bool is_initial = false ) { + template + void enable_segment( segment_index_t k, const Allocator& allocator, bool is_initial = false ) { + typedef typename tbb::internal::allocator_rebind::type bucket_allocator_type; + typedef tbb::internal::allocator_traits bucket_allocator_traits; + bucket_allocator_type bucket_allocator(allocator); __TBB_ASSERT( k, "Zero segment must be embedded" ); enable_segment_failsafe watchdog( my_table, k ); - cache_aligned_allocator alloc; size_type sz; __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment"); if( k >= first_block ) { sz = segment_size( k ); - segment_ptr_t ptr = alloc.allocate( sz ); + segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz); init_buckets( ptr, sz, is_initial ); itt_hide_store_word( my_table[k], ptr ); sz <<= 1;// double it to get entire capacity of the container } else { // the first block __TBB_ASSERT( k == embedded_block, "Wrong segment index" ); sz = segment_size( first_block ); - segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets ); + segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz - embedded_buckets); init_buckets( ptr, sz - embedded_buckets, is_initial ); ptr -= segment_base(embedded_block); for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets @@ -215,6 +214,22 @@ namespace interface5 { watchdog.my_segment_ptr = 0; } + template + void delete_segment(segment_index_t s, const Allocator& allocator) { + typedef typename tbb::internal::allocator_rebind::type bucket_allocator_type; + typedef tbb::internal::allocator_traits bucket_allocator_traits; + bucket_allocator_type bucket_allocator(allocator); + segment_ptr_t buckets_ptr = my_table[s]; + size_type sz = segment_size( s ? 
s : 1 ); + + if( s >= first_block) // the first segment or the next + bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr, sz); + else if( s == embedded_block && embedded_block != first_block ) + bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr, + segment_size(first_block) - embedded_buckets); + if( s >= embedded_block ) my_table[s] = 0; + } + //! Get bucket by (masked) hashcode bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere? segment_index_t s = segment_index_of( h ); @@ -283,11 +298,12 @@ namespace interface5 { } //! Prepare enough segments for number of buckets - void reserve(size_type buckets) { + template + void reserve(size_type buckets, const Allocator& allocator) { if( !buckets-- ) return; bool is_initial = !my_size; for( size_type m = my_mask; buckets > m; m = my_mask ) - enable_segment( segment_index_of( m+1 ), is_initial ); + enable_segment( segment_index_of( m+1 ), allocator, is_initial ); } //! Swap hash_map_bases void internal_swap(hash_map_base &table) { @@ -299,6 +315,25 @@ namespace interface5 { for(size_type i = embedded_block; i < pointers_per_table; i++) swap(this->my_table[i], table.my_table[i]); } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + void internal_move(hash_map_base&& other) { + my_mask = other.my_mask; + other.my_mask = embedded_buckets - 1; + my_size = other.my_size; + other.my_size = 0; + + for(size_type i = 0; i < embedded_buckets; ++i) { + my_embedded_segment[i].node_list = other.my_embedded_segment[i].node_list; + other.my_embedded_segment[i].node_list = NULL; + } + + for(size_type i = embedded_block; i < pointers_per_table; ++i) { + my_table[i] = other.my_table[i]; + other.my_table[i] = NULL; + } + } +#endif // __TBB_CPP11_RVALUE_REF_PRESENT }; template @@ -376,9 +411,17 @@ namespace interface5 { my_bucket(other.my_bucket), my_node(other.my_node) {} + + hash_map_iterator& operator=( const hash_map_iterator &other ) { + my_map = other.my_map; + my_index = 
other.my_index; + my_bucket = other.my_bucket; + my_node = other.my_node; + return *this; + } Value& operator*() const { __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" ); - return my_node->item; + return my_node->value(); } Value* operator->() const {return &operator*();} hash_map_iterator& operator++(); @@ -564,62 +607,80 @@ class concurrent_hash_map : protected internal::hash_map_base { protected: friend class const_accessor; - struct node; - typedef typename Allocator::template rebind::other node_allocator_type; + class node; + typedef typename tbb::internal::allocator_rebind::type node_allocator_type; + typedef tbb::internal::allocator_traits node_allocator_traits; node_allocator_type my_allocator; HashCompare my_hash_compare; - struct node : public node_base { - value_type item; - node( const Key &key ) : item(key, T()) {} - node( const Key &key, const T &t ) : item(key, t) {} -#if __TBB_CPP11_RVALUE_REF_PRESENT - node( const Key &key, T &&t ) : item(key, std::move(t)) {} - node( value_type&& i ) : item(std::move(i)){} -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT - template - node( Args&&... args ) : item(std::forward(args)...) 
{} -#if __TBB_COPY_FROM_NON_CONST_REF_BROKEN - node( value_type& i ) : item(const_cast(i)) {} -#endif //__TBB_COPY_FROM_NON_CONST_REF_BROKEN -#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT -#endif //__TBB_CPP11_RVALUE_REF_PRESENT - node( const value_type& i ) : item(i) {} - - // exception-safe allocation, see C++ Standard 2003, clause 5.3.4p17 - void *operator new( size_t /*size*/, node_allocator_type &a ) { - void *ptr = a.allocate(1); - if(!ptr) - tbb::internal::throw_exception(tbb::internal::eid_bad_alloc); - return ptr; - } - // match placement-new form above to be called if exception thrown in constructor - void operator delete( void *ptr, node_allocator_type &a ) { a.deallocate(static_cast(ptr),1); } + class node : public node_base { + tbb::aligned_space my_value; + public: + value_type* storage() { return my_value.begin(); } + value_type& value() { return *storage(); } }; void delete_node( node_base *n ) { - my_allocator.destroy( static_cast(n) ); - my_allocator.deallocate( static_cast(n), 1); + node_allocator_traits::destroy(my_allocator, static_cast(n)->storage()); + node_allocator_traits::destroy(my_allocator, static_cast(n)); + node_allocator_traits::deallocate(my_allocator, static_cast(n), 1); + } + + struct node_scoped_guard : tbb::internal::no_copy { + node* my_node; + node_allocator_type& my_alloc; + + node_scoped_guard(node* n, node_allocator_type& alloc) : my_node(n), my_alloc(alloc) {} + ~node_scoped_guard() { + if(my_node) { + node_allocator_traits::destroy(my_alloc, my_node); + node_allocator_traits::deallocate(my_alloc, my_node, 1); + } + } + void dismiss() { my_node = NULL; } + }; + +#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + template + static node* create_node(node_allocator_type& allocator, Args&&... 
args) +#else + template + static node* create_node(node_allocator_type& allocator, __TBB_FORWARDING_REF(Arg1) arg1, __TBB_FORWARDING_REF(Arg2) arg2) +#endif + { + node* node_ptr = node_allocator_traits::allocate(allocator, 1); + node_scoped_guard guard(node_ptr, allocator); + node_allocator_traits::construct(allocator, node_ptr); +#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + node_allocator_traits::construct(allocator, node_ptr->storage(), std::forward(args)...); +#else + node_allocator_traits::construct(allocator, node_ptr->storage(), tbb::internal::forward(arg1), tbb::internal::forward(arg2)); +#endif + guard.dismiss(); + return node_ptr; } static node* allocate_node_copy_construct(node_allocator_type& allocator, const Key &key, const T * t){ - return new( allocator ) node(key, *t); + return create_node(allocator, key, *t); } #if __TBB_CPP11_RVALUE_REF_PRESENT static node* allocate_node_move_construct(node_allocator_type& allocator, const Key &key, const T * t){ - return new( allocator ) node(key, std::move(*const_cast(t))); + return create_node(allocator, key, std::move(*const_cast(t))); } -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT - template - static node* allocate_node_emplace_construct(node_allocator_type& allocator, Args&&... args){ - return new( allocator ) node(std::forward(args)...); - } -#endif //#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT #endif static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){ - return new( allocator ) node(key); +#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT + // Emplace construct an empty T object inside the pair + return create_node(allocator, std::piecewise_construct, + std::forward_as_tuple(key), std::forward_as_tuple()); +#else + // Use of a temporary object is impossible, because create_node takes a non-const reference. 
+ // copy-initialization is possible because T is already required to be CopyConstructible. + T obj = T(); + return create_node(allocator, key, tbb::internal::move(obj)); +#endif } static node* do_not_allocate_node(node_allocator_type& , const Key &, const T * ){ @@ -629,7 +690,7 @@ class concurrent_hash_map : protected internal::hash_map_base { node *search_bucket( const key_type &key, bucket *b ) const { node *n = static_cast( b->node_list ); - while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) ) + while( is_valid(n) && !my_hash_compare.equal(key, n->value().first) ) n = static_cast( n->next ); __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket"); return n; @@ -674,7 +735,7 @@ class concurrent_hash_map : protected internal::hash_map_base { __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL ); restart: for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) { - hashcode_t c = my_hash_compare.hash( static_cast(n)->item.first ); + hashcode_t c = my_hash_compare.hash( static_cast(n)->value().first ); #if TBB_USE_ASSERT hashcode_t bmask = h & (mask>>1); bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket @@ -726,7 +787,7 @@ class concurrent_hash_map : protected internal::hash_map_base { //! Return reference to associated value in hash table. const_reference operator*() const { __TBB_ASSERT( my_node, "attempt to dereference empty accessor" ); - return my_node->item; + return my_node->value(); } //! Return pointer to associated value in hash table. @@ -756,7 +817,7 @@ class concurrent_hash_map : protected internal::hash_map_base { //! Return reference to associated value in hash table. reference operator*() const { __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" ); - return this->my_node->item; + return this->my_node->value(); } //! Return pointer to associated value in hash table. 
@@ -770,18 +831,39 @@ class concurrent_hash_map : protected internal::hash_map_base { : internal::hash_map_base(), my_allocator(a) {} + explicit concurrent_hash_map( const HashCompare& compare, const allocator_type& a = allocator_type() ) + : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare) + {} + //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level. concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() ) - : my_allocator(a) + : internal::hash_map_base(), my_allocator(a) + { + reserve( n, my_allocator ); + } + + concurrent_hash_map( size_type n, const HashCompare& compare, const allocator_type& a = allocator_type() ) + : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare) { - reserve( n ); + reserve( n, my_allocator ); } //! Copy constructor - concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a = allocator_type() ) + concurrent_hash_map( const concurrent_hash_map &table ) + : internal::hash_map_base(), + my_allocator(node_allocator_traits::select_on_container_copy_construction(table.get_allocator())) + { + call_clear_on_leave scope_guard(this); + internal_copy(table); + scope_guard.dismiss(); + } + + concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a) : internal::hash_map_base(), my_allocator(a) { + call_clear_on_leave scope_guard(this); internal_copy(table); + scope_guard.dismiss(); } #if __TBB_CPP11_RVALUE_REF_PRESENT @@ -789,7 +871,7 @@ class concurrent_hash_map : protected internal::hash_map_base { concurrent_hash_map( concurrent_hash_map &&table ) : internal::hash_map_base(), my_allocator(std::move(table.get_allocator())) { - swap(table); + internal_move(std::move(table)); } //! 
Move constructor @@ -797,10 +879,10 @@ class concurrent_hash_map : protected internal::hash_map_base { : internal::hash_map_base(), my_allocator(a) { if (a == table.get_allocator()){ - this->swap(table); + internal_move(std::move(table)); }else{ call_clear_on_leave scope_guard(this); - internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end())); + internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()), table.size()); scope_guard.dismiss(); } } @@ -809,19 +891,38 @@ class concurrent_hash_map : protected internal::hash_map_base { //! Construction with copying iteration range and given allocator instance template concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() ) - : my_allocator(a) + : internal::hash_map_base(), my_allocator(a) + { + call_clear_on_leave scope_guard(this); + internal_copy(first, last, std::distance(first, last)); + scope_guard.dismiss(); + } + + template + concurrent_hash_map( I first, I last, const HashCompare& compare, const allocator_type& a = allocator_type() ) + : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare) { - reserve( std::distance(first, last) ); // TODO: load_factor? - internal_copy(first, last); + call_clear_on_leave scope_guard(this); + internal_copy(first, last, std::distance(first, last)); + scope_guard.dismiss(); } #if __TBB_INITIALIZER_LISTS_PRESENT //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level. 
concurrent_hash_map( std::initializer_list il, const allocator_type &a = allocator_type() ) - : my_allocator(a) + : internal::hash_map_base(), my_allocator(a) { - reserve(il.size()); - internal_copy(il.begin(), il.end()); + call_clear_on_leave scope_guard(this); + internal_copy(il.begin(), il.end(), il.size()); + scope_guard.dismiss(); + } + + concurrent_hash_map( std::initializer_list il, const HashCompare& compare, const allocator_type& a = allocator_type() ) + : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare) + { + call_clear_on_leave scope_guard(this); + internal_copy(il.begin(), il.end(), il.size()); + scope_guard.dismiss(); } #endif //__TBB_INITIALIZER_LISTS_PRESENT @@ -829,7 +930,9 @@ class concurrent_hash_map : protected internal::hash_map_base { //! Assignment concurrent_hash_map& operator=( const concurrent_hash_map &table ) { if( this!=&table ) { + typedef typename node_allocator_traits::propagate_on_container_copy_assignment pocca_type; clear(); + tbb::internal::allocator_copy_assignment(my_allocator, table.my_allocator, pocca_type()); internal_copy(table); } return *this; @@ -838,17 +941,9 @@ class concurrent_hash_map : protected internal::hash_map_base { #if __TBB_CPP11_RVALUE_REF_PRESENT //! 
Move Assignment concurrent_hash_map& operator=( concurrent_hash_map &&table ) { - if(this != &table){ - typedef typename tbb::internal::allocator_traits::propagate_on_container_move_assignment pocma_t; - if(pocma_t::value || this->my_allocator == table.my_allocator) { - concurrent_hash_map trash (std::move(*this)); - //TODO: swapping allocators here may be a problem, replace with single direction moving iff pocma is set - this->swap(table); - } else { - //do per element move - concurrent_hash_map moved_copy(std::move(table), this->my_allocator); - this->swap(moved_copy); - } + if(this != &table) { + typedef typename node_allocator_traits::propagate_on_container_move_assignment pocma_type; + internal_move_assign(std::move(table), pocma_type()); } return *this; } @@ -858,8 +953,7 @@ class concurrent_hash_map : protected internal::hash_map_base { //! Assignment concurrent_hash_map& operator=( std::initializer_list il ) { clear(); - reserve(il.size()); - internal_copy(il.begin(), il.end()); + internal_copy(il.begin(), il.end(), il.size()); return *this; } #endif //__TBB_INITIALIZER_LISTS_PRESENT @@ -1067,8 +1161,8 @@ class concurrent_hash_map : protected internal::hash_map_base { template bool generic_emplace( Accessor && result, Args &&... 
args ) { result.release(); - node * node_ptr = allocate_node_emplace_construct(my_allocator, std::forward(args)...); - return lookup(/*insert*/true, node_ptr->item.first, NULL, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr ); + node * node_ptr = create_node(my_allocator, std::forward(args)...); + return lookup(/*insert*/true, node_ptr->value().first, NULL, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr ); } #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT #endif //__TBB_CPP11_RVALUE_REF_PRESENT @@ -1084,7 +1178,24 @@ class concurrent_hash_map : protected internal::hash_map_base { void internal_copy( const concurrent_hash_map& source ); template - void internal_copy( I first, I last ); + void internal_copy( I first, I last, size_type reserve_size ); + +#if __TBB_CPP11_RVALUE_REF_PRESENT + // A compile-time dispatch to allow move assignment of containers with non-movable value_type if POCMA is true_type + void internal_move_assign(concurrent_hash_map&& other, tbb::internal::traits_true_type) { + tbb::internal::allocator_move_assignment(my_allocator, other.my_allocator, tbb::internal::traits_true_type()); + internal_move(std::move(other)); + } + + void internal_move_assign(concurrent_hash_map&& other, tbb::internal::traits_false_type) { + if (this->my_allocator == other.my_allocator) { + internal_move(std::move(other)); + } else { + //do per element move + internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), other.size()); + } + } +#endif //! Fast find when no concurrent erasure is used. For internal use inside TBB only! /** Return pointer to item with given key, or NULL if no such item exists. 
@@ -1109,13 +1220,40 @@ class concurrent_hash_map : protected internal::hash_map_base { } n = search_bucket( key, b ); if( n ) - return &n->item; + return n->storage(); else if( check_mask_race( h, m ) ) goto restart; return 0; } }; +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +namespace internal { +using namespace tbb::internal; + +template typename Map, typename Key, typename T, typename... Args> +using hash_map_t = Map< + Key, T, + std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >, + pack_element_t<0, Args...>, tbb_hash_compare >, + std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t >, + pack_element_t, tbb_allocator > > +>; +} + +// Deduction guide for the constructor from two iterators and hash_compare/ allocator +template +concurrent_hash_map(I, I, Args...) +-> internal::hash_map_t,internal::iterator_mapped_t, Args...>; + +// Deduction guide for the constructor from an initializer_list and hash_compare/ allocator +// Deduction guide for an initializer_list, hash_compare and allocator is implicit +template +concurrent_hash_map(std::initializer_list>, CompareOrAllocator) +-> internal::hash_map_t; + +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ + template bool concurrent_hash_map::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key&, const T*), node *tmp_n ) { __TBB_ASSERT( !result || !result->my_node, NULL ); @@ -1188,7 +1326,7 @@ bool concurrent_hash_map::lookup( bool op_insert, const Key #if __TBB_STATISTICS my_info_resizes++; // concurrent ones #endif - enable_segment( grow_segment ); + enable_segment( grow_segment, my_allocator ); } if( tmp_n ) // if op_insert only delete_node( tmp_n ); @@ -1256,7 +1394,7 @@ bool concurrent_hash_map::erase( const Key &key ) { search: node_base **p = &b()->node_list; n = *p; - while( is_valid(n) && !my_hash_compare.equal(key, static_cast(n)->item.first ) ) { + while( 
is_valid(n) && !my_hash_compare.equal(key, static_cast(n)->value().first ) ) { p = &n->next; n = *p; } @@ -1283,16 +1421,18 @@ bool concurrent_hash_map::erase( const Key &key ) { template void concurrent_hash_map::swap(concurrent_hash_map &table) { - //TODO: respect C++11 allocator_traits::propogate_on_constainer_swap - using std::swap; - swap(this->my_allocator, table.my_allocator); - swap(this->my_hash_compare, table.my_hash_compare); - internal_swap(table); + typedef typename node_allocator_traits::propagate_on_container_swap pocs_type; + if (this != &table && (pocs_type::value || my_allocator == table.my_allocator)) { + using std::swap; + tbb::internal::allocator_swap(this->my_allocator, table.my_allocator, pocs_type()); + swap(this->my_hash_compare, table.my_hash_compare); + internal_swap(table); + } } template void concurrent_hash_map::rehash(size_type sz) { - reserve( sz ); // TODO: add reduction of number of buckets as well + reserve( sz, my_allocator ); // TODO: add reduction of number of buckets as well hashcode_t mask = my_mask; hashcode_t b = (mask+1)>>1; // size or first index of the last segment __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2 @@ -1311,7 +1451,7 @@ void concurrent_hash_map::rehash(size_type sz) { // now h - is index of the root rehashed bucket b_old mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) { - hashcode_t c = my_hash_compare.hash( static_cast(q)->item.first ); + hashcode_t c = my_hash_compare.hash( static_cast(q)->value().first ); if( (c & mask) != h ) { // should be rehashed *p = q->next; // exclude from b_old bucket *b_new = get_bucket( c & mask ); @@ -1338,7 +1478,7 @@ void concurrent_hash_map::rehash(size_type sz) { #endif #if TBB_USE_ASSERT for( ; is_valid(n); n = n->next ) { - hashcode_t h = my_hash_compare.hash( static_cast(n)->item.first ) & mask; + hashcode_t h = my_hash_compare.hash( 
static_cast(n)->value().first ) & mask; __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" ); } #endif @@ -1385,7 +1525,7 @@ void concurrent_hash_map::clear() { #endif #if __TBB_EXTRA_DEBUG for(; is_valid(n); n = n->next ) { - hashcode_t h = my_hash_compare.hash( static_cast(n)->item.first ); + hashcode_t h = my_hash_compare.hash( static_cast(n)->value().first ); h &= m; __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" ); } @@ -1415,11 +1555,10 @@ void concurrent_hash_map::clear() { reported = true; } #endif -#endif//TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS +#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS my_size = 0; segment_index_t s = segment_index_of( m ); __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" ); - cache_aligned_allocator alloc; do { __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" ); segment_ptr_t buckets_ptr = my_table[s]; @@ -1429,20 +1568,16 @@ void concurrent_hash_map::clear() { buckets_ptr[i].node_list = n->next; delete_node( n ); } - if( s >= first_block) // the first segment or the next - alloc.deallocate( buckets_ptr, sz ); - else if( s == embedded_block && embedded_block != first_block ) - alloc.deallocate( buckets_ptr, segment_size(first_block)-embedded_buckets ); - if( s >= embedded_block ) my_table[s] = 0; + delete_segment(s, my_allocator); } while(s-- > 0); my_mask = embedded_buckets - 1; } template void concurrent_hash_map::internal_copy( const concurrent_hash_map& source ) { - reserve( source.my_size ); // TODO: load_factor? hashcode_t mask = source.my_mask; if( my_mask == mask ) { // optimized version + reserve( source.my_size, my_allocator ); // TODO: load_factor? 
bucket *dst = 0, *src = 0; bool rehash_required = false; for( hashcode_t k = 0; k <= mask; k++ ) { @@ -1454,24 +1589,26 @@ void concurrent_hash_map::internal_copy( const concurrent_h rehash_required = true; dst->node_list = internal::rehash_req; } else for(; n; n = static_cast( n->next ) ) { - add_to_bucket( dst, new( my_allocator ) node(n->item.first, n->item.second) ); + node* node_ptr = create_node(my_allocator, n->value().first, n->value().second); + add_to_bucket( dst, node_ptr); ++my_size; // TODO: replace by non-atomic op } } if( rehash_required ) rehash(); - } else internal_copy( source.begin(), source.end() ); + } else internal_copy( source.begin(), source.end(), source.my_size ); } template template -void concurrent_hash_map::internal_copy(I first, I last) { +void concurrent_hash_map::internal_copy(I first, I last, size_type reserve_size) { + reserve( reserve_size, my_allocator ); // TODO: load_factor? hashcode_t m = my_mask; for(; first != last; ++first) { hashcode_t h = my_hash_compare.hash( (*first).first ); bucket *b = get_bucket( h & m ); __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table"); - node *n = new( my_allocator ) node(*first); - add_to_bucket( b, n ); + node* node_ptr = create_node(my_allocator, (*first).first, (*first).second); + add_to_bucket( b, node_ptr ); ++my_size; // TODO: replace by non-atomic op } } @@ -1507,4 +1644,7 @@ inline void swap(concurrent_hash_map &a, concurrent_hash } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_concurrent_hash_map_H_include_area + #endif /* __TBB_concurrent_hash_map_H */ diff --git a/inst/include/tbb/concurrent_lru_cache.h b/inst/include/tbb/concurrent_lru_cache.h index 8aacf241..a18dbf29 100644 --- a/inst/include/tbb/concurrent_lru_cache.h +++ b/inst/include/tbb/concurrent_lru_cache.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, 
Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,23 +12,27 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_concurrent_lru_cache_H #define __TBB_concurrent_lru_cache_H +#define __TBB_concurrent_lru_cache_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #if ! TBB_PREVIEW_CONCURRENT_LRU_CACHE #error Set TBB_PREVIEW_CONCURRENT_LRU_CACHE to include concurrent_lru_cache.h #endif +#include "tbb_stddef.h" + #include #include +#include // std::find +#if __TBB_CPP11_RVALUE_REF_PRESENT +#include // std::move +#endif -#include "tbb_stddef.h" #include "atomic.h" #include "internal/_aggregator_impl.h" @@ -99,37 +103,84 @@ class concurrent_lru_cache : internal::no_assign{ } private: +#if !__TBB_CPP11_RVALUE_REF_PRESENT struct handle_move_t:no_assign{ concurrent_lru_cache & my_cache_ref; typename map_storage_type::reference my_map_record_ref; handle_move_t(concurrent_lru_cache & cache_ref, typename map_storage_type::reference value_ref):my_cache_ref(cache_ref),my_map_record_ref(value_ref) {}; }; +#endif class handle_object { concurrent_lru_cache * my_cache_pointer; - typename map_storage_type::reference my_map_record_ref; + typename map_storage_type::pointer my_map_record_ptr; public: - handle_object(concurrent_lru_cache & cache_ref, typename map_storage_type::reference value_ref):my_cache_pointer(&cache_ref), my_map_record_ref(value_ref) {} - handle_object(handle_move_t m):my_cache_pointer(&m.my_cache_ref), my_map_record_ref(m.my_map_record_ref){} - operator handle_move_t(){ return move(*this);} + handle_object() : my_cache_pointer(), my_map_record_ptr() {} + handle_object(concurrent_lru_cache& cache_ref, typename map_storage_type::reference value_ref) : my_cache_pointer(&cache_ref), my_map_record_ptr(&value_ref) {} + operator bool() const { + return 
(my_cache_pointer && my_map_record_ptr); + } +#if __TBB_CPP11_RVALUE_REF_PRESENT + // TODO: add check for double moved objects by special dedicated field + handle_object(handle_object&& src) : my_cache_pointer(src.my_cache_pointer), my_map_record_ptr(src.my_map_record_ptr) { + __TBB_ASSERT((src.my_cache_pointer && src.my_map_record_ptr) || (!src.my_cache_pointer && !src.my_map_record_ptr), "invalid state of moving object?"); + src.my_cache_pointer = NULL; + src.my_map_record_ptr = NULL; + } + handle_object& operator=(handle_object&& src) { + __TBB_ASSERT((src.my_cache_pointer && src.my_map_record_ptr) || (!src.my_cache_pointer && !src.my_map_record_ptr), "invalid state of moving object?"); + if (my_cache_pointer) { + my_cache_pointer->signal_end_of_usage(*my_map_record_ptr); + } + my_cache_pointer = src.my_cache_pointer; + my_map_record_ptr = src.my_map_record_ptr; + src.my_cache_pointer = NULL; + src.my_map_record_ptr = NULL; + return *this; + } +#else + handle_object(handle_move_t m) : my_cache_pointer(&m.my_cache_ref), my_map_record_ptr(&m.my_map_record_ref) {} + handle_object& operator=(handle_move_t m) { + if (my_cache_pointer) { + my_cache_pointer->signal_end_of_usage(*my_map_record_ptr); + } + my_cache_pointer = &m.my_cache_ref; + my_map_record_ptr = &m.my_map_record_ref; + return *this; + } + operator handle_move_t(){ + return move(*this); + } +#endif // __TBB_CPP11_RVALUE_REF_PRESENT value_type& value(){ - __TBB_ASSERT(my_cache_pointer,"get value from moved from object?"); - return my_map_record_ref.second.my_value; + __TBB_ASSERT(my_cache_pointer,"get value from already moved object?"); + __TBB_ASSERT(my_map_record_ptr,"get value from an invalid or already moved object?"); + return my_map_record_ptr->second.my_value; } ~handle_object(){ if (my_cache_pointer){ - my_cache_pointer->signal_end_of_usage(my_map_record_ref); + my_cache_pointer->signal_end_of_usage(*my_map_record_ptr); } } private: +#if __TBB_CPP11_RVALUE_REF_PRESENT + // For source compatibility 
with C++03 + friend handle_object&& move(handle_object& h){ + return std::move(h); + } +#else friend handle_move_t move(handle_object& h){ return handle_object::move(h); } + // TODO: add check for double moved objects by special dedicated field static handle_move_t move(handle_object& h){ - __TBB_ASSERT(h.my_cache_pointer,"move from the same object twice ?"); + __TBB_ASSERT((h.my_cache_pointer && h.my_map_record_ptr) || (!h.my_cache_pointer && !h.my_map_record_ptr), "invalid state of moving object?"); concurrent_lru_cache * cache_pointer = h.my_cache_pointer; + typename map_storage_type::pointer map_record_ptr = h.my_map_record_ptr; h.my_cache_pointer = NULL; - return handle_move_t(*cache_pointer,h.my_map_record_ref); + h.my_map_record_ptr = NULL; + return handle_move_t(*cache_pointer, *map_record_ptr); } +#endif // __TBB_CPP11_RVALUE_REF_PRESENT private: void operator=(handle_object&); #if __SUNPRO_CC @@ -232,4 +283,8 @@ class concurrent_lru_cache : internal::no_assign{ using interface6::concurrent_lru_cache; } // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_concurrent_lru_cache_H_include_area + #endif //__TBB_concurrent_lru_cache_H diff --git a/inst/include/tbb/concurrent_map.h b/inst/include/tbb/concurrent_map.h new file mode 100644 index 00000000..32fbe684 --- /dev/null +++ b/inst/include/tbb/concurrent_map.h @@ -0,0 +1,389 @@ +/* + Copyright (c) 2019-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_concurrent_map_H +#define __TBB_concurrent_map_H + +#define __TBB_concurrent_map_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#if !TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS +#error Set TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS to include concurrent_map.h +#endif + +#include "tbb_config.h" + +// concurrent_map requires C++11 support +#if __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT + +#include "internal/_concurrent_skip_list_impl.h" + +namespace tbb { + +namespace interface10 { + +template +class map_traits { +public: + static constexpr size_t MAX_LEVEL = MAX_LEVELS; + using random_level_generator_type = RandomGenerator; + using key_type = Key; + using mapped_type = Value; + using compare_type = KeyCompare; + using value_type = std::pair; + using reference = value_type&; + using const_reference = const value_type&; + using allocator_type = Allocator; + using mutex_type = tbb::spin_mutex; + using node_type = tbb::internal::node_handle, allocator_type>; + + static const bool allow_multimapping = AllowMultimapping; + + class value_compare { + public: + // TODO: these member types are deprecated in C++17, do we need to let them + using result_type = bool; + using first_argument_type = value_type; + using second_argument_type = value_type; + + bool operator()(const value_type& lhs, const value_type& rhs) const { + return comp(lhs.first, rhs.first); + } + + protected: + value_compare(compare_type c) : comp(c) {} + + friend class map_traits; + + compare_type comp; + }; + + static value_compare value_comp(compare_type comp) { return value_compare(comp); } + + static const key_type& get_key(const_reference val) { + return val.first; + } +}; // class map_traits + +template +class concurrent_multimap; + +template , typename Allocator = tbb_allocator>> +class concurrent_map + : public internal::concurrent_skip_list, 64, Allocator, false>> { + using traits_type = map_traits, 64, Allocator, false>; + using base_type = 
internal::concurrent_skip_list; +#if __TBB_EXTRA_DEBUG +public: +#endif + using base_type::allow_multimapping; +public: + using key_type = Key; + using mapped_type = Value; + using value_type = typename traits_type::value_type; + using size_type = typename base_type::size_type; + using difference_type = typename base_type::difference_type; + using key_compare = Comp; + using value_compare = typename base_type::value_compare; + using allocator_type = Allocator; + + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using pointer = typename base_type::pointer; + using const_pointer = typename base_type::pointer; + + using iterator = typename base_type::iterator; + using const_iterator = typename base_type::const_iterator; + using reverse_iterator = typename base_type::reverse_iterator; + using const_reverse_iterator = typename base_type::const_reverse_iterator; + + using node_type = typename base_type::node_type; + + using base_type::end; + using base_type::find; + using base_type::emplace; + using base_type::insert; + + concurrent_map() = default; + + explicit concurrent_map(const key_compare& comp, const allocator_type& alloc = allocator_type()) : base_type(comp, alloc) {} + + explicit concurrent_map(const allocator_type& alloc) : base_type(key_compare(), alloc) {} + + template< class InputIt > + concurrent_map(InputIt first, InputIt last, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type()) + : base_type(first, last, comp, alloc) {} + + template< class InputIt > + concurrent_map(InputIt first, InputIt last, const allocator_type& alloc) : base_type(first, last, key_compare(), alloc) {} + + /** Copy constructor */ + concurrent_map(const concurrent_map&) = default; + + concurrent_map(const concurrent_map& other, const allocator_type& alloc) : base_type(other, alloc) {} + + concurrent_map(concurrent_map&&) = default; + + concurrent_map(concurrent_map&& other, const 
allocator_type& alloc) : base_type(std::move(other), alloc) {} + + concurrent_map(std::initializer_list init, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type()) + : base_type(comp, alloc) { + insert(init); + } + + concurrent_map(std::initializer_list init, const allocator_type& alloc) + : base_type(key_compare(), alloc) { + insert(init); + } + + concurrent_map& operator=(const concurrent_map& other) { + return static_cast(base_type::operator=(other)); + } + + concurrent_map& operator=(concurrent_map&& other) { + return static_cast(base_type::operator=(std::move(other))); + } + + mapped_type& at(const key_type& key) { + iterator it = find(key); + + if (it == end()) { + tbb::internal::throw_exception(tbb::internal::eid_invalid_key); + } + + return it->second; + } + + const mapped_type& at(const key_type& key) const { + const_iterator it = find(key); + + if (it == end()) { + tbb::internal::throw_exception(tbb::internal::eid_invalid_key); + } + + return it->second; + } + + mapped_type& operator[](const key_type& key) { + iterator it = find(key); + + if (it == end()) { + it = emplace(std::piecewise_construct, std::forward_as_tuple(key), std::tuple<>()).first; + } + + return it->second; + } + + mapped_type& operator[](key_type&& key) { + iterator it = find(key); + + if (it == end()) { + it = emplace(std::piecewise_construct, std::forward_as_tuple(std::move(key)), std::tuple<>()).first; + } + + return it->second; + } + + template::value>::type> + std::pair insert(P&& value) { + return emplace(std::forward

(value)); + } + + template::value>::type> + iterator insert(const_iterator hint, P&& value) { + return emplace_hint(hint, std::forward

(value)); + return end(); + } + + template + void merge(concurrent_map& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_map&& source) { + this->internal_merge(std::move(source)); + } + + template + void merge(concurrent_multimap& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_multimap&& source) { + this->internal_merge(std::move(source)); + } +}; // class concurrent_map + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +namespace internal { + +using namespace tbb::internal; + +template typename Map, typename Key, typename T, typename... Args> +using c_map_t = Map 0) && !is_allocator_v >, + pack_element_t<0, Args...>, std::less >, + std::conditional_t< (sizeof...(Args) > 0) && is_allocator_v >, + pack_element_t, tbb_allocator > > >; +} // namespace internal + +template +concurrent_map(It, It, Args...) +-> internal::c_map_t, internal::iterator_mapped_t, Args...>; + +template +concurrent_map(std::initializer_list>, Args...) +-> internal::c_map_t; + +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template , typename Allocator = tbb_allocator>> +class concurrent_multimap + : public internal::concurrent_skip_list, 64, Allocator, true>> { + using traits_type = map_traits, 64, Allocator, true>; + using base_type = internal::concurrent_skip_list; +#if __TBB_EXTRA_DEBUG +public: +#endif + using base_type::allow_multimapping; +public: + using key_type = Key; + using mapped_type = Value; + using value_type = typename traits_type::value_type; + using size_type = typename base_type::size_type; + using difference_type = typename base_type::difference_type; + using key_compare = Comp; + using value_compare = typename base_type::value_compare; + using allocator_type = Allocator; + + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using pointer = typename base_type::pointer; + using const_pointer = typename base_type::pointer; + + using iterator = 
typename base_type::iterator; + using const_iterator = typename base_type::const_iterator; + using reverse_iterator = typename base_type::reverse_iterator; + using const_reverse_iterator = typename base_type::const_reverse_iterator; + + using node_type = typename base_type::node_type; + + using base_type::end; + using base_type::find; + using base_type::emplace; + using base_type::insert; + + concurrent_multimap() = default; + + explicit concurrent_multimap(const key_compare& comp, const allocator_type& alloc = allocator_type()) : base_type(comp, alloc) {} + + explicit concurrent_multimap(const allocator_type& alloc) : base_type(key_compare(), alloc) {} + + template< class InputIt > + concurrent_multimap(InputIt first, InputIt last, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type()) + : base_type(first, last, comp, alloc) {} + + template< class InputIt > + concurrent_multimap(InputIt first, InputIt last, const allocator_type& alloc) : base_type(first, last, key_compare(), alloc) {} + + /** Copy constructor */ + concurrent_multimap(const concurrent_multimap&) = default; + + concurrent_multimap(const concurrent_multimap& other, const allocator_type& alloc) : base_type(other, alloc) {} + + concurrent_multimap(concurrent_multimap&&) = default; + + concurrent_multimap(concurrent_multimap&& other, const allocator_type& alloc) : base_type(std::move(other), alloc) {} + + concurrent_multimap(std::initializer_list init, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type()) + : base_type(comp, alloc) { + insert(init); + } + + concurrent_multimap(std::initializer_list init, const allocator_type& alloc) + : base_type(key_compare(), alloc) { + insert(init); + } + + concurrent_multimap& operator=(const concurrent_multimap& other) { + return static_cast(base_type::operator=(other)); + } + + concurrent_multimap& operator=(concurrent_multimap&& other) { + return static_cast(base_type::operator=(std::move(other))); + } + + 
template::value>::type> + std::pair insert(P&& value) { + return emplace(std::forward

(value)); + } + + template::value>::type> + iterator insert(const_iterator hint, P&& value) { + return emplace_hint(hint, std::forward

(value)); + return end(); + } + + template + void merge(concurrent_multimap& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_multimap&& source) { + this->internal_merge(std::move(source)); + } + + template + void merge(concurrent_map& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_map&& source) { + this->internal_merge(std::move(source)); + } + +}; // class concurrent_multimap + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template +concurrent_multimap(It, It, Args...) +-> internal::c_map_t, internal::iterator_mapped_t, Args...>; + +template +concurrent_multimap(std::initializer_list>, Args...) +-> internal::c_map_t; + +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +} // namespace interface10 + +using interface10::concurrent_map; +using interface10::concurrent_multimap; + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_concurrent_map_H_include_area + +#endif // __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT +#endif // __TBB_concurrent_map_H diff --git a/inst/include/tbb/concurrent_priority_queue.h b/inst/include/tbb/concurrent_priority_queue.h index 89297d85..9c70098b 100644 --- a/inst/include/tbb/concurrent_priority_queue.h +++ b/inst/include/tbb/concurrent_priority_queue.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,21 +12,22 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_concurrent_priority_queue_H #define __TBB_concurrent_priority_queue_H +#define __TBB_concurrent_priority_queue_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "atomic.h" #include "cache_aligned_allocator.h" #include "tbb_exception.h" #include "tbb_stddef.h" #include "tbb_profiling.h" #include "internal/_aggregator_impl.h" +#include "internal/_template_helpers.h" +#include "internal/_allocator_traits.h" #include #include #include @@ -85,14 +86,28 @@ class concurrent_priority_queue { typedef A allocator_type; //! Constructs a new concurrent_priority_queue with default capacity - explicit concurrent_priority_queue(const allocator_type& a = allocator_type()) : mark(0), my_size(0), data(a) + explicit concurrent_priority_queue(const allocator_type& a = allocator_type()) : mark(0), my_size(0), compare(), data(a) + { + my_aggregator.initialize_handler(my_functor_t(this)); + } + + //! Constructs a new concurrent_priority_queue with default capacity + explicit concurrent_priority_queue(const Compare& c, const allocator_type& a = allocator_type()) : mark(0), my_size(0), compare(c), data(a) { my_aggregator.initialize_handler(my_functor_t(this)); } //! Constructs a new concurrent_priority_queue with init_sz capacity explicit concurrent_priority_queue(size_type init_capacity, const allocator_type& a = allocator_type()) : - mark(0), my_size(0), data(a) + mark(0), my_size(0), compare(), data(a) + { + data.reserve(init_capacity); + my_aggregator.initialize_handler(my_functor_t(this)); + } + + //! Constructs a new concurrent_priority_queue with init_sz capacity + explicit concurrent_priority_queue(size_type init_capacity, const Compare& c, const allocator_type& a = allocator_type()) : + mark(0), my_size(0), compare(c), data(a) { data.reserve(init_capacity); my_aggregator.initialize_handler(my_functor_t(this)); @@ -101,7 +116,17 @@ class concurrent_priority_queue { //! 
[begin,end) constructor template concurrent_priority_queue(InputIterator begin, InputIterator end, const allocator_type& a = allocator_type()) : - mark(0), data(begin, end, a) + mark(0), compare(), data(begin, end, a) + { + my_aggregator.initialize_handler(my_functor_t(this)); + heapify(); + my_size = data.size(); + } + + //! [begin,end) constructor + template + concurrent_priority_queue(InputIterator begin, InputIterator end, const Compare& c, const allocator_type& a = allocator_type()) : + mark(0), compare(c), data(begin, end, a) { my_aggregator.initialize_handler(my_functor_t(this)); heapify(); @@ -111,7 +136,16 @@ class concurrent_priority_queue { #if __TBB_INITIALIZER_LISTS_PRESENT //! Constructor from std::initializer_list concurrent_priority_queue(std::initializer_list init_list, const allocator_type &a = allocator_type()) : - mark(0),data(init_list.begin(), init_list.end(), a) + mark(0), compare(), data(init_list.begin(), init_list.end(), a) + { + my_aggregator.initialize_handler(my_functor_t(this)); + heapify(); + my_size = data.size(); + } + + //! Constructor from std::initializer_list + concurrent_priority_queue(std::initializer_list init_list, const Compare& c, const allocator_type &a = allocator_type()) : + mark(0), compare(c), data(init_list.begin(), init_list.end(), a) { my_aggregator.initialize_handler(my_functor_t(this)); heapify(); @@ -121,7 +155,7 @@ class concurrent_priority_queue { //! Copy constructor /** This operation is unsafe if there are pending concurrent operations on the src queue. 
*/ - explicit concurrent_priority_queue(const concurrent_priority_queue& src) : mark(src.mark), + concurrent_priority_queue(const concurrent_priority_queue& src) : mark(src.mark), my_size(src.my_size), data(src.data.begin(), src.data.end(), src.data.get_allocator()) { my_aggregator.initialize_handler(my_functor_t(this)); @@ -481,10 +515,38 @@ class concurrent_priority_queue { } }; +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +namespace internal { + +template +using priority_queue_t = concurrent_priority_queue< + T, + std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >, + pack_element_t<0, Args...>, std::less >, + std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t >, + pack_element_t, cache_aligned_allocator > +>; +} + +// Deduction guide for the constructor from two iterators +template::value_type, + typename... Args +> concurrent_priority_queue(InputIterator, InputIterator, Args...) +-> internal::priority_queue_t; + +template +concurrent_priority_queue(std::initializer_list init_list, CompareOrAllocalor) +-> internal::priority_queue_t; + +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ } // namespace interface5 using interface5::concurrent_priority_queue; } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_concurrent_priority_queue_H_include_area + #endif /* __TBB_concurrent_priority_queue_H */ diff --git a/inst/include/tbb/concurrent_queue.h b/inst/include/tbb/concurrent_queue.h index 81db58a3..122f98e5 100644 --- a/inst/include/tbb/concurrent_queue.h +++ b/inst/include/tbb/concurrent_queue.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,16 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_concurrent_queue_H #define __TBB_concurrent_queue_H +#define __TBB_concurrent_queue_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "internal/_concurrent_queue_impl.h" +#include "internal/_allocator_traits.h" namespace tbb { @@ -36,7 +36,7 @@ class concurrent_queue: public internal::concurrent_queue_base_v3 { template friend class internal::concurrent_queue_iterator; //! Allocator type - typedef typename A::template rebind::other page_allocator_type; + typedef typename tbb::internal::allocator_rebind::type page_allocator_type; page_allocator_type my_allocator; //! Allocates a block of size n (bytes) @@ -177,6 +177,15 @@ class concurrent_queue: public internal::concurrent_queue_base_v3 { const_iterator unsafe_end() const {return const_iterator();} } ; +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +// Deduction guide for the constructor from two iterators +template::value_type, + typename A = cache_aligned_allocator +> concurrent_queue(InputIterator, InputIterator, const A& = A()) +-> concurrent_queue; +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ + template concurrent_queue::~concurrent_queue() { clear(); @@ -200,9 +209,9 @@ void concurrent_queue::clear() { template > class concurrent_bounded_queue: public internal::concurrent_queue_base_v8 { template friend class internal::concurrent_queue_iterator; + typedef typename tbb::internal::allocator_rebind::type page_allocator_type; //! Allocator type - typedef typename A::template rebind::other page_allocator_type; page_allocator_type my_allocator; typedef typename concurrent_queue_base_v3::padded_page padded_page; @@ -439,6 +448,15 @@ class concurrent_bounded_queue: public internal::concurrent_queue_base_v8 { }; +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +// guide for concurrent_bounded_queue(InputIterator, InputIterator, ...) 
+template::value_type, + typename A = cache_aligned_allocator +> concurrent_bounded_queue(InputIterator, InputIterator, const A& = A()) +-> concurrent_bounded_queue; +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ + template concurrent_bounded_queue::~concurrent_bounded_queue() { clear(); @@ -455,4 +473,7 @@ using strict_ppl::concurrent_queue; } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_concurrent_queue_H_include_area + #endif /* __TBB_concurrent_queue_H */ diff --git a/inst/include/tbb/concurrent_set.h b/inst/include/tbb/concurrent_set.h new file mode 100644 index 00000000..ecb21624 --- /dev/null +++ b/inst/include/tbb/concurrent_set.h @@ -0,0 +1,304 @@ +/* + Copyright (c) 2019-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_concurrent_set_H +#define __TBB_concurrent_set_H + +#define __TBB_concurrent_set_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#if !TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS +#error Set TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS to include concurrent_set.h +#endif + +#include "tbb/tbb_config.h" + +// concurrent_set requires C++11 support +#if __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT + +#include "internal/_concurrent_skip_list_impl.h" + +namespace tbb { +namespace interface10 { + +// TODO: test this class +template +class set_traits { +public: + static constexpr size_t MAX_LEVEL = MAX_LEVELS; + using random_level_generator_type = RandomGenerator; + using key_type = Key; + using value_type = key_type; + using compare_type = KeyCompare; + using value_compare = compare_type; + using reference = value_type & ; + using const_reference = const value_type&; + using allocator_type = Allocator; + using mutex_type = tbb::spin_mutex; + using node_type = tbb::internal::node_handle, allocator_type>; + + static const bool allow_multimapping = AllowMultimapping; + + static const key_type& get_key(const_reference val) { + return val; + } + + static value_compare value_comp(compare_type comp) { return comp; } +}; + +template +class concurrent_multiset; + +template , typename Allocator = tbb_allocator> +class concurrent_set + : public internal::concurrent_skip_list, 64, Allocator, false>> { + using traits_type = set_traits, 64, Allocator, false>; + using base_type = internal::concurrent_skip_list; +#if __TBB_EXTRA_DEBUG +public: +#endif + using base_type::allow_multimapping; +public: + using key_type = Key; + using value_type = typename traits_type::value_type; + using size_type = typename base_type::size_type; + using difference_type = typename base_type::difference_type; + using key_compare = Comp; + using value_compare = typename base_type::value_compare; + using allocator_type = Allocator; + + using reference = typename 
base_type::reference; + using const_reference = typename base_type::const_reference; + using pointer = typename base_type::pointer; + using const_pointer = typename base_type::pointer; + + using iterator = typename base_type::iterator; + using const_iterator = typename base_type::const_iterator; + using reverse_iterator = typename base_type::reverse_iterator; + using const_reverse_iterator = typename base_type::const_reverse_iterator; + + using node_type = typename base_type::node_type; + + using base_type::insert; + + concurrent_set() = default; + + explicit concurrent_set(const key_compare& comp, const allocator_type& alloc = allocator_type()) : base_type(comp, alloc) {} + + explicit concurrent_set(const allocator_type& alloc) : base_type(key_compare(), alloc) {} + + template< class InputIt > + concurrent_set(InputIt first, InputIt last, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type()) + : base_type(first, last, comp, alloc) {} + + template< class InputIt > + concurrent_set(InputIt first, InputIt last, const allocator_type& alloc) : base_type(first, last, key_compare(), alloc) {} + + /** Copy constructor */ + concurrent_set(const concurrent_set&) = default; + + concurrent_set(const concurrent_set& other, const allocator_type& alloc) : base_type(other, alloc) {} + + concurrent_set(concurrent_set&&) = default; + + concurrent_set(concurrent_set&& other, const allocator_type& alloc) : base_type(std::move(other), alloc) {} + + concurrent_set(std::initializer_list init, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type()) + : base_type(comp, alloc) { + insert(init); + } + + concurrent_set(std::initializer_list init, const allocator_type& alloc) + : base_type(key_compare(), alloc) { + insert(init); + } + + concurrent_set& operator=(const concurrent_set& other) { + return static_cast(base_type::operator=(other)); + } + + concurrent_set& operator=(concurrent_set&& other) { + return 
static_cast(base_type::operator=(std::move(other))); + } + + template + void merge(concurrent_set& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_set&& source) { + this->internal_merge(std::move(source)); + } + + template + void merge(concurrent_multiset& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_multiset&& source) { + this->internal_merge(std::move(source)); + } +}; // class concurrent_set + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +namespace internal { + +using namespace tbb::internal; + +template typename Set, typename Key, typename... Args> +using c_set_t = Set 0) && !is_allocator_v >, + pack_element_t<0, Args...>, std::less >, + std::conditional_t< (sizeof...(Args) > 0) && is_allocator_v >, + pack_element_t, tbb_allocator > >; +} // namespace internal + +template +concurrent_set(It, It, Args...) +-> internal::c_set_t, Args...>; + +template +concurrent_set(std::initializer_list, Args...) +-> internal::c_set_t; + +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template , typename Allocator = tbb_allocator> +class concurrent_multiset + : public internal::concurrent_skip_list, 64, Allocator, true>> { + using traits_type = set_traits, 64, Allocator, true>; + using base_type = internal::concurrent_skip_list; +#if __TBB_EXTRA_DEBUG +public: +#endif + using base_type::allow_multimapping; +public: + using key_type = Key; + using value_type = typename traits_type::value_type; + using size_type = typename base_type::size_type; + using difference_type = typename base_type::difference_type; + using key_compare = Comp; + using value_compare = typename base_type::value_compare; + using allocator_type = Allocator; + + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using pointer = typename base_type::pointer; + using const_pointer = typename base_type::pointer; + + using iterator = typename base_type::iterator; + using 
const_iterator = typename base_type::const_iterator; + using reverse_iterator = typename base_type::reverse_iterator; + using const_reverse_iterator = typename base_type::const_reverse_iterator; + + using node_type = typename base_type::node_type; + + using base_type::insert; + + concurrent_multiset() = default; + + explicit concurrent_multiset(const key_compare& comp, const allocator_type& alloc = allocator_type()) : base_type(comp, alloc) {} + + explicit concurrent_multiset(const allocator_type& alloc) : base_type(key_compare(), alloc) {} + + template< class InputIt > + concurrent_multiset(InputIt first, InputIt last, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type()) + : base_type(comp, alloc) { + insert(first, last); + } + + template< class InputIt > + concurrent_multiset(InputIt first, InputIt last, const allocator_type& alloc) : base_type(key_compare(), alloc) { + insert(first, last); + } + + /** Copy constructor */ + concurrent_multiset(const concurrent_multiset&) = default; + + concurrent_multiset(const concurrent_multiset& other, const allocator_type& alloc) : base_type(other, alloc) {} + + concurrent_multiset(concurrent_multiset&&) = default; + + concurrent_multiset(concurrent_multiset&& other, const allocator_type& alloc) : base_type(std::move(other), alloc) {} + + concurrent_multiset(std::initializer_list init, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type()) + : base_type(comp, alloc) { + insert(init); + } + + concurrent_multiset(std::initializer_list init, const allocator_type& alloc) + : base_type(key_compare(), alloc) { + insert(init); + } + + concurrent_multiset& operator=(const concurrent_multiset& other) { + return static_cast(base_type::operator=(other)); + } + + concurrent_multiset& operator=(concurrent_multiset&& other) { + return static_cast(base_type::operator=(std::move(other))); + } + + template + void merge(concurrent_set& source) { + this->internal_merge(source); + } + + 
template + void merge(concurrent_set&& source) { + this->internal_merge(std::move(source)); + } + + template + void merge(concurrent_multiset& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_multiset&& source) { + this->internal_merge(std::move(source)); + } +}; // class concurrent_multiset + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + + +template +concurrent_multiset(It, It, Args...) +-> internal::c_set_t, Args...>; + +template +concurrent_multiset(std::initializer_list, Args...) +-> internal::c_set_t; + +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +} // namespace interface10 + +using interface10::concurrent_set; +using interface10::concurrent_multiset; + +} // namespace tbb + +#endif // __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_concurrent_set_H_include_area + +#endif // __TBB_concurrent_set_H diff --git a/inst/include/tbb/concurrent_unordered_map.h b/inst/include/tbb/concurrent_unordered_map.h index c959a7ec..a9d8df8a 100644 --- a/inst/include/tbb/concurrent_unordered_map.h +++ b/inst/include/tbb/concurrent_unordered_map.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ /* Container implementations in this header are based on PPL implementations @@ -24,6 +20,9 @@ #ifndef __TBB_concurrent_unordered_map_H #define __TBB_concurrent_unordered_map_H +#define __TBB_concurrent_unordered_map_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "internal/_concurrent_unordered_impl.h" namespace tbb @@ -39,7 +38,13 @@ class concurrent_unordered_map_traits typedef std::pair value_type; typedef Key key_type; typedef Hash_compare hash_compare; - typedef typename Allocator::template rebind::other allocator_type; + typedef typename tbb::internal::allocator_rebind::type allocator_type; +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + typedef tbb::internal::node_handle::node, + allocator_type> node_type; +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT + enum { allow_multimapping = Allow_multimapping }; concurrent_unordered_map_traits() : my_hash_compare() {} @@ -53,6 +58,9 @@ class concurrent_unordered_map_traits hash_compare my_hash_compare; // the comparator predicate for keys }; +template +class concurrent_unordered_multimap; + template , typename Key_equality = std::equal_to, typename Allocator = tbb::tbb_allocator > > class concurrent_unordered_map : @@ -93,12 +101,23 @@ class concurrent_unordered_map : typedef typename base_type::const_iterator const_iterator; typedef typename base_type::iterator local_iterator; typedef typename base_type::const_iterator const_local_iterator; +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + typedef typename base_type::node_type node_type; +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT // Construction/destruction/copying explicit concurrent_unordered_map(size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) + : 
base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) + {} + + concurrent_unordered_map(size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + {} + + concurrent_unordered_map(size_type n_of_buckets, const hasher& a_hasher, const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) {} explicit concurrent_unordered_map(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a) @@ -106,9 +125,24 @@ class concurrent_unordered_map : template concurrent_unordered_map(Iterator first, Iterator last, size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) + : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) + { + insert(first, last); + } + + template + concurrent_unordered_map(Iterator first, Iterator last, size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + { + insert(first, last); + } + + template + concurrent_unordered_map(Iterator first, Iterator last, size_type n_of_buckets, const hasher& a_hasher, + const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) { insert(first, last); } @@ -116,16 +150,30 @@ class concurrent_unordered_map : #if __TBB_INITIALIZER_LISTS_PRESENT //! 
Constructor from initializer_list concurrent_unordered_map(std::initializer_list il, size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) + : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) + { + insert(il.begin(),il.end()); + } + + concurrent_unordered_map(std::initializer_list il, size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + { + insert(il.begin(), il.end()); + } + + concurrent_unordered_map(std::initializer_list il, size_type n_of_buckets, const hasher& a_hasher, + const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) { - this->insert(il.begin(),il.end()); + insert(il.begin(), il.end()); } + #endif //# __TBB_INITIALIZER_LISTS_PRESENT -#if __TBB_CPP11_RVALUE_REF_PRESENT -#if !__TBB_IMPLICIT_MOVE_PRESENT + +#if __TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_MOVE_PRESENT concurrent_unordered_map(const concurrent_unordered_map& table) : base_type(table) {} @@ -143,11 +191,31 @@ class concurrent_unordered_map : { return static_cast(base_type::operator=(std::move(table))); } -#endif //!__TBB_IMPLICIT_MOVE_PRESENT +#endif //__TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_MOVE_PRESENT +#if __TBB_CPP11_RVALUE_REF_PRESENT concurrent_unordered_map(concurrent_unordered_map&& table, const Allocator& a) : base_type(std::move(table), a) {} -#endif //__TBB_CPP11_RVALUE_REF_PRESENT +#endif /*__TBB_CPP11_RVALUE_REF_PRESENT*/ + +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + template + void merge(concurrent_unordered_map& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_map&& source) + { this->internal_merge(source); } + + template + void 
merge(concurrent_unordered_multimap& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_multimap&& source) + { this->internal_merge(source); } + +#endif //__TBB_UNORDERED_NODE_HANDLE_PRESENT concurrent_unordered_map(const concurrent_unordered_map& table, const Allocator& a) : base_type(table, a) @@ -191,6 +259,45 @@ class concurrent_unordered_map : } }; +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +namespace internal { +using namespace tbb::internal; + +template typename Map, typename Key, typename Element, typename... Args> +using cu_map_t = Map< + Key, Element, + std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >, + pack_element_t<0, Args...>, tbb_hash >, + std::conditional_t< (sizeof...(Args)>1) && !is_allocator_v< pack_element_t<1, Args...> >, + pack_element_t<1, Args...>, std::equal_to >, + std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t >, + pack_element_t, tbb_allocator > > +>; +} + +// Deduction guide for the constructor from two iterators +template +concurrent_unordered_map (I, I) +-> internal::cu_map_t, internal::iterator_mapped_t>; + +// Deduction guide for the constructor from two iterators and hasher/equality/allocator +template +concurrent_unordered_map(I, I, size_t, Args...) +-> internal::cu_map_t, internal::iterator_mapped_t, Args...>; + +// Deduction guide for the constructor from an initializer_list +template +concurrent_unordered_map(std::initializer_list>) +-> internal::cu_map_t; + +// Deduction guide for the constructor from an initializer_list and hasher/equality/allocator +template +concurrent_unordered_map(std::initializer_list>, size_t, Args...) 
+-> internal::cu_map_t; + +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ + template < typename Key, typename T, typename Hasher = tbb::tbb_hash, typename Key_equality = std::equal_to, typename Allocator = tbb::tbb_allocator > > class concurrent_unordered_multimap : @@ -229,12 +336,23 @@ class concurrent_unordered_multimap : typedef typename base_type::const_iterator const_iterator; typedef typename base_type::iterator local_iterator; typedef typename base_type::const_iterator const_local_iterator; +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + typedef typename base_type::node_type node_type; +#endif //__TBB_UNORDERED_NODE_HANDLE_PRESENT // Construction/destruction/copying explicit concurrent_unordered_multimap(size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) + : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) + {} + + concurrent_unordered_multimap(size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + {} + + concurrent_unordered_multimap(size_type n_of_buckets, const hasher& a_hasher, const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) {} explicit concurrent_unordered_multimap(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a) @@ -242,9 +360,24 @@ class concurrent_unordered_multimap : template concurrent_unordered_multimap(Iterator first, Iterator last, size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : 
base_type(n_of_buckets,key_compare(_Hasher,_Key_equality), a) + : base_type(n_of_buckets,key_compare(a_hasher,a_keyeq), a) + { + insert(first, last); + } + + template + concurrent_unordered_multimap(Iterator first, Iterator last, size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + { + insert(first, last); + } + + template + concurrent_unordered_multimap(Iterator first, Iterator last, size_type n_of_buckets, const hasher& a_hasher, + const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) { insert(first, last); } @@ -252,16 +385,29 @@ class concurrent_unordered_multimap : #if __TBB_INITIALIZER_LISTS_PRESENT //! Constructor from initializer_list concurrent_unordered_multimap(std::initializer_list il, size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) + : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) + { + insert(il.begin(),il.end()); + } + + concurrent_unordered_multimap(std::initializer_list il, size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) { - this->insert(il.begin(),il.end()); + insert(il.begin(), il.end()); } + + concurrent_unordered_multimap(std::initializer_list il, size_type n_of_buckets, const hasher& a_hasher, + const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) + { + insert(il.begin(), il.end()); + } + #endif //# __TBB_INITIALIZER_LISTS_PRESENT -#if __TBB_CPP11_RVALUE_REF_PRESENT -#if !__TBB_IMPLICIT_MOVE_PRESENT +#if __TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_MOVE_PRESENT concurrent_unordered_multimap(const concurrent_unordered_multimap& table) : 
base_type(table) {} @@ -279,16 +425,60 @@ class concurrent_unordered_multimap : { return static_cast(base_type::operator=(std::move(table))); } -#endif //!__TBB_IMPLICIT_MOVE_PRESENT +#endif //__TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_MOVE_PRESENT +#if __TBB_CPP11_RVALUE_REF_PRESENT concurrent_unordered_multimap(concurrent_unordered_multimap&& table, const Allocator& a) : base_type(std::move(table), a) {} -#endif //__TBB_CPP11_RVALUE_REF_PRESENT +#endif /*__TBB_CPP11_RVALUE_REF_PRESENT*/ + +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + template + void merge(concurrent_unordered_map& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_map&& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_multimap& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_multimap&& source) + { this->internal_merge(source); } + +#endif //__TBB_UNORDERED_NODE_HANDLE_PRESENT concurrent_unordered_multimap(const concurrent_unordered_multimap& table, const Allocator& a) : base_type(table, a) {} }; + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +// Deduction guide for the constructor from two iterators +template +concurrent_unordered_multimap (I, I) +-> internal::cu_map_t, internal::iterator_mapped_t>; + +// Deduction guide for the constructor from two iterators and hasher/equality/allocator +template +concurrent_unordered_multimap(I, I, size_t, Args...) +-> internal::cu_map_t, internal::iterator_mapped_t, Args...>; + +// Deduction guide for the constructor from an initializer_list +template +concurrent_unordered_multimap(std::initializer_list>) +-> internal::cu_map_t; + +// Deduction guide for the constructor from an initializer_list and hasher/equality/allocator +template +concurrent_unordered_multimap(std::initializer_list>, size_t, Args...) 
+-> internal::cu_map_t; + +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ } // namespace interface5 using interface5::concurrent_unordered_map; @@ -296,4 +486,7 @@ using interface5::concurrent_unordered_multimap; } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_concurrent_unordered_map_H_include_area + #endif// __TBB_concurrent_unordered_map_H diff --git a/inst/include/tbb/concurrent_unordered_set.h b/inst/include/tbb/concurrent_unordered_set.h index a26fee1a..edb02565 100644 --- a/inst/include/tbb/concurrent_unordered_set.h +++ b/inst/include/tbb/concurrent_unordered_set.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ /* Container implementations in this header are based on PPL implementations @@ -24,6 +20,9 @@ #ifndef __TBB_concurrent_unordered_set_H #define __TBB_concurrent_unordered_set_H +#define __TBB_concurrent_unordered_set_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "internal/_concurrent_unordered_impl.h" namespace tbb @@ -39,7 +38,13 @@ class concurrent_unordered_set_traits typedef Key value_type; typedef Key key_type; typedef Hash_compare hash_compare; - typedef typename Allocator::template rebind::other allocator_type; + typedef typename tbb::internal::allocator_rebind::type allocator_type; +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + typedef tbb::internal::node_handle::node, + allocator_type> node_type; +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT + enum { allow_multimapping = Allow_multimapping }; concurrent_unordered_set_traits() : my_hash_compare() {} @@ -52,6 +57,9 @@ class concurrent_unordered_set_traits hash_compare my_hash_compare; // the comparator predicate for keys }; +template +class concurrent_unordered_multiset; + template , typename Key_equality = std::equal_to, typename Allocator = tbb::tbb_allocator > class concurrent_unordered_set : public internal::concurrent_unordered_base< concurrent_unordered_set_traits, Allocator, false> > { @@ -87,6 +95,9 @@ class concurrent_unordered_set : public internal::concurrent_unordered_base< con typedef typename base_type::const_iterator const_iterator; typedef typename base_type::iterator local_iterator; typedef typename base_type::const_iterator const_local_iterator; +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + typedef typename base_type::node_type node_type; +#endif /*__TBB_UNORDERED_NODE_HANDLE_PRESENT*/ // Construction/destruction/copying explicit concurrent_unordered_set(size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(), @@ -94,29 +105,63 @@ class concurrent_unordered_set : public internal::concurrent_unordered_base< con : 
base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) {} + concurrent_unordered_set(size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + {} + + concurrent_unordered_set(size_type n_of_buckets, const hasher& a_hasher, const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) + {} + explicit concurrent_unordered_set(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a) {} template - concurrent_unordered_set(Iterator first, Iterator last, size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(), - const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) + concurrent_unordered_set(Iterator first, Iterator last, size_type n_of_buckets = base_type::initial_bucket_number, + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) { insert(first, last); } + template + concurrent_unordered_set(Iterator first, Iterator last, size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + { + insert(first, last); + } + + template + concurrent_unordered_set(Iterator first, Iterator last, size_type n_of_buckets, const hasher& a_hasher, const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) + { + insert(first, last); + } + #if __TBB_INITIALIZER_LISTS_PRESENT //! 
Constructor from initializer_list concurrent_unordered_set(std::initializer_list il, size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) { - this->insert(il.begin(),il.end()); + insert(il.begin(),il.end()); + } + + concurrent_unordered_set(std::initializer_list il, size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + { + insert(il.begin(), il.end()); } + + concurrent_unordered_set(std::initializer_list il, size_type n_of_buckets, const hasher& a_hasher, const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) + { + insert(il.begin(), il.end()); + } + #endif //# __TBB_INITIALIZER_LISTS_PRESENT -#if __TBB_CPP11_RVALUE_REF_PRESENT -#if !__TBB_IMPLICIT_MOVE_PRESENT +#if __TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_MOVE_PRESENT concurrent_unordered_set(const concurrent_unordered_set& table) : base_type(table) {} @@ -134,12 +179,32 @@ class concurrent_unordered_set : public internal::concurrent_unordered_base< con { return static_cast(base_type::operator=(std::move(table))); } -#endif //!__TBB_IMPLICIT_MOVE_PRESENT +#endif //__TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_MOVE_PRESENT +#if __TBB_CPP11_RVALUE_REF_PRESENT concurrent_unordered_set(concurrent_unordered_set&& table, const Allocator& a) : base_type(std::move(table), a) {} -#endif //__TBB_CPP11_RVALUE_REF_PRESENT +#endif /*__TBB_CPP11_RVALUE_REF_PRESENT*/ + +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + template + void merge(concurrent_unordered_set& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_set&& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_multiset& source) + { this->internal_merge(source); } + + template + void 
merge(concurrent_unordered_multiset&& source) + { this->internal_merge(source); } + +#endif //__TBB_UNORDERED_NODE_HANDLE_PRESENT concurrent_unordered_set(const concurrent_unordered_set& table, const Allocator& a) : base_type(table, a) @@ -147,6 +212,45 @@ class concurrent_unordered_set : public internal::concurrent_unordered_base< con }; +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +namespace internal { +using namespace tbb::internal; + +template typename Set, typename T, typename... Args> +using cu_set_t = Set < + T, + std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >, + pack_element_t<0, Args...>, tbb_hash >, + std::conditional_t< (sizeof...(Args)>1) && !is_allocator_v< pack_element_t<1, Args...> >, + pack_element_t<1, Args...>, std::equal_to >, + std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t >, + pack_element_t, tbb_allocator > +>; +} + +// Deduction guide for the constructor from two iterators +template +concurrent_unordered_set(I, I) +-> internal::cu_set_t>; + +// Deduction guide for the constructor from two iterators and hasher/equality/allocator +template +concurrent_unordered_set(I, I, size_t, Args...) +-> internal::cu_set_t, Args...>; + +// Deduction guide for the constructor from an initializer_list +template +concurrent_unordered_set(std::initializer_list) +-> internal::cu_set_t; + +// Deduction guide for the constructor from an initializer_list and hasher/equality/allocator +template +concurrent_unordered_set(std::initializer_list, size_t, Args...) 
+-> internal::cu_set_t; + +#endif /*__TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ + template , typename Key_equality = std::equal_to, typename Allocator = tbb::tbb_allocator > class concurrent_unordered_multiset : @@ -185,12 +289,24 @@ class concurrent_unordered_multiset : typedef typename base_type::const_iterator const_iterator; typedef typename base_type::iterator local_iterator; typedef typename base_type::const_iterator const_local_iterator; +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + typedef typename base_type::node_type node_type; +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT // Construction/destruction/copying explicit concurrent_unordered_multiset(size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) + : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) + {} + + concurrent_unordered_multiset(size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + {} + + concurrent_unordered_multiset(size_type n_of_buckets, const hasher& a_hasher, + const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) {} explicit concurrent_unordered_multiset(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a) @@ -198,25 +314,54 @@ class concurrent_unordered_multiset : template concurrent_unordered_multiset(Iterator first, Iterator last, size_type n_of_buckets = base_type::initial_bucket_number, - const hasher& _Hasher = hasher(), const key_equal& _Key_equality = key_equal(), + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(_Hasher, _Key_equality), a) + : 
base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) + { + insert(first, last); + } + + template + concurrent_unordered_multiset(Iterator first, Iterator last, size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + { + insert(first, last); + } + + template + concurrent_unordered_multiset(Iterator first, Iterator last, size_type n_of_buckets, const hasher& a_hasher, + const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) { insert(first, last); } #if __TBB_INITIALIZER_LISTS_PRESENT //! Constructor from initializer_list - concurrent_unordered_multiset(std::initializer_list il, size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(), - const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) + concurrent_unordered_multiset(std::initializer_list il, size_type n_of_buckets = base_type::initial_bucket_number, + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) { - this->insert(il.begin(),il.end()); + insert(il.begin(),il.end()); + } + + concurrent_unordered_multiset(std::initializer_list il, size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + { + insert(il.begin(), il.end()); } + + concurrent_unordered_multiset(std::initializer_list il, size_type n_of_buckets, const hasher& a_hasher, + const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) + { + insert(il.begin(), il.end()); + } + #endif //# __TBB_INITIALIZER_LISTS_PRESENT -#if __TBB_CPP11_RVALUE_REF_PRESENT -#if !__TBB_IMPLICIT_MOVE_PRESENT + +#if __TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_MOVE_PRESENT concurrent_unordered_multiset(const concurrent_unordered_multiset& table) : base_type(table) {} @@ -234,18 +379,62 @@ 
class concurrent_unordered_multiset : { return static_cast(base_type::operator=(std::move(table))); } -#endif //!__TBB_IMPLICIT_MOVE_PRESENT +#endif //__TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_MOVE_PRESENT +#if __TBB_CPP11_RVALUE_REF_PRESENT concurrent_unordered_multiset(concurrent_unordered_multiset&& table, const Allocator& a) : base_type(std::move(table), a) { } -#endif //__TBB_CPP11_RVALUE_REF_PRESENT +#endif /*__TBB_CPP11_RVALUE_REF_PRESENT*/ + +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + template + void merge(concurrent_unordered_set& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_set&& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_multiset& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_multiset&& source) + { this->internal_merge(source); } + +#endif //__TBB_UNORDERED_NODE_HANDLE_PRESENT concurrent_unordered_multiset(const concurrent_unordered_multiset& table, const Allocator& a) : base_type(table, a) {} }; + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +// Deduction guide for the constructor from two iterators +template +concurrent_unordered_multiset(I, I) +-> internal::cu_set_t>; + +// Deduction guide for the constructor from two iterators and hasher/equality/allocator +template +concurrent_unordered_multiset(I, I, size_t, Args...) +-> internal::cu_set_t, Args...>; + +// Deduction guide for the constructor from an initializer_list +template +concurrent_unordered_multiset(std::initializer_list) +-> internal::cu_set_t; + +// Deduction guide for the constructor from an initializer_list and hasher/equality/allocator +template +concurrent_unordered_multiset(std::initializer_list, size_t, Args...) 
+-> internal::cu_set_t; + +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ } // namespace interface5 using interface5::concurrent_unordered_set; @@ -253,4 +442,7 @@ using interface5::concurrent_unordered_multiset; } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_concurrent_unordered_set_H_include_area + #endif// __TBB_concurrent_unordered_set_H diff --git a/inst/include/tbb/concurrent_vector.h b/inst/include/tbb/concurrent_vector.h index bd93615e..4c53abbd 100644 --- a/inst/include/tbb/concurrent_vector.h +++ b/inst/include/tbb/concurrent_vector.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_concurrent_vector_H #define __TBB_concurrent_vector_H +#define __TBB_concurrent_vector_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "tbb_stddef.h" #include "tbb_exception.h" #include "atomic.h" @@ -31,19 +30,10 @@ #include #include // for memset() #include __TBB_STD_SWAP_HEADER - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - #include #include -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif +#include "internal/_allocator_traits.h" #if _MSC_VER==1500 && !__INTEL_COMPILER // VS2008/VC9 seems to have an issue; limits pull in math.h @@ -85,7 +75,7 @@ namespace internal { //! 
Exception helper function template void handle_unconstructed_elements(T* array, size_t n_of_elements){ - std::memset( (void*) array, 0, n_of_elements * sizeof( T ) ); + std::memset( static_cast(array), 0, n_of_elements * sizeof( T ) ); } //! Base class of concurrent vector implementation. @@ -105,11 +95,11 @@ namespace internal { pointers_per_short_table = 3, // to fit into 8 words of entire structure pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit }; - public: + struct segment_not_used {}; struct segment_allocated {}; struct segment_allocation_failed {}; - protected: + class segment_t; class segment_value_t { void* array; @@ -351,6 +341,14 @@ namespace internal { my_item(other.my_item) {} + vector_iterator& operator=( const vector_iterator& other ) + { + my_vector=other.my_vector; + my_index=other.my_index; + my_item=other.my_item; + return *this; + } + vector_iterator operator+( ptrdiff_t offset ) const { return vector_iterator( *my_vector, my_index+offset ); } @@ -478,12 +476,9 @@ namespace internal { template class allocator_base { public: - typedef typename A::template - rebind::other allocator_type; + typedef typename tbb::internal::allocator_rebind::type allocator_type; allocator_type my_allocator; - allocator_base(const allocator_type &a = allocator_type() ) : my_allocator(a) {} - }; } // namespace internal @@ -753,9 +748,7 @@ class concurrent_vector: protected internal::allocator_base, if(pocma_t::value || this->my_allocator == other.my_allocator) { concurrent_vector trash (std::move(*this)); internal_swap(other); - if (pocma_t::value) { - this->my_allocator = std::move(other.my_allocator); - } + tbb::internal::allocator_move_assignment(this->my_allocator, other.my_allocator, pocma_t()); } else { internal_assign(other, sizeof(T), &destroy_array, &move_assign_array, &move_array); } @@ -1013,10 +1006,10 @@ class concurrent_vector: protected internal::allocator_base, //! 
swap two instances void swap(concurrent_vector &vector) { - using std::swap; - if( this != &vector ) { + typedef typename tbb::internal::allocator_traits::propagate_on_container_swap pocs_t; + if( this != &vector && (this->my_allocator == vector.my_allocator || pocs_t::value) ) { concurrent_vector_base_v3::internal_swap(static_cast(vector)); - swap(this->my_allocator, vector.my_allocator); + tbb::internal::allocator_swap(this->my_allocator, vector.my_allocator, pocs_t()); } } @@ -1053,7 +1046,13 @@ class concurrent_vector: protected internal::allocator_base, internal_resize( n, sizeof(T), max_size(), static_cast(p), &destroy_array, p? &initialize_array_by : &initialize_array ); } - //! helper class + //! True/false function override helper + /* Functions declarations: + * void foo(is_integer_tag*); + * void foo(is_integer_tag*); + * Usage example: + * foo(static_cast::is_integer>*>(0)); + */ template class is_integer_tag; //! assign integer items by copying when arguments are treated as iterators. 
See C++ Standard 2003 23.1.1p9 @@ -1167,6 +1166,25 @@ class concurrent_vector: protected internal::allocator_base, }; }; +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +// Deduction guide for the constructor from two iterators +template::value_type, + typename A = cache_aligned_allocator +> concurrent_vector(I, I, const A& = A()) +-> concurrent_vector; + +// Deduction guide for the constructor from a vector and allocator +template +concurrent_vector(const concurrent_vector &, const A2 &) +-> concurrent_vector; + +// Deduction guide for the constructor from an initializer_list +template +> concurrent_vector(std::initializer_list, const A& = A()) +-> concurrent_vector; +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ + #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) #pragma warning (push) #pragma warning (disable: 4701) // potentially uninitialized local variable "old" @@ -1304,8 +1322,8 @@ void concurrent_vector::move_array_if_noexcept( void* dst, const void* src template template void concurrent_vector::copy_range( void* dst, const void* p_type_erased_iterator, size_type n ){ - I & iterator ((*const_cast(static_cast(p_type_erased_iterator)))); - internal_loop_guide loop(n, dst); loop.iterate(iterator); + internal_loop_guide loop(n, dst); + loop.iterate( *(static_cast(const_cast(p_type_erased_iterator))) ); } template @@ -1371,4 +1389,8 @@ inline void swap(concurrent_vector &a, concurrent_vector &b) #pragma warning (pop) #endif // warning 4267,4127 are back + +#undef __TBB_concurrent_vector_H_include_area +#include "internal/_warning_suppress_disable_notice.h" + #endif /* __TBB_concurrent_vector_H */ diff --git a/inst/include/tbb/critical_section.h b/inst/include/tbb/critical_section.h index 324b3e13..095f03bc 100644 --- a/inst/include/tbb/critical_section.h +++ b/inst/include/tbb/critical_section.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the 
"License"); you may not use this file except in compliance with the License. @@ -12,15 +12,25 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +*/ +#include "internal/_deprecated_header_message_guard.h" +#if !defined(__TBB_show_deprecation_message_critical_section_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_critical_section_H +#pragma message("TBB Warning: tbb/critical_section.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif - -*/ +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif #ifndef _TBB_CRITICAL_SECTION_H_ #define _TBB_CRITICAL_SECTION_H_ +#define __TBB_critical_section_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #if _WIN32||_WIN64 #include "machine/windows_api.h" #else @@ -126,8 +136,12 @@ class critical_section_v4 : internal::no_copy { static const bool is_fair_mutex = true; }; // critical_section_v4 } // namespace internal -typedef internal::critical_section_v4 critical_section; +__TBB_DEPRECATED_VERBOSE_MSG("tbb::critical_section is deprecated, use std::mutex") typedef internal::critical_section_v4 critical_section; __TBB_DEFINE_PROFILING_SET_NAME(critical_section) } // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_critical_section_H_include_area + #endif // _TBB_CRITICAL_SECTION_H_ diff --git a/inst/include/tbb/enumerable_thread_specific.h b/inst/include/tbb/enumerable_thread_specific.h index f31f533b..248597f2 100644 --- a/inst/include/tbb/enumerable_thread_specific.h +++ b/inst/include/tbb/enumerable_thread_specific.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use 
this file except in compliance with the License. @@ -12,15 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_enumerable_thread_specific_H #define __TBB_enumerable_thread_specific_H +#define __TBB_enumerable_thread_specific_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "atomic.h" #include "concurrent_vector.h" #include "tbb_thread.h" @@ -32,6 +31,10 @@ #include "tbb_profiling.h" #include // for memcpy +#if __TBB_PREVIEW_RESUMABLE_TASKS +#include "task.h" // for task::suspend_point +#endif + #if _WIN32||_WIN64 #include "machine/windows_api.h" #else @@ -45,7 +48,13 @@ namespace tbb { //! enum for selecting between single key and key-per-instance versions -enum ets_key_usage_type { ets_key_per_instance, ets_no_key }; +enum ets_key_usage_type { + ets_key_per_instance + , ets_no_key +#if __TBB_PREVIEW_RESUMABLE_TASKS + , ets_suspend_aware +#endif +}; namespace interface6 { @@ -58,10 +67,33 @@ namespace interface6 { using namespace tbb::internal; + template + struct ets_key_selector { + typedef tbb_thread::id key_type; + static key_type current_key() { + return tbb::internal::thread_get_id_v3(); + } + }; + +#if __TBB_PREVIEW_RESUMABLE_TASKS + template <> + struct ets_key_selector { + typedef task::suspend_point key_type; + static key_type current_key() { + return internal_current_suspend_point(); + } + }; + + inline task::suspend_point atomic_compare_and_swap(task::suspend_point& location, + const task::suspend_point& value, const task::suspend_point& comparand) { + return as_atomic(location).compare_and_swap(value, comparand); + } +#endif + template class ets_base: tbb::internal::no_copy { protected: - typedef tbb_thread::id key_type; + typedef typename ets_key_selector::key_type key_type; #if __TBB_PROTECTED_NESTED_CLASS_BROKEN public: #endif @@ -119,8 +151,9 @@ namespace 
interface6 { void table_clear(); // The following functions are not used in concurrent context, // so we don't need synchronization and ITT annotations there. + template void table_elementwise_copy( const ets_base& other, - void*(*add_element)(ets_base&, void*) ) { + void*(*add_element)(ets_base&, void*) ) { __TBB_ASSERT(!my_root,NULL); __TBB_ASSERT(!my_count,NULL); if( !other.my_root ) return; @@ -135,7 +168,7 @@ namespace interface6 { for( size_t j = root->start(tbb::tbb_hash()(s1.key)); ; j=(j+1)&mask ) { slot& s2 = root->at(j); if( s2.empty() ) { - s2.ptr = add_element(*this, s1.ptr); + s2.ptr = add_element(static_cast&>(*this), s1.ptr); s2.key = s1.key; break; } @@ -169,7 +202,7 @@ namespace interface6 { template void* ets_base::table_lookup( bool& exists ) { - const key_type k = tbb::this_tbb_thread::get_id(); + const key_type k = ets_key_selector::current_key(); __TBB_ASSERT(k != key_type(),NULL); void* found; @@ -242,7 +275,7 @@ namespace interface6 { //! Specialization that exploits native TLS template <> - class ets_base: protected ets_base { + class ets_base: public ets_base { typedef ets_base super; #if _WIN32||_WIN64 #if __TBB_WIN8UI_SUPPORT @@ -799,7 +832,7 @@ namespace interface6 { return lref.value_committed(); } - static void* create_local_by_copy( internal::ets_base& base, void* p ) { + static void* create_local_by_copy( internal::ets_base& base, void* p ) { enumerable_thread_specific& ets = static_cast(base); padded_element& lref = *ets.my_locals.grow_by(1); new(lref.value()) T(*static_cast(p)); @@ -807,7 +840,7 @@ namespace interface6 { } #if __TBB_ETS_USE_CPP11 - static void* create_local_by_move( internal::ets_base& base, void* p ) { + static void* create_local_by_move( internal::ets_base& base, void* p ) { enumerable_thread_specific& ets = static_cast(base); padded_element& lref = *ets.my_locals.grow_by(1); new(lref.value()) T(std::move(*static_cast(p))); @@ -888,7 +921,7 @@ namespace interface6 { ~enumerable_thread_specific() { 
if(my_construct_callback) my_construct_callback->destroy(); // Deallocate the hash table before overridden free_array() becomes inaccessible - this->internal::ets_base::table_clear(); + this->internal::ets_base::table_clear(); } //! returns reference to local, discarding exists @@ -1134,4 +1167,7 @@ using interface6::flatten2d; } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_enumerable_thread_specific_H_include_area + #endif diff --git a/inst/include/tbb/flow_graph.h b/inst/include/tbb/flow_graph.h index 7b03c0e2..1a6d22b9 100644 --- a/inst/include/tbb/flow_graph.h +++ b/inst/include/tbb/flow_graph.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_flow_graph_H #define __TBB_flow_graph_H +#define __TBB_flow_graph_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "tbb_stddef.h" #include "atomic.h" #include "spin_mutex.h" @@ -34,7 +33,17 @@ #include "internal/_aggregator_impl.h" #include "tbb_profiling.h" #include "task_arena.h" -#include "flow_graph_abstractions.h" + +#if TBB_USE_THREADING_TOOLS && TBB_PREVIEW_FLOW_GRAPH_TRACE && ( __linux__ || __APPLE__ ) + #if __INTEL_COMPILER + // Disabled warning "routine is both inline and noinline" + #pragma warning (push) + #pragma warning( disable: 2196 ) + #endif + #define __TBB_NOINLINE_SYM __attribute__((noinline)) +#else + #define __TBB_NOINLINE_SYM +#endif #if __TBB_PREVIEW_ASYNC_MSG #include // std::vector in internal::async_storage @@ -88,66 +97,62 @@ namespace flow { //! 
An enumeration the provides the two most common concurrency levels: unlimited and serial enum concurrency { unlimited = 0, serial = 1 }; -namespace internal { -static tbb::task * const SUCCESSFULLY_ENQUEUED = (task *)-1; -} - -namespace interface9 { - -using tbb::flow::internal::SUCCESSFULLY_ENQUEUED; - -namespace internal { - template class successor_cache; - template class broadcast_cache; - template class round_robin_cache; - template class predecessor_cache; - template class reservable_predecessor_cache; -} +namespace interface11 { -//A generic null type +//! A generic null type struct null_type {}; //! An empty class used for messages that mean "I'm done" class continue_msg {}; +//! Forward declaration section template< typename T > class sender; template< typename T > class receiver; class continue_receiver; -template< typename T > class limiter_node; // needed for resetting decrementer +template< typename T, typename U > class limiter_node; // needed for resetting decrementer + template< typename R, typename B > class run_and_put_task; -// flags to modify the behavior of the graph reset(). Can be combined. -enum reset_flags { - rf_reset_protocol = 0, - rf_reset_bodies = 1<<0, // delete the current node body, reset to a copy of the initial node body. - rf_clear_edges = 1<<1 // delete edges -}; +namespace internal { + +template class successor_cache; +template class broadcast_cache; +template class round_robin_cache; +template class predecessor_cache; +template class reservable_predecessor_cache; + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +namespace order { +struct following; +struct preceding; +} +template struct node_set; +#endif -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES -//* holder of edges both for caches and for those nodes which do not have predecessor caches. +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION +// Holder of edges both for caches and for those nodes which do not have predecessor caches. // C == receiver< ... > or sender< ... >, depending. 
-namespace internal { template class edge_container { public: typedef std::list > edge_list_type; - void add_edge( C &s) { - built_edges.push_back( &s ); + void add_edge(C &s) { + built_edges.push_back(&s); } - void delete_edge( C &s) { - for ( typename edge_list_type::iterator i = built_edges.begin(); i != built_edges.end(); ++i ) { - if ( *i == &s ) { + void delete_edge(C &s) { + for (typename edge_list_type::iterator i = built_edges.begin(); i != built_edges.end(); ++i) { + if (*i == &s) { (void)built_edges.erase(i); return; // only remove one predecessor per request } } } - void copy_edges( edge_list_type &v) { + void copy_edges(edge_list_type &v) { v = built_edges; } @@ -161,21 +166,133 @@ class edge_container { // methods remove the statement from all predecessors/successors liste in the edge // container. - template< typename S > void sender_extract( S &s ); - template< typename R > void receiver_extract( R &r ); + template< typename S > void sender_extract(S &s); + template< typename R > void receiver_extract(R &r); -private: +private: edge_list_type built_edges; }; // class edge_container -} // namespace internal -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + +} // namespace internal + +} // namespace interfaceX +} // namespace flow +} // namespace tbb + +//! The graph class +#include "internal/_flow_graph_impl.h" + +namespace tbb { +namespace flow { +namespace interface11 { + +// enqueue left task if necessary. Returns the non-enqueued task if there is one. +static inline tbb::task *combine_tasks(graph& g, tbb::task * left, tbb::task * right) { + // if no RHS task, don't change left. 
+ if (right == NULL) return left; + // right != NULL + if (left == NULL) return right; + if (left == SUCCESSFULLY_ENQUEUED) return right; + // left contains a task + if (right != SUCCESSFULLY_ENQUEUED) { + // both are valid tasks + internal::spawn_in_graph_arena(g, *left); + return right; + } + return left; +} #if __TBB_PREVIEW_ASYNC_MSG -#include "internal/_flow_graph_async_msg_impl.h" +template < typename T > class async_msg; namespace internal { +template < typename T > class async_storage; + +template< typename T, typename = void > +struct async_helpers { + typedef async_msg async_type; + typedef T filtered_type; + + static const bool is_async_type = false; + + static const void* to_void_ptr(const T& t) { + return static_cast(&t); + } + + static void* to_void_ptr(T& t) { + return static_cast(&t); + } + + static const T& from_void_ptr(const void* p) { + return *static_cast(p); + } + + static T& from_void_ptr(void* p) { + return *static_cast(p); + } + + static task* try_put_task_wrapper_impl(receiver* const this_recv, const void *p, bool is_async) { + if (is_async) { + // This (T) is NOT async and incoming 'A t' IS async + // Get data from async_msg + const async_msg& msg = async_helpers< async_msg >::from_void_ptr(p); + task* const new_task = msg.my_storage->subscribe(*this_recv, this_recv->graph_reference()); + // finalize() must be called after subscribe() because set() can be called in finalize() + // and 'this_recv' client must be subscribed by this moment + msg.finalize(); + return new_task; + } + else { + // Incoming 't' is NOT async + return this_recv->try_put_task(from_void_ptr(p)); + } + } +}; + +template< typename T > +struct async_helpers< T, typename std::enable_if< std::is_base_of, T>::value >::type > { + typedef T async_type; + typedef typename T::async_msg_data_type filtered_type; + + static const bool is_async_type = true; + + // Receiver-classes use const interfaces + static const void* to_void_ptr(const T& t) { + return 
static_cast(&static_cast&>(t)); + } + + static void* to_void_ptr(T& t) { + return static_cast(&static_cast&>(t)); + } + + // Sender-classes use non-const interfaces + static const T& from_void_ptr(const void* p) { + return *static_cast(static_cast*>(p)); + } + + static T& from_void_ptr(void* p) { + return *static_cast(static_cast*>(p)); + } + + // Used in receiver class + static task* try_put_task_wrapper_impl(receiver* const this_recv, const void *p, bool is_async) { + if (is_async) { + // Both are async + return this_recv->try_put_task(from_void_ptr(p)); + } + else { + // This (T) is async and incoming 'X t' is NOT async + // Create async_msg for X + const filtered_type& t = async_helpers::from_void_ptr(p); + const T msg(t); + return this_recv->try_put_task(msg); + } + } +}; + class untyped_receiver; class untyped_sender { @@ -203,7 +320,7 @@ class untyped_sender { //! Consumes the reserved item virtual bool try_consume( ) { return false; } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION //! interface to record edges for traversal & deletion typedef internal::edge_container built_successors_type; typedef built_successors_type::edge_list_type successor_list_type; @@ -212,7 +329,7 @@ class untyped_sender { virtual void internal_delete_built_successor( successor_type & ) = 0; virtual void copy_successors( successor_list_type &) = 0; virtual size_t successor_count() = 0; -#endif +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ protected: //! 
Request an item from the sender template< typename X > @@ -232,7 +349,6 @@ class untyped_sender { class untyped_receiver { template< typename, typename > friend class run_and_put_task; - template< typename > friend class limiter_node; template< typename, typename > friend class internal::broadcast_cache; template< typename, typename > friend class internal::round_robin_cache; @@ -253,7 +369,7 @@ class untyped_receiver { bool try_put(const X& t) { task *res = try_put_task(t); if (!res) return false; - if (res != SUCCESSFULLY_ENQUEUED) FLOW_SPAWN(*res); + if (res != SUCCESSFULLY_ENQUEUED) internal::spawn_in_graph_arena(graph_reference(), *res); return true; } @@ -267,7 +383,7 @@ class untyped_receiver { //! Remove a predecessor from the node virtual bool remove_predecessor( predecessor_type & ) { return false; } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef internal::edge_container built_predecessors_type; typedef built_predecessors_type::edge_list_type predecessor_list_type; virtual built_predecessors_type &built_predecessors() = 0; @@ -275,7 +391,7 @@ class untyped_receiver { virtual void internal_delete_built_predecessor( predecessor_type & ) = 0; virtual void copy_predecessors( predecessor_list_type & ) = 0; virtual size_t predecessor_count() = 0; -#endif +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ protected: template task *try_put_task(const X& t) { @@ -284,6 +400,8 @@ class untyped_receiver { virtual task* try_put_task_wrapper( const void* p, bool is_async ) = 0; + virtual graph& graph_reference() const = 0; + // NOTE: Following part of PROTECTED and PRIVATE sections is copy-paste from original receiver class //! put receiver back in initial state @@ -299,9 +417,9 @@ template< typename T > class sender : public internal::untyped_sender { public: //! 
The output type of this sender - typedef T output_type; + __TBB_DEPRECATED typedef T output_type; - typedef typename internal::async_helpers::filtered_type filtered_type; + __TBB_DEPRECATED typedef typename internal::async_helpers::filtered_type filtered_type; //! Request an item from the sender virtual bool try_get( T & ) { return false; } @@ -338,9 +456,9 @@ class receiver : public internal::untyped_receiver { template< typename, typename > friend struct internal::async_helpers; public: //! The input type of this receiver - typedef T input_type; + __TBB_DEPRECATED typedef T input_type; - typedef typename internal::async_helpers::filtered_type filtered_type; + __TBB_DEPRECATED typedef typename internal::async_helpers::filtered_type filtered_type; //! Put an item to the receiver bool try_put( const typename internal::async_helpers::filtered_type& t ) { @@ -368,20 +486,20 @@ template< typename T > class sender { public: //! The output type of this sender - typedef T output_type; + __TBB_DEPRECATED typedef T output_type; //! The successor type for this node - typedef receiver successor_type; + __TBB_DEPRECATED typedef receiver successor_type; virtual ~sender() {} // NOTE: Following part of PUBLIC section is partly copy-pasted in sender under #if __TBB_PREVIEW_ASYNC_MSG //! Add a new successor to this node - virtual bool register_successor( successor_type &r ) = 0; + __TBB_DEPRECATED virtual bool register_successor( successor_type &r ) = 0; //! Removes a successor from this node - virtual bool remove_successor( successor_type &r ) = 0; + __TBB_DEPRECATED virtual bool remove_successor( successor_type &r ) = 0; //! Request an item from the sender virtual bool try_get( T & ) { return false; } @@ -395,16 +513,16 @@ class sender { //! Consumes the reserved item virtual bool try_consume( ) { return false; } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION //! 
interface to record edges for traversal & deletion - typedef typename internal::edge_container built_successors_type; - typedef typename built_successors_type::edge_list_type successor_list_type; - virtual built_successors_type &built_successors() = 0; - virtual void internal_add_built_successor( successor_type & ) = 0; - virtual void internal_delete_built_successor( successor_type & ) = 0; - virtual void copy_successors( successor_list_type &) = 0; - virtual size_t successor_count() = 0; -#endif + __TBB_DEPRECATED typedef typename internal::edge_container built_successors_type; + __TBB_DEPRECATED typedef typename built_successors_type::edge_list_type successor_list_type; + __TBB_DEPRECATED virtual built_successors_type &built_successors() = 0; + __TBB_DEPRECATED virtual void internal_add_built_successor( successor_type & ) = 0; + __TBB_DEPRECATED virtual void internal_delete_built_successor( successor_type & ) = 0; + __TBB_DEPRECATED virtual void copy_successors( successor_list_type &) = 0; + __TBB_DEPRECATED virtual size_t successor_count() = 0; +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ }; // class sender //! Pure virtual template class that defines a receiver of messages of type T @@ -412,10 +530,10 @@ template< typename T > class receiver { public: //! The input type of this receiver - typedef T input_type; + __TBB_DEPRECATED typedef T input_type; //! The predecessor type for this node - typedef sender predecessor_type; + __TBB_DEPRECATED typedef sender predecessor_type; //! 
Destructor virtual ~receiver() {} @@ -424,7 +542,7 @@ class receiver { bool try_put( const T& t ) { task *res = try_put_task(t); if (!res) return false; - if (res != SUCCESSFULLY_ENQUEUED) FLOW_SPAWN(*res); + if (res != SUCCESSFULLY_ENQUEUED) internal::spawn_in_graph_arena(graph_reference(), *res); return true; } @@ -434,28 +552,28 @@ class receiver { template< typename X, typename Y > friend class internal::broadcast_cache; template< typename X, typename Y > friend class internal::round_robin_cache; virtual task *try_put_task(const T& t) = 0; + virtual graph& graph_reference() const = 0; public: // NOTE: Following part of PUBLIC and PROTECTED sections is copy-pasted in receiver under #if __TBB_PREVIEW_ASYNC_MSG //! Add a predecessor to the node - virtual bool register_predecessor( predecessor_type & ) { return false; } + __TBB_DEPRECATED virtual bool register_predecessor( predecessor_type & ) { return false; } //! Remove a predecessor from the node - virtual bool remove_predecessor( predecessor_type & ) { return false; } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef typename internal::edge_container built_predecessors_type; - typedef typename built_predecessors_type::edge_list_type predecessor_list_type; - virtual built_predecessors_type &built_predecessors() = 0; - virtual void internal_add_built_predecessor( predecessor_type & ) = 0; - virtual void internal_delete_built_predecessor( predecessor_type & ) = 0; - virtual void copy_predecessors( predecessor_list_type & ) = 0; - virtual size_t predecessor_count() = 0; -#endif + __TBB_DEPRECATED virtual bool remove_predecessor( predecessor_type & ) { return false; } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + __TBB_DEPRECATED typedef typename internal::edge_container built_predecessors_type; + __TBB_DEPRECATED typedef typename built_predecessors_type::edge_list_type predecessor_list_type; + __TBB_DEPRECATED virtual built_predecessors_type &built_predecessors() = 0; + __TBB_DEPRECATED virtual void 
internal_add_built_predecessor( predecessor_type & ) = 0; + __TBB_DEPRECATED virtual void internal_delete_built_predecessor( predecessor_type & ) = 0; + __TBB_DEPRECATED virtual void copy_predecessors( predecessor_list_type & ) = 0; + __TBB_DEPRECATED virtual size_t predecessor_count() = 0; +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ protected: //! put receiver back in initial state - template friend class limiter_node; virtual void reset_receiver(reset_flags f = rf_reset_protocol) = 0; template friend class internal::successor_cache; @@ -468,47 +586,34 @@ class receiver { #endif // __TBB_PREVIEW_ASYNC_MSG -// enqueue left task if necessary. Returns the non-enqueued task if there is one. -static inline tbb::task *combine_tasks( tbb::task * left, tbb::task * right) { - // if no RHS task, don't change left. - if(right == NULL) return left; - // right != NULL - if(left == NULL) return right; - if(left == SUCCESSFULLY_ENQUEUED) return right; - // left contains a task - if(right != SUCCESSFULLY_ENQUEUED) { - // both are valid tasks - FLOW_SPAWN(*left); - return right; - } - return left; -} - //! Base class for receivers of completion messages /** These receivers automatically reset, but cannot be explicitly waited on */ class continue_receiver : public receiver< continue_msg > { public: //! The input type - typedef continue_msg input_type; + __TBB_DEPRECATED typedef continue_msg input_type; //! The predecessor type for this node - typedef receiver::predecessor_type predecessor_type; + __TBB_DEPRECATED typedef receiver::predecessor_type predecessor_type; //! Constructor - explicit continue_receiver( int number_of_predecessors = 0 ) { + __TBB_DEPRECATED explicit continue_receiver( + __TBB_FLOW_GRAPH_PRIORITY_ARG1(int number_of_predecessors, node_priority_t priority)) { my_predecessor_count = my_initial_predecessor_count = number_of_predecessors; my_current_count = 0; + __TBB_FLOW_GRAPH_PRIORITY_EXPR( my_priority = priority; ) } //! 
Copy constructor - continue_receiver( const continue_receiver& src ) : receiver() { + __TBB_DEPRECATED continue_receiver( const continue_receiver& src ) : receiver() { my_predecessor_count = my_initial_predecessor_count = src.my_initial_predecessor_count; my_current_count = 0; + __TBB_FLOW_GRAPH_PRIORITY_EXPR( my_priority = src.my_priority; ) } //! Increments the trigger threshold - bool register_predecessor( predecessor_type & ) __TBB_override { + __TBB_DEPRECATED bool register_predecessor( predecessor_type & ) __TBB_override { spin_mutex::scoped_lock l(my_mutex); ++my_predecessor_count; return true; @@ -518,38 +623,38 @@ class continue_receiver : public receiver< continue_msg > { /** Does not check to see if the removal of the predecessor now makes the current count exceed the new threshold. So removing a predecessor while the graph is active can cause unexpected results. */ - bool remove_predecessor( predecessor_type & ) __TBB_override { + __TBB_DEPRECATED bool remove_predecessor( predecessor_type & ) __TBB_override { spin_mutex::scoped_lock l(my_mutex); --my_predecessor_count; return true; } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef internal::edge_container built_predecessors_type; - typedef built_predecessors_type::edge_list_type predecessor_list_type; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + __TBB_DEPRECATED typedef internal::edge_container built_predecessors_type; + __TBB_DEPRECATED typedef built_predecessors_type::edge_list_type predecessor_list_type; built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } - void internal_add_built_predecessor( predecessor_type &s) __TBB_override { + __TBB_DEPRECATED void internal_add_built_predecessor( predecessor_type &s) __TBB_override { spin_mutex::scoped_lock l(my_mutex); my_built_predecessors.add_edge( s ); } - void internal_delete_built_predecessor( predecessor_type &s) __TBB_override { + __TBB_DEPRECATED void internal_delete_built_predecessor( predecessor_type &s) 
__TBB_override { spin_mutex::scoped_lock l(my_mutex); my_built_predecessors.delete_edge(s); } - void copy_predecessors( predecessor_list_type &v) __TBB_override { + __TBB_DEPRECATED void copy_predecessors( predecessor_list_type &v) __TBB_override { spin_mutex::scoped_lock l(my_mutex); my_built_predecessors.copy_edges(v); } - size_t predecessor_count() __TBB_override { + __TBB_DEPRECATED size_t predecessor_count() __TBB_override { spin_mutex::scoped_lock l(my_mutex); return my_built_predecessors.edge_count(); } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ protected: template< typename R, typename B > friend class run_and_put_task; @@ -568,7 +673,7 @@ class continue_receiver : public receiver< continue_msg > { return res? res : SUCCESSFULLY_ENQUEUED; } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION // continue_receiver must contain its own built_predecessors because it does // not have a node_cache. built_predecessors_type my_built_predecessors; @@ -577,14 +682,15 @@ class continue_receiver : public receiver< continue_msg > { int my_predecessor_count; int my_current_count; int my_initial_predecessor_count; + __TBB_FLOW_GRAPH_PRIORITY_EXPR( node_priority_t my_priority; ) // the friend declaration in the base class did not eliminate the "protected class" // error in gcc 4.1.2 - template friend class limiter_node; + template friend class tbb::flow::interface11::limiter_node; void reset_receiver( reset_flags f ) __TBB_override { my_current_count = 0; if (f & rf_clear_edges) { -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION my_built_predecessors.clear(); #endif my_predecessor_count = my_initial_predecessor_count; @@ -599,7 +705,8 @@ class continue_receiver : public receiver< continue_msg > { bool is_continue_receiver() __TBB_override { return true; } }; // class continue_receiver -} // interface9 + +} // interfaceX #if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING 
template @@ -608,9 +715,9 @@ class continue_receiver : public receiver< continue_msg > { } #endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ - using interface9::sender; - using interface9::receiver; - using interface9::continue_receiver; + using interface11::sender; + using interface11::receiver; + using interface11::continue_receiver; } // flow } // tbb @@ -619,317 +726,15 @@ class continue_receiver : public receiver< continue_msg > { namespace tbb { namespace flow { -namespace interface9 { +namespace interface11 { -#include "internal/_flow_graph_impl.h" +#include "internal/_flow_graph_body_impl.h" +#include "internal/_flow_graph_cache_impl.h" #include "internal/_flow_graph_types_impl.h" -using namespace internal::graph_policy_namespace; - -class graph; -class graph_node; - -template -class graph_iterator { - friend class graph; - friend class graph_node; -public: - typedef size_t size_type; - typedef GraphNodeType value_type; - typedef GraphNodeType* pointer; - typedef GraphNodeType& reference; - typedef const GraphNodeType& const_reference; - typedef std::forward_iterator_tag iterator_category; - - //! Default constructor - graph_iterator() : my_graph(NULL), current_node(NULL) {} - - //! Copy constructor - graph_iterator(const graph_iterator& other) : - my_graph(other.my_graph), current_node(other.current_node) - {} - - //! Assignment - graph_iterator& operator=(const graph_iterator& other) { - if (this != &other) { - my_graph = other.my_graph; - current_node = other.current_node; - } - return *this; - } - - //! Dereference - reference operator*() const; - - //! Dereference - pointer operator->() const; - - //! Equality - bool operator==(const graph_iterator& other) const { - return ((my_graph == other.my_graph) && (current_node == other.current_node)); - } - - //! Inequality - bool operator!=(const graph_iterator& other) const { return !(operator==(other)); } - - //! 
Pre-increment - graph_iterator& operator++() { - internal_forward(); - return *this; - } - - //! Post-increment - graph_iterator operator++(int) { - graph_iterator result = *this; - operator++(); - return result; - } - -private: - // the graph over which we are iterating - GraphContainerType *my_graph; - // pointer into my_graph's my_nodes list - pointer current_node; - - //! Private initializing constructor for begin() and end() iterators - graph_iterator(GraphContainerType *g, bool begin); - void internal_forward(); -}; // class graph_iterator - -//! The graph class -/** This class serves as a handle to the graph */ -class graph : tbb::internal::no_copy, public graph_proxy { - friend class graph_node; - - template< typename Body > - class run_task : public task { - public: - run_task( Body& body ) : my_body(body) {} - task *execute() __TBB_override { - my_body(); - return NULL; - } - private: - Body my_body; - }; - - template< typename Receiver, typename Body > - class run_and_put_task : public task { - public: - run_and_put_task( Receiver &r, Body& body ) : my_receiver(r), my_body(body) {} - task *execute() __TBB_override { - task *res = my_receiver.try_put_task( my_body() ); - if (res == SUCCESSFULLY_ENQUEUED) res = NULL; - return res; - } - private: - Receiver &my_receiver; - Body my_body; - }; - typedef std::list task_list_type; - - class wait_functor { - task* graph_root_task; - public: - wait_functor( task* t ) : graph_root_task(t) {} - void operator()() const { graph_root_task->wait_for_all(); } - }; - - void prepare_task_arena( bool reinit = false ) { - if (reinit) { - __TBB_ASSERT( my_task_arena, "task arena is NULL"); - my_task_arena->terminate(); - my_task_arena->initialize(tbb::task_arena::attach()); - } else { - __TBB_ASSERT(my_task_arena == NULL, "task arena is not NULL"); - my_task_arena = new tbb::task_arena(tbb::task_arena::attach()); - } - if (!my_task_arena->is_active()) // failed to attach - my_task_arena->initialize(); // create a new, 
default-initialized arena - __TBB_ASSERT(my_task_arena->is_active(), "task arena is not active"); - } - -public: - //! Constructs a graph with isolated task_group_context - graph() : my_nodes(NULL), my_nodes_last(NULL), my_task_arena(NULL) { - prepare_task_arena(); - own_context = true; - cancelled = false; - caught_exception = false; - my_context = new task_group_context(); - my_root_task = ( new ( task::allocate_root(*my_context) ) empty_task ); - my_root_task->set_ref_count(1); - tbb::internal::fgt_graph( this ); - my_is_active = true; - } - - //! Constructs a graph with use_this_context as context - explicit graph(task_group_context& use_this_context) : - my_context(&use_this_context), my_nodes(NULL), my_nodes_last(NULL), my_task_arena(NULL) { - prepare_task_arena(); - own_context = false; - my_root_task = ( new ( task::allocate_root(*my_context) ) empty_task ); - my_root_task->set_ref_count(1); - tbb::internal::fgt_graph( this ); - my_is_active = true; - } - - //! Destroys the graph. - /** Calls wait_for_all, then destroys the root task and context. */ - ~graph() { - wait_for_all(); - my_root_task->set_ref_count(0); - task::destroy( *my_root_task ); - if (own_context) delete my_context; - delete my_task_arena; - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - void set_name( const char *name ) { - tbb::internal::fgt_graph_desc( this, name ); - } -#endif - - //! Used to register that an external entity may still interact with the graph. - /** The graph will not return from wait_for_all until a matching number of decrement_wait_count calls - is made. */ - void increment_wait_count() { - if (my_root_task) - my_root_task->increment_ref_count(); - } - - //! Deregisters an external entity that may have interacted with the graph. - /** The graph will not return from wait_for_all until all the number of decrement_wait_count calls - matches the number of increment_wait_count calls. 
*/ - void decrement_wait_count() { - if (my_root_task) - my_root_task->decrement_ref_count(); - } - - void reserve_wait() __TBB_override { - increment_wait_count(); - } - - void release_wait() __TBB_override { - decrement_wait_count(); - } - - //! Spawns a task that runs a body and puts its output to a specific receiver - /** The task is spawned as a child of the graph. This is useful for running tasks - that need to block a wait_for_all() on the graph. For example a one-off source. */ - template< typename Receiver, typename Body > - void run( Receiver &r, Body body ) { - if(is_active()) { - FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *root_task() ) ) - run_and_put_task< Receiver, Body >( r, body )) ); - } - } - - //! Spawns a task that runs a function object - /** The task is spawned as a child of the graph. This is useful for running tasks - that need to block a wait_for_all() on the graph. For example a one-off source. */ - template< typename Body > - void run( Body body ) { - if(is_active()) { - FLOW_SPAWN( * new ( task::allocate_additional_child_of( *root_task() ) ) run_task< Body >( body ) ); - } - } - - //! Wait until graph is idle and decrement_wait_count calls equals increment_wait_count calls. - /** The waiting thread will go off and steal work while it is block in the wait_for_all. */ - void wait_for_all() { - cancelled = false; - caught_exception = false; - if (my_root_task) { -#if TBB_USE_EXCEPTIONS - try { -#endif - my_task_arena->execute(wait_functor(my_root_task)); - cancelled = my_context->is_group_execution_cancelled(); -#if TBB_USE_EXCEPTIONS - } - catch(...) { - my_root_task->set_ref_count(1); - my_context->reset(); - caught_exception = true; - cancelled = true; - throw; - } +#if __TBB_PREVIEW_ASYNC_MSG +#include "internal/_flow_graph_async_msg_impl.h" #endif - // TODO: the "if" condition below is just a work-around to support the concurrent wait - // mode. The cancellation and exception mechanisms are still broken in this mode. 
- // Consider using task group not to re-implement the same functionality. - if ( !(my_context->traits() & task_group_context::concurrent_wait) ) { - my_context->reset(); // consistent with behavior in catch() - my_root_task->set_ref_count(1); - } - } - } - - //! Returns the root task of the graph - task * root_task() { - return my_root_task; - } - - void set_active(bool a = true) { - my_is_active = a; - } - - bool is_active() { - return my_is_active; - } - - void add_task_to_reset_list(task *tp) { - my_reset_task_list.push_back(tp); - } - - // ITERATORS - template - friend class graph_iterator; - - // Graph iterator typedefs - typedef graph_iterator iterator; - typedef graph_iterator const_iterator; - - // Graph iterator constructors - //! start iterator - iterator begin() { return iterator(this, true); } - //! end iterator - iterator end() { return iterator(this, false); } - //! start const iterator - const_iterator begin() const { return const_iterator(this, true); } - //! end const iterator - const_iterator end() const { return const_iterator(this, false); } - //! start const iterator - const_iterator cbegin() const { return const_iterator(this, true); } - //! end const iterator - const_iterator cend() const { return const_iterator(this, false); } - - //! return status of graph execution - bool is_cancelled() { return cancelled; } - bool exception_thrown() { return caught_exception; } - - // thread-unsafe state reset. 
- void reset(reset_flags f = rf_reset_protocol); - -private: - task *my_root_task; - task_group_context *my_context; - bool own_context; - bool cancelled; - bool caught_exception; - bool my_is_active; - task_list_type my_reset_task_list; - - graph_node *my_nodes, *my_nodes_last; - - spin_mutex nodelist_mutex; - void register_node(graph_node *n); - void remove_node(graph_node *n); - - template < typename Input, typename Output, typename Policy, typename Allocator > - friend class async_node; - task_arena* my_task_arena; -}; // class graph +using namespace internal::graph_policy_namespace; template graph_iterator::graph_iterator(C *g, bool begin) : my_graph(g), current_node(NULL) @@ -954,36 +759,57 @@ void graph_iterator::internal_forward() { if (current_node) current_node = current_node->next; } -//! The base of all graph nodes. -class graph_node : tbb::internal::no_copy { - friend class graph; - template - friend class graph_iterator; -protected: - graph& my_graph; - graph_node *next, *prev; -public: - explicit graph_node(graph& g) : my_graph(g) { - my_graph.register_node(this); - } - virtual ~graph_node() { - my_graph.remove_node(this); - } +} // namespace interfaceX -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - virtual void set_name( const char *name ) = 0; -#endif +namespace interface10 { +//! 
Constructs a graph with isolated task_group_context +inline graph::graph() : my_nodes(NULL), my_nodes_last(NULL), my_task_arena(NULL) { + prepare_task_arena(); + own_context = true; + cancelled = false; + caught_exception = false; + my_context = new task_group_context(tbb::internal::FLOW_TASKS); + my_root_task = (new (task::allocate_root(*my_context)) empty_task); + my_root_task->set_ref_count(1); + tbb::internal::fgt_graph(this); + my_is_active = true; +} -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - virtual void extract( ) = 0; -#endif +inline graph::graph(task_group_context& use_this_context) : + my_context(&use_this_context), my_nodes(NULL), my_nodes_last(NULL), my_task_arena(NULL) { + prepare_task_arena(); + own_context = false; + cancelled = false; + caught_exception = false; + my_root_task = (new (task::allocate_root(*my_context)) empty_task); + my_root_task->set_ref_count(1); + tbb::internal::fgt_graph(this); + my_is_active = true; +} -protected: - // performs the reset on an individual node. 
- virtual void reset_node(reset_flags f=rf_reset_protocol) = 0; -}; // class graph_node +inline graph::~graph() { + wait_for_all(); + my_root_task->set_ref_count(0); + tbb::task::destroy(*my_root_task); + if (own_context) delete my_context; + delete my_task_arena; +} + +inline void graph::reserve_wait() { + if (my_root_task) { + my_root_task->increment_ref_count(); + tbb::internal::fgt_reserve_wait(this); + } +} + +inline void graph::release_wait() { + if (my_root_task) { + tbb::internal::fgt_release_wait(this); + my_root_task->decrement_ref_count(); + } +} -inline void graph::register_node(graph_node *n) { +inline void graph::register_node(tbb::flow::interface11::graph_node *n) { n->next = NULL; { spin_mutex::scoped_lock lock(nodelist_mutex); @@ -994,7 +820,7 @@ inline void graph::register_node(graph_node *n) { } } -inline void graph::remove_node(graph_node *n) { +inline void graph::remove_node(tbb::flow::interface11::graph_node *n) { { spin_mutex::scoped_lock lock(nodelist_mutex); __TBB_ASSERT(my_nodes && my_nodes_last, "graph::remove_node: Error: no registered nodes"); @@ -1006,30 +832,65 @@ inline void graph::remove_node(graph_node *n) { n->prev = n->next = NULL; } -inline void graph::reset( reset_flags f ) { +inline void graph::reset( tbb::flow::interface11::reset_flags f ) { // reset context - set_active(false); + tbb::flow::interface11::internal::deactivate_graph(*this); + if(my_context) my_context->reset(); cancelled = false; caught_exception = false; // reset all the nodes comprising the graph for(iterator ii = begin(); ii != end(); ++ii) { - graph_node *my_p = &(*ii); + tbb::flow::interface11::graph_node *my_p = &(*ii); my_p->reset_node(f); } // Reattach the arena. Might be useful to run the graph in a particular task_arena // while not limiting graph lifetime to a single task_arena::execute() call. 
prepare_task_arena( /*reinit=*/true ); - set_active(true); + tbb::flow::interface11::internal::activate_graph(*this); // now spawn the tasks necessary to start the graph for(task_list_type::iterator rti = my_reset_task_list.begin(); rti != my_reset_task_list.end(); ++rti) { - FLOW_SPAWN(*(*rti)); + tbb::flow::interface11::internal::spawn_in_graph_arena(*this, *(*rti)); } my_reset_task_list.clear(); } +inline graph::iterator graph::begin() { return iterator(this, true); } + +inline graph::iterator graph::end() { return iterator(this, false); } + +inline graph::const_iterator graph::begin() const { return const_iterator(this, true); } + +inline graph::const_iterator graph::end() const { return const_iterator(this, false); } + +inline graph::const_iterator graph::cbegin() const { return const_iterator(this, true); } + +inline graph::const_iterator graph::cend() const { return const_iterator(this, false); } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE +inline void graph::set_name(const char *name) { + tbb::internal::fgt_graph_desc(this, name); +} +#endif + +} // namespace interface10 + +namespace interface11 { + +inline graph_node::graph_node(graph& g) : my_graph(g) { + my_graph.register_node(this); +} + +inline graph_node::~graph_node() { + my_graph.remove_node(this); +} + #include "internal/_flow_graph_node_impl.h" +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +using internal::node_set; +#endif + //! An executable node that acts as a source, i.e. it has no predecessors template < typename Output > class source_node : public graph_node, public sender< Output > { @@ -1043,33 +904,41 @@ class source_node : public graph_node, public sender< Output > { //Source node has no input type typedef null_type input_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef typename sender::built_successors_type built_successors_type; typedef typename sender::successor_list_type successor_list_type; #endif //! 
Constructor for a node with a successor template< typename Body > - source_node( graph &g, Body body, bool is_active = true ) + __TBB_NOINLINE_SYM source_node( graph &g, Body body, bool is_active = true ) : graph_node(g), my_active(is_active), init_my_active(is_active), my_body( new internal::source_body_leaf< output_type, Body>(body) ), my_init_body( new internal::source_body_leaf< output_type, Body>(body) ), my_reserved(false), my_has_cached_item(false) { my_successors.set_owner(this); - tbb::internal::fgt_node_with_body( tbb::internal::FLOW_SOURCE_NODE, &this->my_graph, + tbb::internal::fgt_node_with_body( CODEPTR(), tbb::internal::FLOW_SOURCE_NODE, &this->my_graph, static_cast *>(this), this->my_body ); } +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + source_node( const node_set& successors, Body body, bool is_active = true ) + : source_node(successors.graph_reference(), body, is_active) { + make_edges(*this, successors); + } +#endif + //! Copy constructor - source_node( const source_node& src ) : + __TBB_NOINLINE_SYM source_node( const source_node& src ) : graph_node(src.my_graph), sender(), my_active(src.init_my_active), init_my_active(src.init_my_active), my_body( src.my_init_body->clone() ), my_init_body(src.my_init_body->clone() ), my_reserved(false), my_has_cached_item(false) { my_successors.set_owner(this); - tbb::internal::fgt_node_with_body( tbb::internal::FLOW_SOURCE_NODE, &this->my_graph, + tbb::internal::fgt_node_with_body(CODEPTR(), tbb::internal::FLOW_SOURCE_NODE, &this->my_graph, static_cast *>(this), this->my_body ); } @@ -1098,7 +967,7 @@ class source_node : public graph_node, public sender< Output > { return true; } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION built_successors_type &built_successors() __TBB_override { return my_successors.built_successors(); } @@ -1121,7 +990,7 @@ class source_node : public graph_node, public sender< Output > { spin_mutex::scoped_lock l(my_mutex); 
my_successors.copy_successors(v); } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ //! Request an item from the node bool try_get( output_type &v ) __TBB_override { @@ -1183,7 +1052,7 @@ class source_node : public graph_node, public sender< Output > { void activate() { spin_mutex::scoped_lock lock(my_mutex); my_active = true; - if ( !my_successors.empty() ) + if (!my_successors.empty()) spawn_put(); } @@ -1193,7 +1062,7 @@ class source_node : public graph_node, public sender< Output > { return dynamic_cast< internal::source_body_leaf & >(body_ref).get_body(); } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION void extract( ) __TBB_override { my_successors.built_successors().sender_extract(*this); // removes "my_owner" == this from each successor my_active = init_my_active; @@ -1218,7 +1087,7 @@ class source_node : public graph_node, public sender< Output > { my_body = tmp; } if(my_active) - this->my_graph.add_task_to_reset_list(create_put_task()); + internal::add_task_to_graph_reset_list(this->my_graph, create_put_task()); } private: @@ -1266,8 +1135,8 @@ class source_node : public graph_node, public sender< Output > { //! Spawns a task that applies the body void spawn_put( ) { - if(this->my_graph.is_active()) { - FLOW_SPAWN( *create_put_task()); + if(internal::is_graph_active(this->my_graph)) { + internal::spawn_in_graph_arena(this->my_graph, *create_put_task()); } } @@ -1287,51 +1156,67 @@ class source_node : public graph_node, public sender< Output > { } }; // class source_node -template -struct allocate_buffer { - static const bool value = false; -}; - -template<> -struct allocate_buffer { - static const bool value = true; -}; - //! 
Implements a function node that supports Input -> Output template < typename Input, typename Output = continue_msg, typename Policy = queueing, typename Allocator=cache_aligned_allocator > -class function_node : public graph_node, public internal::function_input, public internal::function_output { +class function_node : public graph_node, public internal::function_input, public internal::function_output { public: typedef Input input_type; typedef Output output_type; - typedef internal::function_input fInput_type; + typedef internal::function_input input_impl_type; typedef internal::function_input_queue input_queue_type; typedef internal::function_output fOutput_type; - typedef typename fInput_type::predecessor_type predecessor_type; + typedef typename input_impl_type::predecessor_type predecessor_type; typedef typename fOutput_type::successor_type successor_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef typename fInput_type::predecessor_list_type predecessor_list_type; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename input_impl_type::predecessor_list_type predecessor_list_type; typedef typename fOutput_type::successor_list_type successor_list_type; #endif - using fInput_type::my_predecessors; + using input_impl_type::my_predecessors; //! Constructor // input_queue_type is allocated here, but destroyed in the function_input_base. // TODO: pass the graph_buffer_policy to the function_input_base so it can all // be done in one place. This would be an interface-breaking change. template< typename Body > - function_node( graph &g, size_t concurrency, Body body ) : - graph_node(g), fInput_type(g, concurrency, body, allocate_buffer::value ? 
- new input_queue_type( ) : NULL ) { - tbb::internal::fgt_node_with_body( tbb::internal::FLOW_FUNCTION_NODE, &this->my_graph, + __TBB_NOINLINE_SYM function_node( graph &g, size_t concurrency, +#if __TBB_CPP11_PRESENT + Body body, __TBB_FLOW_GRAPH_PRIORITY_ARG1( Policy = Policy(), node_priority_t priority = tbb::flow::internal::no_priority )) +#else + __TBB_FLOW_GRAPH_PRIORITY_ARG1( Body body, node_priority_t priority = tbb::flow::internal::no_priority )) +#endif + : graph_node(g), input_impl_type(g, concurrency, __TBB_FLOW_GRAPH_PRIORITY_ARG1(body, priority)), + fOutput_type(g) { + tbb::internal::fgt_node_with_body( CODEPTR(), tbb::internal::FLOW_FUNCTION_NODE, &this->my_graph, static_cast *>(this), static_cast *>(this), this->my_body ); } +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES && __TBB_CPP11_PRESENT + template + function_node( graph& g, size_t concurrency, Body body, node_priority_t priority ) + : function_node(g, concurrency, body, Policy(), priority) {} +#endif // __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES && __TBB_CPP11_PRESENT + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + function_node( const node_set& nodes, size_t concurrency, Body body, + __TBB_FLOW_GRAPH_PRIORITY_ARG1( Policy p = Policy(), node_priority_t priority = tbb::flow::internal::no_priority )) + : function_node(nodes.graph_reference(), concurrency, body, __TBB_FLOW_GRAPH_PRIORITY_ARG1(p, priority)) { + make_edges_in_order(nodes, *this); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + template + function_node( const node_set& nodes, size_t concurrency, Body body, node_priority_t priority ) + : function_node(nodes, concurrency, body, Policy(), priority) {} +#endif // __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + //! Copy constructor - function_node( const function_node& src ) : + __TBB_NOINLINE_SYM function_node( const function_node& src ) : graph_node(src.my_graph), - fInput_type(src, allocate_buffer::value ? 
new input_queue_type : NULL), - fOutput_type() { - tbb::internal::fgt_node_with_body( tbb::internal::FLOW_FUNCTION_NODE, &this->my_graph, + input_impl_type(src), + fOutput_type(src.my_graph) { + tbb::internal::fgt_node_with_body( CODEPTR(), tbb::internal::FLOW_FUNCTION_NODE, &this->my_graph, static_cast *>(this), static_cast *>(this), this->my_body ); } @@ -1341,7 +1226,7 @@ class function_node : public graph_node, public internal::function_input friend class run_and_put_task; template friend class internal::broadcast_cache; template friend class internal::round_robin_cache; - using fInput_type::try_put_task; + using input_impl_type::try_put_task; internal::broadcast_cache &successors () __TBB_override { return fOutput_type::my_successors; } void reset_node(reset_flags f) __TBB_override { - fInput_type::reset_function_input(f); + input_impl_type::reset_function_input(f); // TODO: use clear() instead. if(f & rf_clear_edges) { successors().clear(); @@ -1382,6 +1267,7 @@ class multifunction_node : internal::multifunction_output, // wrap this around each element Output // the tuple providing the types >::type, + Policy, Allocator > { protected: @@ -1390,23 +1276,52 @@ class multifunction_node : typedef Input input_type; typedef null_type output_type; typedef typename internal::wrap_tuple_elements::type output_ports_type; - typedef internal::multifunction_input fInput_type; + typedef internal::multifunction_input input_impl_type; typedef internal::function_input_queue input_queue_type; private: - typedef typename internal::multifunction_input base_type; - using fInput_type::my_predecessors; + typedef typename internal::multifunction_input base_type; + using input_impl_type::my_predecessors; public: template - multifunction_node( graph &g, size_t concurrency, Body body ) : - graph_node(g), base_type(g,concurrency, body, allocate_buffer::value ? 
new input_queue_type : NULL) { - tbb::internal::fgt_multioutput_node_with_body( tbb::internal::FLOW_MULTIFUNCTION_NODE, - &this->my_graph, static_cast *>(this), - this->output_ports(), this->my_body ); - } - - multifunction_node( const multifunction_node &other) : - graph_node(other.my_graph), base_type(other, allocate_buffer::value ? new input_queue_type : NULL) { - tbb::internal::fgt_multioutput_node_with_body( tbb::internal::FLOW_MULTIFUNCTION_NODE, + __TBB_NOINLINE_SYM multifunction_node( + graph &g, size_t concurrency, +#if __TBB_CPP11_PRESENT + Body body, __TBB_FLOW_GRAPH_PRIORITY_ARG1( Policy = Policy(), node_priority_t priority = tbb::flow::internal::no_priority ) +#else + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Body body, node_priority_t priority = tbb::flow::internal::no_priority) +#endif + ) : graph_node(g), base_type(g, concurrency, __TBB_FLOW_GRAPH_PRIORITY_ARG1(body, priority)) { + tbb::internal::fgt_multioutput_node_with_body( + CODEPTR(), tbb::internal::FLOW_MULTIFUNCTION_NODE, + &this->my_graph, static_cast *>(this), + this->output_ports(), this->my_body + ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES && __TBB_CPP11_PRESENT + template + __TBB_NOINLINE_SYM multifunction_node(graph& g, size_t concurrency, Body body, node_priority_t priority) + : multifunction_node(g, concurrency, body, Policy(), priority) {} +#endif // TBB_PREVIEW_FLOW_GRAPH_PRIORITIES && __TBB_CPP11_PRESENT + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_NOINLINE_SYM multifunction_node(const node_set& nodes, size_t concurrency, Body body, + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Policy p = Policy(), node_priority_t priority = tbb::flow::internal::no_priority)) + : multifunction_node(nodes.graph_reference(), concurrency, body, __TBB_FLOW_GRAPH_PRIORITY_ARG1(p, priority)) { + make_edges_in_order(nodes, *this); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + template + __TBB_NOINLINE_SYM multifunction_node(const node_set& nodes, size_t concurrency, Body body, node_priority_t priority) 
+ : multifunction_node(nodes, concurrency, body, Policy(), priority) {} +#endif // __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + + __TBB_NOINLINE_SYM multifunction_node( const multifunction_node &other) : + graph_node(other.my_graph), base_type(other) { + tbb::internal::fgt_multioutput_node_with_body( CODEPTR(), tbb::internal::FLOW_MULTIFUNCTION_NODE, &this->my_graph, static_cast *>(this), this->output_ports(), this->my_body ); } @@ -1417,7 +1332,7 @@ class multifunction_node : } #endif -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION void extract( ) __TBB_override { my_predecessors.built_predecessors().receiver_extract(*this); base_type::extract(); @@ -1437,7 +1352,7 @@ class split_node : public graph_node, public receiver { public: typedef TupleType input_type; typedef Allocator allocator_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef typename base_type::predecessor_type predecessor_type; typedef typename base_type::predecessor_list_type predecessor_list_type; typedef internal::predecessor_cache predecessor_cache_type; @@ -1450,14 +1365,26 @@ class split_node : public graph_node, public receiver { TupleType // the tuple providing the types >::type output_ports_type; - explicit split_node(graph &g) : graph_node(g) + __TBB_NOINLINE_SYM explicit split_node(graph &g) + : graph_node(g), + my_output_ports(internal::init_output_ports::call(g, my_output_ports)) { - tbb::internal::fgt_multioutput_node(tbb::internal::FLOW_SPLIT_NODE, &this->my_graph, + tbb::internal::fgt_multioutput_node(CODEPTR(), tbb::internal::FLOW_SPLIT_NODE, &this->my_graph, static_cast *>(this), this->output_ports()); } - split_node( const split_node & other) : graph_node(other.my_graph), base_type(other) + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_NOINLINE_SYM split_node(const node_set& nodes) : split_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } 
+#endif + + __TBB_NOINLINE_SYM split_node(const split_node& other) + : graph_node(other.my_graph), base_type(other), + my_output_ports(internal::init_output_ports::call(other.my_graph, my_output_ports)) { - tbb::internal::fgt_multioutput_node(tbb::internal::FLOW_SPLIT_NODE, &this->my_graph, + tbb::internal::fgt_multioutput_node(CODEPTR(), tbb::internal::FLOW_SPLIT_NODE, &this->my_graph, static_cast *>(this), this->output_ports()); } @@ -1471,11 +1398,9 @@ class split_node : public graph_node, public receiver { protected: task *try_put_task(const TupleType& t) __TBB_override { - // Sending split messages in parallel is not justified, as overheads would prevail - internal::emit_element::emit_this(t, output_ports()); - - //we do not have successors here.So we just tell the task is successful. - return SUCCESSFULLY_ENQUEUED; + // Sending split messages in parallel is not justified, as overheads would prevail. + // Also, we do not have successors here. So we just tell the task returned here is successful. + return internal::emit_element::emit_this(this->my_graph, t, output_ports()); } void reset_node(reset_flags f) __TBB_override { if (f & rf_clear_edges) @@ -1484,8 +1409,10 @@ class split_node : public graph_node, public receiver { __TBB_ASSERT(!(f & rf_clear_edges) || internal::clear_element::this_empty(my_output_ports), "split_node reset failed"); } void reset_receiver(reset_flags /*f*/) __TBB_override {} - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES + graph& graph_reference() const __TBB_override { + return my_graph; + } +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION private: //! split_node doesn't use this "predecessors" functionality; so, we have "dummies" here; void extract() __TBB_override {} @@ -1503,47 +1430,105 @@ class split_node : public graph_node, public receiver { //! dummy member built_predecessors_type my_predessors; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ private: output_ports_type my_output_ports; }; //! 
Implements an executable node that supports continue_msg -> Output -template -class continue_node : public graph_node, public internal::continue_input, public internal::function_output { +template > +class continue_node : public graph_node, public internal::continue_input, + public internal::function_output { public: typedef continue_msg input_type; typedef Output output_type; - typedef internal::continue_input fInput_type; + typedef internal::continue_input input_impl_type; typedef internal::function_output fOutput_type; - typedef typename fInput_type::predecessor_type predecessor_type; + typedef typename input_impl_type::predecessor_type predecessor_type; typedef typename fOutput_type::successor_type successor_type; //! Constructor for executable node with continue_msg -> Output template - continue_node( graph &g, Body body ) : - graph_node(g), internal::continue_input( g, body ) { - tbb::internal::fgt_node_with_body( tbb::internal::FLOW_CONTINUE_NODE, &this->my_graph, + __TBB_NOINLINE_SYM continue_node( + graph &g, +#if __TBB_CPP11_PRESENT + Body body, __TBB_FLOW_GRAPH_PRIORITY_ARG1( Policy = Policy(), node_priority_t priority = tbb::flow::internal::no_priority ) +#else + __TBB_FLOW_GRAPH_PRIORITY_ARG1( Body body, node_priority_t priority = tbb::flow::internal::no_priority ) +#endif + ) : graph_node(g), input_impl_type( g, __TBB_FLOW_GRAPH_PRIORITY_ARG1(body, priority) ), + fOutput_type(g) { + tbb::internal::fgt_node_with_body( CODEPTR(), tbb::internal::FLOW_CONTINUE_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(this), this->my_body ); } +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES && __TBB_CPP11_PRESENT + template + continue_node( graph& g, Body body, node_priority_t priority ) + : continue_node(g, body, Policy(), priority) {} +#endif + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + continue_node( const node_set& nodes, Body body, + __TBB_FLOW_GRAPH_PRIORITY_ARG1( Policy p = Policy(), node_priority_t priority = 
tbb::flow::internal::no_priority)) + : continue_node(nodes.graph_reference(), body, __TBB_FLOW_GRAPH_PRIORITY_ARG1(p, priority) ) { + make_edges_in_order(nodes, *this); + } +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + template + continue_node( const node_set& nodes, Body body, node_priority_t priority) + : continue_node(nodes, body, Policy(), priority) {} +#endif // __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET //! Constructor for executable node with continue_msg -> Output template - continue_node( graph &g, int number_of_predecessors, Body body ) : - graph_node(g), internal::continue_input( g, number_of_predecessors, body ) { - tbb::internal::fgt_node_with_body( tbb::internal::FLOW_CONTINUE_NODE, &this->my_graph, + __TBB_NOINLINE_SYM continue_node( + graph &g, int number_of_predecessors, +#if __TBB_CPP11_PRESENT + Body body, __TBB_FLOW_GRAPH_PRIORITY_ARG1( Policy = Policy(), node_priority_t priority = tbb::flow::internal::no_priority ) +#else + __TBB_FLOW_GRAPH_PRIORITY_ARG1( Body body, node_priority_t priority = tbb::flow::internal::no_priority ) +#endif + ) : graph_node(g) + , input_impl_type(g, number_of_predecessors, __TBB_FLOW_GRAPH_PRIORITY_ARG1(body, priority)), + fOutput_type(g) { + tbb::internal::fgt_node_with_body( CODEPTR(), tbb::internal::FLOW_CONTINUE_NODE, &this->my_graph, static_cast *>(this), static_cast *>(this), this->my_body ); } +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES && __TBB_CPP11_PRESENT + template + continue_node( graph& g, int number_of_predecessors, Body body, node_priority_t priority) + : continue_node(g, number_of_predecessors, body, Policy(), priority) {} +#endif + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + continue_node( const node_set& nodes, int number_of_predecessors, + Body body, __TBB_FLOW_GRAPH_PRIORITY_ARG1( Policy p = Policy(), node_priority_t priority = tbb::flow::internal::no_priority )) + : continue_node(nodes.graph_reference(), number_of_predecessors, body, 
__TBB_FLOW_GRAPH_PRIORITY_ARG1(p, priority)) { + make_edges_in_order(nodes, *this); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + template + continue_node( const node_set& nodes, int number_of_predecessors, + Body body, node_priority_t priority ) + : continue_node(nodes, number_of_predecessors, body, Policy(), priority) {} +#endif +#endif + //! Copy constructor - continue_node( const continue_node& src ) : - graph_node(src.my_graph), internal::continue_input(src), - internal::function_output() { - tbb::internal::fgt_node_with_body( tbb::internal::FLOW_CONTINUE_NODE, &this->my_graph, + __TBB_NOINLINE_SYM continue_node( const continue_node& src ) : + graph_node(src.my_graph), input_impl_type(src), + internal::function_output(src.my_graph) { + tbb::internal::fgt_node_with_body( CODEPTR(), tbb::internal::FLOW_CONTINUE_NODE, &this->my_graph, static_cast *>(this), static_cast *>(this), this->my_body ); } @@ -1554,9 +1539,9 @@ class continue_node : public graph_node, public internal::continue_input } #endif -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION void extract() __TBB_override { - fInput_type::my_built_predecessors.receiver_extract(*this); + input_impl_type::my_built_predecessors.receiver_extract(*this); successors().built_successors().sender_extract(*this); } #endif @@ -1565,220 +1550,16 @@ class continue_node : public graph_node, public internal::continue_input template< typename R, typename B > friend class run_and_put_task; template friend class internal::broadcast_cache; template friend class internal::round_robin_cache; - using fInput_type::try_put_task; + using input_impl_type::try_put_task; internal::broadcast_cache &successors () __TBB_override { return fOutput_type::my_successors; } void reset_node(reset_flags f) __TBB_override { - fInput_type::reset_receiver(f); + input_impl_type::reset_receiver(f); if(f & rf_clear_edges)successors().clear(); __TBB_ASSERT(!(f & rf_clear_edges) || successors().empty(), "continue_node not 
reset"); } }; // continue_node -template< typename T > -class overwrite_node : public graph_node, public receiver, public sender { -public: - typedef T input_type; - typedef T output_type; - typedef typename receiver::predecessor_type predecessor_type; - typedef typename sender::successor_type successor_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef typename receiver::built_predecessors_type built_predecessors_type; - typedef typename sender::built_successors_type built_successors_type; - typedef typename receiver::predecessor_list_type predecessor_list_type; - typedef typename sender::successor_list_type successor_list_type; -#endif - - explicit overwrite_node(graph &g) : graph_node(g), my_buffer_is_valid(false) { - my_successors.set_owner( this ); - tbb::internal::fgt_node( tbb::internal::FLOW_OVERWRITE_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(this) ); - } - - // Copy constructor; doesn't take anything from src; default won't work - overwrite_node( const overwrite_node& src ) : - graph_node(src.my_graph), receiver(), sender(), my_buffer_is_valid(false) - { - my_successors.set_owner( this ); - tbb::internal::fgt_node( tbb::internal::FLOW_OVERWRITE_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(this) ); - } - - ~overwrite_node() {} - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - void set_name( const char *name ) __TBB_override { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - - bool register_successor( successor_type &s ) __TBB_override { - spin_mutex::scoped_lock l( my_mutex ); - if (my_buffer_is_valid && this->my_graph.is_active()) { - // We have a valid value that must be forwarded immediately. 
- if ( s.try_put( my_buffer ) || !s.register_predecessor( *this ) ) { - // We add the successor: it accepted our put or it rejected it but won't let us become a predecessor - my_successors.register_successor( s ); - } else { - // We don't add the successor: it rejected our put and we became its predecessor instead - return false; - } - } else { - // No valid value yet, just add as successor - my_successors.register_successor( s ); - } - return true; - } - - bool remove_successor( successor_type &s ) __TBB_override { - spin_mutex::scoped_lock l( my_mutex ); - my_successors.remove_successor(s); - return true; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } - built_successors_type &built_successors() __TBB_override { return my_successors.built_successors(); } - - void internal_add_built_successor( successor_type &s) __TBB_override { - spin_mutex::scoped_lock l( my_mutex ); - my_successors.internal_add_built_successor(s); - } - - void internal_delete_built_successor( successor_type &s) __TBB_override { - spin_mutex::scoped_lock l( my_mutex ); - my_successors.internal_delete_built_successor(s); - } - - size_t successor_count() __TBB_override { - spin_mutex::scoped_lock l( my_mutex ); - return my_successors.successor_count(); - } - - void copy_successors(successor_list_type &v) __TBB_override { - spin_mutex::scoped_lock l( my_mutex ); - my_successors.copy_successors(v); - } - - void internal_add_built_predecessor( predecessor_type &p) __TBB_override { - spin_mutex::scoped_lock l( my_mutex ); - my_built_predecessors.add_edge(p); - } - - void internal_delete_built_predecessor( predecessor_type &p) __TBB_override { - spin_mutex::scoped_lock l( my_mutex ); - my_built_predecessors.delete_edge(p); - } - - size_t predecessor_count() __TBB_override { - spin_mutex::scoped_lock l( my_mutex ); - return my_built_predecessors.edge_count(); - } - - void copy_predecessors(predecessor_list_type 
&v) __TBB_override { - spin_mutex::scoped_lock l( my_mutex ); - my_built_predecessors.copy_edges(v); - } - - void extract() __TBB_override { - my_buffer_is_valid = false; - built_successors().sender_extract(*this); - built_predecessors().receiver_extract(*this); - } - -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - bool try_get( input_type &v ) __TBB_override { - spin_mutex::scoped_lock l( my_mutex ); - if ( my_buffer_is_valid ) { - v = my_buffer; - return true; - } - return false; - } - - bool is_valid() { - spin_mutex::scoped_lock l( my_mutex ); - return my_buffer_is_valid; - } - - void clear() { - spin_mutex::scoped_lock l( my_mutex ); - my_buffer_is_valid = false; - } - -protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - task * try_put_task( const input_type &v ) __TBB_override { - spin_mutex::scoped_lock l( my_mutex ); - return try_put_task_impl(v); - } - - task * try_put_task_impl(const input_type &v) { - my_buffer = v; - my_buffer_is_valid = true; - task * rtask = my_successors.try_put_task(v); - if (!rtask) rtask = SUCCESSFULLY_ENQUEUED; - return rtask; - } - - spin_mutex my_mutex; - internal::broadcast_cache< input_type, null_rw_mutex > my_successors; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - internal::edge_container my_built_predecessors; -#endif - input_type my_buffer; - bool my_buffer_is_valid; - void reset_receiver(reset_flags /*f*/) __TBB_override {} - - void reset_node( reset_flags f) __TBB_override { - my_buffer_is_valid = false; - if (f&rf_clear_edges) { - my_successors.clear(); - } - } -}; // overwrite_node - -template< typename T > -class write_once_node : public overwrite_node { -public: - typedef T input_type; - typedef T output_type; - typedef typename receiver::predecessor_type predecessor_type; - typedef typename sender::successor_type successor_type; - - //! 
Constructor - explicit write_once_node(graph& g) : overwrite_node(g) { - tbb::internal::fgt_node( tbb::internal::FLOW_WRITE_ONCE_NODE, &(this->my_graph), - static_cast *>(this), - static_cast *>(this) ); - } - - //! Copy constructor: call base class copy constructor - write_once_node( const write_once_node& src ) : overwrite_node(src) { - tbb::internal::fgt_node( tbb::internal::FLOW_WRITE_ONCE_NODE, &(this->my_graph), - static_cast *>(this), - static_cast *>(this) ); - } - -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - void set_name( const char *name ) __TBB_override { - tbb::internal::fgt_node_desc( this, name ); - } -#endif - -protected: - template< typename R, typename B > friend class run_and_put_task; - template friend class internal::broadcast_cache; - template friend class internal::round_robin_cache; - task *try_put_task( const T &v ) __TBB_override { - spin_mutex::scoped_lock l( this->my_mutex ); - return this->my_buffer_is_valid ? NULL : this->try_put_task_impl(v); - } -}; - //! Forwards messages of type T to all successors template class broadcast_node : public graph_node, public receiver, public sender { @@ -1787,30 +1568,37 @@ class broadcast_node : public graph_node, public receiver, public sender { typedef T output_type; typedef typename receiver::predecessor_type predecessor_type; typedef typename sender::successor_type successor_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef typename receiver::predecessor_list_type predecessor_list_type; typedef typename sender::successor_list_type successor_list_type; #endif private: internal::broadcast_cache my_successors; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION internal::edge_container my_built_predecessors; spin_mutex pred_mutex; // serialize accesses on edge_container #endif public: - explicit broadcast_node(graph& g) : graph_node(g) { + __TBB_NOINLINE_SYM explicit broadcast_node(graph& g) : graph_node(g) { my_successors.set_owner( this ); 
- tbb::internal::fgt_node( tbb::internal::FLOW_BROADCAST_NODE, &this->my_graph, + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_BROADCAST_NODE, &this->my_graph, static_cast *>(this), static_cast *>(this) ); } +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + broadcast_node(const node_set& nodes) : broadcast_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + // Copy constructor - broadcast_node( const broadcast_node& src ) : + __TBB_NOINLINE_SYM broadcast_node( const broadcast_node& src ) : graph_node(src.my_graph), receiver(), sender() { my_successors.set_owner( this ); - tbb::internal::fgt_node( tbb::internal::FLOW_BROADCAST_NODE, &this->my_graph, + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_BROADCAST_NODE, &this->my_graph, static_cast *>(this), static_cast *>(this) ); } @@ -1832,7 +1620,7 @@ class broadcast_node : public graph_node, public receiver, public sender { return true; } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef typename sender::built_successors_type built_successors_type; built_successors_type &built_successors() __TBB_override { return my_successors.built_successors(); } @@ -1855,7 +1643,7 @@ class broadcast_node : public graph_node, public receiver, public sender { typedef typename receiver::built_predecessors_type built_predecessors_type; - built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } + built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } void internal_add_built_predecessor( predecessor_type &p) __TBB_override { spin_mutex::scoped_lock l(pred_mutex); @@ -1881,7 +1669,7 @@ class broadcast_node : public graph_node, public receiver, public sender { my_built_predecessors.receiver_extract(*this); my_successors.built_successors().sender_extract(*this); } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ protected: 
template< typename R, typename B > friend class run_and_put_task; @@ -1894,12 +1682,16 @@ class broadcast_node : public graph_node, public receiver, public sender { return new_task; } + graph& graph_reference() const __TBB_override { + return my_graph; + } + void reset_receiver(reset_flags /*f*/) __TBB_override {} void reset_node(reset_flags f) __TBB_override { if (f&rf_clear_edges) { my_successors.clear(); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION my_built_predecessors.clear(); #endif } @@ -1916,7 +1708,7 @@ class buffer_node : public graph_node, public internal::reservable_item_buffer::predecessor_type predecessor_type; typedef typename sender::successor_type successor_type; typedef buffer_node class_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef typename receiver::predecessor_list_type predecessor_list_type; typedef typename sender::successor_list_type successor_list_type; #endif @@ -1924,14 +1716,14 @@ class buffer_node : public graph_node, public internal::reservable_item_buffer my_successors; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION internal::edge_container my_built_predecessors; #endif friend class internal::forward_task_bypass< buffer_node< T, A > >; enum op_type {reg_succ, rem_succ, req_item, res_item, rel_res, con_res, put_item, try_fwd_task -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION , add_blt_succ, del_blt_succ, add_blt_pred, del_blt_pred, blt_succ_cnt, blt_pred_cnt, @@ -1943,7 +1735,7 @@ class buffer_node : public graph_node, public internal::reservable_item_buffer { public: char type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION task * ltask; union { input_type *elem; @@ -1960,7 +1752,7 @@ class buffer_node : public graph_node, public internal::reservable_item_buffer(&e)) #else , elem(const_cast(&e)) , ltask(NULL) @@ -1996,7 +1788,7 @@ class buffer_node : public 
graph_node, public internal::reservable_item_bufferorder(); if (try_forwarding && !forwarder_busy) { - if(this->my_graph.is_active()) { + if(internal::is_graph_active(this->my_graph)) { forwarder_busy = true; task *new_task = new(task::allocate_additional_child_of(*(this->my_graph.root_task()))) internal:: forward_task_bypass < buffer_node >(*this); // tmp should point to the last item handled by the aggregator. This is the operation // the handling thread enqueued. So modifying that record will be okay. + // workaround for icc bug tbb::task *z = tmp->ltask; - tmp->ltask = combine_tasks(z, new_task); // in case the op generated a task + graph &g = this->my_graph; + tmp->ltask = combine_tasks(g, z, new_task); // in case the op generated a task } } } // handle_operations @@ -2033,7 +1827,7 @@ class buffer_node : public graph_node, public internal::reservable_item_buffermy_graph; + last_task = combine_tasks(g, last_task, xtask); } while (op_data.status ==internal::SUCCEEDED); return last_task; } @@ -2065,7 +1862,7 @@ class buffer_node : public graph_node, public internal::reservable_item_bufferstatus, internal::SUCCEEDED); } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef typename sender::built_successors_type built_successors_type; built_successors_type &built_successors() __TBB_override { return my_successors.built_successors(); } @@ -2082,7 +1879,7 @@ class buffer_node : public graph_node, public internal::reservable_item_buffer::built_predecessors_type built_predecessors_type; - built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } + built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } virtual void internal_add_built_pred(buffer_operation *op) { my_built_predecessors.add_edge(*(op->p)); @@ -2114,7 +1911,7 @@ class buffer_node : public graph_node, public internal::reservable_item_bufferstatus, internal::SUCCEEDED); } -#endif /* 
TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ private: void order() {} @@ -2126,7 +1923,9 @@ class buffer_node : public graph_node, public internal::reservable_item_bufferback()); if (new_task) { - last_task = combine_tasks(last_task, new_task); + // workaround for icc bug + graph& g = this->my_graph; + last_task = combine_tasks(g, last_task, new_task); this->destroy_back(); } } @@ -2198,21 +1997,28 @@ class buffer_node : public graph_node, public internal::reservable_item_buffer(), + __TBB_NOINLINE_SYM explicit buffer_node( graph &g ) : graph_node(g), internal::reservable_item_buffer(), forwarder_busy(false) { my_successors.set_owner(this); my_aggregator.initialize_handler(handler_type(this)); - tbb::internal::fgt_node( tbb::internal::FLOW_BUFFER_NODE, &this->my_graph, + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_BUFFER_NODE, &this->my_graph, static_cast *>(this), static_cast *>(this) ); } +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + buffer_node(const node_set& nodes) : buffer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + //! 
Copy constructor - buffer_node( const buffer_node& src ) : graph_node(src.my_graph), + __TBB_NOINLINE_SYM buffer_node( const buffer_node& src ) : graph_node(src.my_graph), internal::reservable_item_buffer(), receiver(), sender() { forwarder_busy = false; my_successors.set_owner(this); my_aggregator.initialize_handler(handler_type(this)); - tbb::internal::fgt_node( tbb::internal::FLOW_BUFFER_NODE, &this->my_graph, + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_BUFFER_NODE, &this->my_graph, static_cast *>(this), static_cast *>(this) ); } @@ -2236,7 +2042,7 @@ class buffer_node : public graph_node, public internal::reservable_item_buffer { void try_put_and_add_task(task*& last_task) { task *new_task = this->my_successors.try_put_task(this->front()); if (new_task) { - last_task = combine_tasks(last_task, new_task); + // workaround for icc bug + graph& graph_ref = this->graph_reference(); + last_task = combine_tasks(graph_ref, last_task, new_task); this->destroy_front(); } } @@ -2450,15 +2262,22 @@ class queue_node : public buffer_node { typedef typename sender::successor_type successor_type; //! Constructor - explicit queue_node( graph &g ) : base_type(g) { - tbb::internal::fgt_node( tbb::internal::FLOW_QUEUE_NODE, &(this->my_graph), + __TBB_NOINLINE_SYM explicit queue_node( graph &g ) : base_type(g) { + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_QUEUE_NODE, &(this->my_graph), static_cast *>(this), static_cast *>(this) ); } +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + queue_node( const node_set& nodes) : queue_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + //! 
Copy constructor - queue_node( const queue_node& src) : base_type(src) { - tbb::internal::fgt_node( tbb::internal::FLOW_QUEUE_NODE, &(this->my_graph), + __TBB_NOINLINE_SYM queue_node( const queue_node& src) : base_type(src) { + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_QUEUE_NODE, &(this->my_graph), static_cast *>(this), static_cast *>(this) ); } @@ -2489,17 +2308,25 @@ class sequencer_node : public queue_node { //! Constructor template< typename Sequencer > - sequencer_node( graph &g, const Sequencer& s ) : queue_node(g), + __TBB_NOINLINE_SYM sequencer_node( graph &g, const Sequencer& s ) : queue_node(g), my_sequencer(new internal::function_body_leaf< T, size_t, Sequencer>(s) ) { - tbb::internal::fgt_node( tbb::internal::FLOW_SEQUENCER_NODE, &(this->my_graph), + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_SEQUENCER_NODE, &(this->my_graph), static_cast *>(this), static_cast *>(this) ); } +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + sequencer_node( const node_set& nodes, const Sequencer& s) + : sequencer_node(nodes.graph_reference(), s) { + make_edges_in_order(nodes, *this); + } +#endif + //! Copy constructor - sequencer_node( const sequencer_node& src ) : queue_node(src), + __TBB_NOINLINE_SYM sequencer_node( const sequencer_node& src ) : queue_node(src), my_sequencer( src.my_sequencer->clone() ) { - tbb::internal::fgt_node( tbb::internal::FLOW_SEQUENCER_NODE, &(this->my_graph), + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_SEQUENCER_NODE, &(this->my_graph), static_cast *>(this), static_cast *>(this) ); } @@ -2553,15 +2380,24 @@ class priority_queue_node : public buffer_node { typedef typename sender::successor_type successor_type; //! 
Constructor - explicit priority_queue_node( graph &g ) : buffer_node(g), mark(0) { - tbb::internal::fgt_node( tbb::internal::FLOW_PRIORITY_QUEUE_NODE, &(this->my_graph), + __TBB_NOINLINE_SYM explicit priority_queue_node( graph &g, const Compare& comp = Compare() ) + : buffer_node(g), compare(comp), mark(0) { + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_PRIORITY_QUEUE_NODE, &(this->my_graph), static_cast *>(this), static_cast *>(this) ); } +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + priority_queue_node(const node_set& nodes, const Compare& comp = Compare()) + : priority_queue_node(nodes.graph_reference(), comp) { + make_edges_in_order(nodes, *this); + } +#endif + //! Copy constructor - priority_queue_node( const priority_queue_node &src ) : buffer_node(src), mark(0) { - tbb::internal::fgt_node( tbb::internal::FLOW_PRIORITY_QUEUE_NODE, &(this->my_graph), + __TBB_NOINLINE_SYM priority_queue_node( const priority_queue_node &src ) : buffer_node(src), mark(0) { + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_PRIORITY_QUEUE_NODE, &(this->my_graph), static_cast *>(this), static_cast *>(this) ); } @@ -2652,7 +2488,9 @@ class priority_queue_node : public buffer_node { void try_put_and_add_task(task*& last_task) { task * new_task = this->my_successors.try_put_task(this->prio()); if (new_task) { - last_task = combine_tasks(last_task, new_task); + // workaround for icc bug + graph& graph_ref = this->graph_reference(); + last_task = combine_tasks(graph_ref, last_task, new_task); prio_pop(); } } @@ -2750,18 +2588,22 @@ class priority_queue_node : public buffer_node { } }; // priority_queue_node +} // interfaceX + +namespace interface11 { + //! Forwards messages only if the threshold has not been reached /** This node forwards items until its threshold is reached. It contains no buffering. If the downstream node rejects, the message is dropped. 
*/ -template< typename T > +template< typename T, typename DecrementType=continue_msg > class limiter_node : public graph_node, public receiver< T >, public sender< T > { public: typedef T input_type; typedef T output_type; typedef typename receiver::predecessor_type predecessor_type; typedef typename sender::successor_type successor_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef typename receiver::built_predecessors_type built_predecessors_type; typedef typename sender::built_successors_type built_successors_type; typedef typename receiver::predecessor_list_type predecessor_list_type; @@ -2776,12 +2618,12 @@ class limiter_node : public graph_node, public receiver< T >, public sender< T > internal::reservable_predecessor_cache< T, spin_mutex > my_predecessors; spin_mutex my_mutex; internal::broadcast_cache< T > my_successors; - int init_decrement_predecessors; + __TBB_DEPRECATED_LIMITER_EXPR( int init_decrement_predecessors; ) - friend class internal::forward_task_bypass< limiter_node >; + friend class internal::forward_task_bypass< limiter_node >; // Let decrementer call decrement_counter() - friend class internal::decrementer< limiter_node >; + friend class internal::decrementer< limiter_node, DecrementType >; bool check_conditions() { // always called under lock return ( my_count + my_tries < my_threshold && !my_predecessors.empty() && !my_successors.empty() ); @@ -2812,10 +2654,10 @@ class limiter_node : public graph_node, public receiver< T >, public sender< T > --my_tries; my_predecessors.try_consume(); if ( check_conditions() ) { - if ( this->my_graph.is_active() ) { + if ( internal::is_graph_active(this->my_graph) ) { task *rtask = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) ) - internal::forward_task_bypass< limiter_node >( *this ); - FLOW_SPAWN (*rtask); + internal::forward_task_bypass< limiter_node >( *this ); + internal::spawn_in_graph_arena(graph_reference(), *rtask); } } } @@ 
-2830,9 +2672,9 @@ class limiter_node : public graph_node, public receiver< T >, public sender< T > --my_tries; if (reserved) my_predecessors.try_release(); if ( check_conditions() ) { - if ( this->my_graph.is_active() ) { + if ( internal::is_graph_active(this->my_graph) ) { task *rtask = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) ) - internal::forward_task_bypass< limiter_node >( *this ); + internal::forward_task_bypass< limiter_node >( *this ); __TBB_ASSERT(!rval, "Have two tasks to handle"); return rtask; } @@ -2846,45 +2688,67 @@ class limiter_node : public graph_node, public receiver< T >, public sender< T > return; } - task * decrement_counter() { + task* decrement_counter( long long delta ) { { spin_mutex::scoped_lock lock(my_mutex); - if(my_count) --my_count; + if( delta > 0 && size_t(delta) > my_count ) + my_count = 0; + else if( delta < 0 && size_t(delta) > my_threshold - my_count ) + my_count = my_threshold; + else + my_count -= size_t(delta); // absolute value of delta is sufficiently small } return forward_task(); } -public: - //! The internal receiver< continue_msg > that decrements the count - internal::decrementer< limiter_node > decrement; - - //! Constructor - limiter_node(graph &g, size_t threshold, int num_decrement_predecessors=0) : - graph_node(g), my_threshold(threshold), my_count(0), my_tries(0), - init_decrement_predecessors(num_decrement_predecessors), - decrement(num_decrement_predecessors) - { + void initialize() { my_predecessors.set_owner(this); my_successors.set_owner(this); decrement.set_owner(this); - tbb::internal::fgt_node( tbb::internal::FLOW_LIMITER_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(&decrement), - static_cast *>(this) ); + tbb::internal::fgt_node( + CODEPTR(), tbb::internal::FLOW_LIMITER_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(&decrement), + static_cast *>(this) + ); } +public: + //! 
The internal receiver< DecrementType > that decrements the count + internal::decrementer< limiter_node, DecrementType > decrement; - //! Copy constructor - limiter_node( const limiter_node& src ) : +#if TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR + __TBB_STATIC_ASSERT( (tbb::internal::is_same_type::value), + "Deprecated interface of the limiter node can be used only in conjunction " + "with continue_msg as the type of DecrementType template parameter." ); +#endif // Check for incompatible interface + + //! Constructor + limiter_node(graph &g, + __TBB_DEPRECATED_LIMITER_ARG2(size_t threshold, int num_decrement_predecessors=0)) + : graph_node(g), my_threshold(threshold), my_count(0), + __TBB_DEPRECATED_LIMITER_ARG4( + my_tries(0), decrement(), + init_decrement_predecessors(num_decrement_predecessors), + decrement(num_decrement_predecessors)) { + initialize(); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + limiter_node(const node_set& nodes, size_t threshold) + : limiter_node(nodes.graph_reference(), threshold) { + make_edges_in_order(nodes, *this); + } +#endif + + //! 
Copy constructor + limiter_node( const limiter_node& src ) : graph_node(src.my_graph), receiver(), sender(), - my_threshold(src.my_threshold), my_count(0), my_tries(0), - init_decrement_predecessors(src.init_decrement_predecessors), - decrement(src.init_decrement_predecessors) - { - my_predecessors.set_owner(this); - my_successors.set_owner(this); - decrement.set_owner(this); - tbb::internal::fgt_node( tbb::internal::FLOW_LIMITER_NODE, &this->my_graph, - static_cast *>(this), static_cast *>(&decrement), - static_cast *>(this) ); + my_threshold(src.my_threshold), my_count(0), + __TBB_DEPRECATED_LIMITER_ARG4( + my_tries(0), decrement(), + init_decrement_predecessors(src.init_decrement_predecessors), + decrement(src.init_decrement_predecessors)) { + initialize(); } #if TBB_PREVIEW_FLOW_GRAPH_TRACE @@ -2900,9 +2764,10 @@ class limiter_node : public graph_node, public receiver< T >, public sender< T > my_successors.register_successor(r); //spawn a forward task if this is the only successor if ( was_empty && !my_predecessors.empty() && my_count + my_tries < my_threshold ) { - if ( this->my_graph.is_active() ) { - FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) ) - internal::forward_task_bypass < limiter_node >( *this ) ) ); + if ( internal::is_graph_active(this->my_graph) ) { + task* task = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) ) + internal::forward_task_bypass < limiter_node >( *this ); + internal::spawn_in_graph_arena(graph_reference(), *task); } } return true; @@ -2916,7 +2781,7 @@ class limiter_node : public graph_node, public receiver< T >, public sender< T > return true; } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION built_successors_type &built_successors() __TBB_override { return my_successors.built_successors(); } built_predecessors_type &built_predecessors() __TBB_override { return my_predecessors.built_predecessors(); } @@ -2954,15 +2819,16 @@ class 
limiter_node : public graph_node, public receiver< T >, public sender< T > my_predecessors.built_predecessors().receiver_extract(*this); decrement.built_predecessors().receiver_extract(decrement); } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ //! Adds src to the list of cached predecessors. bool register_predecessor( predecessor_type &src ) __TBB_override { spin_mutex::scoped_lock lock(my_mutex); my_predecessors.add( src ); - if ( my_count + my_tries < my_threshold && !my_successors.empty() && this->my_graph.is_active() ) { - FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) ) - internal::forward_task_bypass < limiter_node >( *this ) ) ); + if ( my_count + my_tries < my_threshold && !my_successors.empty() && internal::is_graph_active(this->my_graph) ) { + task* task = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) ) + internal::forward_task_bypass < limiter_node >( *this ); + internal::spawn_in_graph_arena(graph_reference(), *task); } return true; } @@ -2993,9 +2859,9 @@ class limiter_node : public graph_node, public receiver< T >, public sender< T > if ( !rtask ) { // try_put_task failed. 
spin_mutex::scoped_lock lock(my_mutex); --my_tries; - if ( check_conditions() && this->my_graph.is_active() ) { + if (check_conditions() && internal::is_graph_active(this->my_graph)) { rtask = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) ) - internal::forward_task_bypass< limiter_node >( *this ); + internal::forward_task_bypass< limiter_node >( *this ); } } else { @@ -3006,6 +2872,8 @@ class limiter_node : public graph_node, public receiver< T >, public sender< T > return rtask; } + graph& graph_reference() const __TBB_override { return my_graph; } + void reset_receiver(reset_flags /*f*/) __TBB_override { __TBB_ASSERT(false,NULL); // should never be called } @@ -3042,12 +2910,20 @@ class join_node: public internal::unfolded_join_node( tbb::internal::FLOW_JOIN_NODE_RESERVING, &this->my_graph, + __TBB_NOINLINE_SYM explicit join_node(graph &g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_RESERVING, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } - join_node(const join_node &other) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_RESERVING, &this->my_graph, + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_NOINLINE_SYM join_node(const node_set& nodes, reserving = reserving()) : join_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + __TBB_NOINLINE_SYM join_node(const join_node &other) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_RESERVING, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } @@ -3067,12 +2943,20 @@ class join_node: public internal::unfolded_join_node( tbb::internal::FLOW_JOIN_NODE_QUEUEING, &this->my_graph, + __TBB_NOINLINE_SYM explicit join_node(graph &g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), 
tbb::internal::FLOW_JOIN_NODE_QUEUEING, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } - join_node(const join_node &other) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_QUEUEING, &this->my_graph, + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_NOINLINE_SYM join_node(const node_set& nodes, queueing = queueing()) : join_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + __TBB_NOINLINE_SYM join_node(const join_node &other) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_QUEUEING, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } @@ -3098,76 +2982,94 @@ class join_node > : public internal::unfolde #if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING join_node(graph &g) : unfolded_type(g) {} + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + join_node(const node_set& nodes, key_matching = key_matching()) + : join_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + #endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1) : unfolded_type(g, b0, b1) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1) : unfolded_type(g, b0, b1) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2) : unfolded_type(g, b0, b1, b2) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2) : unfolded_type(g, b0, b1, b2) { + tbb::internal::fgt_multiinput_node( 
CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3) : unfolded_type(g, b0, b1, b2, b3) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3) : unfolded_type(g, b0, b1, b2, b3) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4) : + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4) : unfolded_type(g, b0, b1, b2, b3, b4) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } #if __TBB_VARIADIC_MAX >= 6 template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5) : + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5) : unfolded_type(g, b0, b1, b2, b3, b4, b5) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } #endif #if __TBB_VARIADIC_MAX >= 7 template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6) : + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 
b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } #endif #if __TBB_VARIADIC_MAX >= 8 template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, __TBB_B7 b7) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } #endif #if __TBB_VARIADIC_MAX >= 9 template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, __TBB_B7 b7, __TBB_B8 b8) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } #endif #if __TBB_VARIADIC_MAX >= 10 template - join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, __TBB_B7 b7, __TBB_B8 b8, 
__TBB_B9 b9) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } #endif - join_node(const join_node &other) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_NOINLINE_SYM join_node(const node_set& nodes, Bodies... bodies) + : join_node(nodes.graph_reference(), bodies...) { + make_edges_in_order(nodes, *this); + } +#endif + + __TBB_NOINLINE_SYM join_node(const join_node &other) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } @@ -3196,18 +3098,26 @@ class indexer_node : public internal::unfolded_indexer_node > { typedef tuple InputTuple; typedef typename internal::tagged_msg output_type; typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + __TBB_NOINLINE_SYM 
indexer_node( const indexer_node& other ) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } #if TBB_PREVIEW_FLOW_GRAPH_TRACE - void set_name( const char *name ) { + void set_name( const char *name ) __TBB_override { tbb::internal::fgt_node_desc( this, name ); } #endif @@ -3221,18 +3131,26 @@ class indexer_node : public internal::unfolded_indexer_node InputTuple; typedef typename internal::tagged_msg output_type; typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } #if TBB_PREVIEW_FLOW_GRAPH_TRACE - void set_name( const char *name ) { + void set_name( const char *name ) __TBB_override { tbb::internal::fgt_node_desc( this, name ); } #endif @@ -3246,18 +3164,26 @@ class indexer_node : public internal::unfolded_indexer_node InputTuple; typedef typename internal::tagged_msg output_type; typedef typename internal::unfolded_indexer_node 
unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } #if TBB_PREVIEW_FLOW_GRAPH_TRACE - void set_name( const char *name ) { + void set_name( const char *name ) __TBB_override { tbb::internal::fgt_node_desc( this, name ); } #endif @@ -3271,13 +3197,21 @@ class indexer_node : public internal::unfolded_indexer_node InputTuple; typedef typename internal::tagged_msg output_type; typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + // Copy constructor - indexer_node( const 
indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } @@ -3296,13 +3230,21 @@ class indexer_node : public internal::unfolded_indexer_node< typedef tuple InputTuple; typedef typename internal::tagged_msg output_type; typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } @@ -3322,13 +3264,21 @@ class indexer_node : public internal::unfolded_indexer_n typedef tuple InputTuple; typedef typename internal::tagged_msg output_type; typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + __TBB_NOINLINE_SYM 
indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } @@ -3350,13 +3300,21 @@ class indexer_node : public internal::unfolded_index typedef tuple InputTuple; typedef typename internal::tagged_msg output_type; typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), 
static_cast< sender< output_type > *>(this) ); } @@ -3379,12 +3337,20 @@ class indexer_node : public internal::unfolded_i typedef typename internal::tagged_msg output_type; typedef typename internal::unfolded_indexer_node unfolded_type; indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + // Copy constructor indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } @@ -3406,13 +3372,21 @@ class indexer_node : public internal::unfold typedef tuple InputTuple; typedef typename internal::tagged_msg output_type; typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, 
&this->my_graph, + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } @@ -3434,13 +3408,21 @@ class indexer_node/*default*/ : public internal::unfolded_indexer_node InputTuple; typedef typename internal::tagged_msg output_type; typedef typename internal::unfolded_indexer_node unfolded_type; - indexer_node(graph& g) : unfolded_type(g) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + // Copy constructor - indexer_node( const indexer_node& other ) : unfolded_type(other) { - tbb::internal::fgt_multiinput_node( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, this->input_ports(), static_cast< sender< output_type > *>(this) ); } @@ -3458,7 +3440,7 @@ inline void internal_make_edge( internal::untyped_sender &p, internal::untyped_r template< typename T > inline void internal_make_edge( sender &p, receiver &s ) { #endif -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION s.internal_add_built_predecessor(p); p.internal_add_built_successor(s); #endif @@ -3522,7 +3504,7 @@ template< typename T > inline void internal_remove_edge( sender &p, receiver &s ) { #endif p.remove_successor( s ); -#if 
TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION // TODO: should we try to remove p from the predecessor list of s, in case the edge is reversed? p.internal_delete_built_successor(s); s.internal_delete_built_predecessor(p); @@ -3577,7 +3559,7 @@ inline void remove_edge( sender& output, V& input) { } #endif -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION template template< typename S > void internal::edge_container::sender_extract( S &s ) { @@ -3595,7 +3577,7 @@ void internal::edge_container::receiver_extract( R &r ) { remove_edge(**i, r); } } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ //! Returns a copy of the body from a function or continue node template< typename Body, typename Node > @@ -3616,11 +3598,8 @@ class composite_node , tbb::flow::tuple&... > output_ports_type; private: -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - const char *my_type_name; -#endif - input_ports_type *my_input_ports; - output_ports_type *my_output_ports; + std::unique_ptr my_input_ports; + std::unique_ptr my_output_ports; static const size_t NUM_INPUTS = sizeof...(InputTypes); static const size_t NUM_OUTPUTS = sizeof...(OutputTypes); @@ -3630,37 +3609,32 @@ class composite_node , tbb::flow::tuplemy_graph ); + tbb::internal::fgt_multiinput_multioutput_node_desc( this, type_name ); } #else - composite_node( graph &g) : graph_node(g), my_input_ports(NULL), my_output_ports(NULL) {} + composite_node( graph &g ) : graph_node(g) { + tbb::internal::fgt_multiinput_multioutput_node( CODEPTR(), tbb::internal::FLOW_COMPOSITE_NODE, this, &this->my_graph ); + } #endif - template - void set_external_ports(T1&& input_ports_tuple, T2&& output_ports_tuple) { - __TBB_STATIC_ASSERT(NUM_INPUTS == tbb::flow::tuple_size::value, "number of arguments does not match number of input ports"); - __TBB_STATIC_ASSERT(NUM_OUTPUTS == tbb::flow::tuple_size::value, "number of arguments does not match number of output 
ports"); - my_input_ports = new input_ports_type(std::forward(input_ports_tuple)); - my_output_ports = new output_ports_type(std::forward(output_ports_tuple)); + template + void set_external_ports(T1&& input_ports_tuple, T2&& output_ports_tuple) { + __TBB_STATIC_ASSERT(NUM_INPUTS == tbb::flow::tuple_size::value, "number of arguments does not match number of input ports"); + __TBB_STATIC_ASSERT(NUM_OUTPUTS == tbb::flow::tuple_size::value, "number of arguments does not match number of output ports"); + my_input_ports = tbb::internal::make_unique(std::forward(input_ports_tuple)); + my_output_ports = tbb::internal::make_unique(std::forward(output_ports_tuple)); -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - tbb::internal::fgt_internal_input_helper::register_port( this, input_ports_tuple); - tbb::internal::fgt_internal_output_helper::register_port( this, output_ports_tuple); -#endif - } + tbb::internal::fgt_internal_input_alias_helper::alias_port( this, input_ports_tuple); + tbb::internal::fgt_internal_output_alias_helper::alias_port( this, output_ports_tuple); + } -#if TBB_PREVIEW_FLOW_GRAPH_TRACE template< typename... NodeTypes > void add_visible_nodes(const NodeTypes&... n) { internal::add_nodes_impl(this, true, n...); } template< typename... NodeTypes > void add_nodes(const NodeTypes&... n) { internal::add_nodes_impl(this, false, n...); } -#else - template void add_nodes(Nodes&...) { } - template void add_visible_nodes(Nodes&...) { } -#endif #if TBB_PREVIEW_FLOW_GRAPH_TRACE void set_name( const char *name ) __TBB_override { @@ -3668,22 +3642,17 @@ class composite_node , tbb::flow::tuple, tbb::flow::tuple<> > : pu typedef tbb::flow::tuple< receiver&... 
> input_ports_type; private: -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - const char *my_type_name; -#endif - input_ports_type *my_input_ports; + std::unique_ptr my_input_ports; static const size_t NUM_INPUTS = sizeof...(InputTypes); protected: @@ -3708,35 +3674,30 @@ class composite_node , tbb::flow::tuple<> > : pu public: #if TBB_PREVIEW_FLOW_GRAPH_TRACE - composite_node( graph &g, const char *type_name = "composite_node") : graph_node(g), my_type_name(type_name), my_input_ports(NULL) { - tbb::internal::itt_make_task_group( tbb::internal::ITT_DOMAIN_FLOW, this, tbb::internal::FLOW_NODE, &g, tbb::internal::FLOW_GRAPH, tbb::internal::FLOW_COMPOSITE_NODE ); - tbb::internal::fgt_multiinput_multioutput_node_desc( this, my_type_name ); + composite_node( graph &g, const char *type_name = "composite_node") : graph_node(g) { + tbb::internal::fgt_composite( CODEPTR(), this, &g ); + tbb::internal::fgt_multiinput_multioutput_node_desc( this, type_name ); } #else - composite_node( graph &g) : graph_node(g), my_input_ports(NULL) {} + composite_node( graph &g ) : graph_node(g) { + tbb::internal::fgt_composite( CODEPTR(), this, &g ); + } #endif template void set_external_ports(T&& input_ports_tuple) { __TBB_STATIC_ASSERT(NUM_INPUTS == tbb::flow::tuple_size::value, "number of arguments does not match number of input ports"); - my_input_ports = new input_ports_type(std::forward(input_ports_tuple)); + my_input_ports = tbb::internal::make_unique(std::forward(input_ports_tuple)); -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - tbb::internal::fgt_internal_input_helper::register_port( this, std::forward(input_ports_tuple)); -#endif + tbb::internal::fgt_internal_input_alias_helper::alias_port( this, std::forward(input_ports_tuple)); } -#if TBB_PREVIEW_FLOW_GRAPH_TRACE template< typename... NodeTypes > void add_visible_nodes(const NodeTypes&... n) { internal::add_nodes_impl(this, true, n...); } template< typename... NodeTypes > void add_nodes( const NodeTypes&... 
n) { internal::add_nodes_impl(this, false, n...); } -#else - template void add_nodes(Nodes&...) {} - template void add_visible_nodes(Nodes&...) {} -#endif #if TBB_PREVIEW_FLOW_GRAPH_TRACE void set_name( const char *name ) __TBB_override { @@ -3744,16 +3705,12 @@ class composite_node , tbb::flow::tuple<> > : pu } #endif - input_ports_type input_ports() { + input_ports_type& input_ports() { __TBB_ASSERT(my_input_ports, "input ports not set, call set_external_ports to set input ports"); return *my_input_ports; } - virtual ~composite_node() { - if(my_input_ports) delete my_input_ports; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION void extract() __TBB_override { __TBB_ASSERT(false, "Current composite_node implementation does not support extract"); } @@ -3768,10 +3725,7 @@ class composite_node , tbb::flow::tuple > : p typedef tbb::flow::tuple< sender&... > output_ports_type; private: -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - const char *my_type_name; -#endif - output_ports_type *my_output_ports; + std::unique_ptr my_output_ports; static const size_t NUM_OUTPUTS = sizeof...(OutputTypes); protected: @@ -3779,35 +3733,30 @@ class composite_node , tbb::flow::tuple > : p public: #if TBB_PREVIEW_FLOW_GRAPH_TRACE - composite_node( graph &g, const char *type_name = "composite_node") : graph_node(g), my_type_name(type_name), my_output_ports(NULL) { - tbb::internal::itt_make_task_group( tbb::internal::ITT_DOMAIN_FLOW, this, tbb::internal::FLOW_NODE, &g, tbb::internal::FLOW_GRAPH, tbb::internal::FLOW_COMPOSITE_NODE ); - tbb::internal::fgt_multiinput_multioutput_node_desc( this, my_type_name ); + __TBB_NOINLINE_SYM composite_node( graph &g, const char *type_name = "composite_node") : graph_node(g) { + tbb::internal::fgt_composite( CODEPTR(), this, &g ); + tbb::internal::fgt_multiinput_multioutput_node_desc( this, type_name ); } #else - composite_node( graph &g) : graph_node(g), my_output_ports(NULL) {} + __TBB_NOINLINE_SYM composite_node( graph &g ) 
: graph_node(g) { + tbb::internal::fgt_composite( CODEPTR(), this, &g ); + } #endif template void set_external_ports(T&& output_ports_tuple) { __TBB_STATIC_ASSERT(NUM_OUTPUTS == tbb::flow::tuple_size::value, "number of arguments does not match number of output ports"); - my_output_ports = new output_ports_type(std::forward(output_ports_tuple)); + my_output_ports = tbb::internal::make_unique(std::forward(output_ports_tuple)); -#if TBB_PREVIEW_FLOW_GRAPH_TRACE - tbb::internal::fgt_internal_output_helper::register_port( this, std::forward(output_ports_tuple)); -#endif + tbb::internal::fgt_internal_output_alias_helper::alias_port( this, std::forward(output_ports_tuple)); } -#if TBB_PREVIEW_FLOW_GRAPH_TRACE template void add_visible_nodes(const NodeTypes&... n) { internal::add_nodes_impl(this, true, n...); } template void add_nodes(const NodeTypes&... n) { internal::add_nodes_impl(this, false, n...); } -#else - template void add_nodes(Nodes&...) {} - template void add_visible_nodes(Nodes&...) 
{} -#endif #if TBB_PREVIEW_FLOW_GRAPH_TRACE void set_name( const char *name ) __TBB_override { @@ -3815,16 +3764,12 @@ class composite_node , tbb::flow::tuple > : p } #endif - output_ports_type output_ports() { + output_ports_type& output_ports() { __TBB_ASSERT(my_output_ports, "output ports not set, call set_external_ports to set output ports"); return *my_output_ports; } - virtual ~composite_node() { - if(my_output_ports) delete my_output_ports; - } - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION void extract() __TBB_override { __TBB_ASSERT(false, "Current composite_node implementation does not support extract"); } @@ -3856,7 +3801,7 @@ class async_body: public async_body_base { typedef async_body_base base_type; typedef Gateway gateway_type; - async_body(const Body &body, gateway_type *gateway) + async_body(const Body &body, gateway_type *gateway) : base_type(gateway), my_body(body) { } void operator()( const Input &v, Ports & ) { @@ -3869,13 +3814,18 @@ class async_body: public async_body_base { Body my_body; }; -} +} // namespace internal + +} // namespace interfaceX +namespace interface11 { //! 
Implements async node -template < typename Input, typename Output, typename Policy = queueing, typename Allocator=cache_aligned_allocator > +template < typename Input, typename Output, + typename Policy = queueing_lightweight, + typename Allocator=cache_aligned_allocator > class async_node : public multifunction_node< Input, tuple< Output >, Policy, Allocator >, public sender< Output > { typedef multifunction_node< Input, tuple< Output >, Policy, Allocator > base_type; - typedef typename internal::multifunction_input mfn_input_type; + typedef typename internal::multifunction_input mfn_input_type; public: typedef Input input_type; @@ -3885,11 +3835,13 @@ class async_node : public multifunction_node< Input, tuple< Output >, Policy, Al typedef typename sender::successor_type successor_type; typedef receiver_gateway gateway_type; typedef internal::async_body_base async_body_base_type; + typedef typename base_type::output_ports_type output_ports_type; private: struct try_put_functor { typedef internal::multifunction_output output_port_type; output_port_type *port; + // TODO: pass value by copy since we do not want to block asynchronous thread. const Output *value; bool result; try_put_functor(output_port_type &p, const Output &v) : port(&p), value(&v), result(false) { } @@ -3902,8 +3854,8 @@ class async_node : public multifunction_node< Input, tuple< Output >, Policy, Al public: receiver_gateway_impl(async_node* node): my_node(node) {} void reserve_wait() __TBB_override { - my_node->my_graph.reserve_wait(); tbb::internal::fgt_async_reserve(static_cast(my_node), &my_node->my_graph); + my_node->my_graph.reserve_wait(); } void release_wait() __TBB_override { @@ -3926,29 +3878,69 @@ class async_node : public multifunction_node< Input, tuple< Output >, Policy, Al //! 
Implements gateway_type::try_put for an external activity to submit a message to FG bool try_put_impl(const Output &i) { internal::multifunction_output &port_0 = internal::output_port<0>(*this); + internal::broadcast_cache& port_successors = port_0.successors(); tbb::internal::fgt_async_try_put_begin(this, &port_0); - __TBB_ASSERT(this->my_graph.my_task_arena && this->my_graph.my_task_arena->is_active(), NULL); - try_put_functor tpf(port_0, i); - this->my_graph.my_task_arena->execute(tpf); + task_list tasks; + bool is_at_least_one_put_successful = port_successors.gather_successful_try_puts(i, tasks); + __TBB_ASSERT( is_at_least_one_put_successful || tasks.empty(), + "Return status is inconsistent with the method operation." ); + + while( !tasks.empty() ) { + internal::enqueue_in_graph_arena(this->my_graph, tasks.pop_front()); + } tbb::internal::fgt_async_try_put_end(this, &port_0); - return tpf.result; + return is_at_least_one_put_successful; } public: template - async_node( graph &g, size_t concurrency, Body body ) : - base_type( g, concurrency, internal::async_body(body, &my_gateway) ), my_gateway(self()) { - tbb::internal::fgt_multioutput_node<1>( tbb::internal::FLOW_ASYNC_NODE, - &this->my_graph, - static_cast *>(this), - this->output_ports() ); - } - - async_node( const async_node &other ) : base_type(other), sender(), my_gateway(self()) { + __TBB_NOINLINE_SYM async_node( + graph &g, size_t concurrency, +#if __TBB_CPP11_PRESENT + Body body, __TBB_FLOW_GRAPH_PRIORITY_ARG1(Policy = Policy(), node_priority_t priority = tbb::flow::internal::no_priority) +#else + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Body body, node_priority_t priority = tbb::flow::internal::no_priority) +#endif + ) : base_type( + g, concurrency, + internal::async_body + (body, &my_gateway) __TBB_FLOW_GRAPH_PRIORITY_ARG0(priority) ), my_gateway(self()) { + tbb::internal::fgt_multioutput_node_with_body<1>( + CODEPTR(), tbb::internal::FLOW_ASYNC_NODE, + &this->my_graph, static_cast *>(this), + 
this->output_ports(), this->my_body + ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES && __TBB_CPP11_PRESENT + template + __TBB_NOINLINE_SYM async_node(graph& g, size_t concurrency, Body body, node_priority_t priority) + : async_node(g, concurrency, body, Policy(), priority) {} +#endif // __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_NOINLINE_SYM async_node( + const node_set& nodes, size_t concurrency, Body body, + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Policy = Policy(), node_priority_t priority = tbb::flow::internal::no_priority) + ) : async_node(nodes.graph_reference(), concurrency, __TBB_FLOW_GRAPH_PRIORITY_ARG1(body, priority)) { + make_edges_in_order(nodes, *this); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + template + __TBB_NOINLINE_SYM async_node(const node_set& nodes, size_t concurrency, Body body, node_priority_t priority) + : async_node(nodes, concurrency, body, Policy(), priority) {} +#endif // __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + + __TBB_NOINLINE_SYM async_node( const async_node &other ) : base_type(other), sender(), my_gateway(self()) { static_cast(this->my_body->get_body_ptr())->set_gateway(&my_gateway); static_cast(this->my_init_body->get_body_ptr())->set_gateway(&my_gateway); - tbb::internal::fgt_multioutput_node<1>( tbb::internal::FLOW_ASYNC_NODE, &this->my_graph, static_cast *>(this), this->output_ports() ); + tbb::internal::fgt_multioutput_node_with_body<1>( CODEPTR(), tbb::internal::FLOW_ASYNC_NODE, + &this->my_graph, static_cast *>(this), + this->output_ports(), this->my_body ); } gateway_type& gateway() { @@ -3957,7 +3949,7 @@ class async_node : public multifunction_node< Input, tuple< Output >, Policy, Al #if TBB_PREVIEW_FLOW_GRAPH_TRACE void set_name( const char *name ) __TBB_override { - tbb::internal::fgt_node_desc( this, name ); + tbb::internal::fgt_multioutput_node_desc( this, name ); } #endif @@ -3982,7 +3974,7 @@ class async_node : public 
multifunction_node< Input, tuple< Output >, Policy, Al return ab.get_body(); } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION //! interface to record edges for traversal & deletion typedef typename internal::edge_container built_successors_type; typedef typename built_successors_type::edge_list_type successor_list_type; @@ -4005,7 +3997,7 @@ class async_node : public multifunction_node< Input, tuple< Output >, Policy, Al size_t successor_count() __TBB_override { return internal::output_port<0>(*this).successor_count(); } -#endif +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ protected: @@ -4018,58 +4010,339 @@ class async_node : public multifunction_node< Input, tuple< Output >, Policy, Al #include "internal/_flow_graph_streaming_node.h" #endif // __TBB_PREVIEW_STREAMING_NODE -} // interface9 - - using interface9::reset_flags; - using interface9::rf_reset_protocol; - using interface9::rf_reset_bodies; - using interface9::rf_clear_edges; - - using interface9::graph; - using interface9::graph_node; - using interface9::continue_msg; - - using interface9::source_node; - using interface9::function_node; - using interface9::multifunction_node; - using interface9::split_node; - using interface9::internal::output_port; - using interface9::indexer_node; - using interface9::internal::tagged_msg; - using interface9::internal::cast_to; - using interface9::internal::is_a; - using interface9::continue_node; - using interface9::overwrite_node; - using interface9::write_once_node; - using interface9::broadcast_node; - using interface9::buffer_node; - using interface9::queue_node; - using interface9::sequencer_node; - using interface9::priority_queue_node; - using interface9::limiter_node; - using namespace interface9::internal::graph_policy_namespace; - using interface9::join_node; - using interface9::input_port; - using interface9::copy_body; - using interface9::make_edge; - using interface9::remove_edge; - using interface9::internal::tag_value; 
+#include "internal/_flow_graph_node_set_impl.h" + +template< typename T > +class overwrite_node : public graph_node, public receiver, public sender { +public: + typedef T input_type; + typedef T output_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename receiver::built_predecessors_type built_predecessors_type; + typedef typename sender::built_successors_type built_successors_type; + typedef typename receiver::predecessor_list_type predecessor_list_type; + typedef typename sender::successor_list_type successor_list_type; +#endif + + __TBB_NOINLINE_SYM explicit overwrite_node(graph &g) : graph_node(g), my_buffer_is_valid(false) { + my_successors.set_owner( this ); + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_OVERWRITE_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + overwrite_node(const node_set& nodes) : overwrite_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + //! Copy constructor; doesn't take anything from src; default won't work + __TBB_NOINLINE_SYM overwrite_node( const overwrite_node& src ) : + graph_node(src.my_graph), receiver(), sender(), my_buffer_is_valid(false) + { + my_successors.set_owner( this ); + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_OVERWRITE_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(this) ); + } + + ~overwrite_node() {} + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif + + bool register_successor( successor_type &s ) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + if (my_buffer_is_valid && internal::is_graph_active( my_graph )) { + // We have a valid value that must be forwarded immediately. 
+ bool ret = s.try_put( my_buffer ); + if ( ret ) { + // We add the successor that accepted our put + my_successors.register_successor( s ); + } else { + // In case of reservation a race between the moment of reservation and register_successor can appear, + // because failed reserve does not mean that register_successor is not ready to put a message immediately. + // We have some sort of infinite loop: reserving node tries to set pull state for the edge, + // but overwrite_node tries to return push state back. That is why we have to break this loop with task creation. + task *rtask = new ( task::allocate_additional_child_of( *( my_graph.root_task() ) ) ) + register_predecessor_task( *this, s ); + internal::spawn_in_graph_arena( my_graph, *rtask ); + } + } else { + // No valid value yet, just add as successor + my_successors.register_successor( s ); + } + return true; + } + + bool remove_successor( successor_type &s ) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + my_successors.remove_successor(s); + return true; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } + built_successors_type &built_successors() __TBB_override { return my_successors.built_successors(); } + + void internal_add_built_successor( successor_type &s) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + my_successors.internal_add_built_successor(s); + } + + void internal_delete_built_successor( successor_type &s) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + my_successors.internal_delete_built_successor(s); + } + + size_t successor_count() __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + return my_successors.successor_count(); + } + + void copy_successors(successor_list_type &v) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + my_successors.copy_successors(v); + } + + void internal_add_built_predecessor( predecessor_type &p) __TBB_override { + 
spin_mutex::scoped_lock l( my_mutex ); + my_built_predecessors.add_edge(p); + } + + void internal_delete_built_predecessor( predecessor_type &p) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + my_built_predecessors.delete_edge(p); + } + + size_t predecessor_count() __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + return my_built_predecessors.edge_count(); + } + + void copy_predecessors( predecessor_list_type &v ) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + my_built_predecessors.copy_edges(v); + } + + void extract() __TBB_override { + my_buffer_is_valid = false; + built_successors().sender_extract(*this); + built_predecessors().receiver_extract(*this); + } + +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + + bool try_get( input_type &v ) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + if ( my_buffer_is_valid ) { + v = my_buffer; + return true; + } + return false; + } + + //! Reserves an item + bool try_reserve( T &v ) __TBB_override { + return try_get(v); + } + + //! Releases the reserved item + bool try_release() __TBB_override { return true; } + + //! 
Consumes the reserved item + bool try_consume() __TBB_override { return true; } + + bool is_valid() { + spin_mutex::scoped_lock l( my_mutex ); + return my_buffer_is_valid; + } + + void clear() { + spin_mutex::scoped_lock l( my_mutex ); + my_buffer_is_valid = false; + } + +protected: + + template< typename R, typename B > friend class run_and_put_task; + template friend class internal::broadcast_cache; + template friend class internal::round_robin_cache; + task * try_put_task( const input_type &v ) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + return try_put_task_impl(v); + } + + task * try_put_task_impl(const input_type &v) { + my_buffer = v; + my_buffer_is_valid = true; + task * rtask = my_successors.try_put_task(v); + if (!rtask) rtask = SUCCESSFULLY_ENQUEUED; + return rtask; + } + + graph& graph_reference() const __TBB_override { + return my_graph; + } + + //! Breaks an infinite loop between the node reservation and register_successor call + struct register_predecessor_task : public graph_task { + + register_predecessor_task(predecessor_type& owner, successor_type& succ) : + o(owner), s(succ) {}; + + tbb::task* execute() __TBB_override { + if (!s.register_predecessor(o)) { + o.register_successor(s); + } + return NULL; + } + + predecessor_type& o; + successor_type& s; + }; + + spin_mutex my_mutex; + internal::broadcast_cache< input_type, null_rw_mutex > my_successors; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + internal::edge_container my_built_predecessors; +#endif + input_type my_buffer; + bool my_buffer_is_valid; + void reset_receiver(reset_flags /*f*/) __TBB_override {} + + void reset_node( reset_flags f) __TBB_override { + my_buffer_is_valid = false; + if (f&rf_clear_edges) { + my_successors.clear(); + } + } +}; // overwrite_node + +template< typename T > +class write_once_node : public overwrite_node { +public: + typedef T input_type; + typedef T output_type; + typedef overwrite_node base_type; + typedef typename receiver::predecessor_type 
predecessor_type; + typedef typename sender::successor_type successor_type; + + //! Constructor + __TBB_NOINLINE_SYM explicit write_once_node(graph& g) : base_type(g) { + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_WRITE_ONCE_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + write_once_node(const node_set& nodes) : write_once_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + //! Copy constructor: call base class copy constructor + __TBB_NOINLINE_SYM write_once_node( const write_once_node& src ) : base_type(src) { + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_WRITE_ONCE_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif + +protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class internal::broadcast_cache; + template friend class internal::round_robin_cache; + task *try_put_task( const T &v ) __TBB_override { + spin_mutex::scoped_lock l( this->my_mutex ); + return this->my_buffer_is_valid ? 
NULL : this->try_put_task_impl(v); + } +}; + +} // interfaceX + + using interface11::reset_flags; + using interface11::rf_reset_protocol; + using interface11::rf_reset_bodies; + using interface11::rf_clear_edges; + + using interface11::graph; + using interface11::graph_node; + using interface11::continue_msg; + + using interface11::source_node; + using interface11::function_node; + using interface11::multifunction_node; + using interface11::split_node; + using interface11::internal::output_port; + using interface11::indexer_node; + using interface11::internal::tagged_msg; + using interface11::internal::cast_to; + using interface11::internal::is_a; + using interface11::continue_node; + using interface11::overwrite_node; + using interface11::write_once_node; + using interface11::broadcast_node; + using interface11::buffer_node; + using interface11::queue_node; + using interface11::sequencer_node; + using interface11::priority_queue_node; + using interface11::limiter_node; + using namespace interface11::internal::graph_policy_namespace; + using interface11::join_node; + using interface11::input_port; + using interface11::copy_body; + using interface11::make_edge; + using interface11::remove_edge; + using interface11::internal::tag_value; #if __TBB_FLOW_GRAPH_CPP11_FEATURES - using interface9::composite_node; + using interface11::composite_node; #endif - using interface9::async_node; + using interface11::async_node; #if __TBB_PREVIEW_ASYNC_MSG - using interface9::async_msg; + using interface11::async_msg; #endif #if __TBB_PREVIEW_STREAMING_NODE - using interface9::port_ref; - using interface9::streaming_node; + using interface11::port_ref; + using interface11::streaming_node; #endif // __TBB_PREVIEW_STREAMING_NODE +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + using internal::node_priority_t; + using internal::no_priority; +#endif + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + using interface11::internal::follows; + using interface11::internal::precedes; + using 
interface11::internal::make_node_set; + using interface11::internal::make_edges; +#endif } // flow } // tbb +// Include deduction guides for node classes +#include "internal/_flow_graph_nodes_deduction.h" + #undef __TBB_PFG_RESET_ARG #undef __TBB_COMMA +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_flow_graph_H_include_area + +#if TBB_USE_THREADING_TOOLS && TBB_PREVIEW_FLOW_GRAPH_TRACE && ( __linux__ || __APPLE__ ) + #undef __TBB_NOINLINE_SYM +#endif + #endif // __TBB_flow_graph_H diff --git a/inst/include/tbb/flow_graph_abstractions.h b/inst/include/tbb/flow_graph_abstractions.h index f6eb3fb3..f8ac239c 100644 --- a/inst/include/tbb/flow_graph_abstractions.h +++ b/inst/include/tbb/flow_graph_abstractions.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_flow_graph_abstractions_H @@ -23,7 +19,7 @@ namespace tbb { namespace flow { -namespace interface9 { +namespace interface11 { //! 
Pure virtual template classes that define interfaces for async communication class graph_proxy { @@ -47,10 +43,10 @@ class receiver_gateway : public graph_proxy { virtual bool try_put(const input_type&) = 0; }; -} //interface9 +} //interfaceX -using interface9::graph_proxy; -using interface9::receiver_gateway; +using interface11::graph_proxy; +using interface11::receiver_gateway; } //flow } //tbb diff --git a/inst/include/tbb/flow_graph_opencl_node.h b/inst/include/tbb/flow_graph_opencl_node.h index 89f4da7b..33e1cdef 100644 --- a/inst/include/tbb/flow_graph_opencl_node.h +++ b/inst/include/tbb/flow_graph_opencl_node.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_flow_graph_opencl_node_H #define __TBB_flow_graph_opencl_node_H +#define __TBB_flow_graph_opencl_node_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "tbb/tbb_config.h" #if __TBB_PREVIEW_OPENCL_NODE @@ -43,94 +42,64 @@ namespace tbb { namespace flow { -namespace interface9 { - -class opencl_foundation; -class opencl_device_list; - -template -class opencl_buffer_impl; - -template -class opencl_program; - -class default_opencl_factory; - -class opencl_graph : public graph { -public: - //! Constructs a graph with isolated task_group_context - opencl_graph() : my_opencl_foundation( NULL ) {} - //! Constructs a graph with an user context - explicit opencl_graph( task_group_context& context ) : graph( context ), my_opencl_foundation( NULL ) {} - //! Destroys a graph - ~opencl_graph(); - //! 
Available devices - const opencl_device_list& available_devices(); - default_opencl_factory& opencl_factory(); -protected: - opencl_foundation *my_opencl_foundation; - opencl_foundation &get_opencl_foundation(); - - template - friend class opencl_buffer; - template - friend class opencl_image2d; - template - friend class opencl_node; - template - friend class opencl_factory; -}; +namespace interface11 { template class opencl_factory; -template -class dependency_msg; +namespace opencl_info { +class default_opencl_factory; +} +template +class opencl_program; -inline void enforce_cl_retcode( cl_int err, std::string msg ) { - if ( err != CL_SUCCESS ) { +inline void enforce_cl_retcode(cl_int err, std::string msg) { + if (err != CL_SUCCESS) { std::cerr << msg << "; error code: " << err << std::endl; throw msg; } } template -T event_info( cl_event e, cl_event_info i ) { +T event_info(cl_event e, cl_event_info i) { T res; - enforce_cl_retcode( clGetEventInfo( e, i, sizeof( res ), &res, NULL ), "Failed to get OpenCL event information" ); + enforce_cl_retcode(clGetEventInfo(e, i, sizeof(res), &res, NULL), "Failed to get OpenCL event information"); return res; } template -T device_info( cl_device_id d, cl_device_info i ) { +T device_info(cl_device_id d, cl_device_info i) { T res; - enforce_cl_retcode( clGetDeviceInfo( d, i, sizeof( res ), &res, NULL ), "Failed to get OpenCL device information" ); + enforce_cl_retcode(clGetDeviceInfo(d, i, sizeof(res), &res, NULL), "Failed to get OpenCL device information"); return res; } + template <> -std::string device_info( cl_device_id d, cl_device_info i ) { +inline std::string device_info(cl_device_id d, cl_device_info i) { size_t required; - enforce_cl_retcode( clGetDeviceInfo( d, i, 0, NULL, &required ), "Failed to get OpenCL device information" ); + enforce_cl_retcode(clGetDeviceInfo(d, i, 0, NULL, &required), "Failed to get OpenCL device information"); - char *buff = (char*)alloca( required ); - enforce_cl_retcode( clGetDeviceInfo( 
d, i, required, buff, NULL ), "Failed to get OpenCL device information" ); + char *buff = (char*)alloca(required); + enforce_cl_retcode(clGetDeviceInfo(d, i, required, buff, NULL), "Failed to get OpenCL device information"); return buff; } + template -T platform_info( cl_platform_id p, cl_platform_info i ) { +T platform_info(cl_platform_id p, cl_platform_info i) { T res; - enforce_cl_retcode( clGetPlatformInfo( p, i, sizeof( res ), &res, NULL ), "Failed to get OpenCL platform information" ); + enforce_cl_retcode(clGetPlatformInfo(p, i, sizeof(res), &res, NULL), "Failed to get OpenCL platform information"); return res; } + template <> -std::string platform_info( cl_platform_id p, cl_platform_info i ) { +inline std::string platform_info(cl_platform_id p, cl_platform_info i) { size_t required; - enforce_cl_retcode( clGetPlatformInfo( p, i, 0, NULL, &required ), "Failed to get OpenCL platform information" ); + enforce_cl_retcode(clGetPlatformInfo(p, i, 0, NULL, &required), "Failed to get OpenCL platform information"); - char *buff = (char*)alloca( required ); - enforce_cl_retcode( clGetPlatformInfo( p, i, required, buff, NULL ), "Failed to get OpenCL platform information" ); + char *buff = (char*)alloca(required); + enforce_cl_retcode(clGetPlatformInfo(p, i, required, buff, NULL), "Failed to get OpenCL platform information"); return buff; } @@ -144,24 +113,26 @@ class opencl_device { host = device_id_type( -1 ) }; - opencl_device() : my_device_id( unknown ) {} + opencl_device() : my_device_id( unknown ), my_cl_device_id( NULL ), my_cl_command_queue( NULL ) {} + + opencl_device( cl_device_id d_id ) : my_device_id( unknown ), my_cl_device_id( d_id ), my_cl_command_queue( NULL ) {} - opencl_device( cl_device_id cl_d_id, device_id_type device_id ) : my_device_id( device_id ), my_cl_device_id( cl_d_id ) {} + opencl_device( cl_device_id cl_d_id, device_id_type device_id ) : my_device_id( device_id ), my_cl_device_id( cl_d_id ), my_cl_command_queue( NULL ) {} std::string 
platform_profile() const { - return platform_info( platform(), CL_PLATFORM_PROFILE ); + return platform_info( platform_id(), CL_PLATFORM_PROFILE ); } std::string platform_version() const { - return platform_info( platform(), CL_PLATFORM_VERSION ); + return platform_info( platform_id(), CL_PLATFORM_VERSION ); } std::string platform_name() const { - return platform_info( platform(), CL_PLATFORM_NAME ); + return platform_info( platform_id(), CL_PLATFORM_NAME ); } std::string platform_vendor() const { - return platform_info( platform(), CL_PLATFORM_VENDOR ); + return platform_info( platform_id(), CL_PLATFORM_VENDOR ); } std::string platform_extensions() const { - return platform_info( platform(), CL_PLATFORM_EXTENSIONS ); + return platform_info( platform_id(), CL_PLATFORM_EXTENSIONS ); } template @@ -257,13 +228,12 @@ class opencl_device { my_cl_command_queue = cmd_queue; } -private: - opencl_device( cl_device_id d_id ) : my_device_id( unknown ), my_cl_device_id( d_id ) {} - - cl_platform_id platform() const { + cl_platform_id platform_id() const { return device_info( my_cl_device_id, CL_DEVICE_PLATFORM ); } +private: + device_id_type my_device_id; cl_device_id my_cl_device_id; cl_command_queue my_cl_command_queue; @@ -276,7 +246,6 @@ class opencl_device { friend class opencl_memory; template friend class opencl_program; - friend class opencl_foundation; #if TBB_USE_ASSERT template @@ -303,10 +272,66 @@ class opencl_device_list { const_iterator end() const { return my_container.end(); } const_iterator cbegin() const { return my_container.cbegin(); } const_iterator cend() const { return my_container.cend(); } + private: container_type my_container; }; +namespace internal { + +// Retrieve all OpenCL devices from machine +inline opencl_device_list find_available_devices() { + opencl_device_list opencl_devices; + + cl_uint num_platforms; + enforce_cl_retcode(clGetPlatformIDs(0, NULL, &num_platforms), "clGetPlatformIDs failed"); + + std::vector platforms(num_platforms); + 
enforce_cl_retcode(clGetPlatformIDs(num_platforms, platforms.data(), NULL), "clGetPlatformIDs failed"); + + cl_uint num_devices; + std::vector::iterator platforms_it = platforms.begin(); + cl_uint num_all_devices = 0; + while (platforms_it != platforms.end()) { + cl_int err = clGetDeviceIDs(*platforms_it, CL_DEVICE_TYPE_ALL, 0, NULL, &num_devices); + if (err == CL_DEVICE_NOT_FOUND) { + platforms_it = platforms.erase(platforms_it); + } + else { + enforce_cl_retcode(err, "clGetDeviceIDs failed"); + num_all_devices += num_devices; + ++platforms_it; + } + } + + std::vector devices(num_all_devices); + std::vector::iterator devices_it = devices.begin(); + for (auto p = platforms.begin(); p != platforms.end(); ++p) { + enforce_cl_retcode(clGetDeviceIDs((*p), CL_DEVICE_TYPE_ALL, (cl_uint)std::distance(devices_it, devices.end()), &*devices_it, &num_devices), "clGetDeviceIDs failed"); + devices_it += num_devices; + } + + for (auto d = devices.begin(); d != devices.end(); ++d) { + opencl_devices.add(opencl_device((*d))); + } + + return opencl_devices; +} + +} // namespace internal + +// TODO: consider this namespace as public API +namespace opencl_info { + + inline const opencl_device_list& available_devices() { + // Static storage for all available OpenCL devices on machine + static const opencl_device_list my_devices = internal::find_available_devices(); + return my_devices; + } + +} // namespace opencl_info + + class callback_base : tbb::internal::no_copy { public: virtual void call() = 0; @@ -315,41 +340,30 @@ class callback_base : tbb::internal::no_copy { template class callback : public callback_base { - graph &my_graph; Callback my_callback; T my_data; public: - callback( graph &g, Callback c, const T& t ) : my_graph( g ), my_callback( c ), my_data( t ) { - // Extend the graph lifetime until the callback completion. - my_graph.increment_wait_count(); - } - ~callback() { - // Release the reference to the graph. 
- my_graph.decrement_wait_count(); - } + callback( Callback c, const T& t ) : my_callback( c ), my_data( t ) {} + void call() __TBB_override { my_callback( my_data ); } }; -template -class dependency_msg : public async_msg { +template +class opencl_async_msg : public async_msg { public: typedef T value_type; - dependency_msg() : my_callback_flag_ptr( std::make_shared< tbb::atomic>() ) { + opencl_async_msg() : my_callback_flag_ptr( std::make_shared< tbb::atomic>() ) { my_callback_flag_ptr->store(false); } - explicit dependency_msg( const T& data ) : my_data(data), my_callback_flag_ptr( std::make_shared>() ) { + explicit opencl_async_msg( const T& data ) : my_data(data), my_callback_flag_ptr( std::make_shared>() ) { my_callback_flag_ptr->store(false); } - dependency_msg( opencl_graph &g, const T& data ) : my_data(data), my_graph(&g), my_callback_flag_ptr( std::make_shared>() ) { - my_callback_flag_ptr->store(false); - } - - dependency_msg( const T& data, cl_event event ) : my_data(data), my_event(event), my_is_event(true), my_callback_flag_ptr( std::make_shared>() ) { + opencl_async_msg( const T& data, cl_event event ) : my_data(data), my_event(event), my_is_event(true), my_callback_flag_ptr( std::make_shared>() ) { my_callback_flag_ptr->store(false); enforce_cl_retcode( clRetainEvent( my_event ), "Failed to retain an event" ); } @@ -372,22 +386,22 @@ class dependency_msg : public async_msg { return my_data; } - dependency_msg( const dependency_msg &dmsg ) : async_msg(dmsg), - my_data(dmsg.my_data), my_event(dmsg.my_event), my_is_event( dmsg.my_is_event ), my_graph( dmsg.my_graph ), + opencl_async_msg( const opencl_async_msg &dmsg ) : async_msg(dmsg), + my_data(dmsg.my_data), my_event(dmsg.my_event), my_is_event( dmsg.my_is_event ), my_callback_flag_ptr(dmsg.my_callback_flag_ptr) { if ( my_is_event ) enforce_cl_retcode( clRetainEvent( my_event ), "Failed to retain an event" ); } - dependency_msg( dependency_msg &&dmsg ) : async_msg(std::move(dmsg)), - 
my_data(std::move(dmsg.my_data)), my_event(dmsg.my_event), my_is_event(dmsg.my_is_event), my_graph(dmsg.my_graph), + opencl_async_msg( opencl_async_msg &&dmsg ) : async_msg(std::move(dmsg)), + my_data(std::move(dmsg.my_data)), my_event(dmsg.my_event), my_is_event(dmsg.my_is_event), my_callback_flag_ptr( std::move(dmsg.my_callback_flag_ptr) ) { dmsg.my_is_event = false; } - dependency_msg& operator=(const dependency_msg &dmsg) { + opencl_async_msg& operator=(const opencl_async_msg &dmsg) { async_msg::operator =(dmsg); // Release original event @@ -397,7 +411,6 @@ class dependency_msg : public async_msg { my_data = dmsg.my_data; my_event = dmsg.my_event; my_is_event = dmsg.my_is_event; - my_graph = dmsg.my_graph; // Retain copied event if ( my_is_event ) @@ -407,7 +420,7 @@ class dependency_msg : public async_msg { return *this; } - ~dependency_msg() { + ~opencl_async_msg() { if ( my_is_event ) enforce_cl_retcode( clReleaseEvent( my_event ), "Failed to release an event" ); } @@ -425,10 +438,6 @@ class dependency_msg : public async_msg { clRetainEvent( my_event ); } - void set_graph( graph &g ) { - my_graph = &g; - } - void clear_event() const { if ( my_is_event ) { enforce_cl_retcode( clFlush( event_info( my_event, CL_EVENT_COMMAND_QUEUE ) ), "Failed to flush an OpenCL command queue" ); @@ -440,20 +449,19 @@ class dependency_msg : public async_msg { template void register_callback( Callback c ) const { __TBB_ASSERT( my_is_event, "The OpenCL event is not set" ); - __TBB_ASSERT( my_graph, "The graph is not set" ); - enforce_cl_retcode( clSetEventCallback( my_event, CL_COMPLETE, register_callback_func, new callback( *my_graph, c, my_data ) ), "Failed to set an OpenCL callback" ); + enforce_cl_retcode( clSetEventCallback( my_event, CL_COMPLETE, register_callback_func, new callback( c, my_data ) ), "Failed to set an OpenCL callback" ); } operator T&() { return data(); } operator const T&() const { return data(); } protected: - // Overridden in this derived class to inform 
that + // Overridden in this derived class to inform that // async calculation chain is over void finalize() const __TBB_override { receive_if_memory_object(*this); if (! my_callback_flag_ptr->fetch_and_store(true)) { - dependency_msg a(*this); + opencl_async_msg a(*this); if (my_is_event) { register_callback([a](const T& t) mutable { a.set(t); @@ -479,13 +487,12 @@ class dependency_msg : public async_msg { T my_data; mutable cl_event my_event; mutable bool my_is_event = false; - graph *my_graph = NULL; std::shared_ptr< tbb::atomic > my_callback_flag_ptr; }; template -K key_from_message( const dependency_msg &dmsg ) { +K key_from_message( const opencl_async_msg &dmsg ) { using tbb::flow::key_from_message; const T &t = dmsg.data( false ); __TBB_STATIC_ASSERT( true, "" ); @@ -500,7 +507,7 @@ class opencl_memory { my_curr_device_id = my_factory->devices().begin()->my_device_id; } - ~opencl_memory() { + virtual ~opencl_memory() { if ( my_sending_event_present ) enforce_cl_retcode( clReleaseEvent( my_sending_event ), "Failed to release an event for the OpenCL buffer" ); enforce_cl_retcode( clReleaseMemObject( my_cl_mem ), "Failed to release an memory object" ); } @@ -511,7 +518,7 @@ class opencl_memory { void* get_host_ptr() { if ( !my_host_ptr ) { - dependency_msg d = receive( NULL ); + opencl_async_msg d = receive( NULL ); d.data(); __TBB_ASSERT( d.data() == my_host_ptr, NULL ); } @@ -520,9 +527,55 @@ class opencl_memory { Factory *factory() const { return my_factory; } - dependency_msg send( opencl_device d, const cl_event *e ); - dependency_msg receive( const cl_event *e ); - virtual void map_memory( opencl_device, dependency_msg & ) = 0; + opencl_async_msg receive(const cl_event *e) { + opencl_async_msg d; + if (e) { + d = opencl_async_msg(my_host_ptr, *e); + } else { + d = opencl_async_msg(my_host_ptr); + } + + // Concurrent receives are prohibited so we do not worry about synchronization. 
+ if (my_curr_device_id.load() != opencl_device::host) { + map_memory(*my_factory->devices().begin(), d); + my_curr_device_id.store(opencl_device::host); + my_host_ptr = d.data(false); + } + // Release the sending event + if (my_sending_event_present) { + enforce_cl_retcode(clReleaseEvent(my_sending_event), "Failed to release an event"); + my_sending_event_present = false; + } + return d; + } + + opencl_async_msg send(opencl_device device, const cl_event *e) { + opencl_device::device_id_type device_id = device.my_device_id; + if (!my_factory->is_same_context(my_curr_device_id.load(), device_id)) { + { + tbb::spin_mutex::scoped_lock lock(my_sending_lock); + if (!my_factory->is_same_context(my_curr_device_id.load(), device_id)) { + __TBB_ASSERT(my_host_ptr, "The buffer has not been mapped"); + opencl_async_msg d(my_host_ptr); + my_factory->enqueue_unmap_buffer(device, *this, d); + my_sending_event = *d.get_event(); + my_sending_event_present = true; + enforce_cl_retcode(clRetainEvent(my_sending_event), "Failed to retain an event"); + my_host_ptr = NULL; + my_curr_device_id.store(device_id); + } + } + __TBB_ASSERT(my_sending_event_present, NULL); + } + + // !e means that buffer has come from the host + if (!e && my_sending_event_present) e = &my_sending_event; + + __TBB_ASSERT(!my_host_ptr, "The buffer has not been unmapped"); + return e ? 
opencl_async_msg(NULL, *e) : opencl_async_msg(NULL); + } + + virtual void map_memory( opencl_device, opencl_async_msg & ) = 0; protected: cl_mem my_cl_mem; tbb::atomic my_curr_device_id; @@ -556,8 +609,8 @@ class opencl_buffer_impl : public opencl_memory { return my_size; } - void map_memory( opencl_device device, dependency_msg &dmsg ) __TBB_override { - this->my_factory->enque_map_buffer( device, *this, dmsg ); + void map_memory( opencl_device device, opencl_async_msg &dmsg ) __TBB_override { + this->my_factory->enqueue_map_buffer( device, *this, dmsg ); } #if TBB_USE_ASSERT @@ -572,10 +625,10 @@ enum access_type { read_only }; -template +template class opencl_subbuffer; -template +template class opencl_buffer { public: typedef cl_mem native_object_type; @@ -604,7 +657,7 @@ class opencl_buffer { T& operator[] ( ptrdiff_t k ) { return begin()[k]; } opencl_buffer() {} - opencl_buffer( opencl_graph &g, size_t size ); + opencl_buffer( size_t size ); opencl_buffer( Factory &f, size_t size ) : my_impl( std::make_shared( size*sizeof(T), f ) ) {} cl_mem native_object() const { @@ -615,16 +668,16 @@ class opencl_buffer { return *this; } - void send( opencl_device device, dependency_msg &dependency ) const { + void send( opencl_device device, opencl_async_msg &dependency ) const { __TBB_ASSERT( dependency.data( /*wait = */false ) == *this, NULL ); - dependency_msg d = my_impl->send( device, dependency.get_event() ); + opencl_async_msg d = my_impl->send( device, dependency.get_event() ); const cl_event *e = d.get_event(); if ( e ) dependency.set_event( *e ); else dependency.clear_event(); } - void receive( const dependency_msg &dependency ) const { + void receive( const opencl_async_msg &dependency ) const { __TBB_ASSERT( dependency.data( /*wait = */false ) == *this, NULL ); - dependency_msg d = my_impl->receive( dependency.get_event() ); + opencl_async_msg d = my_impl->receive( dependency.get_event() ); const cl_event *e = d.get_event(); if ( e ) dependency.set_event( *e 
); else dependency.clear_event(); @@ -690,11 +743,11 @@ typename std::enable_if::value, T>::type get_native_ob // send_if_memory_object checks if the T type has memory_object_type and call the send method for the object. template -typename std::enable_if::value>::type send_if_memory_object( opencl_device device, dependency_msg &dmsg ) { +typename std::enable_if::value>::type send_if_memory_object( opencl_device device, opencl_async_msg &dmsg ) { const T &t = dmsg.data( false ); typedef typename T::memory_object_type mem_obj_t; mem_obj_t mem_obj = t.memory_object(); - dependency_msg d( mem_obj ); + opencl_async_msg d( mem_obj ); if ( dmsg.get_event() ) d.set_event( *dmsg.get_event() ); mem_obj.send( device, d ); if ( d.get_event() ) dmsg.set_event( *d.get_event() ); @@ -704,7 +757,7 @@ template typename std::enable_if::value>::type send_if_memory_object( opencl_device device, T &t ) { typedef typename T::memory_object_type mem_obj_t; mem_obj_t mem_obj = t.memory_object(); - dependency_msg dmsg( mem_obj ); + opencl_async_msg dmsg( mem_obj ); mem_obj.send( device, dmsg ); } @@ -713,11 +766,11 @@ typename std::enable_if::value>::type send_if_memory_o // receive_if_memory_object checks if the T type has memory_object_type and call the receive method for the object. 
template -typename std::enable_if::value>::type receive_if_memory_object( const dependency_msg &dmsg ) { +typename std::enable_if::value>::type receive_if_memory_object( const opencl_async_msg &dmsg ) { const T &t = dmsg.data( false ); typedef typename T::memory_object_type mem_obj_t; mem_obj_t mem_obj = t.memory_object(); - dependency_msg d( mem_obj ); + opencl_async_msg d( mem_obj ); if ( dmsg.get_event() ) d.set_event( *dmsg.get_event() ); mem_obj.receive( d ); if ( d.get_event() ) dmsg.set_event( *d.get_event() ); @@ -736,7 +789,7 @@ class opencl_range { opencl_range(G&& global_work = std::initializer_list({ 0 }), L&& local_work = std::initializer_list({ 0, 0, 0 })) { auto g_it = global_work.begin(); auto l_it = local_work.begin(); - my_global_work_size = { size_t(-1), size_t(-1), size_t(-1) }; + my_global_work_size = { {size_t(-1), size_t(-1), size_t(-1)} }; // my_local_work_size is still uninitialized for (int s = 0; s < 3 && g_it != global_work.end(); ++g_it, ++l_it, ++s) { __TBB_ASSERT(l_it != local_work.end(), "global_work & local_work must have same size"); @@ -756,7 +809,7 @@ class opencl_range { template class opencl_factory { public: - template using async_msg_type = dependency_msg>; + template using async_msg_type = opencl_async_msg>; typedef opencl_device device_type; class kernel : tbb::internal::no_assign { @@ -805,7 +858,7 @@ class opencl_factory { // it affects expectations for enqueue_kernel(.....) 
interface method typedef opencl_range range_type; - opencl_factory( opencl_graph &g ) : my_graph( g ) {} + opencl_factory() {} ~opencl_factory() { if ( my_devices.size() ) { for ( auto d = my_devices.begin(); d != my_devices.end(); ++d ) { @@ -827,7 +880,7 @@ class opencl_factory { private: template - void enque_map_buffer( opencl_device device, opencl_buffer_impl &buffer, dependency_msg& dmsg ) { + void enqueue_map_buffer( opencl_device device, opencl_buffer_impl &buffer, opencl_async_msg& dmsg ) { cl_event const* e1 = dmsg.get_event(); cl_event e2; cl_int err; @@ -841,7 +894,7 @@ class opencl_factory { template - void enque_unmap_buffer( opencl_device device, opencl_memory &memory, dependency_msg& dmsg ) { + void enqueue_unmap_buffer( opencl_device device, opencl_memory &memory, opencl_async_msg& dmsg ) { cl_event const* e1 = dmsg.get_event(); cl_event e2; enforce_cl_retcode( @@ -859,7 +912,7 @@ class opencl_factory { } template - void process_one_arg( const kernel_type& kernel, std::array& events, int& num_events, int& place, const dependency_msg& msg ) { + void process_one_arg( const kernel_type& kernel, std::array& events, int& num_events, int& place, const opencl_async_msg& msg ) { __TBB_ASSERT((static_cast::size_type>(num_events) < events.size()), NULL); const cl_event * const e = msg.get_event(); @@ -883,9 +936,8 @@ class opencl_factory { void update_one_arg( cl_event, T& ) {} template - void update_one_arg( cl_event e, dependency_msg& msg ) { + void update_one_arg( cl_event e, opencl_async_msg& msg ) { msg.set_event( e ); - msg.set_graph( my_graph ); } template @@ -945,7 +997,7 @@ class opencl_factory { } template - bool get_event_from_one_arg( cl_event& e, const dependency_msg& msg) { + bool get_event_from_one_arg( cl_event& e, const opencl_async_msg& msg) { cl_event const *e_ptr = msg.get_event(); if ( e_ptr != NULL ) { @@ -1028,12 +1080,79 @@ class opencl_factory { return my_cl_context; } - void init_once(); + void init_once() { + { + 
tbb::spin_mutex::scoped_lock lock(my_devices_mutex); + if (!my_devices.size()) + my_devices = DeviceFilter()( opencl_info::available_devices() ); + } + + enforce_cl_retcode(my_devices.size() ? CL_SUCCESS : CL_INVALID_DEVICE, "No devices in the device list"); + cl_platform_id platform_id = my_devices.begin()->platform_id(); + for (opencl_device_list::iterator it = ++my_devices.begin(); it != my_devices.end(); ++it) + enforce_cl_retcode(it->platform_id() == platform_id ? CL_SUCCESS : CL_INVALID_PLATFORM, "All devices should be in the same platform"); + + std::vector cl_device_ids; + for (auto d = my_devices.begin(); d != my_devices.end(); ++d) { + cl_device_ids.push_back((*d).my_cl_device_id); + } + + cl_context_properties context_properties[3] = { CL_CONTEXT_PLATFORM, (cl_context_properties)platform_id, (cl_context_properties)NULL }; + cl_int err; + cl_context ctx = clCreateContext(context_properties, + (cl_uint)cl_device_ids.size(), + cl_device_ids.data(), + NULL, NULL, &err); + enforce_cl_retcode(err, "Failed to create context"); + my_cl_context = ctx; + + size_t device_counter = 0; + for (auto d = my_devices.begin(); d != my_devices.end(); d++) { + (*d).my_device_id = device_counter++; + cl_int err2; + cl_command_queue cq; +#if CL_VERSION_2_0 + if ((*d).major_version() >= 2) { + if ((*d).out_of_order_exec_mode_on_host_present()) { + cl_queue_properties props[] = { CL_QUEUE_PROPERTIES, CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, 0 }; + cq = clCreateCommandQueueWithProperties(ctx, (*d).my_cl_device_id, props, &err2); + } else { + cl_queue_properties props[] = { 0 }; + cq = clCreateCommandQueueWithProperties(ctx, (*d).my_cl_device_id, props, &err2); + } + } else +#endif + { + cl_command_queue_properties props = (*d).out_of_order_exec_mode_on_host_present() ? CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE : 0; + // Suppress "declared deprecated" warning for the next line. 
+#if __TBB_GCC_WARNING_SUPPRESSION_PRESENT +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#endif +#if _MSC_VER || __INTEL_COMPILER +#pragma warning( push ) +#if __INTEL_COMPILER +#pragma warning (disable: 1478) +#else +#pragma warning (disable: 4996) +#endif +#endif + cq = clCreateCommandQueue(ctx, (*d).my_cl_device_id, props, &err2); +#if _MSC_VER || __INTEL_COMPILER +#pragma warning( pop ) +#endif +#if __TBB_GCC_WARNING_SUPPRESSION_PRESENT +#pragma GCC diagnostic pop +#endif + } + enforce_cl_retcode(err2, "Failed to create command queue"); + (*d).my_cl_command_queue = cq; + } + } std::once_flag my_once_flag; opencl_device_list my_devices; cl_context my_cl_context; - opencl_graph &my_graph; tbb::spin_mutex my_devices_mutex; @@ -1043,238 +1162,62 @@ class opencl_factory { friend class opencl_buffer_impl; template friend class opencl_memory; -}; +}; // class opencl_factory -template -dependency_msg opencl_memory::receive( const cl_event *e ) { - dependency_msg d = e ? dependency_msg( my_host_ptr, *e ) : dependency_msg( my_host_ptr ); - // Concurrent receives are prohibited so we do not worry about synchronization. 
- if ( my_curr_device_id.load() != opencl_device::host ) { - map_memory( *my_factory->devices().begin(), d ); - my_curr_device_id.store( opencl_device::host ); - my_host_ptr = d.data( false ); - } - // Release the sending event - if ( my_sending_event_present ) { - enforce_cl_retcode( clReleaseEvent( my_sending_event ), "Failed to release an event" ); - my_sending_event_present = false; - } - return d; -} +// TODO: consider this namespace as public API +namespace opencl_info { + +// Default types template -dependency_msg opencl_memory::send( opencl_device device, const cl_event *e ) { - opencl_device::device_id_type device_id = device.my_device_id; - if ( !my_factory->is_same_context( my_curr_device_id.load(), device_id ) ) { - { - tbb::spin_mutex::scoped_lock lock( my_sending_lock ); - if ( !my_factory->is_same_context( my_curr_device_id.load(), device_id ) ) { - __TBB_ASSERT( my_host_ptr, "The buffer has not been mapped" ); - dependency_msg d( my_host_ptr ); - my_factory->enque_unmap_buffer( device, *this, d ); - my_sending_event = *d.get_event(); - my_sending_event_present = true; - enforce_cl_retcode( clRetainEvent( my_sending_event ), "Failed to retain an event" ); - my_host_ptr = NULL; - my_curr_device_id.store(device_id); - } - } - __TBB_ASSERT( my_sending_event_present, NULL ); +struct default_device_selector { + opencl_device operator()(Factory& f) { + __TBB_ASSERT(!f.devices().empty(), "No available devices"); + return *(f.devices().begin()); } +}; - // !e means that buffer has come from the host - if ( !e && my_sending_event_present ) e = &my_sending_event; - - __TBB_ASSERT( !my_host_ptr, "The buffer has not been unmapped" ); - return e ? 
dependency_msg( NULL, *e ) : dependency_msg( NULL ); -} - -struct default_opencl_factory_device_filter { - opencl_device_list operator()( const opencl_device_list &devices ) { +struct default_device_filter { + opencl_device_list operator()(const opencl_device_list &devices) { opencl_device_list dl; - dl.add( *devices.begin() ); + cl_platform_id platform_id = devices.begin()->platform_id(); + for (opencl_device_list::const_iterator it = devices.cbegin(); it != devices.cend(); ++it) { + if (it->platform_id() == platform_id) { + dl.add(*it); + } + } return dl; } }; -class default_opencl_factory : public opencl_factory < default_opencl_factory_device_filter > { +class default_opencl_factory : public opencl_factory < default_device_filter >, tbb::internal::no_copy { public: - template using async_msg_type = dependency_msg; - - default_opencl_factory( opencl_graph &g ) : opencl_factory( g ) {} -private: - default_opencl_factory( const default_opencl_factory& ); - default_opencl_factory& operator=(const default_opencl_factory&); -}; + template using async_msg_type = opencl_async_msg; -class opencl_foundation : tbb::internal::no_assign { - struct default_device_selector_type { - opencl_device operator()( default_opencl_factory& f ) { - __TBB_ASSERT( ! 
f.devices().empty(), "No available devices" ); - return *( f.devices().begin() ); - } - }; -public: - opencl_foundation(opencl_graph &g) : my_default_opencl_factory(g), my_default_device_selector() { - cl_uint num_platforms; - enforce_cl_retcode(clGetPlatformIDs(0, NULL, &num_platforms), "clGetPlatformIDs failed"); - - std::vector platforms(num_platforms); - enforce_cl_retcode(clGetPlatformIDs(num_platforms, platforms.data(), NULL), "clGetPlatformIDs failed"); - - cl_uint num_devices; - std::vector::iterator platforms_it = platforms.begin(); - cl_uint num_all_devices = 0; - while (platforms_it != platforms.end()) { - cl_int err = clGetDeviceIDs(*platforms_it, CL_DEVICE_TYPE_ALL, 0, NULL, &num_devices); - if (err == CL_DEVICE_NOT_FOUND) { - platforms_it = platforms.erase(platforms_it); - } else { - enforce_cl_retcode(err, "clGetDeviceIDs failed"); - num_all_devices += num_devices; - ++platforms_it; - } - } - - std::vector devices(num_all_devices); - std::vector::iterator devices_it = devices.begin(); - for (auto p = platforms.begin(); p != platforms.end(); ++p) { - enforce_cl_retcode(clGetDeviceIDs((*p), CL_DEVICE_TYPE_ALL, (cl_uint)std::distance(devices_it, devices.end()), &*devices_it, &num_devices), "clGetDeviceIDs failed"); - devices_it += num_devices; - } - - for (auto d = devices.begin(); d != devices.end(); ++d) { - my_devices.add(opencl_device((*d))); - } - } - - default_opencl_factory &get_default_opencl_factory() { - return my_default_opencl_factory; - } - - const opencl_device_list &get_all_devices() { - return my_devices; - } - - default_device_selector_type get_default_device_selector() { return my_default_device_selector; } + friend default_opencl_factory& default_factory(); private: - default_opencl_factory my_default_opencl_factory; - opencl_device_list my_devices; - - const default_device_selector_type my_default_device_selector; + default_opencl_factory() = default; }; -opencl_foundation &opencl_graph::get_opencl_foundation() { - opencl_foundation* 
INITIALIZATION = (opencl_foundation*)1; - if ( my_opencl_foundation <= INITIALIZATION ) { - if ( tbb::internal::as_atomic( my_opencl_foundation ).compare_and_swap( INITIALIZATION, NULL ) == 0 ) { - my_opencl_foundation = new opencl_foundation( *this ); - } - else { - tbb::internal::spin_wait_while_eq( my_opencl_foundation, INITIALIZATION ); - } - } - - __TBB_ASSERT( my_opencl_foundation > INITIALIZATION, "opencl_foundation is not initialized"); - return *my_opencl_foundation; -} - -opencl_graph::~opencl_graph() { - if ( my_opencl_foundation ) - delete my_opencl_foundation; -} - -template -void opencl_factory::init_once() { - { - tbb::spin_mutex::scoped_lock lock( my_devices_mutex ); - if ( !my_devices.size() ) - my_devices = DeviceFilter()(my_graph.get_opencl_foundation().get_all_devices()); - } - - enforce_cl_retcode( my_devices.size() ? CL_SUCCESS : CL_INVALID_DEVICE, "No devices in the device list" ); - cl_platform_id platform_id = my_devices.begin()->platform(); - for ( opencl_device_list::iterator it = ++my_devices.begin(); it != my_devices.end(); ++it ) - enforce_cl_retcode( it->platform() == platform_id ? 
CL_SUCCESS : CL_INVALID_PLATFORM, "All devices should be in the same platform" ); - - std::vector cl_device_ids; - for (auto d = my_devices.begin(); d != my_devices.end(); ++d) { - cl_device_ids.push_back((*d).my_cl_device_id); - } - - cl_context_properties context_properties[3] = { CL_CONTEXT_PLATFORM, (cl_context_properties)platform_id, (cl_context_properties)NULL }; - cl_int err; - cl_context ctx = clCreateContext( context_properties, - (cl_uint)cl_device_ids.size(), - cl_device_ids.data(), - NULL, NULL, &err ); - enforce_cl_retcode( err, "Failed to create context" ); - my_cl_context = ctx; - - size_t device_counter = 0; - for ( auto d = my_devices.begin(); d != my_devices.end(); d++ ) { - (*d).my_device_id = device_counter++; - cl_int err2; - cl_command_queue cq; -#if CL_VERSION_2_0 - if ( (*d).major_version() >= 2 ) { - if ( (*d).out_of_order_exec_mode_on_host_present() ) { - cl_queue_properties props[] = { CL_QUEUE_PROPERTIES, CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, 0 }; - cq = clCreateCommandQueueWithProperties( ctx, (*d).my_cl_device_id, props, &err2 ); - } else { - cl_queue_properties props[] = { 0 }; - cq = clCreateCommandQueueWithProperties( ctx, (*d).my_cl_device_id, props, &err2 ); - } - } else -#endif - { - cl_command_queue_properties props = (*d).out_of_order_exec_mode_on_host_present() ? CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE : 0; - // Suppress "declared deprecated" warning for the next line. 
-#if __TBB_GCC_WARNING_SUPPRESSION_PRESENT -#pragma GCC diagnostic push -// #pragma GCC diagnostic ignored "-Wdeprecated-declarations" -#endif -#if _MSC_VER || __INTEL_COMPILER -#pragma warning( push ) -#if __INTEL_COMPILER -#pragma warning (disable: 1478) -#else -#pragma warning (disable: 4996) -#endif -#endif - cq = clCreateCommandQueue( ctx, (*d).my_cl_device_id, props, &err2 ); -#if _MSC_VER || __INTEL_COMPILER -#pragma warning( pop ) -#endif -#if __TBB_GCC_WARNING_SUPPRESSION_PRESENT -#pragma GCC diagnostic pop -#endif - } - enforce_cl_retcode( err2, "Failed to create command queue" ); - (*d).my_cl_command_queue = cq; - } +inline default_opencl_factory& default_factory() { + static default_opencl_factory default_factory; + return default_factory; } -const opencl_device_list &opencl_graph::available_devices() { - return get_opencl_foundation().get_all_devices(); -} - -default_opencl_factory &opencl_graph::opencl_factory() { - return get_opencl_foundation().get_default_opencl_factory(); -} +} // namespace opencl_info template -opencl_buffer::opencl_buffer( opencl_graph &g, size_t size ) : my_impl( std::make_shared( size*sizeof(T), g.get_opencl_foundation().get_default_opencl_factory() ) ) {} +opencl_buffer::opencl_buffer( size_t size ) : my_impl( std::make_shared( size*sizeof(T), opencl_info::default_factory() ) ) {} + - enum class opencl_program_type { SOURCE, PRECOMPILED, SPIR }; -template +template class opencl_program : tbb::internal::no_assign { public: typedef typename Factory::kernel_type kernel_type; @@ -1283,10 +1226,10 @@ class opencl_program : tbb::internal::no_assign { opencl_program( Factory& factory, const char* program_name ) : opencl_program( factory, std::string( program_name ) ) {} opencl_program( Factory& factory, const std::string& program_name ) : opencl_program( factory, opencl_program_type::SOURCE, program_name ) {} - opencl_program( opencl_graph& graph, opencl_program_type type, const std::string& program_name ) : opencl_program( 
graph.opencl_factory(), type, program_name ) {} - opencl_program( opencl_graph& graph, const char* program_name ) : opencl_program( graph.opencl_factory(), program_name ) {} - opencl_program( opencl_graph& graph, const std::string& program_name ) : opencl_program( graph.opencl_factory(), program_name ) {} - opencl_program( opencl_graph& graph, opencl_program_type type ) : opencl_program( graph.opencl_factory(), type ) {} + opencl_program( opencl_program_type type, const std::string& program_name ) : opencl_program( opencl_info::default_factory(), type, program_name ) {} + opencl_program( const char* program_name ) : opencl_program( opencl_info::default_factory(), program_name ) {} + opencl_program( const std::string& program_name ) : opencl_program( opencl_info::default_factory(), program_name ) {} + opencl_program( opencl_program_type type ) : opencl_program( opencl_info::default_factory(), type ) {} opencl_program( const opencl_program &src ) : my_factory( src.my_factory ), my_type( src.type ), my_arg_str( src.my_arg_str ), my_cl_program( src.my_cl_program ) { // Set my_do_once_flag to the called state. 
@@ -1454,8 +1397,7 @@ class opencl_program : tbb::internal::no_assign { template friend class opencl_factory; - template - friend class opencl_factory::kernel; + friend class Factory::kernel; }; template @@ -1467,71 +1409,79 @@ class opencl_node< tuple, JP, Factory > : public streaming_node< tuple public: typedef typename base_type::kernel_type kernel_type; - opencl_node( opencl_graph &g, const kernel_type& kernel ) - : base_type( g, kernel, g.get_opencl_foundation().get_default_device_selector(), g.get_opencl_foundation().get_default_opencl_factory() ) - {} + opencl_node( graph &g, const kernel_type& kernel ) + : base_type( g, kernel, opencl_info::default_device_selector< opencl_info::default_opencl_factory >(), opencl_info::default_factory() ) + { + tbb::internal::fgt_multiinput_multioutput_node( CODEPTR(), tbb::internal::FLOW_OPENCL_NODE, this, &this->my_graph ); + } - opencl_node( opencl_graph &g, const kernel_type& kernel, Factory &f ) - : base_type( g, kernel, g.get_opencl_foundation().get_default_device_selector(), f ) - {} + opencl_node( graph &g, const kernel_type& kernel, Factory &f ) + : base_type( g, kernel, opencl_info::default_device_selector (), f ) + { + tbb::internal::fgt_multiinput_multioutput_node( CODEPTR(), tbb::internal::FLOW_OPENCL_NODE, this, &this->my_graph ); + } template - opencl_node( opencl_graph &g, const kernel_type& kernel, DeviceSelector d, Factory &f) + opencl_node( graph &g, const kernel_type& kernel, DeviceSelector d, Factory &f) : base_type( g, kernel, d, f) - {} + { + tbb::internal::fgt_multiinput_multioutput_node( CODEPTR(), tbb::internal::FLOW_OPENCL_NODE, this, &this->my_graph ); + } }; template -class opencl_node< tuple, JP > : public opencl_node < tuple, JP, default_opencl_factory > { - typedef opencl_node < tuple, JP, default_opencl_factory > base_type; +class opencl_node< tuple, JP > : public opencl_node < tuple, JP, opencl_info::default_opencl_factory > { + typedef opencl_node < tuple, JP, 
opencl_info::default_opencl_factory > base_type; public: typedef typename base_type::kernel_type kernel_type; - opencl_node( opencl_graph &g, const kernel_type& kernel ) - : base_type( g, kernel, g.get_opencl_foundation().get_default_device_selector(), g.get_opencl_foundation().get_default_opencl_factory() ) + opencl_node( graph &g, const kernel_type& kernel ) + : base_type( g, kernel, opencl_info::default_device_selector< opencl_info::default_opencl_factory >(), opencl_info::default_factory() ) {} template - opencl_node( opencl_graph &g, const kernel_type& kernel, DeviceSelector d ) - : base_type( g, kernel, d, g.get_opencl_foundation().get_default_opencl_factory() ) + opencl_node( graph &g, const kernel_type& kernel, DeviceSelector d ) + : base_type( g, kernel, d, opencl_info::default_factory() ) {} }; template -class opencl_node< tuple > : public opencl_node < tuple, queueing, default_opencl_factory > { - typedef opencl_node < tuple, queueing, default_opencl_factory > base_type; +class opencl_node< tuple > : public opencl_node < tuple, queueing, opencl_info::default_opencl_factory > { + typedef opencl_node < tuple, queueing, opencl_info::default_opencl_factory > base_type; public: typedef typename base_type::kernel_type kernel_type; - opencl_node( opencl_graph &g, const kernel_type& kernel ) - : base_type( g, kernel, g.get_opencl_foundation().get_default_device_selector(), g.get_opencl_foundation().get_default_opencl_factory() ) + opencl_node( graph &g, const kernel_type& kernel ) + : base_type( g, kernel, opencl_info::default_device_selector< opencl_info::default_opencl_factory >(), opencl_info::default_factory() ) {} template - opencl_node( opencl_graph &g, const kernel_type& kernel, DeviceSelector d ) - : base_type( g, kernel, d, g.get_opencl_foundation().get_default_opencl_factory() ) + opencl_node( graph &g, const kernel_type& kernel, DeviceSelector d ) + : base_type( g, kernel, d, opencl_info::default_factory() ) {} }; -} // namespace interface9 - -using 
interface9::opencl_graph; -using interface9::opencl_node; -using interface9::read_only; -using interface9::read_write; -using interface9::write_only; -using interface9::opencl_buffer; -using interface9::opencl_subbuffer; -using interface9::opencl_device; -using interface9::opencl_device_list; -using interface9::opencl_program; -using interface9::opencl_program_type; -using interface9::dependency_msg; -using interface9::opencl_factory; -using interface9::opencl_range; +} // namespace interfaceX + +using interface11::opencl_node; +using interface11::read_only; +using interface11::read_write; +using interface11::write_only; +using interface11::opencl_buffer; +using interface11::opencl_subbuffer; +using interface11::opencl_device; +using interface11::opencl_device_list; +using interface11::opencl_program; +using interface11::opencl_program_type; +using interface11::opencl_async_msg; +using interface11::opencl_factory; +using interface11::opencl_range; } // namespace flow } // namespace tbb #endif /* __TBB_PREVIEW_OPENCL_NODE */ +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_flow_graph_opencl_node_H_include_area + #endif // __TBB_flow_graph_opencl_node_H diff --git a/inst/include/tbb/gfx_factory.h b/inst/include/tbb/gfx_factory.h deleted file mode 100644 index 76ca3559..00000000 --- a/inst/include/tbb/gfx_factory.h +++ /dev/null @@ -1,359 +0,0 @@ -/* - Copyright (c) 2005-2017 Intel Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- - - - -*/ - -#ifndef __TBB_flow_graph_gfx_factory_H -#define __TBB_flow_graph_gfx_factory_H - -#include "tbb/tbb_config.h" - -#if __TBB_PREVIEW_GFX_FACTORY - -#include -#include -#include -#include - -#include -#include -#include - -namespace tbb { - -namespace flow { - -namespace interface9 { - -template -class gfx_buffer; - -namespace gfx_offload { - - typedef GfxTaskId task_id_type; - - //----------------------------------------------------------------------- - // GFX errors checkers. - // For more debug output, set GFX_LOG_OFFLOAD=2 macro - //----------------------------------------------------------------------- - - // TODO: reconsider error handling approach. If exception is the right way - // then need to define and document a specific exception type. - inline void throw_gfx_exception() { - std::string msg = "GFX error occurred: " + std::to_string(_GFX_get_last_error()); - std::cerr << msg << std::endl; - throw msg; - } - - inline void check_enqueue_retcode(task_id_type err) { - if (err == 0) { - throw_gfx_exception(); - } - } - - inline void check_gfx_retcode(task_id_type err) { - if (err != GFX_SUCCESS) { - throw_gfx_exception(); - } - } - - //--------------------------------------------------------------------- - // GFX asynchronous offload and share API - //--------------------------------------------------------------------- - - // Sharing and unsharing data API - template - void share(DataType* p, SizeType n) { check_gfx_retcode(_GFX_share(p, sizeof(*p)*n)); } - template - void unshare(DataType* p) { check_gfx_retcode(_GFX_unshare(p)); } - - // Retrieving array pointer from shared gfx_buffer - // Other types remain the same - template - T* raw_data(gfx_buffer& buffer) { return buffer.data(); } - template - const T* raw_data(const gfx_buffer& buffer) { return buffer.data(); } - template - T& raw_data(T& data) { return data; } - template - const T& raw_data(const T& data) { return data; } - - // Kernel enqueuing on device with arguments - template - 
task_id_type run_kernel(F ptr, ArgType&... args) { - task_id_type id = _GFX_offload(ptr, raw_data(args)...); - - // Check if something during offload went wrong (ex: driver initialization failure) - gfx_offload::check_enqueue_retcode(id); - - return id; - } - - // Waiting for tasks completion - void wait_for_task(task_id_type id) { check_gfx_retcode(_GFX_wait(id)); } - -} // namespace gfx_offload - -template -class gfx_buffer { -public: - - typedef typename std::vector::iterator iterator; - typedef typename std::vector::const_iterator const_iterator; - - typedef std::size_t size_type; - - gfx_buffer() : my_vector_ptr(std::make_shared< std::vector >()) {} - gfx_buffer(size_type size) : my_vector_ptr(std::make_shared< std::vector >(size)) {} - - T* data() { return &(my_vector_ptr->front()); } - const T* data() const { return &(my_vector_ptr->front()); } - - size_type size() const { return my_vector_ptr->size(); } - - const_iterator cbegin() const { return my_vector_ptr->cbegin(); } - const_iterator cend() const { return my_vector_ptr->cend(); } - iterator begin() { return my_vector_ptr->begin(); } - iterator end() { return my_vector_ptr->end(); } - - T& operator[](size_type pos) { return (*my_vector_ptr)[pos]; } - const T& operator[](size_type pos) const { return (*my_vector_ptr)[pos]; } - -private: - std::shared_ptr< std::vector > my_vector_ptr; -}; - -template -class gfx_async_msg : public tbb::flow::async_msg { -public: - typedef gfx_offload::task_id_type kernel_id_type; - - gfx_async_msg() : my_task_id(0) {} - gfx_async_msg(const T& input_data) : my_data(input_data), my_task_id(0) {} - - T& data() { return my_data; } - const T& data() const { return my_data; } - - void set_task_id(kernel_id_type id) { my_task_id = id; } - kernel_id_type task_id() const { return my_task_id; } - -private: - T my_data; - kernel_id_type my_task_id; -}; - -class gfx_factory { -private: - - // Wrapper for GFX kernel which is just a function - class func_wrapper { - public: - - template 
- func_wrapper(F ptr) { my_ptr = reinterpret_cast(ptr); } - - template - void operator()(Args&&... args) {} - - operator void*() { return my_ptr; } - - private: - void* my_ptr; - }; - -public: - - // Device specific types - template using async_msg_type = gfx_async_msg; - - typedef func_wrapper kernel_type; - - // Empty device type that is needed for Factory Concept - // but is not used in gfx_factory - typedef struct {} device_type; - - typedef gfx_offload::task_id_type kernel_id_type; - - gfx_factory(tbb::flow::graph& g) : m_graph(g), current_task_id(0) {} - - // Upload data to the device - template - void send_data(device_type /*device*/, Args&... args) { - send_data_impl(args...); - } - - // Run kernel on the device - template - void send_kernel(device_type /*device*/, const kernel_type& kernel, Args&... args) { - // Get packed T data from async_msg and pass it to kernel - kernel_id_type id = gfx_offload::run_kernel(kernel, args.data()...); - - // Set id to async_msg - set_kernel_id(id, args...); - - // Extend the graph lifetime until the callback completion. - m_graph.reserve_wait(); - - // Mutex for future assignment - std::lock_guard lock(future_assignment_mutex); - - // Set callback that waits for kernel execution - callback_future = std::async(std::launch::async, &gfx_factory::callback, this, id, args...); - } - - // Finalization action after the kernel run - template - void finalize(device_type /*device*/, FinalizeFn fn, Args&... /*args*/) { - fn(); - } - - // Empty device selector. - // No way to choose a device with GFX API. - class dummy_device_selector { - public: - device_type operator()(gfx_factory& /*factory*/) { - return device_type(); - } - }; - -private: - - //--------------------------------------------------------------------- - // Callback for kernel result - //--------------------------------------------------------------------- - - template - void callback(kernel_id_type id, Args... 
args) { - // Waiting for specific tasks id to complete - { - std::lock_guard lock(task_wait_mutex); - if (current_task_id < id) { - gfx_offload::wait_for_task(id); - current_task_id = id; - } - } - - // Get result from device and set to async_msg (args) - receive_data(args...); - - // Data was sent to the graph, release the reference - m_graph.release_wait(); - } - - //--------------------------------------------------------------------- - // send_data() arguments processing - //--------------------------------------------------------------------- - - // GFX buffer shared data with device that will be executed on - template - void share_data(T) {} - - template - void share_data(gfx_buffer& buffer) { - gfx_offload::share(buffer.data(), buffer.size()); - } - - template - void send_arg(T) {} - - template - void send_arg(async_msg_type& msg) { - share_data(msg.data()); - } - - void send_data_impl() {} - - template - void send_data_impl(T& arg, Rest&... args) { - send_arg(arg); - send_data_impl(args...); - } - - //---------------------------------------------------------------------- - // send_kernel() arguments processing - //---------------------------------------------------------------------- - - template - void set_kernel_id_arg(kernel_id_type, T) {} - - template - void set_kernel_id_arg(kernel_id_type id, async_msg_type& msg) { - msg.set_task_id(id); - } - - void set_kernel_id(kernel_id_type) {} - - template - void set_kernel_id(kernel_id_type id, T& arg, Rest&... args) { - set_kernel_id_arg(id, arg); - set_kernel_id(id, args...); - } - - //----------------------------------------------------------------------- - // Arguments processing after kernel execution. 
- // Unsharing buffers and forwarding results to the graph - //----------------------------------------------------------------------- - - // After kernel execution the data should be unshared - template - void unshare_data(T) {} - - template - void unshare_data(gfx_buffer& buffer) { - gfx_offload::unshare(buffer.data()); - } - - template - void receive_arg(T) {} - - template - void receive_arg(async_msg_type& msg) { - unshare_data(msg.data()); - msg.set(msg.data()); - } - - void receive_data() {} - - template - void receive_data(T& arg, Rest&... args) { - receive_arg(arg); - receive_data(args...); - } - - //----------------------------------------------------------------------- - int current_task_id; - - std::future callback_future; - tbb::flow::graph& m_graph; - - std::mutex future_assignment_mutex; - std::mutex task_wait_mutex; -}; - -} // namespace interface9 - -using interface9::gfx_factory; -using interface9::gfx_buffer; - -} // namespace flow - -} // namespace tbb - -#endif // __TBB_PREVIEW_GFX_FACTORY - -#endif // __TBB_flow_graph_gfx_factory_H diff --git a/inst/include/tbb/global_control.h b/inst/include/tbb/global_control.h index fe742020..bdcd59a5 100644 --- a/inst/include/tbb/global_control.h +++ b/inst/include/tbb/global_control.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,19 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_global_control_H #define __TBB_global_control_H -#if !TBB_PREVIEW_GLOBAL_CONTROL && !__TBB_BUILD -#error Set TBB_PREVIEW_GLOBAL_CONTROL before including global_control.h -#endif - #include "tbb_stddef.h" namespace tbb { @@ -41,8 +33,8 @@ class global_control { global_control(parameter p, size_t value) : my_value(value), my_next(NULL), my_param(p) { __TBB_ASSERT(my_param < parameter_max, "Invalid parameter"); -#if __TBB_WIN8UI_SUPPORT - // For Windows Store* apps it's impossible to set stack size +#if __TBB_WIN8UI_SUPPORT && (_WIN32_WINNT < 0x0A00) + // For Windows 8 Store* apps it's impossible to set stack size if (p==thread_stack_size) return; #elif __TBB_x86_64 && (_WIN32 || _WIN64) @@ -56,8 +48,8 @@ class global_control { ~global_control() { __TBB_ASSERT(my_param < parameter_max, "Invalid parameter. Probably the object was corrupted."); -#if __TBB_WIN8UI_SUPPORT - // For Windows Store* apps it's impossible to set stack size +#if __TBB_WIN8UI_SUPPORT && (_WIN32_WINNT < 0x0A00) + // For Windows 8 Store* apps it's impossible to set stack size if (my_param==thread_stack_size) return; #endif diff --git a/inst/include/tbb/index.html b/inst/include/tbb/index.html index 2eddd450..9ead0e24 100644 --- a/inst/include/tbb/index.html +++ b/inst/include/tbb/index.html @@ -19,7 +19,7 @@

Directories


Up to parent directory

-Copyright © 2005-2017 Intel Corporation. All Rights Reserved. +Copyright © 2005-2020 Intel Corporation. All Rights Reserved.

Intel is a registered trademark or trademark of Intel Corporation or its subsidiaries in the United States and other countries. diff --git a/inst/include/tbb/info.h b/inst/include/tbb/info.h new file mode 100644 index 00000000..84b32092 --- /dev/null +++ b/inst/include/tbb/info.h @@ -0,0 +1,52 @@ +/* + Copyright (c) 2019-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_info_H +#define __TBB_info_H + +#include "tbb_config.h" + +#if __TBB_NUMA_SUPPORT + +#include + +namespace tbb { + namespace internal { + namespace numa_topology { + unsigned nodes_count(); + void fill(int* indexes_array); + int default_concurrency(int node_id); + } //namespace numa_topology + } // namespace internal + + typedef int numa_node_id; + + namespace info { + inline std::vector numa_nodes() { + std::vector nodes_indexes(tbb::internal::numa_topology::nodes_count()); + internal::numa_topology::fill(&nodes_indexes.front()); + return nodes_indexes; + } + + inline int default_concurrency(numa_node_id id = -1) { + return internal::numa_topology::default_concurrency(id); + } + } // namespace info +} // namespace tbb + +#endif /*__TBB_NUMA_SUPPORT*/ + +#endif /*__TBB_info_H*/ diff --git a/inst/include/tbb/internal/_aggregator_impl.h b/inst/include/tbb/internal/_aggregator_impl.h index 40bbd491..6d89c055 100644 --- a/inst/include/tbb/internal/_aggregator_impl.h +++ b/inst/include/tbb/internal/_aggregator_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation 
+ Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB__aggregator_impl_H @@ -164,7 +160,7 @@ template class aggregating_functor { aggregating_class *fi; public: - aggregating_functor() {} + aggregating_functor() : fi() {} aggregating_functor(aggregating_class *fi_) : fi(fi_) {} void operator()(operation_list* op_list) { fi->handle_operations(op_list); } }; diff --git a/inst/include/tbb/internal/_concurrent_queue_impl.h b/inst/include/tbb/internal/_concurrent_queue_impl.h index a99a68f6..e3bef772 100644 --- a/inst/include/tbb/internal/_concurrent_queue_impl.h +++ b/inst/include/tbb/internal/_concurrent_queue_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB__concurrent_queue_impl_H @@ -34,19 +30,8 @@ #include "../tbb_profiling.h" #include #include __TBB_STD_SWAP_HEADER - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - #include -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - namespace tbb { #if !__TBB_TEMPLATE_FRIENDS_BROKEN @@ -525,7 +510,7 @@ concurrent_queue_base_v3::concurrent_queue_base_v3() { __TBB_ASSERT( (size_t)&my_rep->head_counter % NFS_GetLineSize()==0, "alignment error" ); __TBB_ASSERT( (size_t)&my_rep->tail_counter % NFS_GetLineSize()==0, "alignment error" ); __TBB_ASSERT( (size_t)&my_rep->array % NFS_GetLineSize()==0, "alignment error" ); - memset(my_rep,0,sizeof(concurrent_queue_rep)); + memset(static_cast(my_rep),0,sizeof(concurrent_queue_rep)); my_rep->item_size = item_size; my_rep->items_per_page = item_size<= 8 ? 32 : item_size<= 16 ? 16 : @@ -658,7 +643,7 @@ bool concurrent_queue_iterator_rep::get_item( T*& item, size_t k ) { //! Constness-independent portion of concurrent_queue_iterator. /** @ingroup containers */ template -class concurrent_queue_iterator_base_v3 : no_assign { +class concurrent_queue_iterator_base_v3 { //! Represents concurrent_queue over which we are iterating. /** NULL if one past last element in queue. */ concurrent_queue_iterator_rep* my_rep; @@ -681,8 +666,13 @@ class concurrent_queue_iterator_base_v3 : no_assign { //! Copy constructor concurrent_queue_iterator_base_v3( const concurrent_queue_iterator_base_v3& i ) - : no_assign(), my_rep(NULL), my_item(NULL) { + : my_rep(NULL), my_item(NULL) { + assign(i); + } + + concurrent_queue_iterator_base_v3& operator=( const concurrent_queue_iterator_base_v3& i ) { assign(i); + return *this; } //! Construct iterator pointing to head of queue. 
@@ -779,8 +769,8 @@ class concurrent_queue_iterator: public concurrent_queue_iterator_base_v3assign(other); + concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) { + concurrent_queue_iterator_base_v3::type>::operator=(other); return *this; } @@ -900,7 +890,7 @@ class concurrent_queue_base_v3: no_copy { //! Get size of queue ptrdiff_t __TBB_EXPORTED_METHOD internal_size() const; - //! Check if the queue is emtpy + //! Check if the queue is empty bool __TBB_EXPORTED_METHOD internal_empty() const; //! Set the queue capacity @@ -991,6 +981,11 @@ class concurrent_queue_iterator_base_v3 { assign(i); } + concurrent_queue_iterator_base_v3& operator=( const concurrent_queue_iterator_base_v3& i ) { + assign(i); + return *this; + } + //! Obsolete entry point for constructing iterator pointing to head of queue. /** Does not work correctly for SSE types. */ __TBB_EXPORTED_METHOD concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue ); @@ -1040,8 +1035,8 @@ class concurrent_queue_iterator: public concurrent_queue_iterator_base, {} //! Iterator assignment - concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) { - assign(other); + concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) { + concurrent_queue_iterator_base_v3::operator=(other); return *this; } diff --git a/inst/include/tbb/internal/_concurrent_unordered_impl.h b/inst/include/tbb/internal/_concurrent_unordered_impl.h index e7d324e0..b4bbefa5 100644 --- a/inst/include/tbb/internal/_concurrent_unordered_impl.h +++ b/inst/include/tbb/internal/_concurrent_unordered_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. - - - - */ /* Container implementations in this header are based on PPL implementations @@ -29,12 +25,6 @@ #include "../tbb_stddef.h" -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - #include #include // Need std::pair #include // Need std::equal_to (in ../concurrent_unordered_*.h) @@ -42,10 +32,6 @@ #include // Need std::memset #include __TBB_STD_SWAP_HEADER -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - #include "../atomic.h" #include "../tbb_exception.h" #include "../tbb_allocator.h" @@ -54,7 +40,17 @@ #include #endif +#if __TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_COPY_DELETION_BROKEN + #define __TBB_UNORDERED_NODE_HANDLE_PRESENT 1 +#endif + +#include "_allocator_traits.h" #include "_tbb_hash_compare_impl.h" +#include "_template_helpers.h" + +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT +#include "_node_handle_impl.h" +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT namespace tbb { namespace interface5 { @@ -88,6 +84,11 @@ class flist_iterator : public std::iterator flist_iterator( const flist_iterator &other ) : my_node_ptr(other.my_node_ptr) {} + flist_iterator& operator=( const flist_iterator &other ) { + my_node_ptr = other.my_node_ptr; + return *this; + } + reference operator*() const { return my_node_ptr->my_element; } pointer operator->() const { return &**this; } @@ -134,6 +135,8 @@ class solist_iterator : public flist_iterator friend class split_ordered_list; template friend class solist_iterator; + template + friend class concurrent_unordered_base; template friend bool operator==( const solist_iterator &i, const solist_iterator &j ); template @@ -149,9 +152,15 @@ class solist_iterator : public flist_iterator typedef typename Solist::reference reference; solist_iterator() 
{} - solist_iterator(const solist_iterator &other ) + solist_iterator( const solist_iterator &other ) : base_type(other), my_list_ptr(other.my_list_ptr) {} + solist_iterator& operator=( const solist_iterator &other ) { + base_type::my_node_ptr = other.get_node_ptr(); + my_list_ptr = other.my_list_ptr; + return *this; + } + reference operator*() const { return this->base_type::operator*(); } @@ -195,17 +204,20 @@ class split_ordered_list { public: typedef split_ordered_list self_type; - typedef typename Allocator::template rebind::other allocator_type; + + typedef typename tbb::internal::allocator_rebind::type allocator_type; + struct node; typedef node *nodeptr_t; - typedef typename allocator_type::size_type size_type; - typedef typename allocator_type::difference_type difference_type; - typedef typename allocator_type::pointer pointer; - typedef typename allocator_type::const_pointer const_pointer; - typedef typename allocator_type::reference reference; - typedef typename allocator_type::const_reference const_reference; - typedef typename allocator_type::value_type value_type; + typedef typename tbb::internal::allocator_traits::value_type value_type; + typedef typename tbb::internal::allocator_traits::size_type size_type; + typedef typename tbb::internal::allocator_traits::difference_type difference_type; + typedef typename tbb::internal::allocator_traits::pointer pointer; + typedef typename tbb::internal::allocator_traits::const_pointer const_pointer; + // No support for reference/const_reference in allocator traits + typedef value_type& reference; + typedef const value_type& const_reference; typedef solist_iterator const_iterator; typedef solist_iterator iterator; @@ -230,6 +242,15 @@ class split_ordered_list return my_order_key; } + // get() and value() is a common interface for getting access to node`s element (required by node_handle) + value_type* storage() { + return reinterpret_cast(&my_element); + } + + value_type& value() { + return *storage(); + } + // 
Inserts the new element in the list in an atomic fashion nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node) { @@ -574,27 +595,49 @@ class split_ordered_list } - // This erase function can handle both real and dummy nodes - void erase_node(raw_iterator previous, raw_const_iterator& where) - { + nodeptr_t erase_node_impl(raw_iterator previous, raw_const_iterator& where) { nodeptr_t pnode = (where++).get_node_ptr(); nodeptr_t prevnode = previous.get_node_ptr(); __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators"); prevnode->my_next = pnode->my_next; + return pnode; + } + // This erase function can handle both real and dummy nodes + void erase_node(raw_iterator previous, raw_const_iterator& where, + /*allow_destroy*/tbb::internal::true_type) + { + nodeptr_t pnode = erase_node_impl(previous, where); destroy_node(pnode); } + void erase_node(raw_iterator previous, raw_const_iterator& where, + /*allow_destroy*/tbb::internal::false_type) + { + erase_node_impl(previous, where); + } + + void erase_node(raw_iterator previous, raw_const_iterator& where) { + erase_node(previous, where, /*allow_destroy*/tbb::internal::true_type()); + } + // Erase the element (previous node needs to be passed because this is a forward only list) - iterator erase_node(raw_iterator previous, const_iterator where) + template + iterator erase_node(raw_iterator previous, const_iterator where, AllowDestroy) { raw_const_iterator it = where; - erase_node(previous, it); + erase_node(previous, it, AllowDestroy()); my_element_count--; return get_iterator(first_real_iterator(it)); } + iterator erase_node(raw_iterator previous, const_iterator& where) { + return erase_node(previous, where, /*allow_destroy*/tbb::internal::true_type()); + } + + + // Move all elements from the passed in split-ordered list to this one void move_all(self_type& source) { @@ -649,7 +692,7 @@ class split_ordered_list #endif } - typename allocator_type::template rebind::other 
my_node_allocator; // allocator object for nodes + typename tbb::internal::allocator_rebind::type my_node_allocator; // allocator object for nodes size_type my_element_count; // Total item count, not counting dummy nodes nodeptr_t my_head; // pointer to head node }; @@ -671,12 +714,15 @@ class concurrent_unordered_base : public Traits typedef typename Traits::allocator_type allocator_type; typedef typename hash_compare::hasher hasher; typedef typename hash_compare::key_equal key_equal; - typedef typename allocator_type::pointer pointer; - typedef typename allocator_type::const_pointer const_pointer; - typedef typename allocator_type::reference reference; - typedef typename allocator_type::const_reference const_reference; - typedef typename allocator_type::size_type size_type; - typedef typename allocator_type::difference_type difference_type; + + typedef typename tbb::internal::allocator_traits::size_type size_type; + typedef typename tbb::internal::allocator_traits::difference_type difference_type; + typedef typename tbb::internal::allocator_traits::pointer pointer; + typedef typename tbb::internal::allocator_traits::const_pointer const_pointer; + // No support for reference/const_reference in allocator + typedef typename allocator_type::value_type& reference; + typedef const typename allocator_type::value_type& const_reference; + typedef split_ordered_list solist_t; typedef typename solist_t::nodeptr_t nodeptr_t; // Iterators that walk the entire split-order list, including dummy nodes @@ -686,12 +732,19 @@ class concurrent_unordered_base : public Traits typedef typename solist_t::const_iterator const_iterator; typedef iterator local_iterator; typedef const_iterator const_local_iterator; +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + typedef typename Traits::node_type node_type; +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT using Traits::my_hash_compare; using Traits::get_key; using Traits::allow_multimapping; static const size_type initial_bucket_number = 8; // 
Initial number of buckets + private: + template + friend class concurrent_unordered_base; + typedef std::pair pairii_t; typedef std::pair paircc_t; @@ -737,8 +790,10 @@ class concurrent_unordered_base : public Traits #if __TBB_CPP11_RVALUE_REF_PRESENT concurrent_unordered_base(concurrent_unordered_base&& right) - : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator()) + : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator()), + my_maximum_bucket_size(float(initial_bucket_load)) { + my_number_of_buckets = initial_bucket_number; internal_init(); swap(right); } @@ -750,6 +805,8 @@ class concurrent_unordered_base : public Traits internal_init(); if (a == right.get_allocator()){ + my_number_of_buckets = initial_bucket_number; + my_maximum_bucket_size = float(initial_bucket_load); this->swap(right); }else{ my_maximum_bucket_size = right.my_maximum_bucket_size; @@ -830,6 +887,49 @@ class concurrent_unordered_base : public Traits internal_clear(); } +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + template + void internal_merge(SourceType& source) { + typedef typename SourceType::iterator source_iterator; + __TBB_STATIC_ASSERT((tbb::internal::is_same_type::value), + "Incompatible containers cannot be merged"); + + for(source_iterator it = source.begin(); it != source.end();) { + source_iterator where = it++; + if (allow_multimapping || find(get_key(*where)) == end()) { + std::pair extract_result = source.internal_extract(where); + + // Remember the old order key + sokey_t old_order_key = extract_result.first.my_node->get_order_key(); + + // If the insertion fails, it returns ownership of the node to extract_result.first + // extract_result.first remains valid node handle + if (!insert(std::move(extract_result.first)).second) { + raw_iterator next = extract_result.second; + raw_iterator current = next++; + + // Revert order key to old value + 
extract_result.first.my_node->init(old_order_key); + + __TBB_ASSERT(extract_result.first.my_node->get_order_key() >= current.get_node_ptr()->get_order_key(), + "Wrong nodes order in source container"); + __TBB_ASSERT(next==source.my_solist.raw_end() || + extract_result.first.my_node->get_order_key() <= next.get_node_ptr()->get_order_key(), + "Wrong nodes order in source container"); + + size_t new_count = 0;// To use try_insert() + bool insert_result = + source.my_solist.try_insert(current, next, extract_result.first.my_node, &new_count).second; + __TBB_ASSERT_EX(insert_result, "Return to source must be successful. " + "Changing source container while merging is unsafe."); + } + extract_result.first.deactivate(); + } + } + } +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT + public: allocator_type get_allocator() const { return my_solist.get_allocator(); @@ -966,7 +1066,8 @@ class concurrent_unordered_base : public Traits // Modifiers std::pair insert(const value_type& value) { - return internal_insert(value); + return internal_insert(value); } iterator insert(const_iterator, const value_type& value) { @@ -976,23 +1077,43 @@ class concurrent_unordered_base : public Traits #if __TBB_CPP11_RVALUE_REF_PRESENT std::pair insert(value_type&& value) { - return internal_insert(std::move(value)); + return internal_insert(std::move(value)); } iterator insert(const_iterator, value_type&& value) { // Ignore hint return insert(std::move(value)).first; } +#endif /*__TBB_CPP11_RVALUE_REF_PRESENT*/ + +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + std::pair insert(node_type&& nh) { + if (!nh.empty()) { + nodeptr_t handled_node = nh.my_node; + std::pair insert_result = + internal_insert + (handled_node->my_element, handled_node); + if (insert_result.second) + nh.deactivate(); + return insert_result; + } + return std::pair(end(), false); + } -#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + iterator insert(const_iterator, node_type&& nh) { + return insert(std::move(nh)).first; + } +#endif // 
__TBB_UNORDERED_NODE_HANDLE_PRESENT + +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT template std::pair emplace(Args&&... args) { nodeptr_t pnode = my_solist.create_node_v(tbb::internal::forward(args)...); - const sokey_t hashed_element_key = (sokey_t) my_hash_compare(get_key(pnode->my_element)); - const sokey_t order_key = split_order_key_regular(hashed_element_key); - pnode->init(order_key); - return internal_insert(pnode->my_element, pnode); + return internal_insert(pnode->my_element, pnode); } template @@ -1000,9 +1121,8 @@ class concurrent_unordered_base : public Traits // Ignore hint return emplace(tbb::internal::forward(args)...).first; } +#endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT -#endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT -#endif // __TBB_CPP11_RVALUE_REF_PRESENT template void insert(Iterator first, Iterator last) { @@ -1034,9 +1154,21 @@ class concurrent_unordered_base : public Traits return item_count; } +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + node_type unsafe_extract(const_iterator where) { + return internal_extract(where).first; + } + + node_type unsafe_extract(const key_type& key) { + pairii_t where = equal_range(key); + if (where.first == end()) return node_type(); // element was not found + return internal_extract(where.first).first; + } +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT + void swap(concurrent_unordered_base& right) { if (this != &right) { - std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here + std::swap(my_hash_compare, right.my_hash_compare); my_solist.swap(right.my_solist); internal_swap_buckets(right); std::swap(my_number_of_buckets, right.my_number_of_buckets); @@ -1209,8 +1341,8 @@ class concurrent_unordered_base : public Traits // Initialize the hash and keep the first bucket open void internal_init() { - // Allocate an array of segment pointers - memset(my_buckets, 0, pointers_per_table * sizeof(void *)); + // 
Initialize the array of segment pointers + memset(my_buckets, 0, sizeof(my_buckets)); // Initialize bucket 0 raw_iterator dummy_node = my_solist.raw_begin(); @@ -1268,7 +1400,7 @@ class concurrent_unordered_base : public Traits } // Insert an element in the hash given its value - template + template std::pair internal_insert(__TBB_FORWARDING_REF(ValueType) value, nodeptr_t pnode = NULL) { const key_type *pkey = &get_key(value); @@ -1279,6 +1411,11 @@ class concurrent_unordered_base : public Traits raw_iterator last = my_solist.raw_end(); __TBB_ASSERT(previous != last, "Invalid head node"); + if (pnode) { + // Set new order_key to node + pnode->init(order_key); + } + // First node is a dummy node for (raw_iterator where = previous;;) { @@ -1317,7 +1454,7 @@ class concurrent_unordered_base : public Traits else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && !my_hash_compare(get_key(*where), *pkey)) // TODO: fix negation { // Element already in the list, return it - if (pnode) + if (pnode && AllowDestroy::value) my_solist.destroy_node(pnode); return std::pair(my_solist.get_iterator(where), false); } @@ -1363,14 +1500,33 @@ class concurrent_unordered_base : public Traits __TBB_ASSERT(previous != last, "Invalid head node"); // First node is a dummy node - for (raw_iterator where = previous; ; previous = where) { + for (raw_iterator where = previous; where != last; previous = where) { ++where; - if (where == last) - return end(); - else if (my_solist.get_iterator(where) == it) + if (my_solist.get_iterator(where) == it) return my_solist.erase_node(previous, it); } + return end(); + } + +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + std::pair internal_extract(const_iterator it) { + sokey_t hash_key = sokey_t(my_hash_compare(get_key(*it))); + raw_iterator previous = prepare_bucket(hash_key); + raw_iterator last = my_solist.raw_end(); + __TBB_ASSERT(previous != last, "Invalid head node"); + + for(raw_iterator where = previous; where != last; previous = 
where) { + ++where; + if (my_solist.get_iterator(where) == it) { + const_iterator result = it; + my_solist.erase_node(previous, it, /*allow_destroy*/tbb::internal::false_type()); + return std::pair( node_type(result.get_node_ptr()), + previous); + } + } + return std::pair(node_type(), end()); } +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT // Return the [begin, end) pair of iterators with the same key values. // This operation makes sense only if mapping is many-to-one. @@ -1478,7 +1634,7 @@ class concurrent_unordered_base : public Traits if (my_buckets[segment] == NULL) { size_type sz = segment_size(segment); raw_iterator * new_segment = my_allocator.allocate(sz); - std::memset((void*) new_segment, 0, sz*sizeof(raw_iterator)); + std::memset(static_cast(new_segment), 0, sz*sizeof(raw_iterator)); if (my_buckets[segment].compare_and_swap( new_segment, NULL) != NULL) my_allocator.deallocate(new_segment, sz); @@ -1513,7 +1669,7 @@ class concurrent_unordered_base : public Traits // Shared variables atomic my_number_of_buckets; // Current table size solist_t my_solist; // List where all the elements are kept - typename allocator_type::template rebind::other my_allocator; // Allocator object for segments + typename tbb::internal::allocator_rebind::type my_allocator; // Allocator object for segments float my_maximum_bucket_size; // Maximum size of the bucket atomic my_buckets[pointers_per_table]; // The segment table }; diff --git a/inst/include/tbb/internal/_flow_graph_async_msg_impl.h b/inst/include/tbb/internal/_flow_graph_async_msg_impl.h index 7d2c3aef..1995c92c 100644 --- a/inst/include/tbb/internal/_flow_graph_async_msg_impl.h +++ b/inst/include/tbb/internal/_flow_graph_async_msg_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB__flow_graph_async_msg_impl_H @@ -25,101 +21,28 @@ #error Do not #include this internal file directly; use public TBB headers instead. #endif -// included in namespace tbb::flow::interfaceX (in flow_graph.h) - -template< typename T > class async_msg; - namespace internal { -template< typename T, typename = void > -struct async_helpers { - typedef async_msg async_type; - typedef T filtered_type; - - static const bool is_async_type = false; - - static const void* to_void_ptr(const T& t) { - return static_cast(&t); - } - - static void* to_void_ptr(T& t) { - return static_cast(&t); - } - - static const T& from_void_ptr(const void* p) { - return *static_cast(p); - } - - static T& from_void_ptr(void* p) { - return *static_cast(p); - } - - static task* try_put_task_wrapper_impl( receiver* const this_recv, const void *p, bool is_async ) { - if ( is_async ) { - // This (T) is NOT async and incoming 'A t' IS async - // Get data from async_msg - const async_msg& msg = async_helpers< async_msg >::from_void_ptr(p); - task* const new_task = msg.my_storage->subscribe(*this_recv); - // finalize() must be called after subscribe() because set() can be called in finalize() - // and 'this_recv' client must be subscribed by this moment - msg.finalize(); - return new_task; - } else { - // Incoming 't' is NOT async - return this_recv->try_put_task( from_void_ptr(p) ); - } - } -}; - -template< typename T > -struct async_helpers< T, typename std::enable_if< std::is_base_of, T>::value >::type > { - typedef T async_type; - typedef typename T::async_msg_data_type filtered_type; - - static const bool is_async_type = true; - - // Receiver-classes use const interfaces - static const void* to_void_ptr(const T& t) { - return static_cast( &static_cast&>(t) ); - } - - static void* 
to_void_ptr(T& t) { - return static_cast( &static_cast&>(t) ); - } - - // Sender-classes use non-const interfaces - static const T& from_void_ptr(const void* p) { - return *static_cast( static_cast*>(p) ); - } - - static T& from_void_ptr(void* p) { - return *static_cast( static_cast*>(p) ); - } - - // Used in receiver class - static task* try_put_task_wrapper_impl(receiver* const this_recv, const void *p, bool is_async) { - if ( is_async ) { - // Both are async - return this_recv->try_put_task( from_void_ptr(p) ); - } else { - // This (T) is async and incoming 'X t' is NOT async - // Create async_msg for X - const filtered_type& t = async_helpers::from_void_ptr(p); - const T msg(t); - return this_recv->try_put_task(msg); - } - } -}; - template class async_storage { public: typedef receiver async_storage_client; - async_storage() { my_data_ready.store(false); } + async_storage() : my_graph(nullptr) { + my_data_ready.store(false); + } + + ~async_storage() { + // Release reference to the graph if async_storage + // was destructed before set() call + if (my_graph) { + my_graph->release_wait(); + my_graph = nullptr; + } + } template - async_storage(C&& data) : my_data( std::forward(data) ) { + async_storage(C&& data) : my_graph(nullptr), my_data( std::forward(data) ) { using namespace tbb::internal; __TBB_STATIC_ASSERT( (is_same_type::type, typename strip::type>::value), "incoming type must be T" ); @@ -148,10 +71,16 @@ class async_storage { (*it)->try_put(my_data); } + // Data was sent, release reference to the graph + if (my_graph) { + my_graph->release_wait(); + my_graph = nullptr; + } + return true; } - task* subscribe(async_storage_client& client) { + task* subscribe(async_storage_client& client, graph& g) { if (! 
my_data_ready.load()) { tbb::spin_mutex::scoped_lock locker(my_mutex); @@ -163,6 +92,10 @@ class async_storage { } #endif // TBB_USE_ASSERT + // Increase graph lifetime + my_graph = &g; + my_graph->reserve_wait(); + // Subscribe my_clients.push_back(&client); return SUCCESSFULLY_ENQUEUED; @@ -174,11 +107,10 @@ class async_storage { } private: + graph* my_graph; tbb::spin_mutex my_mutex; - tbb::atomic my_data_ready; T my_data; - typedef std::vector subscriber_list_type; subscriber_list_type my_clients; }; diff --git a/inst/include/tbb/internal/_flow_graph_body_impl.h b/inst/include/tbb/internal/_flow_graph_body_impl.h index 54f37411..5d3ad6a0 100644 --- a/inst/include/tbb/internal/_flow_graph_body_impl.h +++ b/inst/include/tbb/internal/_flow_graph_body_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB__flow_graph_body_impl_H @@ -33,11 +29,46 @@ typedef tbb::internal::uint64_t tag_value; using tbb::internal::strip; +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + +template struct Policy {}; + +template struct has_policy; + +template +struct has_policy : + tbb::internal::bool_constant::value || + has_policy::value> {}; + +template +struct has_policy : + tbb::internal::bool_constant::value> {}; + +template +struct has_policy > : has_policy {}; + +#else + +template struct Policy {}; + +template +struct has_policy : tbb::internal::bool_constant::value> {}; + +template +struct has_policy > : has_policy {}; + +template +struct has_policy > : + tbb::internal::bool_constant::value || has_policy::value> {}; + +#endif + namespace graph_policy_namespace { struct rejecting { }; struct reserving { }; struct queueing { }; + struct lightweight { }; // K == type of field used for key-matching. Each tag-matching port will be provided // functor that, given an object accepted by the port, will return the @@ -52,6 +83,10 @@ namespace graph_policy_namespace { // old tag_matching join's new specifier typedef key_matching tag_matching; + // Aliases for Policy combinations + typedef interface11::internal::Policy queueing_lightweight; + typedef interface11::internal::Policy rejecting_lightweight; + } // namespace graph_policy_namespace // -------------- function_body containers ---------------------- @@ -232,13 +267,20 @@ class type_to_key_function_body_leaf : public type_to_key_funct //! 
A task that calls a node's forward_task function template< typename NodeType > -class forward_task_bypass : public task { +class forward_task_bypass : public graph_task { NodeType &my_node; public: - forward_task_bypass( NodeType &n ) : my_node(n) {} + forward_task_bypass( NodeType &n +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + , node_priority_t node_priority = no_priority + ) : graph_task(node_priority), +#else + ) : +#endif + my_node(n) {} task *execute() __TBB_override { task * new_task = my_node.forward_task(); @@ -250,14 +292,21 @@ class forward_task_bypass : public task { //! A task that calls a node's apply_body_bypass function, passing in an input of type Input // return the task* unless it is SUCCESSFULLY_ENQUEUED, in which case return NULL template< typename NodeType, typename Input > -class apply_body_task_bypass : public task { +class apply_body_task_bypass : public graph_task { NodeType &my_node; Input my_input; public: - apply_body_task_bypass( NodeType &n, const Input &i ) : my_node(n), my_input(i) {} + apply_body_task_bypass( NodeType &n, const Input &i +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + , node_priority_t node_priority = no_priority + ) : graph_task(node_priority), +#else + ) : +#endif + my_node(n), my_input(i) {} task *execute() __TBB_override { task * next_task = my_node.apply_body_bypass( my_input ); @@ -268,7 +317,7 @@ class apply_body_task_bypass : public task { //! 
A task that calls a node's apply_body_bypass function with no input template< typename NodeType > -class source_task_bypass : public task { +class source_task_bypass : public graph_task { NodeType &my_node; @@ -291,18 +340,90 @@ struct empty_body { Output operator()( const Input & ) const { return Output(); } }; +template +class decrementer; + +template +class decrementer::value, void>::type + > : public receiver, tbb::internal::no_copy { + T* my_node; +protected: + + task* try_put_task( const DecrementType& value ) __TBB_override { + task* result = my_node->decrement_counter( value ); + if( !result ) + result = SUCCESSFULLY_ENQUEUED; + return result; + } + + graph& graph_reference() const __TBB_override { + return my_node->my_graph; + } + + template friend class tbb::flow::interface11::limiter_node; + void reset_receiver( reset_flags f ) __TBB_override { +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + if (f & rf_clear_edges) + my_built_predecessors.clear(); +#else + tbb::internal::suppress_unused_warning( f ); +#endif + } + +public: + // Since decrementer does not make use of possibly unconstructed owner inside its + // constructor, my_node can be directly initialized with 'this' pointer passed from the + // owner, hence making method 'set_owner' needless. + decrementer() : my_node(NULL) {} + void set_owner( T *node ) { my_node = node; } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + spin_mutex my_mutex; + //! 
The predecessor type for this node + typedef typename receiver::predecessor_type predecessor_type; + + typedef internal::edge_container built_predecessors_type; + typedef typename built_predecessors_type::edge_list_type predecessor_list_type; + built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } + + void internal_add_built_predecessor( predecessor_type &s) __TBB_override { + spin_mutex::scoped_lock l(my_mutex); + my_built_predecessors.add_edge( s ); + } + + void internal_delete_built_predecessor( predecessor_type &s) __TBB_override { + spin_mutex::scoped_lock l(my_mutex); + my_built_predecessors.delete_edge(s); + } + + void copy_predecessors( predecessor_list_type &v) __TBB_override { + spin_mutex::scoped_lock l(my_mutex); + my_built_predecessors.copy_edges(v); + } + + size_t predecessor_count() __TBB_override { + spin_mutex::scoped_lock l(my_mutex); + return my_built_predecessors.edge_count(); + } +protected: + built_predecessors_type my_built_predecessors; +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ +}; + template -class decrementer : public continue_receiver, tbb::internal::no_copy { +class decrementer : public continue_receiver, tbb::internal::no_copy { T *my_node; task *execute() __TBB_override { - return my_node->decrement_counter(); + return my_node->decrement_counter( 1 ); } protected: - graph& graph_reference() __TBB_override { + graph& graph_reference() const __TBB_override { return my_node->my_graph; } @@ -310,7 +431,15 @@ class decrementer : public continue_receiver, tbb::internal::no_copy { typedef continue_msg input_type; typedef continue_msg output_type; - decrementer( int number_of_predecessors = 0 ) : continue_receiver( number_of_predecessors ) { } + decrementer( int number_of_predecessors = 0 ) + : continue_receiver( + __TBB_FLOW_GRAPH_PRIORITY_ARG1(number_of_predecessors, tbb::flow::internal::no_priority) + ) + // Since decrementer does not make use of possibly unconstructed owner inside its + // 
constructor, my_node can be directly initialized with 'this' pointer passed from the + // owner, hence making method 'set_owner' needless. + , my_node(NULL) + {} void set_owner( T *node ) { my_node = node; } }; diff --git a/inst/include/tbb/internal/_flow_graph_cache_impl.h b/inst/include/tbb/internal/_flow_graph_cache_impl.h index 7fac84e8..b670ae65 100644 --- a/inst/include/tbb/internal/_flow_graph_cache_impl.h +++ b/inst/include/tbb/internal/_flow_graph_cache_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB__flow_graph_cache_impl_H @@ -57,12 +53,12 @@ class node_cache { void clear() { while( !my_q.empty()) (void)my_q.pop(); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION my_built_predecessors.clear(); #endif } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef edge_container built_predecessors_type; built_predecessors_type &built_predecessors() { return my_built_predecessors; } @@ -86,14 +82,14 @@ class node_cache { typename mutex_type::scoped_lock lock(my_mutex); return (size_t)(my_built_predecessors.edge_count()); } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ protected: typedef M mutex_type; mutex_type my_mutex; std::queue< T * > my_q; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION built_predecessors_type my_built_predecessors; #endif @@ -189,7 +185,7 @@ class predecessor_cache : public node_cache< sender, M > { protected: -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION using 
node_cache< predecessor_type, M >::my_built_predecessors; #endif successor_type *my_owner; @@ -290,7 +286,7 @@ class successor_cache : tbb::internal::no_copy { typedef sender owner_type; #endif // __TBB_PREVIEW_ASYNC_MSG typedef std::list< pointer_type > successors_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION edge_container my_built_successors; #endif successors_type my_successors; @@ -298,7 +294,7 @@ class successor_cache : tbb::internal::no_copy { owner_type *my_owner; public: -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef typename edge_container::edge_list_type successor_list_type; edge_container &built_successors() { return my_built_successors; } @@ -323,7 +319,7 @@ class successor_cache : tbb::internal::no_copy { return my_built_successors.edge_count(); } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ successor_cache( ) : my_owner(NULL) {} @@ -354,7 +350,7 @@ class successor_cache : tbb::internal::no_copy { void clear() { my_successors.clear(); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION my_built_successors.clear(); #endif } @@ -365,11 +361,11 @@ class successor_cache : tbb::internal::no_copy { }; // successor_cache //! 
An abstract cache of successors, specialized to continue_msg -template<> -class successor_cache< continue_msg > : tbb::internal::no_copy { +template +class successor_cache< continue_msg, M > : tbb::internal::no_copy { protected: - typedef spin_rw_mutex mutex_type; + typedef M mutex_type; mutex_type my_mutex; #if __TBB_PREVIEW_ASYNC_MSG @@ -381,7 +377,7 @@ class successor_cache< continue_msg > : tbb::internal::no_copy { #endif // __TBB_PREVIEW_ASYNC_MSG typedef std::list< pointer_type > successors_type; successors_type my_successors; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION edge_container my_built_successors; typedef edge_container::edge_list_type successor_list_type; #endif @@ -390,31 +386,31 @@ class successor_cache< continue_msg > : tbb::internal::no_copy { public: -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION edge_container &built_successors() { return my_built_successors; } void internal_add_built_successor( successor_type &r) { - mutex_type::scoped_lock l(my_mutex, true); + typename mutex_type::scoped_lock l(my_mutex, true); my_built_successors.add_edge( r ); } void internal_delete_built_successor( successor_type &r) { - mutex_type::scoped_lock l(my_mutex, true); + typename mutex_type::scoped_lock l(my_mutex, true); my_built_successors.delete_edge(r); } void copy_successors( successor_list_type &v) { - mutex_type::scoped_lock l(my_mutex, false); + typename mutex_type::scoped_lock l(my_mutex, false); my_built_successors.copy_edges(v); } size_t successor_count() { - mutex_type::scoped_lock l(my_mutex,false); + typename mutex_type::scoped_lock l(my_mutex,false); return my_built_successors.edge_count(); } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ successor_cache( ) : my_owner(NULL) {} @@ -423,7 +419,7 @@ class successor_cache< continue_msg > : tbb::internal::no_copy { virtual ~successor_cache() {} void register_successor( successor_type &r ) { 
- mutex_type::scoped_lock l(my_mutex, true); + typename mutex_type::scoped_lock l(my_mutex, true); my_successors.push_back( &r ); if ( my_owner && r.is_continue_receiver() ) { r.register_predecessor( *my_owner ); @@ -431,7 +427,7 @@ class successor_cache< continue_msg > : tbb::internal::no_copy { } void remove_successor( successor_type &r ) { - mutex_type::scoped_lock l(my_mutex, true); + typename mutex_type::scoped_lock l(my_mutex, true); for ( successors_type::iterator i = my_successors.begin(); i != my_successors.end(); ++i ) { if ( *i == & r ) { @@ -446,13 +442,13 @@ class successor_cache< continue_msg > : tbb::internal::no_copy { } bool empty() { - mutex_type::scoped_lock l(my_mutex, false); + typename mutex_type::scoped_lock l(my_mutex, false); return my_successors.empty(); } void clear() { my_successors.clear(); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION my_built_successors.clear(); #endif } @@ -508,6 +504,40 @@ class broadcast_cache : public successor_cache { return last_task; } + // call try_put_task and return list of received tasks +#if __TBB_PREVIEW_ASYNC_MSG + template + bool gather_successful_try_puts( const X &t, task_list &tasks ) { +#else + bool gather_successful_try_puts( const T &t, task_list &tasks ) { +#endif // __TBB_PREVIEW_ASYNC_MSG + bool upgraded = true; + bool is_at_least_one_put_successful = false; + typename mutex_type::scoped_lock l(this->my_mutex, upgraded); + typename successors_type::iterator i = this->my_successors.begin(); + while ( i != this->my_successors.end() ) { + task * new_task = (*i)->try_put_task(t); + if(new_task) { + ++i; + if(new_task != SUCCESSFULLY_ENQUEUED) { + tasks.push_back(*new_task); + } + is_at_least_one_put_successful = true; + } + else { // failed + if ( (*i)->register_predecessor(*this->my_owner) ) { + if (!upgraded) { + l.upgrade_to_writer(); + upgraded = true; + } + i = this->my_successors.erase(i); + } else { + ++i; + } + } + } + return is_at_least_one_put_successful; + 
} }; //! A cache of successors that are put in a round-robin fashion diff --git a/inst/include/tbb/internal/_flow_graph_impl.h b/inst/include/tbb/internal/_flow_graph_impl.h index 92278caf..2f18676e 100644 --- a/inst/include/tbb/internal/_flow_graph_impl.h +++ b/inst/include/tbb/internal/_flow_graph_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,828 +12,536 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +*/ +#ifndef __TBB_flow_graph_impl_H +#define __TBB_flow_graph_impl_H +#include "../tbb_stddef.h" +#include "../task.h" +#include "../task_arena.h" +#include "../flow_graph_abstractions.h" +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +#include "../concurrent_priority_queue.h" +#endif -*/ - -#ifndef __TBB__flow_graph_impl_H -#define __TBB__flow_graph_impl_H +#include -#ifndef __TBB_flow_graph_H -#error Do not #include this internal file directly; use public TBB headers instead. 
+#if TBB_DEPRECATED_FLOW_ENQUEUE +#define FLOW_SPAWN(a) tbb::task::enqueue((a)) +#else +#define FLOW_SPAWN(a) tbb::task::spawn((a)) #endif -// included in namespace tbb::flow::interfaceX (in flow_graph.h) +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +#define __TBB_FLOW_GRAPH_PRIORITY_EXPR( expr ) expr +#define __TBB_FLOW_GRAPH_PRIORITY_ARG0( priority ) , priority +#define __TBB_FLOW_GRAPH_PRIORITY_ARG1( arg1, priority ) arg1, priority +#else +#define __TBB_FLOW_GRAPH_PRIORITY_EXPR( expr ) +#define __TBB_FLOW_GRAPH_PRIORITY_ARG0( priority ) +#define __TBB_FLOW_GRAPH_PRIORITY_ARG1( arg1, priority ) arg1 +#endif // __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + +#if TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR +#define __TBB_DEPRECATED_LIMITER_EXPR( expr ) expr +#define __TBB_DEPRECATED_LIMITER_ARG2( arg1, arg2 ) arg1, arg2 +#define __TBB_DEPRECATED_LIMITER_ARG4( arg1, arg2, arg3, arg4 ) arg1, arg3, arg4 +#else +#define __TBB_DEPRECATED_LIMITER_EXPR( expr ) +#define __TBB_DEPRECATED_LIMITER_ARG2( arg1, arg2 ) arg1 +#define __TBB_DEPRECATED_LIMITER_ARG4( arg1, arg2, arg3, arg4 ) arg1, arg2 +#endif // TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR + +namespace tbb { +namespace flow { namespace internal { +static tbb::task * const SUCCESSFULLY_ENQUEUED = (task *)-1; +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +typedef unsigned int node_priority_t; +static const node_priority_t no_priority = node_priority_t(0); +#endif +} - typedef tbb::internal::uint64_t tag_value; +namespace interface10 { +class graph; +} - using tbb::internal::strip; +namespace interface11 { - namespace graph_policy_namespace { +using tbb::flow::internal::SUCCESSFULLY_ENQUEUED; - struct rejecting { }; - struct reserving { }; - struct queueing { }; +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +using tbb::flow::internal::node_priority_t; +using tbb::flow::internal::no_priority; +//! Base class for tasks generated by graph nodes. 
+struct graph_task : public task { + graph_task( node_priority_t node_priority = no_priority ) : priority( node_priority ) {} + node_priority_t priority; +}; +#else +typedef task graph_task; +#endif /* __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES */ - // K == type of field used for key-matching. Each tag-matching port will be provided - // functor that, given an object accepted by the port, will return the - /// field of type K being used for matching. - template::type > > - struct key_matching { - typedef K key_type; - typedef typename strip::type base_key_type; - typedef KHash hash_compare_type; - }; +class graph_node; - // old tag_matching join's new specifier - typedef key_matching tag_matching; +template +class graph_iterator { + friend class tbb::flow::interface10::graph; + friend class graph_node; +public: + typedef size_t size_type; + typedef GraphNodeType value_type; + typedef GraphNodeType* pointer; + typedef GraphNodeType& reference; + typedef const GraphNodeType& const_reference; + typedef std::forward_iterator_tag iterator_category; + + //! Default constructor + graph_iterator() : my_graph(NULL), current_node(NULL) {} + + //! Copy constructor + graph_iterator(const graph_iterator& other) : + my_graph(other.my_graph), current_node(other.current_node) + {} + + //! Assignment + graph_iterator& operator=(const graph_iterator& other) { + if (this != &other) { + my_graph = other.my_graph; + current_node = other.current_node; + } + return *this; } -// -------------- function_body containers ---------------------- + //! Dereference + reference operator*() const; - //! A functor that takes no input and generates a value of type Output - template< typename Output > - class source_body : tbb::internal::no_assign { - public: - virtual ~source_body() {} - virtual bool operator()(Output &output) = 0; - virtual source_body* clone() = 0; - }; - - //! 
The leaf for source_body - template< typename Output, typename Body> - class source_body_leaf : public source_body { - public: - source_body_leaf( const Body &_body ) : body(_body) { } - bool operator()(Output &output) __TBB_override { return body( output ); } - source_body_leaf* clone() __TBB_override { - return new source_body_leaf< Output, Body >(body); - } - Body get_body() { return body; } - private: - Body body; - }; - - //! A functor that takes an Input and generates an Output - template< typename Input, typename Output > - class function_body : tbb::internal::no_assign { - public: - virtual ~function_body() {} - virtual Output operator()(const Input &input) = 0; - virtual function_body* clone() = 0; - }; - - //! the leaf for function_body - template - class function_body_leaf : public function_body< Input, Output > { - public: - function_body_leaf( const B &_body ) : body(_body) { } - Output operator()(const Input &i) __TBB_override { return body(i); } - B get_body() { return body; } - function_body_leaf* clone() __TBB_override { - return new function_body_leaf< Input, Output, B >(body); - } - private: - B body; - }; + //! Dereference + pointer operator->() const; - //! the leaf for function_body specialized for Input and output of continue_msg - template - class function_body_leaf< continue_msg, continue_msg, B> : public function_body< continue_msg, continue_msg > { - public: - function_body_leaf( const B &_body ) : body(_body) { } - continue_msg operator()( const continue_msg &i ) __TBB_override { - body(i); - return i; - } - B get_body() { return body; } - function_body_leaf* clone() __TBB_override { - return new function_body_leaf< continue_msg, continue_msg, B >(body); - } - private: - B body; - }; - - //! 
the leaf for function_body specialized for Output of continue_msg - template - class function_body_leaf< Input, continue_msg, B> : public function_body< Input, continue_msg > { - public: - function_body_leaf( const B &_body ) : body(_body) { } - continue_msg operator()(const Input &i) __TBB_override { - body(i); - return continue_msg(); - } - B get_body() { return body; } - function_body_leaf* clone() __TBB_override { - return new function_body_leaf< Input, continue_msg, B >(body); - } - private: - B body; - }; - - //! the leaf for function_body specialized for Input of continue_msg - template - class function_body_leaf< continue_msg, Output, B > : public function_body< continue_msg, Output > { - public: - function_body_leaf( const B &_body ) : body(_body) { } - Output operator()(const continue_msg &i) __TBB_override { - return body(i); - } - B get_body() { return body; } - function_body_leaf* clone() __TBB_override { - return new function_body_leaf< continue_msg, Output, B >(body); - } - private: - B body; - }; - - //! function_body that takes an Input and a set of output ports - template - class multifunction_body : tbb::internal::no_assign { - public: - virtual ~multifunction_body () {} - virtual void operator()(const Input &/* input*/, OutputSet &/*oset*/) = 0; - virtual multifunction_body* clone() = 0; - virtual void* get_body_ptr() = 0; - }; + //! Equality + bool operator==(const graph_iterator& other) const { + return ((my_graph == other.my_graph) && (current_node == other.current_node)); + } - //! leaf for multifunction. OutputSet can be a std::tuple or a vector. - template - class multifunction_body_leaf : public multifunction_body { - public: - multifunction_body_leaf(const B &_body) : body(_body) { } - void operator()(const Input &input, OutputSet &oset) __TBB_override { - body(input, oset); // body may explicitly put() to one or more of oset. 
- } - void* get_body_ptr() __TBB_override { return &body; } - multifunction_body_leaf* clone() __TBB_override { - return new multifunction_body_leaf(body); - } + //! Inequality + bool operator!=(const graph_iterator& other) const { return !(operator==(other)); } - private: - B body; - }; + //! Pre-increment + graph_iterator& operator++() { + internal_forward(); + return *this; + } -// ------ function bodies for hash_buffers and key-matching joins. + //! Post-increment + graph_iterator operator++(int) { + graph_iterator result = *this; + operator++(); + return result; + } -template -class type_to_key_function_body : tbb::internal::no_assign { - public: - virtual ~type_to_key_function_body() {} - virtual Output operator()(const Input &input) = 0; // returns an Output - virtual type_to_key_function_body* clone() = 0; +private: + // the graph over which we are iterating + GraphContainerType *my_graph; + // pointer into my_graph's my_nodes list + pointer current_node; + + //! Private initializing constructor for begin() and end() iterators + graph_iterator(GraphContainerType *g, bool begin); + void internal_forward(); +}; // class graph_iterator + +// flags to modify the behavior of the graph reset(). Can be combined. +enum reset_flags { + rf_reset_protocol = 0, + rf_reset_bodies = 1 << 0, // delete the current node body, reset to a copy of the initial node body. 
+ rf_clear_edges = 1 << 1 // delete edges }; -// specialization for ref output -template -class type_to_key_function_body : tbb::internal::no_assign { - public: - virtual ~type_to_key_function_body() {} - virtual const Output & operator()(const Input &input) = 0; // returns a const Output& - virtual type_to_key_function_body* clone() = 0; -}; +namespace internal { -template -class type_to_key_function_body_leaf : public type_to_key_function_body { -public: - type_to_key_function_body_leaf( const B &_body ) : body(_body) { } - Output operator()(const Input &i) __TBB_override { return body(i); } - B get_body() { return body; } - type_to_key_function_body_leaf* clone() __TBB_override { - return new type_to_key_function_body_leaf< Input, Output, B>(body); +void activate_graph(tbb::flow::interface10::graph& g); +void deactivate_graph(tbb::flow::interface10::graph& g); +bool is_graph_active(tbb::flow::interface10::graph& g); +tbb::task& prioritize_task(tbb::flow::interface10::graph& g, tbb::task& arena_task); +void spawn_in_graph_arena(tbb::flow::interface10::graph& g, tbb::task& arena_task); +void enqueue_in_graph_arena(tbb::flow::interface10::graph &g, tbb::task& arena_task); +void add_task_to_graph_reset_list(tbb::flow::interface10::graph& g, tbb::task *tp); + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +struct graph_task_comparator { + bool operator()(const graph_task* left, const graph_task* right) { + return left->priority < right->priority; } -private: - B body; }; -template -class type_to_key_function_body_leaf : public type_to_key_function_body< Input, Output&> { +typedef tbb::concurrent_priority_queue graph_task_priority_queue_t; + +class priority_task_selector : public task { public: - type_to_key_function_body_leaf( const B &_body ) : body(_body) { } - const Output& operator()(const Input &i) __TBB_override { - return body(i); - } - B get_body() { return body; } - type_to_key_function_body_leaf* clone() __TBB_override { - return new 
type_to_key_function_body_leaf< Input, Output&, B>(body); + priority_task_selector(graph_task_priority_queue_t& priority_queue) + : my_priority_queue(priority_queue) {} + task* execute() __TBB_override { + graph_task* t = NULL; + bool result = my_priority_queue.try_pop(t); + __TBB_ASSERT_EX( result, "Number of critical tasks for scheduler and tasks" + " in graph's priority queue mismatched" ); + __TBB_ASSERT( t && t != SUCCESSFULLY_ENQUEUED, + "Incorrect task submitted to graph priority queue" ); + __TBB_ASSERT( t->priority != tbb::flow::internal::no_priority, + "Tasks from graph's priority queue must have priority" ); + task* t_next = t->execute(); + task::destroy(*t); + return t_next; } private: - B body; + graph_task_priority_queue_t& my_priority_queue; }; +#endif /* __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES */ -// --------------------------- end of function_body containers ------------------------ - -// --------------------------- node task bodies --------------------------------------- - - //! A task that calls a node's forward_task function - template< typename NodeType > - class forward_task_bypass : public task { +} - NodeType &my_node; +} // namespace interfaceX +namespace interface10 { +//! 
The graph class +/** This class serves as a handle to the graph */ +class graph : tbb::internal::no_copy, public tbb::flow::graph_proxy { + friend class tbb::flow::interface11::graph_node; + template< typename Body > + class run_task : public tbb::flow::interface11::graph_task { public: - - forward_task_bypass( NodeType &n ) : my_node(n) {} - - task *execute() __TBB_override { - task * new_task = my_node.forward_task(); - if (new_task == SUCCESSFULLY_ENQUEUED) new_task = NULL; - return new_task; + run_task(Body& body +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + , tbb::flow::interface11::node_priority_t node_priority = tbb::flow::interface11::no_priority + ) : tbb::flow::interface11::graph_task(node_priority), +#else + ) : +#endif + my_body(body) { } + tbb::task *execute() __TBB_override { + my_body(); + return NULL; } + private: + Body my_body; }; - //! A task that calls a node's apply_body_bypass function, passing in an input of type Input - // return the task* unless it is SUCCESSFULLY_ENQUEUED, in which case return NULL - template< typename NodeType, typename Input > - class apply_body_task_bypass : public task { - - NodeType &my_node; - Input my_input; - + template< typename Receiver, typename Body > + class run_and_put_task : public tbb::flow::interface11::graph_task { public: - - apply_body_task_bypass( NodeType &n, const Input &i ) : my_node(n), my_input(i) {} - - task *execute() __TBB_override { - task * next_task = my_node.apply_body_bypass( my_input ); - if(next_task == SUCCESSFULLY_ENQUEUED) next_task = NULL; - return next_task; + run_and_put_task(Receiver &r, Body& body) : my_receiver(r), my_body(body) {} + tbb::task *execute() __TBB_override { + tbb::task *res = my_receiver.try_put_task(my_body()); + if (res == tbb::flow::interface11::SUCCESSFULLY_ENQUEUED) res = NULL; + return res; } + private: + Receiver &my_receiver; + Body my_body; }; + typedef std::list task_list_type; - //! 
A task that calls a node's apply_body_bypass function with no input - template< typename NodeType > - class source_task_bypass : public task { - - NodeType &my_node; - + class wait_functor { + tbb::task* graph_root_task; public: - - source_task_bypass( NodeType &n ) : my_node(n) {} - - task *execute() __TBB_override { - task *new_task = my_node.apply_body_bypass( ); - if(new_task == SUCCESSFULLY_ENQUEUED) return NULL; - return new_task; - } + wait_functor(tbb::task* t) : graph_root_task(t) {} + void operator()() const { graph_root_task->wait_for_all(); } }; -// ------------------------ end of node task bodies ----------------------------------- - - //! An empty functor that takes an Input and returns a default constructed Output - template< typename Input, typename Output > - struct empty_body { - Output operator()( const Input & ) const { return Output(); } - }; - - //! A node_cache maintains a std::queue of elements of type T. Each operation is protected by a lock. - template< typename T, typename M=spin_mutex > - class node_cache { - public: - - typedef size_t size_type; - - bool empty() { - typename mutex_type::scoped_lock lock( my_mutex ); - return internal_empty(); - } - - void add( T &n ) { - typename mutex_type::scoped_lock lock( my_mutex ); - internal_push(n); - } - - void remove( T &n ) { - typename mutex_type::scoped_lock lock( my_mutex ); - for ( size_t i = internal_size(); i != 0; --i ) { - T &s = internal_pop(); - if ( &s == &n ) return; // only remove one predecessor per request - internal_push(s); - } - } - - void clear() { - while( !my_q.empty()) (void)my_q.pop(); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_built_predecessors.clear(); -#endif + //! 
A functor that spawns a task + class spawn_functor : tbb::internal::no_assign { + tbb::task& spawn_task; + public: + spawn_functor(tbb::task& t) : spawn_task(t) {} + void operator()() const { + FLOW_SPAWN(spawn_task); } + }; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef edge_container built_predecessors_type; - built_predecessors_type &built_predecessors() { return my_built_predecessors; } - - typedef typename edge_container::edge_list_type predecessor_list_type; - void internal_add_built_predecessor( T &n ) { - typename mutex_type::scoped_lock lock( my_mutex ); - my_built_predecessors.add_edge(n); + void prepare_task_arena(bool reinit = false) { + if (reinit) { + __TBB_ASSERT(my_task_arena, "task arena is NULL"); + my_task_arena->terminate(); + my_task_arena->initialize(tbb::task_arena::attach()); } - - void internal_delete_built_predecessor( T &n ) { - typename mutex_type::scoped_lock lock( my_mutex ); - my_built_predecessors.delete_edge(n); + else { + __TBB_ASSERT(my_task_arena == NULL, "task arena is not NULL"); + my_task_arena = new tbb::task_arena(tbb::task_arena::attach()); } + if (!my_task_arena->is_active()) // failed to attach + my_task_arena->initialize(); // create a new, default-initialized arena + __TBB_ASSERT(my_task_arena->is_active(), "task arena is not active"); + } - void copy_predecessors( predecessor_list_type &v) { - typename mutex_type::scoped_lock lock( my_mutex ); - my_built_predecessors.copy_edges(v); - } +public: + //! Constructs a graph with isolated task_group_context + graph(); - size_t predecessor_count() { - typename mutex_type::scoped_lock lock(my_mutex); - return (size_t)(my_built_predecessors.edge_count()); - } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ + //! Constructs a graph with use_this_context as context + explicit graph(tbb::task_group_context& use_this_context); - protected: + //! Destroys the graph. + /** Calls wait_for_all, then destroys the root task and context. 
*/ + ~graph(); - typedef M mutex_type; - mutex_type my_mutex; - std::queue< T * > my_q; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - built_predecessors_type my_built_predecessors; +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name(const char *name); #endif - // Assumes lock is held - inline bool internal_empty( ) { - return my_q.empty(); - } - - // Assumes lock is held - inline size_type internal_size( ) { - return my_q.size(); - } + void increment_wait_count() { + reserve_wait(); + } - // Assumes lock is held - inline void internal_push( T &n ) { - my_q.push(&n); - } + void decrement_wait_count() { + release_wait(); + } - // Assumes lock is held - inline T &internal_pop() { - T *v = my_q.front(); - my_q.pop(); - return *v; + //! Used to register that an external entity may still interact with the graph. + /** The graph will not return from wait_for_all until a matching number of decrement_wait_count calls + is made. */ + void reserve_wait() __TBB_override; + + //! Deregisters an external entity that may have interacted with the graph. + /** The graph will not return from wait_for_all until all the number of decrement_wait_count calls + matches the number of increment_wait_count calls. */ + void release_wait() __TBB_override; + + //! Spawns a task that runs a body and puts its output to a specific receiver + /** The task is spawned as a child of the graph. This is useful for running tasks + that need to block a wait_for_all() on the graph. For example a one-off source. */ + template< typename Receiver, typename Body > + void run(Receiver &r, Body body) { + if (tbb::flow::interface11::internal::is_graph_active(*this)) { + task* rtask = new (task::allocate_additional_child_of(*root_task())) + run_and_put_task< Receiver, Body >(r, body); + my_task_arena->execute(spawn_functor(*rtask)); } + } - }; - - //! 
A cache of predecessors that only supports try_get - template< typename T, typename M=spin_mutex > -#if __TBB_PREVIEW_ASYNC_MSG - // TODO: make predecessor_cache type T-independent when async_msg becomes regular feature - class predecessor_cache : public node_cache< untyped_sender, M > { -#else - class predecessor_cache : public node_cache< sender, M > { -#endif // __TBB_PREVIEW_ASYNC_MSG - public: - typedef M mutex_type; - typedef T output_type; -#if __TBB_PREVIEW_ASYNC_MSG - typedef untyped_sender predecessor_type; - typedef untyped_receiver successor_type; -#else - typedef sender predecessor_type; - typedef receiver successor_type; -#endif // __TBB_PREVIEW_ASYNC_MSG - - predecessor_cache( ) : my_owner( NULL ) { } - - void set_owner( successor_type *owner ) { my_owner = owner; } - - bool get_item( output_type &v ) { - - bool msg = false; - - do { - predecessor_type *src; - { - typename mutex_type::scoped_lock lock(this->my_mutex); - if ( this->internal_empty() ) { - break; - } - src = &this->internal_pop(); - } - - // Try to get from this sender - msg = src->try_get( v ); - - if (msg == false) { - // Relinquish ownership of the edge - if (my_owner) - src->register_successor( *my_owner ); - } else { - // Retain ownership of the edge - this->add(*src); - } - } while ( msg == false ); - return msg; + //! Spawns a task that runs a function object + /** The task is spawned as a child of the graph. This is useful for running tasks + that need to block a wait_for_all() on the graph. For example a one-off source. */ + template< typename Body > + void run(Body body) { + if (tbb::flow::interface11::internal::is_graph_active(*this)) { + task* rtask = new (task::allocate_additional_child_of(*root_task())) run_task< Body >(body); + my_task_arena->execute(spawn_functor(*rtask)); } + } - // If we are removing arcs (rf_clear_edges), call clear() rather than reset(). 
- void reset() { - if (my_owner) { - for(;;) { - predecessor_type *src; - { - if (this->internal_empty()) break; - src = &this->internal_pop(); - } - src->register_successor( *my_owner ); - } + //! Wait until graph is idle and decrement_wait_count calls equals increment_wait_count calls. + /** The waiting thread will go off and steal work while it is block in the wait_for_all. */ + void wait_for_all() { + cancelled = false; + caught_exception = false; + if (my_root_task) { +#if TBB_USE_EXCEPTIONS + try { +#endif + my_task_arena->execute(wait_functor(my_root_task)); +#if __TBB_TASK_GROUP_CONTEXT + cancelled = my_context->is_group_execution_cancelled(); +#endif +#if TBB_USE_EXCEPTIONS + } + catch (...) { + my_root_task->set_ref_count(1); + my_context->reset(); + caught_exception = true; + cancelled = true; + throw; } - } - - protected: - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - using node_cache< predecessor_type, M >::my_built_predecessors; #endif - successor_type *my_owner; - }; - - //! An cache of predecessors that supports requests and reservations - // TODO: make reservable_predecessor_cache type T-independent when async_msg becomes regular feature - template< typename T, typename M=spin_mutex > - class reservable_predecessor_cache : public predecessor_cache< T, M > { - public: - typedef M mutex_type; - typedef T output_type; -#if __TBB_PREVIEW_ASYNC_MSG - typedef untyped_sender predecessor_type; - typedef untyped_receiver successor_type; -#else - typedef sender predecessor_type; - typedef receiver successor_type; -#endif // __TBB_PREVIEW_ASYNC_MSG - - reservable_predecessor_cache( ) : reserved_src(NULL) { } - - bool - try_reserve( output_type &v ) { - bool msg = false; - - do { - { - typename mutex_type::scoped_lock lock(this->my_mutex); - if ( reserved_src || this->internal_empty() ) - return false; - - reserved_src = &this->internal_pop(); - } - - // Try to get from this sender - msg = reserved_src->try_reserve( v ); - - if (msg == false) { - typename 
mutex_type::scoped_lock lock(this->my_mutex); - // Relinquish ownership of the edge - reserved_src->register_successor( *this->my_owner ); - reserved_src = NULL; - } else { - // Retain ownership of the edge - this->add( *reserved_src ); - } - } while ( msg == false ); - - return msg; - } - - bool - try_release( ) { - reserved_src->try_release( ); - reserved_src = NULL; - return true; - } - - bool - try_consume( ) { - reserved_src->try_consume( ); - reserved_src = NULL; - return true; - } - - void reset( ) { - reserved_src = NULL; - predecessor_cache::reset( ); - } - - void clear() { - reserved_src = NULL; - predecessor_cache::clear(); - } - - private: - predecessor_type *reserved_src; - }; - - - //! An abstract cache of successors - // TODO: make successor_cache type T-independent when async_msg becomes regular feature - template - class successor_cache : tbb::internal::no_copy { - protected: - - typedef M mutex_type; - mutex_type my_mutex; - -#if __TBB_PREVIEW_ASYNC_MSG - typedef untyped_receiver successor_type; - typedef untyped_receiver *pointer_type; - typedef untyped_sender owner_type; -#else - typedef receiver successor_type; - typedef receiver *pointer_type; - typedef sender owner_type; -#endif // __TBB_PREVIEW_ASYNC_MSG - typedef std::list< pointer_type > successors_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container my_built_successors; +#if __TBB_TASK_GROUP_CONTEXT + // TODO: the "if" condition below is just a work-around to support the concurrent wait + // mode. The cancellation and exception mechanisms are still broken in this mode. + // Consider using task group not to re-implement the same functionality. 
+ if (!(my_context->traits() & tbb::task_group_context::concurrent_wait)) { + my_context->reset(); // consistent with behavior in catch() #endif - successors_type my_successors; - - owner_type *my_owner; - - public: -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - typedef typename edge_container::edge_list_type successor_list_type; - - edge_container &built_successors() { return my_built_successors; } - - void internal_add_built_successor( successor_type &r) { - typename mutex_type::scoped_lock l(my_mutex, true); - my_built_successors.add_edge( r ); - } - - void internal_delete_built_successor( successor_type &r) { - typename mutex_type::scoped_lock l(my_mutex, true); - my_built_successors.delete_edge(r); - } - - void copy_successors( successor_list_type &v) { - typename mutex_type::scoped_lock l(my_mutex, false); - my_built_successors.copy_edges(v); - } - - size_t successor_count() { - typename mutex_type::scoped_lock l(my_mutex,false); - return my_built_successors.edge_count(); - } - -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - - successor_cache( ) : my_owner(NULL) {} - - void set_owner( owner_type *owner ) { my_owner = owner; } - - virtual ~successor_cache() {} - - void register_successor( successor_type &r ) { - typename mutex_type::scoped_lock l(my_mutex, true); - my_successors.push_back( &r ); - } - - void remove_successor( successor_type &r ) { - typename mutex_type::scoped_lock l(my_mutex, true); - for ( typename successors_type::iterator i = my_successors.begin(); - i != my_successors.end(); ++i ) { - if ( *i == & r ) { - my_successors.erase(i); - break; - } + my_root_task->set_ref_count(1); +#if __TBB_TASK_GROUP_CONTEXT } - } - - bool empty() { - typename mutex_type::scoped_lock l(my_mutex, false); - return my_successors.empty(); - } - - void clear() { - my_successors.clear(); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_built_successors.clear(); #endif } + } -#if !__TBB_PREVIEW_ASYNC_MSG - virtual task * try_put_task( const T &t ) = 0; -#endif // 
__TBB_PREVIEW_ASYNC_MSG - }; // successor_cache - - //! An abstract cache of successors, specialized to continue_msg - template<> - class successor_cache< continue_msg > : tbb::internal::no_copy { - protected: + //! Returns the root task of the graph + tbb::task * root_task() { + return my_root_task; + } - typedef spin_rw_mutex mutex_type; - mutex_type my_mutex; + // ITERATORS + template + friend class tbb::flow::interface11::graph_iterator; + + // Graph iterator typedefs + typedef tbb::flow::interface11::graph_iterator iterator; + typedef tbb::flow::interface11::graph_iterator const_iterator; + + // Graph iterator constructors + //! start iterator + iterator begin(); + //! end iterator + iterator end(); + //! start const iterator + const_iterator begin() const; + //! end const iterator + const_iterator end() const; + //! start const iterator + const_iterator cbegin() const; + //! end const iterator + const_iterator cend() const; + + //! return status of graph execution + bool is_cancelled() { return cancelled; } + bool exception_thrown() { return caught_exception; } + + // thread-unsafe state reset. 
+ void reset(tbb::flow::interface11::reset_flags f = tbb::flow::interface11::rf_reset_protocol); -#if __TBB_PREVIEW_ASYNC_MSG - typedef untyped_receiver successor_type; - typedef untyped_receiver *pointer_type; -#else - typedef receiver successor_type; - typedef receiver *pointer_type; -#endif // __TBB_PREVIEW_ASYNC_MSG - typedef std::list< pointer_type > successors_type; - successors_type my_successors; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - edge_container my_built_successors; - typedef edge_container::edge_list_type successor_list_type; +private: + tbb::task *my_root_task; +#if __TBB_TASK_GROUP_CONTEXT + tbb::task_group_context *my_context; #endif + bool own_context; + bool cancelled; + bool caught_exception; + bool my_is_active; + task_list_type my_reset_task_list; - sender *my_owner; - - public: - -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - - edge_container &built_successors() { return my_built_successors; } + tbb::flow::interface11::graph_node *my_nodes, *my_nodes_last; - void internal_add_built_successor( successor_type &r) { - mutex_type::scoped_lock l(my_mutex, true); - my_built_successors.add_edge( r ); - } - - void internal_delete_built_successor( successor_type &r) { - mutex_type::scoped_lock l(my_mutex, true); - my_built_successors.delete_edge(r); - } - - void copy_successors( successor_list_type &v) { - mutex_type::scoped_lock l(my_mutex, false); - my_built_successors.copy_edges(v); - } - - size_t successor_count() { - mutex_type::scoped_lock l(my_mutex,false); - return my_built_successors.edge_count(); - } + tbb::spin_mutex nodelist_mutex; + void register_node(tbb::flow::interface11::graph_node *n); + void remove_node(tbb::flow::interface11::graph_node *n); -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ + tbb::task_arena* my_task_arena; - successor_cache( ) : my_owner(NULL) {} +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + tbb::flow::interface11::internal::graph_task_priority_queue_t my_priority_queue; +#endif - void set_owner( sender *owner ) { my_owner = 
owner; } + friend void tbb::flow::interface11::internal::activate_graph(graph& g); + friend void tbb::flow::interface11::internal::deactivate_graph(graph& g); + friend bool tbb::flow::interface11::internal::is_graph_active(graph& g); + friend tbb::task& tbb::flow::interface11::internal::prioritize_task(graph& g, tbb::task& arena_task); + friend void tbb::flow::interface11::internal::spawn_in_graph_arena(graph& g, tbb::task& arena_task); + friend void tbb::flow::interface11::internal::enqueue_in_graph_arena(graph &g, tbb::task& arena_task); + friend void tbb::flow::interface11::internal::add_task_to_graph_reset_list(graph& g, tbb::task *tp); - virtual ~successor_cache() {} + friend class tbb::interface7::internal::task_arena_base; - void register_successor( successor_type &r ) { - mutex_type::scoped_lock l(my_mutex, true); - my_successors.push_back( &r ); - if ( my_owner && r.is_continue_receiver() ) { - r.register_predecessor( *my_owner ); - } - } +}; // class graph +} // namespace interface10 - void remove_successor( successor_type &r ) { - mutex_type::scoped_lock l(my_mutex, true); - for ( successors_type::iterator i = my_successors.begin(); - i != my_successors.end(); ++i ) { - if ( *i == & r ) { - // TODO: Check if we need to test for continue_receiver before - // removing from r. - if ( my_owner ) - r.remove_predecessor( *my_owner ); - my_successors.erase(i); - break; - } - } - } +namespace interface11 { - bool empty() { - mutex_type::scoped_lock l(my_mutex, false); - return my_successors.empty(); - } +using tbb::flow::interface10::graph; - void clear() { - my_successors.clear(); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - my_built_successors.clear(); +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +namespace internal{ +class get_graph_helper; +} #endif - } -#if !__TBB_PREVIEW_ASYNC_MSG - virtual task * try_put_task( const continue_msg &t ) = 0; -#endif // __TBB_PREVIEW_ASYNC_MSG +//! The base of all graph nodes. 
+class graph_node : tbb::internal::no_copy { + friend class graph; + template + friend class graph_iterator; - }; // successor_cache< continue_msg > +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + friend class internal::get_graph_helper; +#endif - //! A cache of successors that are broadcast to - // TODO: make broadcast_cache type T-independent when async_msg becomes regular feature - template - class broadcast_cache : public successor_cache { - typedef M mutex_type; - typedef typename successor_cache::successors_type successors_type; +protected: + graph& my_graph; + graph_node *next, *prev; +public: + explicit graph_node(graph& g); - public: + virtual ~graph_node(); - broadcast_cache( ) {} +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + virtual void set_name(const char *name) = 0; +#endif - // as above, but call try_put_task instead, and return the last task we received (if any) -#if __TBB_PREVIEW_ASYNC_MSG - template - task * try_put_task( const X &t ) { -#else - task * try_put_task( const T &t ) __TBB_override { -#endif // __TBB_PREVIEW_ASYNC_MSG - task * last_task = NULL; - bool upgraded = true; - typename mutex_type::scoped_lock l(this->my_mutex, upgraded); - typename successors_type::iterator i = this->my_successors.begin(); - while ( i != this->my_successors.end() ) { - task *new_task = (*i)->try_put_task(t); - last_task = combine_tasks(last_task, new_task); // enqueue if necessary - if(new_task) { - ++i; - } - else { // failed - if ( (*i)->register_predecessor(*this->my_owner) ) { - if (!upgraded) { - l.upgrade_to_writer(); - upgraded = true; - } - i = this->my_successors.erase(i); - } else { - ++i; - } - } - } - return last_task; - } +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + virtual void extract() = 0; +#endif - }; +protected: + // performs the reset on an individual node. + virtual void reset_node(reset_flags f = rf_reset_protocol) = 0; +}; // class graph_node - //! 
A cache of successors that are put in a round-robin fashion - // TODO: make round_robin_cache type T-independent when async_msg becomes regular feature - template - class round_robin_cache : public successor_cache { - typedef size_t size_type; - typedef M mutex_type; - typedef typename successor_cache::successors_type successors_type; +namespace internal { - public: +inline void activate_graph(graph& g) { + g.my_is_active = true; +} - round_robin_cache( ) {} +inline void deactivate_graph(graph& g) { + g.my_is_active = false; +} - size_type size() { - typename mutex_type::scoped_lock l(this->my_mutex, false); - return this->my_successors.size(); - } +inline bool is_graph_active(graph& g) { + return g.my_is_active; +} -#if __TBB_PREVIEW_ASYNC_MSG - template - task * try_put_task( const X &t ) { +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +inline tbb::task& prioritize_task(graph& g, tbb::task& t) { + task* critical_task = &t; + // TODO: change flow graph's interfaces to work with graph_task type instead of tbb::task. + graph_task* gt = static_cast(&t); + if( gt->priority != no_priority ) { + //! Non-preemptive priority pattern. The original task is submitted as a work item to the + //! priority queue, and a new critical task is created to take and execute a work item with + //! the highest known priority. The reference counting responsibility is transferred (via + //! allocate_continuation) to the new task. 
+ critical_task = new( gt->allocate_continuation() ) priority_task_selector(g.my_priority_queue); + tbb::internal::make_critical( *critical_task ); + g.my_priority_queue.push(gt); + } + return *critical_task; +} #else - task *try_put_task( const T &t ) __TBB_override { -#endif // __TBB_PREVIEW_ASYNC_MSG - bool upgraded = true; - typename mutex_type::scoped_lock l(this->my_mutex, upgraded); - typename successors_type::iterator i = this->my_successors.begin(); - while ( i != this->my_successors.end() ) { - task *new_task = (*i)->try_put_task(t); - if ( new_task ) { - return new_task; - } else { - if ( (*i)->register_predecessor(*this->my_owner) ) { - if (!upgraded) { - l.upgrade_to_writer(); - upgraded = true; - } - i = this->my_successors.erase(i); - } - else { - ++i; - } - } - } - return NULL; - } - }; - - template - class decrementer : public continue_receiver, tbb::internal::no_copy { - - T *my_node; - - task *execute() __TBB_override { - return my_node->decrement_counter(); - } - - public: +inline tbb::task& prioritize_task(graph&, tbb::task& t) { + return t; +} +#endif /* __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES */ + +//! Spawns a task inside graph arena +inline void spawn_in_graph_arena(graph& g, tbb::task& arena_task) { + if (is_graph_active(g)) { + graph::spawn_functor s_fn(prioritize_task(g, arena_task)); + __TBB_ASSERT(g.my_task_arena && g.my_task_arena->is_active(), NULL); + g.my_task_arena->execute(s_fn); + } +} - typedef continue_msg input_type; - typedef continue_msg output_type; - decrementer( int number_of_predecessors = 0 ) : continue_receiver( number_of_predecessors ) { } - void set_owner( T *node ) { my_node = node; } - }; +//! Enqueues a task inside graph arena +inline void enqueue_in_graph_arena(graph &g, tbb::task& arena_task) { + if (is_graph_active(g)) { + __TBB_ASSERT( g.my_task_arena && g.my_task_arena->is_active(), "Is graph's arena initialized and active?" 
); + task::enqueue(prioritize_task(g, arena_task), *g.my_task_arena); + } +} +inline void add_task_to_graph_reset_list(graph& g, tbb::task *tp) { + g.my_reset_task_list.push_back(tp); } -#endif // __TBB__flow_graph_impl_H +} // namespace internal + +} // namespace interfaceX +} // namespace flow +} // namespace tbb +#endif // __TBB_flow_graph_impl_H diff --git a/inst/include/tbb/internal/_flow_graph_indexer_impl.h b/inst/include/tbb/internal/_flow_graph_indexer_impl.h index 1fc6690c..2900db91 100644 --- a/inst/include/tbb/internal/_flow_graph_indexer_impl.h +++ b/inst/include/tbb/internal/_flow_graph_indexer_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB__flow_graph_indexer_impl_H @@ -43,18 +39,18 @@ namespace internal { template struct indexer_helper { template - static inline void set_indexer_node_pointer(PortTuple &my_input, IndexerNodeBaseType *p) { + static inline void set_indexer_node_pointer(PortTuple &my_input, IndexerNodeBaseType *p, graph& g) { typedef typename tuple_element::type T; task *(*indexer_node_put_task)(const T&, void *) = do_try_put; - tbb::flow::get(my_input).set_up(p, indexer_node_put_task); - indexer_helper::template set_indexer_node_pointer(my_input, p); + tbb::flow::get(my_input).set_up(p, indexer_node_put_task, g); + indexer_helper::template set_indexer_node_pointer(my_input, p, g); } template static inline void reset_inputs(InputTuple &my_input, reset_flags f) { indexer_helper::reset_inputs(my_input, f); tbb::flow::get(my_input).reset_receiver(f); } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION template static inline void extract(InputTuple &my_input) { indexer_helper::extract(my_input); @@ -66,16 +62,16 @@ namespace internal { template struct indexer_helper { template - static inline void set_indexer_node_pointer(PortTuple &my_input, IndexerNodeBaseType *p) { + static inline void set_indexer_node_pointer(PortTuple &my_input, IndexerNodeBaseType *p, graph& g) { typedef typename tuple_element<0, TupleTypes>::type T; task *(*indexer_node_put_task)(const T&, void *) = do_try_put; - tbb::flow::get<0>(my_input).set_up(p, indexer_node_put_task); + tbb::flow::get<0>(my_input).set_up(p, indexer_node_put_task, g); } template static inline void reset_inputs(InputTuple &my_input, reset_flags f) { tbb::flow::get<0>(my_input).reset_receiver(f); } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION template static inline void extract(InputTuple &my_input) { tbb::flow::get<0>(my_input).extract_receiver(); @@ -89,22 +85,24 @@ namespace internal { void* my_indexer_ptr; typedef task* (* forward_function_ptr)(T const &, 
void* ); forward_function_ptr my_try_put_task; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION spin_mutex my_pred_mutex; typedef typename receiver::built_predecessors_type built_predecessors_type; built_predecessors_type my_built_predecessors; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + graph* my_graph; public: -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES - indexer_input_port() : my_pred_mutex() {} - indexer_input_port( const indexer_input_port & /*other*/ ) : receiver(), my_pred_mutex() { +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + indexer_input_port() : my_pred_mutex(), my_graph(NULL) {} + indexer_input_port( const indexer_input_port & other) : receiver(), my_pred_mutex(), my_graph(other.my_graph) { } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ - void set_up(void *p, forward_function_ptr f) { - my_indexer_ptr = p; - my_try_put_task = f; - } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + void set_up(void* p, forward_function_ptr f, graph& g) { + my_indexer_ptr = p; + my_try_put_task = f; + my_graph = &g; + } +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef typename receiver::predecessor_list_type predecessor_list_type; typedef typename receiver::predecessor_type predecessor_type; @@ -126,7 +124,7 @@ namespace internal { spin_mutex::scoped_lock l(my_pred_mutex); my_built_predecessors.copy_edges(v); } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ protected: template< typename R, typename B > friend class run_and_put_task; template friend class internal::broadcast_cache; @@ -135,14 +133,18 @@ namespace internal { return my_try_put_task(v, my_indexer_ptr); } + graph& graph_reference() const __TBB_override { + return *my_graph; + } + public: -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION void reset_receiver(reset_flags f) __TBB_override { if(f&rf_clear_edges) 
my_built_predecessors.clear(); } #else void reset_receiver(reset_flags /*f*/) __TBB_override { } #endif -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION void extract_receiver() { my_built_predecessors.receiver_extract(*this); } #endif }; @@ -154,7 +156,7 @@ namespace internal { typedef OutputType output_type; typedef InputTuple input_type; - // Some versions of Intel C++ compiler fail to generate an implicit constructor for the class which has std::tuple as a member. + // Some versions of Intel(R) C++ Compiler fail to generate an implicit constructor for the class which has std::tuple as a member. indexer_node_FE() : my_inputs() {} input_type &input_ports() { return my_inputs; } @@ -174,7 +176,7 @@ namespace internal { typedef StructTypes tuple_types; typedef typename sender::successor_type successor_type; typedef indexer_node_FE input_ports_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef typename sender::built_successors_type built_successors_type; typedef typename sender::successor_list_type successor_list_type; #endif @@ -182,7 +184,7 @@ namespace internal { private: // ----------- Aggregator ------------ enum op_type { reg_succ, rem_succ, try__put_task -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION , add_blt_succ, del_blt_succ, blt_succ_cnt, blt_succ_cpy #endif @@ -196,7 +198,7 @@ namespace internal { output_type const *my_arg; successor_type *my_succ; task *bypass_t; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION size_t cnt_val; successor_list_type *succv; #endif @@ -233,7 +235,7 @@ namespace internal { __TBB_store_with_release(current->status, SUCCEEDED); // return of try_put_task actual return value } break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION case add_blt_succ: my_successors.internal_add_built_successor(*(current->my_succ)); __TBB_store_with_release(current->status, SUCCEEDED); @@ 
-250,20 +252,20 @@ namespace internal { my_successors.copy_successors(*(current->succv)); __TBB_store_with_release(current->status, SUCCEEDED); break; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ } } } // ---------- end aggregator ----------- public: indexer_node_base(graph& g) : graph_node(g), input_ports_type() { - indexer_helper::set_indexer_node_pointer(this->my_inputs, this); + indexer_helper::set_indexer_node_pointer(this->my_inputs, this, g); my_successors.set_owner(this); my_aggregator.initialize_handler(handler_type(this)); } indexer_node_base(const indexer_node_base& other) : graph_node(other.my_graph), input_ports_type(), sender() { - indexer_helper::set_indexer_node_pointer(this->my_inputs, this); + indexer_helper::set_indexer_node_pointer(this->my_inputs, this, other.my_graph); my_successors.set_owner(this); my_aggregator.initialize_handler(handler_type(this)); } @@ -286,7 +288,7 @@ namespace internal { return op_data.bypass_t; } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION built_successors_type &built_successors() __TBB_override { return my_successors.built_successors(); } @@ -315,7 +317,7 @@ namespace internal { my_successors.built_successors().sender_extract(*this); indexer_helper::extract(this->my_inputs); } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ protected: void reset_node(reset_flags f) __TBB_override { if(f & rf_clear_edges) { diff --git a/inst/include/tbb/internal/_flow_graph_item_buffer_impl.h b/inst/include/tbb/internal/_flow_graph_item_buffer_impl.h index 85d2686d..da76da1f 100644 --- a/inst/include/tbb/internal/_flow_graph_item_buffer_impl.h +++ b/inst/include/tbb/internal/_flow_graph_item_buffer_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except 
in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB__flow_graph_item_buffer_impl_H @@ -144,7 +140,7 @@ namespace internal { return get_my_item(my_tail - 1); } - // following methods are for reservation of the front of a bufffer. + // following methods are for reservation of the front of a buffer. void reserve_item(size_type i) { __TBB_ASSERT(my_item_valid(i) && !my_item_reserved(i), "item cannot be reserved"); item(i).second = reserved_item; } void release_item(size_type i) { __TBB_ASSERT(my_item_reserved(i), "item is not reserved"); item(i).second = has_item; } diff --git a/inst/include/tbb/internal/_flow_graph_join_impl.h b/inst/include/tbb/internal/_flow_graph_join_impl.h index 4999bef7..16837c7a 100644 --- a/inst/include/tbb/internal/_flow_graph_join_impl.h +++ b/inst/include/tbb/internal/_flow_graph_join_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB__flow_graph_join_impl_H @@ -27,21 +23,21 @@ namespace internal { - struct forwarding_base { - forwarding_base(graph &g) : graph_pointer(&g) {} + struct forwarding_base : tbb::internal::no_assign { + forwarding_base(graph &g) : graph_ref(g) {} virtual ~forwarding_base() {} // decrement_port_count may create a forwarding task. If we cannot handle the task // ourselves, ask decrement_port_count to deal with it. 
virtual task * decrement_port_count(bool handle_task) = 0; virtual void increment_port_count() = 0; // moved here so input ports can queue tasks - graph* graph_pointer; + graph& graph_ref; }; // specialization that lets us keep a copy of the current_key for building results. // KeyType can be a reference type. template - struct matching_forwarding_base :public forwarding_base { + struct matching_forwarding_base : public forwarding_base { typedef typename tbb::internal::strip::type current_key_type; matching_forwarding_base(graph &g) : forwarding_base(g) { } virtual task * increment_key_count(current_key_type const & /*t*/, bool /*handle_task*/) = 0; // {return NULL;} @@ -126,7 +122,7 @@ namespace internal { tbb::flow::get(my_input).reset_receiver(f); } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION template static inline void extract_inputs(InputTuple &my_input) { join_helper::extract_inputs(my_input); @@ -200,7 +196,7 @@ namespace internal { tbb::flow::get<0>(my_input).reset_receiver(f); } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION template static inline void extract_inputs(InputTuple &my_input) { tbb::flow::get<0>(my_input).extract_receiver(); @@ -214,18 +210,17 @@ namespace internal { public: typedef T input_type; typedef typename receiver::predecessor_type predecessor_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef typename receiver::predecessor_list_type predecessor_list_type; typedef typename receiver::built_predecessors_type built_predecessors_type; #endif private: // ----------- Aggregator ------------ enum op_type { reg_pred, rem_pred, res_item, rel_res, con_res -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION , add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy #endif }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; typedef reserving_port class_type; class reserving_port_operation : public aggregated_operation { @@ 
-234,7 +229,7 @@ namespace internal { union { T *my_arg; predecessor_type *my_pred; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION size_t cnt_val; predecessor_list_type *plist; #endif @@ -294,7 +289,7 @@ namespace internal { my_predecessors.try_consume( ); __TBB_store_with_release(current->status, SUCCEEDED); break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION case add_blt_pred: my_predecessors.internal_add_built_predecessor(*(current->my_pred)); __TBB_store_with_release(current->status, SUCCEEDED); @@ -311,7 +306,7 @@ namespace internal { my_predecessors.copy_predecessors(*(current->plist)); __TBB_store_with_release(current->status, SUCCEEDED); break; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ } } } @@ -324,6 +319,10 @@ namespace internal { return NULL; } + graph& graph_reference() const __TBB_override { + return my_join->graph_ref; + } + public: //! Constructor @@ -378,7 +377,7 @@ namespace internal { my_aggregator.execute(&op_data); } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION built_predecessors_type &built_predecessors() __TBB_override { return my_predecessors.built_predecessors(); } void internal_add_built_predecessor(predecessor_type &src) __TBB_override { reserving_port_operation op_data(src, add_blt_pred); @@ -406,7 +405,7 @@ namespace internal { my_predecessors.built_predecessors().receiver_extract(*this); } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ void reset_receiver( reset_flags f) __TBB_override { if(f & rf_clear_edges) my_predecessors.clear(); @@ -417,6 +416,10 @@ namespace internal { } private: +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + friend class get_graph_helper; +#endif + forwarding_base *my_join; reservable_predecessor_cache< T, null_mutex > my_predecessors; bool reserved; @@ -429,7 +432,7 @@ namespace internal { typedef T input_type; typedef 
typename receiver::predecessor_type predecessor_type; typedef queueing_port class_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef typename receiver::built_predecessors_type built_predecessors_type; typedef typename receiver::predecessor_list_type predecessor_list_type; #endif @@ -437,18 +440,17 @@ namespace internal { // ----------- Aggregator ------------ private: enum op_type { get__item, res_port, try__put_task -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION , add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy #endif }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; class queueing_port_operation : public aggregated_operation { public: char type; T my_val; T *my_arg; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION predecessor_type *pred; size_t cnt_val; predecessor_list_type *plist; @@ -509,7 +511,7 @@ namespace internal { } __TBB_store_with_release(current->status, SUCCEEDED); break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION case add_blt_pred: my_built_predecessors.add_edge(*(current->pred)); __TBB_store_with_release(current->status, SUCCEEDED); @@ -526,7 +528,7 @@ namespace internal { my_built_predecessors.copy_edges(*(current->plist)); __TBB_store_with_release(current->status, SUCCEEDED); break; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ } } } @@ -544,6 +546,10 @@ namespace internal { return op_data.bypass_t; } + graph& graph_reference() const __TBB_override { + return my_join->graph_ref; + } + public: //! 
Constructor @@ -577,7 +583,7 @@ namespace internal { return; } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } void internal_add_built_predecessor(predecessor_type &p) __TBB_override { @@ -608,20 +614,24 @@ namespace internal { item_buffer::reset(); my_built_predecessors.receiver_extract(*this); } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ void reset_receiver(reset_flags f) __TBB_override { tbb::internal::suppress_unused_warning(f); item_buffer::reset(); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION if (f & rf_clear_edges) my_built_predecessors.clear(); #endif } private: +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + friend class get_graph_helper; +#endif + forwarding_base *my_join; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION edge_container my_built_predecessors; #endif }; // queueing_port @@ -659,7 +669,7 @@ namespace internal { typedef typename TraitsType::TtoK type_to_key_func_type; typedef typename TraitsType::KHash hash_compare_type; typedef hash_buffer< key_type, input_type, type_to_key_func_type, hash_compare_type > buffer_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef typename receiver::built_predecessors_type built_predecessors_type; typedef typename receiver::predecessor_list_type predecessor_list_type; #endif @@ -667,18 +677,17 @@ namespace internal { // ----------- Aggregator ------------ private: enum op_type { try__put, get__item, res_port -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION , add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy #endif }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; class key_matching_port_operation : public aggregated_operation { public: char type; input_type my_val; input_type *my_arg; -#if 
TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION predecessor_type *pred; size_t cnt_val; predecessor_list_type *plist; @@ -721,7 +730,7 @@ namespace internal { this->delete_with_key(my_join->current_key); __TBB_store_with_release(current->status, SUCCEEDED); break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION case add_blt_pred: my_built_predecessors.add_edge(*(current->pred)); __TBB_store_with_release(current->status, SUCCEEDED); @@ -759,6 +768,10 @@ namespace internal { return rtask; } + graph& graph_reference() const __TBB_override { + return my_join->graph_ref; + } + public: key_matching_port() : receiver(), buffer_type() { @@ -789,7 +802,7 @@ namespace internal { return op_data.status == SUCCEEDED; } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } void internal_add_built_predecessor(predecessor_type &p) __TBB_override { @@ -825,7 +838,7 @@ namespace internal { return; } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION void extract_receiver() { buffer_type::reset(); my_built_predecessors.receiver_extract(*this); @@ -834,7 +847,7 @@ namespace internal { void reset_receiver(reset_flags f ) __TBB_override { tbb::internal::suppress_unused_warning(f); buffer_type::reset(); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION if (f & rf_clear_edges) my_built_predecessors.clear(); #endif @@ -844,7 +857,7 @@ namespace internal { // my_join forwarding base used to count number of inputs that // received key. 
matching_forwarding_base *my_join; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION edge_container my_built_predecessors; #endif }; // key_matching_port @@ -871,7 +884,7 @@ namespace internal { join_helper::set_join_node_pointer(my_inputs, this); } - join_node_FE(const join_node_FE& other) : forwarding_base(*(other.forwarding_base::graph_pointer)), my_node(NULL) { + join_node_FE(const join_node_FE& other) : forwarding_base((other.forwarding_base::graph_ref)), my_node(NULL) { ports_with_no_inputs = N; join_helper::set_join_node_pointer(my_inputs, this); } @@ -885,11 +898,11 @@ namespace internal { // if all input_ports have predecessors, spawn forward to try and consume tuples task * decrement_port_count(bool handle_task) __TBB_override { if(ports_with_no_inputs.fetch_and_decrement() == 1) { - if(this->graph_pointer->is_active()) { - task *rtask = new ( task::allocate_additional_child_of( *(this->graph_pointer->root_task()) ) ) + if(internal::is_graph_active(this->graph_ref)) { + task *rtask = new ( task::allocate_additional_child_of( *(this->graph_ref.root_task()) ) ) forward_task_bypass(*my_node); if(!handle_task) return rtask; - FLOW_SPAWN(*rtask); + internal::spawn_in_graph_arena(this->graph_ref, *rtask); } } return NULL; @@ -905,7 +918,7 @@ namespace internal { join_helper::reset_inputs(my_inputs, f); } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION void extract( ) { // called outside of parallel contexts ports_with_no_inputs = N; @@ -949,7 +962,7 @@ namespace internal { join_helper::set_join_node_pointer(my_inputs, this); } - join_node_FE(const join_node_FE& other) : forwarding_base(*(other.forwarding_base::graph_pointer)), my_node(NULL) { + join_node_FE(const join_node_FE& other) : forwarding_base((other.forwarding_base::graph_ref)), my_node(NULL) { ports_with_no_items = N; join_helper::set_join_node_pointer(my_inputs, this); } @@ -965,11 +978,11 @@ namespace internal { task * 
decrement_port_count(bool handle_task) __TBB_override { if(ports_with_no_items.fetch_and_decrement() == 1) { - if(this->graph_pointer->is_active()) { - task *rtask = new ( task::allocate_additional_child_of( *(this->graph_pointer->root_task()) ) ) + if(internal::is_graph_active(this->graph_ref)) { + task *rtask = new ( task::allocate_additional_child_of( *(this->graph_ref.root_task()) ) ) forward_task_bypass (*my_node); if(!handle_task) return rtask; - FLOW_SPAWN( *rtask); + internal::spawn_in_graph_arena(this->graph_ref, *rtask); } } return NULL; @@ -986,7 +999,7 @@ namespace internal { join_helper::reset_inputs(my_inputs, f ); } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION void extract() { reset_port_count(); join_helper::extract_inputs(my_inputs); @@ -1055,7 +1068,6 @@ namespace internal { // and the output_buffer_type base class private: enum op_type { res_count, inc_count, may_succeed, try_make }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; typedef join_node_FE, InputTuple, OutputTuple> class_type; class key_matching_FE_operation : public aggregated_operation { @@ -1084,16 +1096,16 @@ namespace internal { task * fill_output_buffer(unref_key_type &t, bool should_enqueue, bool handle_task) { output_type l_out; task *rtask = NULL; - bool do_fwd = should_enqueue && this->buffer_empty() && this->graph_pointer->is_active(); + bool do_fwd = should_enqueue && this->buffer_empty() && internal::is_graph_active(this->graph_ref); this->current_key = t; this->delete_with_key(this->current_key); // remove the key if(join_helper::get_items(my_inputs, l_out)) { // <== call back this->push_back(l_out); if(do_fwd) { // we enqueue if receiving an item from predecessor, not if successor asks for item - rtask = new ( task::allocate_additional_child_of( *(this->graph_pointer->root_task()) ) ) + rtask = new ( task::allocate_additional_child_of( *(this->graph_ref.root_task()) ) ) forward_task_bypass(*my_node); if(handle_task) { - FLOW_SPAWN(*rtask); + 
internal::spawn_in_graph_arena(this->graph_ref, *rtask); rtask = NULL; } do_fwd = false; @@ -1167,7 +1179,7 @@ namespace internal { this->set_key_func(cfb); } - join_node_FE(const join_node_FE& other) : forwarding_base_type(*(other.forwarding_base_type::graph_pointer)), key_to_count_buffer_type(), + join_node_FE(const join_node_FE& other) : forwarding_base_type((other.forwarding_base_type::graph_ref)), key_to_count_buffer_type(), output_buffer_type() { my_node = NULL; join_helper::set_join_node_pointer(my_inputs, this); @@ -1210,7 +1222,7 @@ namespace internal { output_buffer_type::reset(); } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION void extract() { // called outside of parallel contexts join_helper::extract_inputs(my_inputs); @@ -1262,7 +1274,7 @@ namespace internal { using input_ports_type::try_to_make_tuple; using input_ports_type::tuple_accepted; using input_ports_type::tuple_rejected; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef typename sender::built_successors_type built_successors_type; typedef typename sender::successor_list_type successor_list_type; #endif @@ -1270,11 +1282,10 @@ namespace internal { private: // ----------- Aggregator ------------ enum op_type { reg_succ, rem_succ, try__get, do_fwrd, do_fwrd_bypass -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION , add_blt_succ, del_blt_succ, blt_succ_cnt, blt_succ_cpy #endif }; - enum op_stat {WAIT=0, SUCCEEDED, FAILED}; typedef join_node_base class_type; class join_node_base_operation : public aggregated_operation { @@ -1283,7 +1294,7 @@ namespace internal { union { output_type *my_arg; successor_type *my_succ; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION size_t cnt_val; successor_list_type *slist; #endif @@ -1309,11 +1320,11 @@ namespace internal { switch(current->type) { case reg_succ: { my_successors.register_successor(*(current->my_succ)); - 
if(tuple_build_may_succeed() && !forwarder_busy && this->graph_node::my_graph.is_active()) { - task *rtask = new ( task::allocate_additional_child_of(*(this->graph_node::my_graph.root_task())) ) + if(tuple_build_may_succeed() && !forwarder_busy && internal::is_graph_active(my_graph)) { + task *rtask = new ( task::allocate_additional_child_of(*(my_graph.root_task())) ) forward_task_bypass >(*this); - FLOW_SPAWN(*rtask); + internal::spawn_in_graph_arena(my_graph, *rtask); forwarder_busy = true; } __TBB_store_with_release(current->status, SUCCEEDED); @@ -1342,7 +1353,7 @@ namespace internal { build_succeeded = try_to_make_tuple(out); // fetch front_end of queue if(build_succeeded) { task *new_task = my_successors.try_put_task(out); - last_task = combine_tasks(last_task, new_task); + last_task = combine_tasks(my_graph, last_task, new_task); if(new_task) { tuple_accepted(); } @@ -1358,7 +1369,7 @@ namespace internal { forwarder_busy = false; } break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION case add_blt_succ: my_successors.internal_add_built_successor(*(current->my_succ)); __TBB_store_with_release(current->status, SUCCEEDED); @@ -1375,7 +1386,7 @@ namespace internal { my_successors.copy_successors(*(current->slist)); __TBB_store_with_release(current->status, SUCCEEDED); break; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ } } } @@ -1420,7 +1431,7 @@ namespace internal { return op_data.status == SUCCEEDED; } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION built_successors_type &built_successors() __TBB_override { return my_successors.built_successors(); } void internal_add_built_successor( successor_type &r) __TBB_override { @@ -1444,9 +1455,9 @@ namespace internal { op_data.slist = &l; my_aggregator.execute(&op_data); } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if 
TBB_DEPRECATED_FLOW_NODE_EXTRACTION void extract() __TBB_override { input_ports_type::extract(); my_successors.built_successors().sender_extract(*this); diff --git a/inst/include/tbb/internal/_flow_graph_node_impl.h b/inst/include/tbb/internal/_flow_graph_node_impl.h index eeff72bc..744c39e7 100644 --- a/inst/include/tbb/internal/_flow_graph_node_impl.h +++ b/inst/include/tbb/internal/_flow_graph_node_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB__flow_graph_node_impl_H @@ -61,15 +57,15 @@ namespace internal { //! Input and scheduling for a function node that takes a type Input as input // The only up-ref is apply_body_impl, which should implement the function // call and any handling of the result. 
- template< typename Input, typename A, typename ImplType > + template< typename Input, typename Policy, typename A, typename ImplType > class function_input_base : public receiver, tbb::internal::no_assign { - enum op_type {reg_pred, rem_pred, app_body, try_fwd, tryput_bypass, app_body_bypass -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES + enum op_type {reg_pred, rem_pred, try_fwd, tryput_bypass, app_body_bypass, occupy_concurrency +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION , add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy // create vector copies of preds and succs #endif }; - typedef function_input_base class_type; + typedef function_input_base class_type; public: @@ -79,25 +75,32 @@ namespace internal { typedef predecessor_cache predecessor_cache_type; typedef function_input_queue input_queue_type; typedef typename A::template rebind< input_queue_type >::other queue_allocator_type; + __TBB_STATIC_ASSERT(!((internal::has_policy::value) && (internal::has_policy::value)), + "queueing and rejecting policies can't be specified simultaneously"); -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef typename predecessor_cache_type::built_predecessors_type built_predecessors_type; typedef typename receiver::predecessor_list_type predecessor_list_type; #endif //! Constructor for function_input_base - function_input_base( graph &g, size_t max_concurrency, input_queue_type *q = NULL) - : my_graph_ptr(&g), my_max_concurrency(max_concurrency), my_concurrency(0), - my_queue(q), forwarder_busy(false) { + function_input_base( + graph &g, __TBB_FLOW_GRAPH_PRIORITY_ARG1(size_t max_concurrency, node_priority_t priority) + ) : my_graph_ref(g), my_max_concurrency(max_concurrency) + , __TBB_FLOW_GRAPH_PRIORITY_ARG1(my_concurrency(0), my_priority(priority)) + , my_queue(!internal::has_policy::value ? new input_queue_type() : NULL) + , forwarder_busy(false) + { my_predecessors.set_owner(this); my_aggregator.initialize_handler(handler_type(this)); } //! 
Copy constructor - function_input_base( const function_input_base& src, input_queue_type *q = NULL) : - receiver(), tbb::internal::no_assign(), - my_graph_ptr(src.my_graph_ptr), my_max_concurrency(src.my_max_concurrency), - my_concurrency(0), my_queue(q), forwarder_busy(false) + function_input_base( const function_input_base& src) + : receiver(), tbb::internal::no_assign() + , my_graph_ref(src.my_graph_ref), my_max_concurrency(src.my_max_concurrency) + , __TBB_FLOW_GRAPH_PRIORITY_ARG1(my_concurrency(0), my_priority(src.my_priority)) + , my_queue(src.my_queue ? new input_queue_type() : NULL), forwarder_busy(false) { my_predecessors.set_owner(this); my_aggregator.initialize_handler(handler_type(this)); @@ -111,18 +114,8 @@ namespace internal { if ( my_queue ) delete my_queue; } - //! Put to the node, returning a task if available - task * try_put_task( const input_type &t ) __TBB_override { - if ( my_max_concurrency == 0 ) { - return create_body_task( t ); - } else { - operation_type op_data(t, tryput_bypass); - my_aggregator.execute(&op_data); - if(op_data.status == internal::SUCCEEDED) { - return op_data.bypass_t; - } - return NULL; - } + task* try_put_task( const input_type& t) __TBB_override { + return try_put_task_impl(t, internal::has_policy()); } //! Adds src to the list of cached predecessors. @@ -141,7 +134,7 @@ namespace internal { return true; } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION //! 
Adds to list of predecessors added by make_edge void internal_add_built_predecessor( predecessor_type &src) __TBB_override { operation_type op_data(add_blt_pred); @@ -171,7 +164,7 @@ namespace internal { built_predecessors_type &built_predecessors() __TBB_override { return my_predecessors.built_predecessors(); } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ protected: @@ -184,9 +177,10 @@ namespace internal { forwarder_busy = false; } - graph* my_graph_ptr; + graph& my_graph_ref; const size_t my_max_concurrency; size_t my_concurrency; + __TBB_FLOW_GRAPH_PRIORITY_EXPR( node_priority_t my_priority; ) input_queue_type *my_queue; predecessor_cache my_predecessors; @@ -197,6 +191,16 @@ namespace internal { __TBB_ASSERT(!(f & rf_clear_edges) || my_predecessors.empty(), "function_input_base reset failed"); } + graph& graph_reference() const __TBB_override { + return my_graph_ref; + } + + task* try_get_postponed_task(const input_type& i) { + operation_type op_data(i, app_body_bypass); // tries to pop an item or get_item + my_aggregator.execute(&op_data); + return op_data.bypass_t; + } + private: friend class apply_body_task_bypass< class_type, input_type >; @@ -208,10 +212,10 @@ namespace internal { union { input_type *elem; predecessor_type *r; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION size_t cnt_val; predecessor_list_type *predv; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ }; tbb::task *bypass_t; operation_type(const input_type& e, op_type t) : @@ -224,7 +228,7 @@ namespace internal { friend class internal::aggregating_functor; aggregator< handler_type, operation_type > my_aggregator; - task* create_and_spawn_task(bool spawn) { + task* perform_queued_requests() { task* new_task = NULL; if(my_queue) { if(!my_queue->empty()) { @@ -241,13 +245,6 @@ namespace internal { new_task = create_body_task(i); } } - //! 
Spawns a task that applies a body - // task == NULL => g.reset(), which shouldn't occur in concurrent context - if(spawn && new_task) { - FLOW_SPAWN(*new_task); - new_task = SUCCESSFULLY_ENQUEUED; - } - return new_task; } void handle_operations(operation_type *op_list) { @@ -268,27 +265,27 @@ namespace internal { my_predecessors.remove(*(tmp->r)); __TBB_store_with_release(tmp->status, SUCCEEDED); break; - case app_body: - __TBB_ASSERT(my_max_concurrency != 0, NULL); - --my_concurrency; - __TBB_store_with_release(tmp->status, SUCCEEDED); - if (my_concurrencybypass_t = NULL; __TBB_ASSERT(my_max_concurrency != 0, NULL); --my_concurrency; if(my_concurrencybypass_t = create_and_spawn_task(/*spawn=*/false); + tmp->bypass_t = perform_queued_requests(); __TBB_store_with_release(tmp->status, SUCCEEDED); } break; case tryput_bypass: internal_try_put_task(tmp); break; case try_fwd: internal_forward(tmp); break; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES + case occupy_concurrency: + if (my_concurrency < my_max_concurrency) { + ++my_concurrency; + __TBB_store_with_release(tmp->status, SUCCEEDED); + } else { + __TBB_store_with_release(tmp->status, FAILED); + } + break; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION case add_blt_pred: { my_predecessors.internal_add_built_predecessor(*(tmp->r)); __TBB_store_with_release(tmp->status, SUCCEEDED); @@ -306,7 +303,7 @@ namespace internal { my_predecessors.copy_predecessors( *(tmp->predv) ); __TBB_store_with_release(tmp->status, SUCCEEDED); break; -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ } } } @@ -328,11 +325,11 @@ namespace internal { } } - //! Tries to spawn bodies if available and if concurrency allows + //! 
Creates tasks for postponed messages if available and if concurrency allows void internal_forward(operation_type *op) { op->bypass_t = NULL; if (my_concurrency < my_max_concurrency || !my_max_concurrency) - op->bypass_t = create_and_spawn_task(/*spawn=*/false); + op->bypass_t = perform_queued_requests(); if(op->bypass_t) __TBB_store_with_release(op->status, SUCCEEDED); else { @@ -341,81 +338,108 @@ namespace internal { } } + task* internal_try_put_bypass( const input_type& t ) { + operation_type op_data(t, tryput_bypass); + my_aggregator.execute(&op_data); + if( op_data.status == internal::SUCCEEDED ) { + return op_data.bypass_t; + } + return NULL; + } + + task* try_put_task_impl( const input_type& t, /*lightweight=*/tbb::internal::true_type ) { + if( my_max_concurrency == 0 ) { + return apply_body_bypass(t); + } else { + operation_type check_op(t, occupy_concurrency); + my_aggregator.execute(&check_op); + if( check_op.status == internal::SUCCEEDED ) { + return apply_body_bypass(t); + } + return internal_try_put_bypass(t); + } + } + + task* try_put_task_impl( const input_type& t, /*lightweight=*/tbb::internal::false_type ) { + if( my_max_concurrency == 0 ) { + return create_body_task(t); + } else { + return internal_try_put_bypass(t); + } + } + //! Applies the body to the provided input // then decides if more work is available - task * apply_body_bypass( input_type &i ) { - task * new_task = static_cast(this)->apply_body_impl_bypass(i); - if ( my_max_concurrency != 0 ) { - operation_type op_data(app_body_bypass); // tries to pop an item or get_item, enqueues another apply_body - my_aggregator.execute(&op_data); - tbb::task *ttask = op_data.bypass_t; - new_task = combine_tasks(new_task, ttask); - } - return new_task; + task * apply_body_bypass( const input_type &i ) { + return static_cast(this)->apply_body_impl_bypass(i); } //! allocates a task to apply a body inline task * create_body_task( const input_type &input ) { - - return (my_graph_ptr->is_active()) ? 
- new(task::allocate_additional_child_of(*(my_graph_ptr->root_task()))) - apply_body_task_bypass < class_type, input_type >(*this, input) : - NULL; + return (internal::is_graph_active(my_graph_ref)) ? + new( task::allocate_additional_child_of(*(my_graph_ref.root_task())) ) + apply_body_task_bypass < class_type, input_type >( + *this, __TBB_FLOW_GRAPH_PRIORITY_ARG1(input, my_priority)) + : NULL; } //! This is executed by an enqueued task, the "forwarder" - task *forward_task() { + task* forward_task() { operation_type op_data(try_fwd); - task *rval = NULL; + task* rval = NULL; do { op_data.status = WAIT; my_aggregator.execute(&op_data); if(op_data.status == SUCCEEDED) { - tbb::task *ttask = op_data.bypass_t; - rval = combine_tasks(rval, ttask); + task* ttask = op_data.bypass_t; + __TBB_ASSERT( ttask && ttask != SUCCESSFULLY_ENQUEUED, NULL ); + rval = combine_tasks(my_graph_ref, rval, ttask); } } while (op_data.status == SUCCEEDED); return rval; } inline task *create_forward_task() { - return (my_graph_ptr->is_active()) ? - new(task::allocate_additional_child_of(*(my_graph_ptr->root_task()))) forward_task_bypass< class_type >(*this) : - NULL; + return (internal::is_graph_active(my_graph_ref)) ? + new( task::allocate_additional_child_of(*(my_graph_ref.root_task())) ) + forward_task_bypass< class_type >( __TBB_FLOW_GRAPH_PRIORITY_ARG1(*this, my_priority) ) + : NULL; } //! Spawns a task that calls forward() inline void spawn_forward_task() { task* tp = create_forward_task(); if(tp) { - FLOW_SPAWN(*tp); + internal::spawn_in_graph_arena(graph_reference(), *tp); } } }; // function_input_base //! Implements methods for a function node that takes a type Input as input and sends // a type Output to its successors. 
- template< typename Input, typename Output, typename A> - class function_input : public function_input_base > { + template< typename Input, typename Output, typename Policy, typename A> + class function_input : public function_input_base > { public: typedef Input input_type; typedef Output output_type; typedef function_body function_body_type; - typedef function_input my_class; - typedef function_input_base base_type; + typedef function_input my_class; + typedef function_input_base base_type; typedef function_input_queue input_queue_type; // constructor template - function_input( graph &g, size_t max_concurrency, Body& body, input_queue_type *q = NULL ) : - base_type(g, max_concurrency, q), - my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ), - my_init_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) { + function_input( + graph &g, size_t max_concurrency, + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Body& body, node_priority_t priority) + ) : base_type(g, __TBB_FLOW_GRAPH_PRIORITY_ARG1(max_concurrency, priority)) + , my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) + , my_init_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) { } //! 
Copy constructor - function_input( const function_input& src, input_queue_type *q = NULL ) : - base_type(src, q), + function_input( const function_input& src ) : + base_type(src), my_body( src.my_init_body->clone() ), my_init_body(src.my_init_body->clone() ) { } @@ -431,18 +455,52 @@ namespace internal { return dynamic_cast< internal::function_body_leaf & >(body_ref).get_body(); } - task * apply_body_impl_bypass( const input_type &i) { -#if TBB_PREVIEW_FLOW_GRAPH_TRACE + output_type apply_body_impl( const input_type& i) { // There is an extra copied needed to capture the // body execution without the try_put tbb::internal::fgt_begin_body( my_body ); output_type v = (*my_body)(i); tbb::internal::fgt_end_body( my_body ); - task * new_task = successors().try_put_task( v ); + return v; + } + + //TODO: consider moving into the base class + task * apply_body_impl_bypass( const input_type &i) { + output_type v = apply_body_impl(i); +#if TBB_DEPRECATED_MESSAGE_FLOW_ORDER + task* successor_task = successors().try_put_task(v); +#endif + task* postponed_task = NULL; + if( base_type::my_max_concurrency != 0 ) { + postponed_task = base_type::try_get_postponed_task(i); + __TBB_ASSERT( !postponed_task || postponed_task != SUCCESSFULLY_ENQUEUED, NULL ); + } +#if TBB_DEPRECATED_MESSAGE_FLOW_ORDER + graph& g = base_type::my_graph_ref; + return combine_tasks(g, successor_task, postponed_task); #else - task * new_task = successors().try_put_task( (*my_body)(i) ); + if( postponed_task ) { + // make the task available for other workers since we do not know successors' + // execution policy + internal::spawn_in_graph_arena(base_type::graph_reference(), *postponed_task); + } + task* successor_task = successors().try_put_task(v); +#if _MSC_VER && !__INTEL_COMPILER +#pragma warning (push) +#pragma warning (disable: 4127) /* suppress conditional expression is constant */ #endif - return new_task; + if(internal::has_policy::value) { +#if _MSC_VER && !__INTEL_COMPILER +#pragma warning (pop) 
+#endif + if(!successor_task) { + // Return confirmative status since current + // node's body has been executed anyway + successor_task = SUCCESSFULLY_ENQUEUED; + } + } + return successor_task; +#endif /* TBB_DEPRECATED_MESSAGE_FLOW_ORDER */ } protected: @@ -485,7 +543,7 @@ namespace internal { } }; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION // helper templates to extract the output ports of an multifunction_node from graph template struct extract_element { template static void extract_this(P &p) { @@ -501,36 +559,108 @@ namespace internal { }; #endif + template + struct init_output_ports { +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + template + static OutputTuple call(graph& g, const tbb::flow::tuple&) { + return OutputTuple(Args(g)...); + } +#else // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + template + static OutputTuple call(graph& g, const tbb::flow::tuple&) { + return OutputTuple(T1(g)); + } + + template + static OutputTuple call(graph& g, const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g)); + } + + template + static OutputTuple call(graph& g, const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g), T3(g)); + } + + template + static OutputTuple call(graph& g, const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g), T3(g), T4(g)); + } + + template + static OutputTuple call(graph& g, const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g), T3(g), T4(g), T5(g)); + } +#if __TBB_VARIADIC_MAX >= 6 + template + static OutputTuple call(graph& g, const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g), T3(g), T4(g), T5(g), T6(g)); + } +#endif +#if __TBB_VARIADIC_MAX >= 7 + template + static OutputTuple call(graph& g, + const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g), T3(g), T4(g), T5(g), T6(g), T7(g)); + } +#endif +#if __TBB_VARIADIC_MAX >= 8 + template + static OutputTuple call(graph& g, + const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g), T3(g), T4(g), T5(g), T6(g), T7(g), 
T8(g)); + } +#endif +#if __TBB_VARIADIC_MAX >= 9 + template + static OutputTuple call(graph& g, + const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g), T3(g), T4(g), T5(g), T6(g), T7(g), T8(g), T9(g)); + } +#endif +#if __TBB_VARIADIC_MAX >= 9 + template + static OutputTuple call(graph& g, + const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g), T3(g), T4(g), T5(g), T6(g), T7(g), T8(g), T9(g), T10(g)); + } +#endif +#endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + }; // struct init_output_ports + //! Implements methods for a function node that takes a type Input as input // and has a tuple of output ports specified. - template< typename Input, typename OutputPortSet, typename A> - class multifunction_input : public function_input_base > { + template< typename Input, typename OutputPortSet, typename Policy, typename A> + class multifunction_input : public function_input_base > { public: static const int N = tbb::flow::tuple_size::value; typedef Input input_type; typedef OutputPortSet output_ports_type; typedef multifunction_body multifunction_body_type; - typedef multifunction_input my_class; - typedef function_input_base base_type; + typedef multifunction_input my_class; + typedef function_input_base base_type; typedef function_input_queue input_queue_type; // constructor template - multifunction_input( - graph &g, - size_t max_concurrency, - Body& body, - input_queue_type *q = NULL ) : - base_type(g, max_concurrency, q), - my_body( new internal::multifunction_body_leaf(body) ), - my_init_body( new internal::multifunction_body_leaf(body) ) { + multifunction_input(graph &g, size_t max_concurrency, + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Body& body, node_priority_t priority) + ) : base_type(g, __TBB_FLOW_GRAPH_PRIORITY_ARG1(max_concurrency, priority)) + , my_body( new internal::multifunction_body_leaf(body) ) + , my_init_body( new internal::multifunction_body_leaf(body) ) + , my_output_ports(init_output_ports::call(g, my_output_ports)){ } //! 
Copy constructor - multifunction_input( const multifunction_input& src, input_queue_type *q = NULL ) : - base_type(src, q), + multifunction_input( const multifunction_input& src ) : + base_type(src), my_body( src.my_init_body->clone() ), - my_init_body(src.my_init_body->clone() ) { + my_init_body(src.my_init_body->clone() ), + my_output_ports( init_output_ports::call(src.my_graph_ref, my_output_ports) ) { } ~multifunction_input() { @@ -546,18 +676,22 @@ namespace internal { // for multifunction nodes we do not have a single successor as such. So we just tell // the task we were successful. + //TODO: consider moving common parts with implementation in function_input into separate function task * apply_body_impl_bypass( const input_type &i) { tbb::internal::fgt_begin_body( my_body ); (*my_body)(i, my_output_ports); tbb::internal::fgt_end_body( my_body ); - task * new_task = SUCCESSFULLY_ENQUEUED; - return new_task; + task* ttask = NULL; + if(base_type::my_max_concurrency != 0) { + ttask = base_type::try_get_postponed_task(i); + } + return ttask ? 
ttask : SUCCESSFULLY_ENQUEUED; } output_ports_type &output_ports(){ return my_output_ports; } protected: -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION void extract() { extract_element::extract_this(my_output_ports); } @@ -586,26 +720,36 @@ namespace internal { return tbb::flow::get(op.output_ports()); } -// helper structs for split_node + inline void check_task_and_spawn(graph& g, task* t) { + if (t && t != SUCCESSFULLY_ENQUEUED) { + internal::spawn_in_graph_arena(g, *t); + } + } + + // helper structs for split_node template struct emit_element { template - static void emit_this(const T &t, P &p) { - (void)tbb::flow::get(p).try_put(tbb::flow::get(t)); - emit_element::emit_this(t,p); + static task* emit_this(graph& g, const T &t, P &p) { + // TODO: consider to collect all the tasks in task_list and spawn them all at once + task* last_task = tbb::flow::get(p).try_put_task(tbb::flow::get(t)); + check_task_and_spawn(g, last_task); + return emit_element::emit_this(g,t,p); } }; template<> struct emit_element<1> { template - static void emit_this(const T &t, P &p) { - (void)tbb::flow::get<0>(p).try_put(tbb::flow::get<0>(t)); + static task* emit_this(graph& g, const T &t, P &p) { + task* last_task = tbb::flow::get<0>(p).try_put_task(tbb::flow::get<0>(t)); + check_task_and_spawn(g, last_task); + return SUCCESSFULLY_ENQUEUED; } }; //! Implements methods for an executable node that takes continue_msg as input - template< typename Output > + template< typename Output, typename Policy> class continue_input : public continue_receiver { public: @@ -615,22 +759,27 @@ namespace internal { //! 
The output type of this receiver typedef Output output_type; typedef function_body function_body_type; + typedef continue_input class_type; template< typename Body > - continue_input( graph &g, Body& body ) - : my_graph_ptr(&g), - my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ), - my_init_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) { } + continue_input( graph &g, __TBB_FLOW_GRAPH_PRIORITY_ARG1(Body& body, node_priority_t priority) ) + : continue_receiver(__TBB_FLOW_GRAPH_PRIORITY_ARG1(/*number_of_predecessors=*/0, priority)) + , my_graph_ref(g) + , my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) + , my_init_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) + { } template< typename Body > - continue_input( graph &g, int number_of_predecessors, Body& body ) - : continue_receiver( number_of_predecessors ), my_graph_ptr(&g), - my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ), - my_init_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) + continue_input( graph &g, int number_of_predecessors, + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Body& body, node_priority_t priority) + ) : continue_receiver( __TBB_FLOW_GRAPH_PRIORITY_ARG1(number_of_predecessors, priority) ) + , my_graph_ref(g) + , my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) + , my_init_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) { } continue_input( const continue_input& src ) : continue_receiver(src), - my_graph_ptr(src.my_graph_ptr), + my_graph_ref(src.my_graph_ref), my_body( src.my_init_body->clone() ), my_init_body( src.my_init_body->clone() ) {} @@ -656,36 +805,48 @@ namespace internal { protected: - graph* my_graph_ptr; + graph& my_graph_ref; function_body_type *my_body; function_body_type *my_init_body; virtual broadcast_cache &successors() = 0; - friend 
class apply_body_task_bypass< continue_input< Output >, continue_msg >; + friend class apply_body_task_bypass< class_type, continue_msg >; //! Applies the body to the provided input task *apply_body_bypass( input_type ) { -#if TBB_PREVIEW_FLOW_GRAPH_TRACE // There is an extra copied needed to capture the // body execution without the try_put tbb::internal::fgt_begin_body( my_body ); output_type v = (*my_body)( continue_msg() ); tbb::internal::fgt_end_body( my_body ); return successors().try_put_task( v ); -#else - return successors().try_put_task( (*my_body)( continue_msg() ) ); -#endif } - //! Spawns a task that applies the body - task *execute( ) __TBB_override { - return (my_graph_ptr->is_active()) ? - new ( task::allocate_additional_child_of( *(my_graph_ptr->root_task()) ) ) - apply_body_task_bypass< continue_input< Output >, continue_msg >( *this, continue_msg() ) : - NULL; + task* execute() __TBB_override { + if(!internal::is_graph_active(my_graph_ref)) { + return NULL; + } +#if _MSC_VER && !__INTEL_COMPILER +#pragma warning (push) +#pragma warning (disable: 4127) /* suppress conditional expression is constant */ +#endif + if(internal::has_policy::value) { +#if _MSC_VER && !__INTEL_COMPILER +#pragma warning (pop) +#endif + return apply_body_bypass( continue_msg() ); + } + else { + return new ( task::allocate_additional_child_of( *(my_graph_ref.root_task()) ) ) + apply_body_task_bypass< class_type, continue_msg >( + *this, __TBB_FLOW_GRAPH_PRIORITY_ARG1(continue_msg(), my_priority) ); + } } + graph& graph_reference() const __TBB_override { + return my_graph_ref; + } }; // continue_input //! 
Implements methods for both executable and function nodes that puts Output to its successors @@ -697,13 +858,13 @@ namespace internal { typedef Output output_type; typedef typename sender::successor_type successor_type; typedef broadcast_cache broadcast_cache_type; -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION typedef typename sender::built_successors_type built_successors_type; typedef typename sender::successor_list_type successor_list_type; #endif - function_output() { my_successors.set_owner(this); } - function_output(const function_output & /*other*/) : sender() { + function_output( graph& g) : my_graph_ref(g) { my_successors.set_owner(this); } + function_output(const function_output & other) : sender(), my_graph_ref(other.my_graph_ref) { my_successors.set_owner(this); } @@ -719,7 +880,7 @@ namespace internal { return true; } -#if TBB_PREVIEW_FLOW_GRAPH_FEATURES +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION built_successors_type &built_successors() __TBB_override { return successors().built_successors(); } @@ -738,7 +899,7 @@ namespace internal { void copy_successors( successor_list_type &v) __TBB_override { successors().copy_successors(v); } -#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */ +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ // for multifunction_node. The function_body that implements // the node will have an input and an output tuple of ports. 
To put @@ -753,9 +914,11 @@ namespace internal { } broadcast_cache_type &successors() { return my_successors; } + + graph& graph_reference() const { return my_graph_ref; } protected: broadcast_cache_type my_successors; - + graph& my_graph_ref; }; // function_output template< typename Output > @@ -765,19 +928,32 @@ namespace internal { typedef function_output base_type; using base_type::my_successors; - multifunction_output() : base_type() {my_successors.set_owner(this);} - multifunction_output( const multifunction_output &/*other*/) : base_type() { my_successors.set_owner(this); } + multifunction_output(graph& g) : base_type(g) {my_successors.set_owner(this);} + multifunction_output( const multifunction_output& other) : base_type(other.my_graph_ref) { my_successors.set_owner(this); } bool try_put(const output_type &i) { - task *res = my_successors.try_put_task(i); + task *res = try_put_task(i); if(!res) return false; - if(res != SUCCESSFULLY_ENQUEUED) FLOW_SPAWN(*res); + if(res != SUCCESSFULLY_ENQUEUED) { + FLOW_SPAWN(*res); // TODO: Spawn task inside arena + } return true; } + + using base_type::graph_reference; + + protected: + + task* try_put_task(const output_type &i) { + return my_successors.try_put_task(i); + } + + template friend struct emit_element; + }; // multifunction_output //composite_node -#if TBB_PREVIEW_FLOW_GRAPH_TRACE && __TBB_FLOW_GRAPH_CPP11_FEATURES +#if __TBB_FLOW_GRAPH_CPP11_FEATURES template void add_nodes_impl(CompositeType*, bool) {} @@ -785,10 +961,7 @@ namespace internal { void add_nodes_impl(CompositeType *c_node, bool visible, const NodeType1& n1, const NodeTypes&... 
n) { void *addr = const_cast(&n1); - if(visible) - tbb::internal::itt_relation_add( tbb::internal::ITT_DOMAIN_FLOW, c_node, tbb::internal::FLOW_NODE, tbb::internal::__itt_relation_is_parent_of, addr, tbb::internal::FLOW_NODE ); - else - tbb::internal::itt_relation_add( tbb::internal::ITT_DOMAIN_FLOW, addr, tbb::internal::FLOW_NODE, tbb::internal::__itt_relation_is_child_of, c_node, tbb::internal::FLOW_NODE ); + fgt_alias_port(c_node, addr, visible); add_nodes_impl(c_node, visible, n...); } #endif diff --git a/inst/include/tbb/internal/_flow_graph_streaming_node.h b/inst/include/tbb/internal/_flow_graph_streaming_node.h index 493f76e8..abb8c139 100644 --- a/inst/include/tbb/internal/_flow_graph_streaming_node.h +++ b/inst/include/tbb/internal/_flow_graph_streaming_node.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_flow_graph_streaming_H @@ -466,6 +462,7 @@ class streaming_node< tuple, JP, StreamFactory > device_selector_base *my_device_selector; }; + // TODO: investigate why copy-construction is disallowed class args_storage_base : tbb::internal::no_copy { public: typedef typename kernel_multifunction_node::output_ports_type output_ports_type; @@ -481,7 +478,7 @@ class streaming_node< tuple, JP, StreamFactory > {} args_storage_base( const args_storage_base &k ) - : my_kernel( k.my_kernel ), my_factory( k.my_factory ) + : tbb::internal::no_copy(), my_kernel( k.my_kernel ), my_factory( k.my_factory ) {} const kernel_type my_kernel; diff --git a/inst/include/tbb/internal/_flow_graph_tagged_buffer_impl.h b/inst/include/tbb/internal/_flow_graph_tagged_buffer_impl.h index 46755fe0..92291129 100644 --- a/inst/include/tbb/internal/_flow_graph_tagged_buffer_impl.h +++ b/inst/include/tbb/internal/_flow_graph_tagged_buffer_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ // a hash table buffer that can expand, and can support as many deletions as diff --git a/inst/include/tbb/internal/_flow_graph_trace_impl.h b/inst/include/tbb/internal/_flow_graph_trace_impl.h index b798888d..65809c39 100644 --- a/inst/include/tbb/internal/_flow_graph_trace_impl.h +++ b/inst/include/tbb/internal/_flow_graph_trace_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,34 +12,119 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef _FGT_GRAPH_TRACE_IMPL_H #define _FGT_GRAPH_TRACE_IMPL_H #include "../tbb_profiling.h" +#if (_MSC_VER >= 1900) + #include +#endif namespace tbb { namespace internal { +#if TBB_USE_THREADING_TOOLS + #if TBB_PREVIEW_FLOW_GRAPH_TRACE + #if (_MSC_VER >= 1900) + #define CODEPTR() (_ReturnAddress()) + #elif __TBB_GCC_VERSION >= 40800 + #define CODEPTR() ( __builtin_return_address(0)) + #else + #define CODEPTR() NULL + #endif + #else + #define CODEPTR() NULL + #endif /* TBB_PREVIEW_FLOW_GRAPH_TRACE */ + +static inline void fgt_alias_port(void *node, void *p, bool visible) { + if(visible) + itt_relation_add( ITT_DOMAIN_FLOW, node, FLOW_NODE, __itt_relation_is_parent_of, p, FLOW_NODE ); + else + itt_relation_add( ITT_DOMAIN_FLOW, p, FLOW_NODE, __itt_relation_is_child_of, node, FLOW_NODE ); +} + +static inline void fgt_composite ( void* codeptr, void *node, void *graph ) { + itt_make_task_group( ITT_DOMAIN_FLOW, node, FLOW_NODE, graph, FLOW_GRAPH, FLOW_COMPOSITE_NODE ); + suppress_unused_warning( codeptr ); #if TBB_PREVIEW_FLOW_GRAPH_TRACE + if (codeptr != NULL) { + register_node_addr(ITT_DOMAIN_FLOW, node, FLOW_NODE, CODE_ADDRESS, &codeptr); + } +#endif +} -static inline void fgt_internal_create_input_port( void *node, void *p, string_index name_index ) { +static inline void fgt_internal_alias_input_port( void *node, void *p, string_index name_index ) { itt_make_task_group( ITT_DOMAIN_FLOW, p, FLOW_INPUT_PORT, node, FLOW_NODE, name_index ); + itt_relation_add( ITT_DOMAIN_FLOW, node, FLOW_NODE, __itt_relation_is_parent_of, p, FLOW_INPUT_PORT ); } -static inline void fgt_internal_create_output_port( void *node, void *p, string_index name_index ) { +static inline void fgt_internal_alias_output_port( void *node, void *p, string_index name_index ) { itt_make_task_group( 
ITT_DOMAIN_FLOW, p, FLOW_OUTPUT_PORT, node, FLOW_NODE, name_index ); + itt_relation_add( ITT_DOMAIN_FLOW, node, FLOW_NODE, __itt_relation_is_parent_of, p, FLOW_OUTPUT_PORT ); +} + +template +void alias_input_port(void *node, tbb::flow::receiver* port, string_index name_index) { + // TODO: Make fgt_internal_alias_input_port a function template? + fgt_internal_alias_input_port( node, port, name_index); +} + +template < typename PortsTuple, int N > +struct fgt_internal_input_alias_helper { + static void alias_port( void *node, PortsTuple &ports ) { + alias_input_port( node, &(tbb::flow::get(ports)), static_cast(FLOW_INPUT_PORT_0 + N - 1) ); + fgt_internal_input_alias_helper::alias_port( node, ports ); + } +}; + +template < typename PortsTuple > +struct fgt_internal_input_alias_helper { + static void alias_port( void * /* node */, PortsTuple & /* ports */ ) { } +}; + +template +void alias_output_port(void *node, tbb::flow::sender* port, string_index name_index) { + // TODO: Make fgt_internal_alias_output_port a function template? 
+ fgt_internal_alias_output_port( node, static_cast(port), name_index); +} + +template < typename PortsTuple, int N > +struct fgt_internal_output_alias_helper { + static void alias_port( void *node, PortsTuple &ports ) { + alias_output_port( node, &(tbb::flow::get(ports)), static_cast(FLOW_OUTPUT_PORT_0 + N - 1) ); + fgt_internal_output_alias_helper::alias_port( node, ports ); + } +}; + +template < typename PortsTuple > +struct fgt_internal_output_alias_helper { + static void alias_port( void * /*node*/, PortsTuple &/*ports*/ ) { + } +}; + +static inline void fgt_internal_create_input_port( void *node, void *p, string_index name_index ) { + itt_make_task_group( ITT_DOMAIN_FLOW, p, FLOW_INPUT_PORT, node, FLOW_NODE, name_index ); +} + +static inline void fgt_internal_create_output_port( void* codeptr, void *node, void *p, string_index name_index ) { + itt_make_task_group(ITT_DOMAIN_FLOW, p, FLOW_OUTPUT_PORT, node, FLOW_NODE, name_index); + suppress_unused_warning( codeptr ); +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + if (codeptr != NULL) { + register_node_addr(ITT_DOMAIN_FLOW, node, FLOW_NODE, CODE_ADDRESS, &codeptr); + } +#endif } template void register_input_port(void *node, tbb::flow::receiver* port, string_index name_index) { - //TODO: Make fgt_internal_create_input_port a function template? - fgt_internal_create_input_port( node, port, name_index); + // TODO: Make fgt_internal_create_input_port a function template? + // In C++03 dependent name lookup from the template definition context + // works only for function declarations with external linkage: + // http://www.open-std.org/JTC1/SC22/WG21/docs/cwg_defects.html#561 + fgt_internal_create_input_port(node, static_cast(port), name_index); } template < typename PortsTuple, int N > @@ -58,23 +143,23 @@ struct fgt_internal_input_helper { }; template -void register_output_port(void *node, tbb::flow::sender* port, string_index name_index) { - //TODO: Make fgt_internal_create_output_port a function template? 
- fgt_internal_create_output_port( node, static_cast(port), name_index); +void register_output_port(void* codeptr, void *node, tbb::flow::sender* port, string_index name_index) { + // TODO: Make fgt_internal_create_output_port a function template? + fgt_internal_create_output_port( codeptr, node, static_cast(port), name_index); } template < typename PortsTuple, int N > struct fgt_internal_output_helper { - static void register_port( void *node, PortsTuple &ports ) { - register_output_port( node, &(tbb::flow::get(ports)), static_cast(FLOW_OUTPUT_PORT_0 + N - 1) ); - fgt_internal_output_helper::register_port( node, ports ); + static void register_port( void* codeptr, void *node, PortsTuple &ports ) { + register_output_port( codeptr, node, &(tbb::flow::get(ports)), static_cast(FLOW_OUTPUT_PORT_0 + N - 1) ); + fgt_internal_output_helper::register_port( codeptr, node, ports ); } }; template < typename PortsTuple > struct fgt_internal_output_helper { - static void register_port( void *node, PortsTuple &ports ) { - register_output_port( node, &(tbb::flow::get<0>(ports)), FLOW_OUTPUT_PORT_0 ); + static void register_port( void* codeptr, void *node, PortsTuple &ports ) { + register_output_port( codeptr, node, &(tbb::flow::get<0>(ports)), FLOW_OUTPUT_PORT_0 ); } }; @@ -105,52 +190,61 @@ static inline void fgt_body( void *node, void *body ) { } template< int N, typename PortsTuple > -static inline void fgt_multioutput_node( string_index t, void *g, void *input_port, PortsTuple &ports ) { +static inline void fgt_multioutput_node(void* codeptr, string_index t, void *g, void *input_port, PortsTuple &ports ) { itt_make_task_group( ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t ); fgt_internal_create_input_port( input_port, input_port, FLOW_INPUT_PORT_0 ); - fgt_internal_output_helper::register_port( input_port, ports ); + fgt_internal_output_helper::register_port(codeptr, input_port, ports ); } template< int N, typename PortsTuple > -static inline void 
fgt_multioutput_node_with_body( string_index t, void *g, void *input_port, PortsTuple &ports, void *body ) { +static inline void fgt_multioutput_node_with_body( void* codeptr, string_index t, void *g, void *input_port, PortsTuple &ports, void *body ) { itt_make_task_group( ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t ); fgt_internal_create_input_port( input_port, input_port, FLOW_INPUT_PORT_0 ); - fgt_internal_output_helper::register_port( input_port, ports ); + fgt_internal_output_helper::register_port( codeptr, input_port, ports ); fgt_body( input_port, body ); } template< int N, typename PortsTuple > -static inline void fgt_multiinput_node( string_index t, void *g, PortsTuple &ports, void *output_port) { +static inline void fgt_multiinput_node( void* codeptr, string_index t, void *g, PortsTuple &ports, void *output_port) { itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); - fgt_internal_create_output_port( output_port, output_port, FLOW_OUTPUT_PORT_0 ); + fgt_internal_create_output_port( codeptr, output_port, output_port, FLOW_OUTPUT_PORT_0 ); fgt_internal_input_helper::register_port( output_port, ports ); } -static inline void fgt_node( string_index t, void *g, void *output_port ) { +static inline void fgt_multiinput_multioutput_node( void* codeptr, string_index t, void *n, void *g ) { + itt_make_task_group( ITT_DOMAIN_FLOW, n, FLOW_NODE, g, FLOW_GRAPH, t ); + suppress_unused_warning( codeptr ); +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + if (codeptr != NULL) { + register_node_addr(ITT_DOMAIN_FLOW, n, FLOW_NODE, CODE_ADDRESS, &codeptr); + } +#endif +} + +static inline void fgt_node( void* codeptr, string_index t, void *g, void *output_port ) { itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); - fgt_internal_create_output_port( output_port, output_port, FLOW_OUTPUT_PORT_0 ); + fgt_internal_create_output_port( codeptr, output_port, output_port, FLOW_OUTPUT_PORT_0 ); } -static inline void 
fgt_node_with_body( string_index t, void *g, void *output_port, void *body ) { +static void fgt_node_with_body( void* codeptr, string_index t, void *g, void *output_port, void *body ) { itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); - fgt_internal_create_output_port( output_port, output_port, FLOW_OUTPUT_PORT_0 ); + fgt_internal_create_output_port(codeptr, output_port, output_port, FLOW_OUTPUT_PORT_0 ); fgt_body( output_port, body ); } - -static inline void fgt_node( string_index t, void *g, void *input_port, void *output_port ) { - fgt_node( t, g, output_port ); +static inline void fgt_node( void* codeptr, string_index t, void *g, void *input_port, void *output_port ) { + fgt_node( codeptr, t, g, output_port ); fgt_internal_create_input_port( output_port, input_port, FLOW_INPUT_PORT_0 ); } -static inline void fgt_node_with_body( string_index t, void *g, void *input_port, void *output_port, void *body ) { - fgt_node_with_body( t, g, output_port, body ); +static inline void fgt_node_with_body( void* codeptr, string_index t, void *g, void *input_port, void *output_port, void *body ) { + fgt_node_with_body( codeptr, t, g, output_port, body ); fgt_internal_create_input_port( output_port, input_port, FLOW_INPUT_PORT_0 ); } -static inline void fgt_node( string_index t, void *g, void *input_port, void *decrement_port, void *output_port ) { - fgt_node( t, g, input_port, output_port ); +static inline void fgt_node( void* codeptr, string_index t, void *g, void *input_port, void *decrement_port, void *output_port ) { + fgt_node( codeptr, t, g, input_port, output_port ); fgt_internal_create_input_port( output_port, decrement_port, FLOW_INPUT_PORT_1 ); } @@ -186,11 +280,25 @@ static inline void fgt_async_reserve( void *node, void *graph ) { itt_region_begin( ITT_DOMAIN_FLOW, node, FLOW_NODE, graph, FLOW_GRAPH, FLOW_NULL ); } -static inline void fgt_async_commit( void *node, void *graph ) { +static inline void fgt_async_commit( void *node, void 
* /*graph*/) { itt_region_end( ITT_DOMAIN_FLOW, node, FLOW_NODE ); } -#else // TBB_PREVIEW_FLOW_GRAPH_TRACE +static inline void fgt_reserve_wait( void *graph ) { + itt_region_begin( ITT_DOMAIN_FLOW, graph, FLOW_GRAPH, NULL, FLOW_NULL, FLOW_NULL ); +} + +static inline void fgt_release_wait( void *graph ) { + itt_region_end( ITT_DOMAIN_FLOW, graph, FLOW_GRAPH ); +} + +#else // TBB_USE_THREADING_TOOLS + +#define CODEPTR() NULL + +static inline void fgt_alias_port(void * /*node*/, void * /*p*/, bool /*visible*/ ) { } + +static inline void fgt_composite ( void* /*codeptr*/, void * /*node*/, void * /*graph*/ ) { } static inline void fgt_graph( void * /*g*/ ) { } @@ -205,32 +313,50 @@ static inline void fgt_graph_desc( void * /*g*/, const char * /*desc*/ ) { } static inline void fgt_body( void * /*node*/, void * /*body*/ ) { } template< int N, typename PortsTuple > -static inline void fgt_multioutput_node( string_index /*t*/, void * /*g*/, void * /*input_port*/, PortsTuple & /*ports*/ ) { } +static inline void fgt_multioutput_node( void* /*codeptr*/, string_index /*t*/, void * /*g*/, void * /*input_port*/, PortsTuple & /*ports*/ ) { } template< int N, typename PortsTuple > -static inline void fgt_multioutput_node_with_body( string_index /*t*/, void * /*g*/, void * /*input_port*/, PortsTuple & /*ports*/, void * /*body*/ ) { } +static inline void fgt_multioutput_node_with_body( void* /*codeptr*/, string_index /*t*/, void * /*g*/, void * /*input_port*/, PortsTuple & /*ports*/, void * /*body*/ ) { } template< int N, typename PortsTuple > -static inline void fgt_multiinput_node( string_index /*t*/, void * /*g*/, PortsTuple & /*ports*/, void * /*output_port*/ ) { } +static inline void fgt_multiinput_node( void* /*codeptr*/, string_index /*t*/, void * /*g*/, PortsTuple & /*ports*/, void * /*output_port*/ ) { } -static inline void fgt_node( string_index /*t*/, void * /*g*/, void * /*output_port*/ ) { } -static inline void fgt_node( string_index /*t*/, void * /*g*/, void * 
/*input_port*/, void * /*output_port*/ ) { } -static inline void fgt_node( string_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*decrement_port*/, void * /*output_port*/ ) { } +static inline void fgt_multiinput_multioutput_node( void* /*codeptr*/, string_index /*t*/, void * /*node*/, void * /*graph*/ ) { } -static inline void fgt_node_with_body( string_index /*t*/, void * /*g*/, void * /*output_port*/, void * /*body*/ ) { } -static inline void fgt_node_with_body( string_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*output_port*/, void * /*body*/ ) { } +static inline void fgt_node( void* /*codeptr*/, string_index /*t*/, void * /*g*/, void * /*output_port*/ ) { } +static inline void fgt_node( void* /*codeptr*/, string_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*output_port*/ ) { } +static inline void fgt_node( void* /*codeptr*/, string_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*decrement_port*/, void * /*output_port*/ ) { } + +static inline void fgt_node_with_body( void* /*codeptr*/, string_index /*t*/, void * /*g*/, void * /*output_port*/, void * /*body*/ ) { } +static inline void fgt_node_with_body( void* /*codeptr*/, string_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*output_port*/, void * /*body*/ ) { } static inline void fgt_make_edge( void * /*output_port*/, void * /*input_port*/ ) { } static inline void fgt_remove_edge( void * /*output_port*/, void * /*input_port*/ ) { } static inline void fgt_begin_body( void * /*body*/ ) { } static inline void fgt_end_body( void * /*body*/) { } + static inline void fgt_async_try_put_begin( void * /*node*/, void * /*port*/ ) { } static inline void fgt_async_try_put_end( void * /*node*/ , void * /*port*/ ) { } static inline void fgt_async_reserve( void * /*node*/, void * /*graph*/ ) { } static inline void fgt_async_commit( void * /*node*/, void * /*graph*/ ) { } +static inline void fgt_reserve_wait( void * /*graph*/ ) { } +static inline void 
fgt_release_wait( void * /*graph*/ ) { } + +template< typename NodeType > +void fgt_multiinput_multioutput_node_desc( const NodeType * /*node*/, const char * /*desc*/ ) { } + +template < typename PortsTuple, int N > +struct fgt_internal_input_alias_helper { + static void alias_port( void * /*node*/, PortsTuple & /*ports*/ ) { } +}; + +template < typename PortsTuple, int N > +struct fgt_internal_output_alias_helper { + static void alias_port( void * /*node*/, PortsTuple & /*ports*/ ) { } +}; -#endif // TBB_PREVIEW_FLOW_GRAPH_TRACE +#endif // TBB_USE_THREADING_TOOLS } // namespace internal } // namespace tbb diff --git a/inst/include/tbb/internal/_flow_graph_types_impl.h b/inst/include/tbb/internal/_flow_graph_types_impl.h index 73b5f547..f374831b 100644 --- a/inst/include/tbb/internal/_flow_graph_types_impl.h +++ b/inst/include/tbb/internal/_flow_graph_types_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB__flow_graph_types_impl_H @@ -48,7 +44,7 @@ namespace internal { typedef KHashp KHash; }; -// wrap each element of a tuple in a template, and make a tuple of the result. + // wrap each element of a tuple in a template, and make a tuple of the result. template class PT, typename TypeTuple> struct wrap_tuple_elements; @@ -57,6 +53,19 @@ namespace internal { template class PT, typename KeyTraits, typename TypeTuple> struct wrap_key_tuple_elements; +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_VARIADIC_TUPLE_PRESENT + template class PT, typename... Args> + struct wrap_tuple_elements >{ + typedef typename tbb::flow::tuple... 
> type; + }; + + template class PT, typename KeyTraits, typename... Args> + struct wrap_key_tuple_elements > { + typedef typename KeyTraits::key_type K; + typedef typename KeyTraits::hash_compare_type KHash; + typedef typename tbb::flow::tuple >... > type; + }; +#else template class PT, typename TypeTuple> struct wrap_tuple_elements<1, PT, TypeTuple> { typedef typename tbb::flow::tuple< @@ -314,6 +323,7 @@ namespace internal { PT > type; }; #endif +#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_VARIADIC_TUPLE_PRESENT */ #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT template< int... S > class sequence {}; diff --git a/inst/include/tbb/internal/_mutex_padding.h b/inst/include/tbb/internal/_mutex_padding.h index 6c1d9b59..d26f5f48 100644 --- a/inst/include/tbb/internal/_mutex_padding.h +++ b/inst/include/tbb/internal/_mutex_padding.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_mutex_padding_H diff --git a/inst/include/tbb/internal/_range_iterator.h b/inst/include/tbb/internal/_range_iterator.h index 5ebc42e8..df00e88d 100644 --- a/inst/include/tbb/internal/_range_iterator.h +++ b/inst/include/tbb/internal/_range_iterator.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_range_iterator_H diff --git a/inst/include/tbb/internal/_tbb_hash_compare_impl.h b/inst/include/tbb/internal/_tbb_hash_compare_impl.h index 6381e2dc..82f0df13 100644 --- a/inst/include/tbb/internal/_tbb_hash_compare_impl.h +++ b/inst/include/tbb/internal/_tbb_hash_compare_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ // must be included outside namespaces. @@ -62,23 +58,23 @@ static const size_t hash_multiplier = tbb::internal::select_size_t_constant<2654 //! Hasher functions template -inline size_t tbb_hasher( const T& t ) { +__TBB_DEPRECATED_MSG("tbb::tbb_hasher is deprecated, use std::hash") inline size_t tbb_hasher( const T& t ) { return static_cast( t ) * internal::hash_multiplier; } template -inline size_t tbb_hasher( P* ptr ) { +__TBB_DEPRECATED_MSG("tbb::tbb_hasher is deprecated, use std::hash") inline size_t tbb_hasher( P* ptr ) { size_t const h = reinterpret_cast( ptr ); return (h >> 3) ^ h; } template -inline size_t tbb_hasher( const std::basic_string& s ) { +__TBB_DEPRECATED_MSG("tbb::tbb_hasher is deprecated, use std::hash") inline size_t tbb_hasher( const std::basic_string& s ) { size_t h = 0; for( const E* c = s.c_str(); *c; ++c ) h = static_cast(*c) ^ (h * internal::hash_multiplier); return h; } template -inline size_t tbb_hasher( const std::pair& p ) { +__TBB_DEPRECATED_MSG("tbb::tbb_hasher is deprecated, use std::hash") inline size_t tbb_hasher( const std::pair& p ) { return tbb_hasher(p.first) ^ tbb_hasher(p.second); } @@ -87,7 +83,7 @@ using interface5::tbb_hasher; // Template class for hash compare template -class tbb_hash 
+class __TBB_DEPRECATED_MSG("tbb::tbb_hash is deprecated, use std::hash") tbb_hash { public: tbb_hash() {} diff --git a/inst/include/tbb/internal/_tbb_strings.h b/inst/include/tbb/internal/_tbb_strings.h index 1aa532f3..df443f3f 100644 --- a/inst/include/tbb/internal/_tbb_strings.h +++ b/inst/include/tbb/internal/_tbb_strings.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ TBB_STRING_RESOURCE(FLOW_BROADCAST_NODE, "broadcast_node") @@ -65,3 +61,19 @@ TBB_STRING_RESOURCE(FLOW_NULL, "null") TBB_STRING_RESOURCE(FLOW_INDEXER_NODE, "indexer_node") TBB_STRING_RESOURCE(FLOW_COMPOSITE_NODE, "composite_node") TBB_STRING_RESOURCE(FLOW_ASYNC_NODE, "async_node") +TBB_STRING_RESOURCE(FLOW_OPENCL_NODE, "opencl_node") +TBB_STRING_RESOURCE(ALGORITHM, "tbb_algorithm") +TBB_STRING_RESOURCE(PARALLEL_FOR, "tbb_parallel_for") +TBB_STRING_RESOURCE(PARALLEL_DO, "tbb_parallel_do") +TBB_STRING_RESOURCE(PARALLEL_INVOKE, "tbb_parallel_invoke") +TBB_STRING_RESOURCE(PARALLEL_REDUCE, "tbb_parallel_reduce") +TBB_STRING_RESOURCE(PARALLEL_SCAN, "tbb_parallel_scan") +TBB_STRING_RESOURCE(PARALLEL_SORT, "tbb_parallel_sort") +TBB_STRING_RESOURCE(CUSTOM_CTX, "tbb_custom") +TBB_STRING_RESOURCE(FLOW_TASKS, "tbb_flow_graph") +TBB_STRING_RESOURCE(PARALLEL_FOR_TASK, "tbb_parallel_for_task") +// TODO: Drop following string prefix "fgt_" here and in FGA's collector +TBB_STRING_RESOURCE(USER_EVENT, "fgt_user_event") +#if __TBB_CPF_BUILD || (TBB_PREVIEW_FLOW_GRAPH_TRACE && TBB_USE_THREADING_TOOLS) +TBB_STRING_RESOURCE(CODE_ADDRESS, "code_address") +#endif diff --git a/inst/include/tbb/internal/_tbb_trace_impl.h 
b/inst/include/tbb/internal/_tbb_trace_impl.h index e7eb5784..38dc68cf 100644 --- a/inst/include/tbb/internal/_tbb_trace_impl.h +++ b/inst/include/tbb/internal/_tbb_trace_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef _FGT_TBB_TRACE_IMPL_H @@ -27,18 +23,17 @@ namespace tbb { namespace internal { #if TBB_PREVIEW_ALGORITHM_TRACE - static inline void fgt_algorithm( string_index t, void *algorithm, void *parent ) { - itt_make_task_group( ITT_DOMAIN_FLOW, algorithm, FGT_ALGORITHM, parent, FGT_ALGORITHM, t ); + itt_make_task_group( ITT_DOMAIN_FLOW, algorithm, ALGORITHM, parent, ALGORITHM, t ); } static inline void fgt_begin_algorithm( string_index t, void *algorithm ) { - itt_task_begin( ITT_DOMAIN_FLOW, algorithm, FGT_ALGORITHM, NULL, FLOW_NULL, t ); + itt_task_begin( ITT_DOMAIN_FLOW, algorithm, ALGORITHM, NULL, FLOW_NULL, t ); } static inline void fgt_end_algorithm( void * ) { itt_task_end( ITT_DOMAIN_FLOW ); } static inline void fgt_alg_begin_body( string_index t, void *body, void *algorithm ) { - itt_task_begin( ITT_DOMAIN_FLOW, body, FLOW_BODY, algorithm, FGT_ALGORITHM, t ); + itt_task_begin( ITT_DOMAIN_FLOW, body, FLOW_BODY, algorithm, ALGORITHM, t ); } static inline void fgt_alg_end_body( void * ) { itt_task_end( ITT_DOMAIN_FLOW ); diff --git a/inst/include/tbb/internal/_tbb_windef.h b/inst/include/tbb/internal/_tbb_windef.h index e798dee4..b2eb9d5d 100644 --- a/inst/include/tbb/internal/_tbb_windef.h +++ b/inst/include/tbb/internal/_tbb_windef.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under 
the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,17 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_tbb_windef_H #error Do not #include this internal file directly; use public TBB headers instead. #endif /* __TBB_tbb_windef_H */ -// Check that the target Windows version has all API calls requried for TBB. +// Check that the target Windows version has all API calls required for TBB. // Do not increase the version in condition beyond 0x0500 without prior discussion! #if defined(_WIN32_WINNT) && _WIN32_WINNT<0x0501 #error TBB is unable to run on old Windows versions; _WIN32_WINNT must be 0x0501 or greater. @@ -54,7 +50,7 @@ namespace std { # endif #endif -#if (__TBB_BUILD || __TBBMALLOC_BUILD) && !defined(__TBB_NO_IMPLICIT_LINKAGE) +#if (__TBB_BUILD || __TBBMALLOC_BUILD || __TBBBIND_BUILD) && !defined(__TBB_NO_IMPLICIT_LINKAGE) #define __TBB_NO_IMPLICIT_LINKAGE 1 #endif diff --git a/inst/include/tbb/internal/_template_helpers.h b/inst/include/tbb/internal/_template_helpers.h index 1e0abbe8..197f77a1 100644 --- a/inst/include/tbb/internal/_template_helpers.h +++ b/inst/include/tbb/internal/_template_helpers.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_template_helpers_H @@ -23,6 +19,14 @@ #include #include +#include "../tbb_config.h" +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TEMPLATE_ALIASES_PRESENT +#include +#endif +#if __TBB_CPP11_PRESENT +#include +#include // allocator_traits +#endif namespace tbb { namespace internal { @@ -60,11 +64,41 @@ template struct is_same_type { static const bool value = template struct is_ref { static const bool value = false; }; template struct is_ref { static const bool value = true; }; +//! Partial support for std::is_integral +template struct is_integral_impl { static const bool value = false; }; +template<> struct is_integral_impl { static const bool value = true; }; +template<> struct is_integral_impl { static const bool value = true; }; +#if __TBB_CPP11_PRESENT +template<> struct is_integral_impl { static const bool value = true; }; +template<> struct is_integral_impl { static const bool value = true; }; +#endif +template<> struct is_integral_impl { static const bool value = true; }; +template<> struct is_integral_impl { static const bool value = true; }; +template<> struct is_integral_impl { static const bool value = true; }; +template<> struct is_integral_impl { static const bool value = true; }; +template<> struct is_integral_impl { static const bool value = true; }; + +template +struct is_integral : is_integral_impl::type> {}; + #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT //! std::void_t internal implementation (to avoid GCC < 4.7 "template aliases" absence) template struct void_t { typedef void type; }; #endif +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TEMPLATE_ALIASES_PRESENT + +// Generic SFINAE helper for expression checks, based on the idea demonstrated in ISO C++ paper n4502 +template class... Checks> +struct supports_impl { typedef std::false_type type; }; +template class... Checks> +struct supports_impl...>::type, Checks...> { typedef std::true_type type; }; + +template class... 
Checks> +using supports = typename supports_impl::type; + +#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TEMPLATE_ALIASES_PRESENT */ + #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT //! Allows to store a function parameter pack as a variable and later pass it to another function @@ -153,6 +187,98 @@ stored_pack save_pack( Types&&... types ) { } #endif /* __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */ + +#if __TBB_CPP14_INTEGER_SEQUENCE_PRESENT + +using std::index_sequence; +using std::make_index_sequence; + +#elif __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TEMPLATE_ALIASES_PRESENT + +template class index_sequence {}; + +template +struct make_index_sequence_impl : make_index_sequence_impl < N - 1, N - 1, S... > {}; + +template +struct make_index_sequence_impl <0, S...> { + using type = index_sequence; +}; + +template +using make_index_sequence = typename tbb::internal::make_index_sequence_impl::type; + +#endif /* __TBB_CPP14_INTEGER_SEQUENCE_PRESENT */ + +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT +template +struct conjunction; + +template +struct conjunction + : std::conditional, First>::type {}; + +template +struct conjunction : T {}; + +template<> +struct conjunction<> : std::true_type {}; + +#endif + +#if __TBB_CPP11_PRESENT + +template< typename Iter > +using iterator_value_t = typename std::iterator_traits::value_type; + +template< typename Iter > +using iterator_key_t = typename std::remove_const::first_type>::type; + +template< typename Iter > +using iterator_mapped_t = typename iterator_value_t::second_type; + +template< typename A > using value_type = typename A::value_type; +template< typename A > using alloc_ptr_t = typename std::allocator_traits::pointer; +template< typename A > using has_allocate = decltype(std::declval&>() = std::declval().allocate(0)); +template< typename A > using has_deallocate = decltype(std::declval().deallocate(std::declval>(), 0)); + +// 
value_type should be checked first because it can be used in other checks (via allocator_traits) +template< typename T > +using is_allocator = supports; + +#if __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT + +template< typename T > +static constexpr bool is_allocator_v = is_allocator::value; + +#endif /*__TBB_CPP14_VARIABLE_TEMPLATES */ + +template< std::size_t N, typename... Args > +struct pack_element { + using type = void; +}; + +template< std::size_t N, typename T, typename... Args > +struct pack_element { + using type = typename pack_element::type; +}; + +template< typename T, typename... Args > +struct pack_element<0, T, Args...> { + using type = T; +}; + +template< std::size_t N, typename... Args > +using pack_element_t = typename pack_element::type; + +// Helper alias for heterogeneous lookup functions in containers +// template parameter K and std::conditional are needed to provide immediate context +// and postpone getting is_trasparent from the compare functor until method instantiation. +template +using is_transparent = typename std::conditional::type::is_transparent; + +#endif /* __TBB_CPP11_PRESENT */ + } } // namespace internal, namespace tbb #endif /* __TBB_template_helpers_H */ diff --git a/inst/include/tbb/internal/_x86_eliding_mutex_impl.h b/inst/include/tbb/internal/_x86_eliding_mutex_impl.h index ef5f9223..11be329d 100644 --- a/inst/include/tbb/internal/_x86_eliding_mutex_impl.h +++ b/inst/include/tbb/internal/_x86_eliding_mutex_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB__x86_eliding_mutex_impl_H diff --git a/inst/include/tbb/internal/_x86_rtm_rw_mutex_impl.h b/inst/include/tbb/internal/_x86_rtm_rw_mutex_impl.h index b08c2331..9373aaa0 100644 --- a/inst/include/tbb/internal/_x86_rtm_rw_mutex_impl.h +++ b/inst/include/tbb/internal/_x86_rtm_rw_mutex_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB__x86_rtm_rw_mutex_impl_H @@ -88,11 +84,11 @@ class x86_rtm_rw_mutex: private spin_rw_mutex { static x86_rtm_rw_mutex* internal_get_mutex( const spin_rw_mutex::scoped_lock& lock ) { - return static_cast( lock.internal_get_mutex() ); + return static_cast( lock.mutex ); } static void internal_set_mutex( spin_rw_mutex::scoped_lock& lock, spin_rw_mutex* mtx ) { - lock.internal_set_mutex( mtx ); + lock.mutex = mtx; } //! 
@endcond public: @@ -171,7 +167,8 @@ class x86_rtm_rw_mutex: private spin_rw_mutex { bool upgrade_to_writer() { x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock); __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( transaction_state==RTM_transacting_reader || transaction_state==RTM_real_reader, "Invalid state for upgrade" ); + if (transaction_state == RTM_transacting_writer || transaction_state == RTM_real_writer) + return true; // Already a writer return mutex->internal_upgrade(*this); } @@ -180,7 +177,8 @@ class x86_rtm_rw_mutex: private spin_rw_mutex { bool downgrade_to_reader() { x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock); __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( transaction_state==RTM_transacting_writer || transaction_state==RTM_real_writer, "Invalid state for downgrade" ); + if (transaction_state == RTM_transacting_reader || transaction_state == RTM_real_reader) + return true; // Already a reader return mutex->internal_downgrade(*this); } diff --git a/inst/include/tbb/iterators.h b/inst/include/tbb/iterators.h new file mode 100644 index 00000000..2d4da9c9 --- /dev/null +++ b/inst/include/tbb/iterators.h @@ -0,0 +1,326 @@ +/* + Copyright (c) 2017-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_iterators_H +#define __TBB_iterators_H + +#include +#include + +#include "tbb_config.h" +#include "tbb_stddef.h" + +#if __TBB_CPP11_PRESENT + +#include + +namespace tbb { + +template +class counting_iterator { + __TBB_STATIC_ASSERT(std::numeric_limits::is_integer, "Cannot instantiate counting_iterator with a non-integer type"); +public: + typedef typename std::make_signed::type difference_type; + typedef IntType value_type; + typedef const IntType* pointer; + typedef const IntType& reference; + typedef std::random_access_iterator_tag iterator_category; + + counting_iterator() : my_counter() {} + explicit counting_iterator(IntType init) : my_counter(init) {} + + reference operator*() const { return my_counter; } + value_type operator[](difference_type i) const { return *(*this + i); } + + difference_type operator-(const counting_iterator& it) const { return my_counter - it.my_counter; } + + counting_iterator& operator+=(difference_type forward) { my_counter += forward; return *this; } + counting_iterator& operator-=(difference_type backward) { return *this += -backward; } + counting_iterator& operator++() { return *this += 1; } + counting_iterator& operator--() { return *this -= 1; } + + counting_iterator operator++(int) { + counting_iterator it(*this); + ++(*this); + return it; + } + counting_iterator operator--(int) { + counting_iterator it(*this); + --(*this); + return it; + } + + counting_iterator operator-(difference_type backward) const { return counting_iterator(my_counter - backward); } + counting_iterator operator+(difference_type forward) const { return counting_iterator(my_counter + forward); } + friend counting_iterator operator+(difference_type forward, const counting_iterator it) { return it + forward; } + + bool operator==(const counting_iterator& it) const { return *this - it == 0; } + bool operator!=(const counting_iterator& it) const { return !(*this == it); } + bool operator<(const counting_iterator& it) const {return *this - 
it < 0; } + bool operator>(const counting_iterator& it) const { return it < *this; } + bool operator<=(const counting_iterator& it) const { return !(*this > it); } + bool operator>=(const counting_iterator& it) const { return !(*this < it); } + +private: + IntType my_counter; +}; +} //namespace tbb + + +#include + +#include "internal/_template_helpers.h" // index_sequence, make_index_sequence + +namespace tbb { +namespace internal { + +template +struct tuple_util { + template + static void increment(TupleType& it, DifferenceType forward) { + std::get(it) += forward; + tuple_util::increment(it, forward); + } + template + static bool check_sync(const TupleType& it1, const TupleType& it2, DifferenceType val) { + if(std::get(it1) - std::get(it2) != val) + return false; + return tuple_util::check_sync(it1, it2, val); + } +}; + +template<> +struct tuple_util<0> { + template + static void increment(TupleType&, DifferenceType) {} + template + static bool check_sync(const TupleType&, const TupleType&, DifferenceType) { return true;} +}; + +template +struct make_references { + template + TupleReturnType operator()(const TupleType& t, tbb::internal::index_sequence) { + return std::tie( *std::get(t)... ); + } +}; + +// A simple wrapper over a tuple of references. +// The class is designed to hold a temporary tuple of reference +// after dereferencing a zip_iterator; in particular, it is needed +// to swap these rvalue tuples. Any other usage is not supported. 
+template +struct tuplewrapper : public std::tuple::value, T&&>::type...> { + // In the context of this class, T is a reference, so T&& is a "forwarding reference" + typedef std::tuple base_type; + // Construct from the result of std::tie + tuplewrapper(const base_type& in) : base_type(in) {} +#if __INTEL_COMPILER + // ICC cannot generate copy ctor & assignment + tuplewrapper(const tuplewrapper& rhs) : base_type(rhs) {} + tuplewrapper& operator=(const tuplewrapper& rhs) { + *this = base_type(rhs); + return *this; + } +#endif + // Assign any tuple convertible to std::tuple: *it = a_tuple; + template + tuplewrapper& operator=(const std::tuple& other) { + base_type::operator=(other); + return *this; + } +#if _LIBCPP_VERSION + // (Necessary for libc++ tuples) Convert to a tuple of values: v = *it; + operator std::tuple::type...>() { return base_type(*this); } +#endif + // Swap rvalue tuples: swap(*it1,*it2); + friend void swap(tuplewrapper&& a, tuplewrapper&& b) { + std::swap(a,b); + } +}; + +} //namespace internal + +template +class zip_iterator { + __TBB_STATIC_ASSERT(sizeof...(Types)>0, "Cannot instantiate zip_iterator with empty template parameter pack"); + static const std::size_t num_types = sizeof...(Types); + typedef std::tuple it_types; +public: + typedef typename std::make_signed::type difference_type; + typedef std::tuple::value_type...> value_type; +#if __INTEL_COMPILER && __INTEL_COMPILER < 1800 && _MSC_VER + typedef std::tuple::reference...> reference; +#else + typedef tbb::internal::tuplewrapper::reference...> reference; +#endif + typedef std::tuple::pointer...> pointer; + typedef std::random_access_iterator_tag iterator_category; + + zip_iterator() : my_it() {} + explicit zip_iterator(Types... 
args) : my_it(std::make_tuple(args...)) {} + zip_iterator(const zip_iterator& input) : my_it(input.my_it) {} + zip_iterator& operator=(const zip_iterator& input) { + my_it = input.my_it; + return *this; + } + + reference operator*() const { + return tbb::internal::make_references()(my_it, tbb::internal::make_index_sequence()); + } + reference operator[](difference_type i) const { return *(*this + i); } + + difference_type operator-(const zip_iterator& it) const { + __TBB_ASSERT(internal::tuple_util::check_sync(my_it, it.my_it, std::get<0>(my_it) - std::get<0>(it.my_it)), + "Components of zip_iterator are not synchronous"); + return std::get<0>(my_it) - std::get<0>(it.my_it); + } + + zip_iterator& operator+=(difference_type forward) { + internal::tuple_util::increment(my_it, forward); + return *this; + } + zip_iterator& operator-=(difference_type backward) { return *this += -backward; } + zip_iterator& operator++() { return *this += 1; } + zip_iterator& operator--() { return *this -= 1; } + + zip_iterator operator++(int) { + zip_iterator it(*this); + ++(*this); + return it; + } + zip_iterator operator--(int) { + zip_iterator it(*this); + --(*this); + return it; + } + + zip_iterator operator-(difference_type backward) const { + zip_iterator it(*this); + return it -= backward; + } + zip_iterator operator+(difference_type forward) const { + zip_iterator it(*this); + return it += forward; + } + friend zip_iterator operator+(difference_type forward, const zip_iterator& it) { return it + forward; } + + bool operator==(const zip_iterator& it) const { + return *this - it == 0; + } + it_types base() const { return my_it; } + + bool operator!=(const zip_iterator& it) const { return !(*this == it); } + bool operator<(const zip_iterator& it) const { return *this - it < 0; } + bool operator>(const zip_iterator& it) const { return it < *this; } + bool operator<=(const zip_iterator& it) const { return !(*this > it); } + bool operator>=(const zip_iterator& it) const { return 
!(*this < it); } +private: + it_types my_it; +}; + +template +zip_iterator make_zip_iterator(T... args) { return zip_iterator(args...); } + +template +class transform_iterator { +public: + typedef typename std::iterator_traits::value_type value_type; + typedef typename std::iterator_traits::difference_type difference_type; +#if __TBB_CPP17_INVOKE_RESULT_PRESENT + typedef typename std::invoke_result::reference>::type reference; +#else + typedef typename std::result_of::reference)>::type reference; +#endif + typedef typename std::iterator_traits::pointer pointer; + typedef typename std::random_access_iterator_tag iterator_category; + + transform_iterator(Iter it, UnaryFunc unary_func) : my_it(it), my_unary_func(unary_func) { + __TBB_STATIC_ASSERT((std::is_same::iterator_category, + std::random_access_iterator_tag>::value), "Random access iterator required."); + } + transform_iterator(const transform_iterator& input) : my_it(input.my_it), my_unary_func(input.my_unary_func) { } + transform_iterator& operator=(const transform_iterator& input) { + my_it = input.my_it; + return *this; + } + reference operator*() const { + return my_unary_func(*my_it); + } + reference operator[](difference_type i) const { + return *(*this + i); + } + transform_iterator& operator++() { + ++my_it; + return *this; + } + transform_iterator& operator--() { + --my_it; + return *this; + } + transform_iterator operator++(int) { + transform_iterator it(*this); + ++(*this); + return it; + } + transform_iterator operator--(int) { + transform_iterator it(*this); + --(*this); + return it; + } + transform_iterator operator+(difference_type forward) const { + return { my_it + forward, my_unary_func }; + } + transform_iterator operator-(difference_type backward) const { + return { my_it - backward, my_unary_func }; + } + transform_iterator& operator+=(difference_type forward) { + my_it += forward; + return *this; + } + transform_iterator& operator-=(difference_type backward) { + my_it -= backward; + 
return *this; + } + friend transform_iterator operator+(difference_type forward, const transform_iterator& it) { + return it + forward; + } + difference_type operator-(const transform_iterator& it) const { + return my_it - it.my_it; + } + bool operator==(const transform_iterator& it) const { return *this - it == 0; } + bool operator!=(const transform_iterator& it) const { return !(*this == it); } + bool operator<(const transform_iterator& it) const { return *this - it < 0; } + bool operator>(const transform_iterator& it) const { return it < *this; } + bool operator<=(const transform_iterator& it) const { return !(*this > it); } + bool operator>=(const transform_iterator& it) const { return !(*this < it); } + + Iter base() const { return my_it; } +private: + Iter my_it; + const UnaryFunc my_unary_func; +}; + +template +transform_iterator make_transform_iterator(Iter it, UnaryFunc unary_func) { + return transform_iterator(it, unary_func); +} + +} //namespace tbb + +#endif //__TBB_CPP11_PRESENT + +#endif /* __TBB_iterators_H */ diff --git a/inst/include/tbb/machine/gcc_armv7.h b/inst/include/tbb/machine/gcc_arm.h similarity index 94% rename from inst/include/tbb/machine/gcc_armv7.h rename to inst/include/tbb/machine/gcc_arm.h index 642c14fe..284a3f9e 100644 --- a/inst/include/tbb/machine/gcc_armv7.h +++ b/inst/include/tbb/machine/gcc_arm.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ /* @@ -26,10 +22,7 @@ #error Do not include this file directly; include tbb_machine.h instead #endif -//TODO: is ARMv7 is the only version ever to support? 
-#if !(__ARM_ARCH_7A__) -#error compilation requires an ARMv7-a architecture. -#endif +#if __ARM_ARCH_7A__ #include #include @@ -171,15 +164,6 @@ static inline int64_t __TBB_machine_fetchadd8(volatile void *ptr, int64_t addend return result; } -inline void __TBB_machine_pause (int32_t delay ) -{ - while(delay>0) - { - __TBB_compiler_fence(); - delay--; - } -} - namespace tbb { namespace internal { template @@ -205,7 +189,6 @@ namespace internal { #define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) #define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) -#define __TBB_Pause(V) __TBB_machine_pause(V) // Use generics for some things #define __TBB_USE_GENERIC_PART_WORD_CAS 1 @@ -215,3 +198,19 @@ namespace internal { #define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 #define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1 #define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 +#elif defined __aarch64__ +// Generic gcc implementations are fine for ARMv8-a except __TBB_PAUSE. +#include "gcc_generic.h" +#else +#error compilation requires an ARMv7-a or ARMv8-a architecture. +#endif // __ARM_ARCH_7A__ + +inline void __TBB_machine_pause (int32_t delay) +{ + while(delay>0) + { + __asm__ __volatile__("yield" ::: "memory"); + delay--; + } +} +#define __TBB_Pause(V) __TBB_machine_pause(V) diff --git a/inst/include/tbb/machine/gcc_generic.h b/inst/include/tbb/machine/gcc_generic.h index 5fc2a901..9b157721 100644 --- a/inst/include/tbb/machine/gcc_generic.h +++ b/inst/include/tbb/machine/gcc_generic.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_generic_H) @@ -106,23 +102,17 @@ __TBB_MACHINE_DEFINE_ATOMICS(8,int64_t) #undef __TBB_MACHINE_DEFINE_ATOMICS -namespace tbb{ namespace internal { namespace gcc_builtins { - inline int clz(unsigned int x){ return __builtin_clz(x);}; - inline int clz(unsigned long int x){ return __builtin_clzl(x);}; - inline int clz(unsigned long long int x){ return __builtin_clzll(x);}; -}}} -//gcc __builtin_clz builtin count _number_ of leading zeroes -static inline intptr_t __TBB_machine_lg( uintptr_t x ) { - return sizeof(x)*8 - tbb::internal::gcc_builtins::clz(x) -1 ; -} - - typedef unsigned char __TBB_Flag; typedef __TBB_atomic __TBB_Flag __TBB_atomic_flag; #if __TBB_GCC_VERSION < 40700 // Use __sync_* builtins +// Use generic machine_load_store functions if there are no builtin atomics +#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 + static inline void __TBB_machine_or( volatile void *ptr, uintptr_t addend ) { __sync_fetch_and_or(reinterpret_cast(ptr),addend); } @@ -158,6 +148,59 @@ inline void __TBB_machine_unlock_byte( __TBB_atomic_flag &flag ) { __atomic_clear(&flag,__ATOMIC_RELEASE); } +namespace tbb { namespace internal { + +/** GCC atomic operation intrinsics might miss compiler fence. + Adding it after load-with-acquire, before store-with-release, and + on both sides of sequentially consistent operations is sufficient for correctness. 
**/ + +template +inline T __TBB_machine_atomic_load( const volatile T& location) { + if (MemOrder == __ATOMIC_SEQ_CST) __TBB_compiler_fence(); + T value = __atomic_load_n(&location, MemOrder); + if (MemOrder != __ATOMIC_RELAXED) __TBB_compiler_fence(); + return value; +} + +template +inline void __TBB_machine_atomic_store( volatile T& location, T value) { + if (MemOrder != __ATOMIC_RELAXED) __TBB_compiler_fence(); + __atomic_store_n(&location, value, MemOrder); + if (MemOrder == __ATOMIC_SEQ_CST) __TBB_compiler_fence(); +} + +template +struct machine_load_store { + static T load_with_acquire ( const volatile T& location ) { + return __TBB_machine_atomic_load(location); + } + static void store_with_release ( volatile T &location, T value ) { + __TBB_machine_atomic_store(location, value); + } +}; + +template +struct machine_load_store_relaxed { + static inline T load ( const volatile T& location ) { + return __TBB_machine_atomic_load(location); + } + static inline void store ( volatile T& location, T value ) { + __TBB_machine_atomic_store(location, value); + } +}; + +template +struct machine_load_store_seq_cst { + static T load ( const volatile T& location ) { + return __TBB_machine_atomic_load(location); + } + static void store ( volatile T &location, T value ) { + __TBB_machine_atomic_store(location, value); + } +}; + +}} // namespace tbb::internal + #endif // __TBB_GCC_VERSION < 40700 // Machine specific atomic operations @@ -167,18 +210,24 @@ inline void __TBB_machine_unlock_byte( __TBB_atomic_flag &flag ) { #define __TBB_TryLockByte __TBB_machine_try_lock_byte #define __TBB_UnlockByte __TBB_machine_unlock_byte -// Definition of other functions -#define __TBB_Log2(V) __TBB_machine_lg(V) +// __builtin_clz counts the number of leading zeroes +namespace tbb{ namespace internal { namespace gcc_builtins { + inline int clz(unsigned int x){ return __builtin_clz(x); } + inline int clz(unsigned long int x){ return __builtin_clzl(x); } + inline int clz(unsigned long long 
int x){ return __builtin_clzll(x); } +}}} +// logarithm is the index of the most significant non-zero bit +static inline intptr_t __TBB_machine_lg( uintptr_t x ) { + // If P is a power of 2 and x static inline intptr_t __TBB_machine_lg( T x ) { __TBB_ASSERT(x>0, "The logarithm of a non-positive value is undefined."); @@ -33,6 +29,7 @@ static inline intptr_t __TBB_machine_lg( T x ) { return j; } #define __TBB_Log2(V) __TBB_machine_lg(V) +#endif /* !__TBB_Log2 */ #ifndef __TBB_Pause //TODO: check if raising a ratio of pause instructions to loop control instructions diff --git a/inst/include/tbb/machine/gcc_itsx.h b/inst/include/tbb/machine/gcc_itsx.h index caa35441..5e93a202 100644 --- a/inst/include/tbb/machine/gcc_itsx.h +++ b/inst/include/tbb/machine/gcc_itsx.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_itsx_H) diff --git a/inst/include/tbb/machine/ibm_aix51.h b/inst/include/tbb/machine/ibm_aix51.h index a905b4e1..c8246848 100644 --- a/inst/include/tbb/machine/ibm_aix51.h +++ b/inst/include/tbb/machine/ibm_aix51.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ // TODO: revise by comparing with mac_ppc.h diff --git a/inst/include/tbb/machine/icc_generic.h b/inst/include/tbb/machine/icc_generic.h index 04863000..c4675b8e 100644 --- a/inst/include/tbb/machine/icc_generic.h +++ b/inst/include/tbb/machine/icc_generic.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #if !defined(__TBB_machine_H) || defined(__TBB_machine_icc_generic_H) @@ -23,7 +19,7 @@ #endif #if ! __TBB_ICC_BUILTIN_ATOMICS_PRESENT - #error "Intel C++ Compiler of at least 12.0 version is needed to use ICC intrinsics port" + #error "Intel(R) C++ Compiler of at least 12.0 version is needed to use ICC intrinsics port" #endif #define __TBB_machine_icc_generic_H diff --git a/inst/include/tbb/machine/linux_common.h b/inst/include/tbb/machine/linux_common.h index 4d2d355b..9dc6c813 100644 --- a/inst/include/tbb/machine/linux_common.h +++ b/inst/include/tbb/machine/linux_common.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_machine_H @@ -30,19 +26,44 @@ #include #if defined(SYS_futex) +/* This header file is included for Linux and some other systems that may support futexes.*/ #define __TBB_USE_FUTEX 1 + +#if defined(__has_include) +#define __TBB_has_include __has_include +#else +#define __TBB_has_include(x) 0 +#endif + +/* +If available, use typical headers where futex API is defined. While Linux and OpenBSD +are known to provide such headers, other systems might have them as well. +*/ +#if defined(__linux__) || __TBB_has_include() +#include +#elif defined(__OpenBSD__) || __TBB_has_include() +#include +#endif + #include #include -// Unfortunately, some versions of Linux do not have a header that defines FUTEX_WAIT and FUTEX_WAKE. -#ifdef FUTEX_WAIT +/* +Some systems might not define the macros or use different names. In such case we expect +the actual parameter values to match Linux: 0 for wait, 1 for wake. +*/ +#if defined(FUTEX_WAIT_PRIVATE) +#define __TBB_FUTEX_WAIT FUTEX_WAIT_PRIVATE +#elif defined(FUTEX_WAIT) #define __TBB_FUTEX_WAIT FUTEX_WAIT #else #define __TBB_FUTEX_WAIT 0 #endif -#ifdef FUTEX_WAKE +#if defined(FUTEX_WAKE_PRIVATE) +#define __TBB_FUTEX_WAKE FUTEX_WAKE_PRIVATE +#elif defined(FUTEX_WAKE) #define __TBB_FUTEX_WAKE FUTEX_WAKE #else #define __TBB_FUTEX_WAKE 1 diff --git a/inst/include/tbb/machine/linux_ia32.h b/inst/include/tbb/machine/linux_ia32.h index 932d343d..3942d8bf 100644 --- a/inst/include/tbb/machine/linux_ia32.h +++ b/inst/include/tbb/machine/linux_ia32.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_ia32_H) diff --git a/inst/include/tbb/machine/linux_ia64.h b/inst/include/tbb/machine/linux_ia64.h index f4772288..28b2bc41 100644 --- a/inst/include/tbb/machine/linux_ia64.h +++ b/inst/include/tbb/machine/linux_ia64.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_ia64_H) diff --git a/inst/include/tbb/machine/linux_intel64.h b/inst/include/tbb/machine/linux_intel64.h index 02153c2a..907ead52 100644 --- a/inst/include/tbb/machine/linux_intel64.h +++ b/inst/include/tbb/machine/linux_intel64.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_intel64_H) diff --git a/inst/include/tbb/machine/mac_ppc.h b/inst/include/tbb/machine/mac_ppc.h index 13f387b4..2eb5ad3a 100644 --- a/inst/include/tbb/machine/mac_ppc.h +++ b/inst/include/tbb/machine/mac_ppc.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_power_H) diff --git a/inst/include/tbb/machine/macos_common.h b/inst/include/tbb/machine/macos_common.h index 119ad979..87bb5e3e 100644 --- a/inst/include/tbb/machine/macos_common.h +++ b/inst/include/tbb/machine/macos_common.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #if !defined(__TBB_machine_H) || defined(__TBB_machine_macos_common_H) diff --git a/inst/include/tbb/machine/mic_common.h b/inst/include/tbb/machine/mic_common.h index 8765d39f..8c844f1d 100644 --- a/inst/include/tbb/machine/mic_common.h +++ b/inst/include/tbb/machine/mic_common.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_mic_common_H diff --git a/inst/include/tbb/machine/msvc_armv7.h b/inst/include/tbb/machine/msvc_armv7.h index 40d22020..e83c077e 100644 --- a/inst/include/tbb/machine/msvc_armv7.h +++ b/inst/include/tbb/machine/msvc_armv7.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #if !defined(__TBB_machine_H) || defined(__TBB_msvc_armv7_H) diff --git a/inst/include/tbb/machine/msvc_ia32_common.h b/inst/include/tbb/machine/msvc_ia32_common.h index 8b4814bc..2e17836d 100644 --- a/inst/include/tbb/machine/msvc_ia32_common.h +++ b/inst/include/tbb/machine/msvc_ia32_common.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #if !defined(__TBB_machine_H) || defined(__TBB_machine_msvc_ia32_common_H) @@ -49,7 +45,7 @@ }}} #endif -#if _MSC_VER>=1600 && (!__INTEL_COMPILER || __INTEL_COMPILER>=1310) +#if __TBB_MSVC_PART_WORD_INTERLOCKED_INTRINSICS_PRESENT // S is the operand size in bytes, B is the suffix for intrinsics for that size #define __TBB_MACHINE_DEFINE_ATOMICS(S,B,T,U) \ __pragma(intrinsic( _InterlockedCompareExchange##B )) \ @@ -75,8 +71,7 @@ #endif #undef __TBB_MACHINE_DEFINE_ATOMICS - #define __TBB_ATOMIC_PRIMITIVES_DEFINED -#endif /*_MSC_VER>=1600*/ +#endif /* __TBB_MSVC_PART_WORD_INTERLOCKED_INTRINSICS_PRESENT */ #if _MSC_VER>=1300 || __INTEL_COMPILER>=1100 #pragma intrinsic(_ReadWriteBarrier) diff --git a/inst/include/tbb/machine/sunos_sparc.h b/inst/include/tbb/machine/sunos_sparc.h index 9119f402..b5864ba7 100644 --- a/inst/include/tbb/machine/sunos_sparc.h +++ b/inst/include/tbb/machine/sunos_sparc.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ diff --git a/inst/include/tbb/machine/windows_api.h b/inst/include/tbb/machine/windows_api.h index d362abc5..54987915 100644 --- a/inst/include/tbb/machine/windows_api.h +++ b/inst/include/tbb/machine/windows_api.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_machine_windows_api_H diff --git a/inst/include/tbb/machine/windows_ia32.h b/inst/include/tbb/machine/windows_ia32.h index 8db0d2b8..62968226 100644 --- a/inst/include/tbb/machine/windows_ia32.h +++ b/inst/include/tbb/machine/windows_ia32.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #if !defined(__TBB_machine_H) || defined(__TBB_machine_windows_ia32_H) @@ -43,7 +39,7 @@ extern "C" { __int64 __TBB_EXPORTED_FUNC __TBB_machine_load8 (const volatile void *ptr); } -#ifndef __TBB_ATOMIC_PRIMITIVES_DEFINED +#if !__TBB_MSVC_PART_WORD_INTERLOCKED_INTRINSICS_PRESENT #define __TBB_MACHINE_DEFINE_ATOMICS(S,T,U,A,C) \ static inline T __TBB_machine_cmpswp##S ( volatile void * ptr, U value, U comparand ) { \ @@ -93,7 +89,7 @@ __TBB_MACHINE_DEFINE_ATOMICS(4, ptrdiff_t, ptrdiff_t, eax, ecx) #undef __TBB_MACHINE_DEFINE_ATOMICS -#endif /*__TBB_ATOMIC_PRIMITIVES_DEFINED*/ +#endif /* __TBB_MSVC_PART_WORD_INTERLOCKED_INTRINSICS_PRESENT */ //TODO: Check if it possible and profitable for IA-32 architecture on (Linux and Windows) //to use of 64-bit load/store via floating point registers together with full fence diff --git a/inst/include/tbb/machine/windows_intel64.h b/inst/include/tbb/machine/windows_intel64.h index 86abd6ad..6e2a2cc7 100644 --- a/inst/include/tbb/machine/windows_intel64.h +++ b/inst/include/tbb/machine/windows_intel64.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #if !defined(__TBB_machine_H) || defined(__TBB_machine_windows_intel64_H) @@ -29,7 +25,7 @@ #include "msvc_ia32_common.h" -#ifndef __TBB_ATOMIC_PRIMITIVES_DEFINED +#if !__TBB_MSVC_PART_WORD_INTERLOCKED_INTRINSICS_PRESENT #include #pragma intrinsic(_InterlockedCompareExchange,_InterlockedExchangeAdd,_InterlockedExchange) @@ -66,7 +62,7 @@ inline __int64 __TBB_machine_fetchstore8 (volatile void *ptr, __int64 value ) { return _InterlockedExchange64( (__int64*)ptr, value ); } -#endif /*__TBB_ATOMIC_PRIMITIVES_DEFINED*/ +#endif /* __TBB_MSVC_PART_WORD_INTERLOCKED_INTRINSICS_PRESENT */ #define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1 #define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 diff --git a/inst/include/tbb/machine/xbox360_ppc.h b/inst/include/tbb/machine/xbox360_ppc.h deleted file mode 100644 index 148e5b1d..00000000 --- a/inst/include/tbb/machine/xbox360_ppc.h +++ /dev/null @@ -1,119 +0,0 @@ -/* - Copyright 2005-2014 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. Threading Building Blocks is free software; - you can redistribute it and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. Threading Building Blocks is - distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the - implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. 
You should have received a copy of - the GNU General Public License along with Threading Building Blocks; if not, write to the - Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software library without - restriction. Specifically, if other files instantiate templates or use macros or inline - functions from this file, or you compile this file and link it with other files to produce - an executable, this file does not by itself cause the resulting executable to be covered - by the GNU General Public License. This exception does not however invalidate any other - reasons why the executable file might be covered by the GNU General Public License. -*/ - -// TODO: revise by comparing with mac_ppc.h - -#if !defined(__TBB_machine_H) || defined(__TBB_machine_xbox360_ppc_H) -#error Do not #include this internal file directly; use public TBB headers instead. -#endif - -#define __TBB_machine_xbox360_ppc_H - -#define NONET -#define NOD3D -#include "xtl.h" -#include "ppcintrinsics.h" - -#if _MSC_VER >= 1300 -extern "C" void _MemoryBarrier(); -#pragma intrinsic(_MemoryBarrier) -#define __TBB_control_consistency_helper() __isync() -#define __TBB_acquire_consistency_helper() _MemoryBarrier() -#define __TBB_release_consistency_helper() _MemoryBarrier() -#endif - -#define __TBB_full_memory_fence() __sync() - -#define __TBB_WORDSIZE 4 -#define __TBB_ENDIANNESS __TBB_ENDIAN_BIG - -//todo: define __TBB_USE_FENCED_ATOMICS and define acquire/release primitives to maximize performance - -inline __int32 __TBB_machine_cmpswp4(volatile void *ptr, __int32 value, __int32 comparand ) { - __sync(); - __int32 result = InterlockedCompareExchange((volatile LONG*)ptr, value, comparand); - __isync(); - return result; -} - -inline __int64 __TBB_machine_cmpswp8(volatile void *ptr, __int64 value, __int64 comparand ) -{ - __sync(); - __int64 result = InterlockedCompareExchange64((volatile 
LONG64*)ptr, value, comparand); - __isync(); - return result; -} - -#define __TBB_USE_GENERIC_PART_WORD_CAS 1 -#define __TBB_USE_GENERIC_FETCH_ADD 1 -#define __TBB_USE_GENERIC_FETCH_STORE 1 -#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 -#define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1 -#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 - -#pragma optimize( "", off ) -inline void __TBB_machine_pause (__int32 delay ) -{ - for (__int32 i=0; i> 0) & 1) + - ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 1) & 1) + - ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 2) & 1) + - ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 3) & 1) + - ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 4) & 1) + - ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 5) & 1) + 1; // +1 accomodates for the master thread -} - -static inline int __TBB_XBOX360_GetHardwareThreadIndex(int workerThreadIndex) -{ - workerThreadIndex %= __TBB_XBOX360_DetectNumberOfWorkers()-1; - int m = __TBB_XBOX360_HARDWARE_THREAD_MASK; - int index = 0; - int skipcount = workerThreadIndex; - while (true) - { - if ((m & 1)!=0) - { - if (skipcount==0) break; - skipcount--; - } - m >>= 1; - index++; - } - return index; -} - -#define __TBB_HardwareConcurrency() __TBB_XBOX360_DetectNumberOfWorkers() diff --git a/inst/include/tbb/memory_pool.h b/inst/include/tbb/memory_pool.h index b3bba397..99a31d6a 100644 --- a/inst/include/tbb/memory_pool.h +++ b/inst/include/tbb/memory_pool.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_memory_pool_H diff --git a/inst/include/tbb/mutex.h b/inst/include/tbb/mutex.h index e40b4cd0..76a9da89 100644 --- a/inst/include/tbb/mutex.h +++ b/inst/include/tbb/mutex.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,25 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +*/ +#include "internal/_deprecated_header_message_guard.h" +#if !defined(__TBB_show_deprecation_message_mutex_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_mutex_H +#pragma message("TBB Warning: tbb/mutex.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif - -*/ +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif #ifndef __TBB_mutex_H #define __TBB_mutex_H +#define __TBB_mutex_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #if _WIN32||_WIN64 #include "machine/windows_api.h" #else @@ -36,7 +46,7 @@ namespace tbb { //! Wrapper around the platform's native lock. /** @ingroup synchronization */ -class mutex : internal::mutex_copy_deprecated_and_disabled { +class __TBB_DEPRECATED_VERBOSE_MSG("tbb::critical_section is deprecated, use std::mutex") mutex : internal::mutex_copy_deprecated_and_disabled { public: //! Construct unacquired mutex. 
mutex() { @@ -230,4 +240,7 @@ __TBB_DEFINE_PROFILING_SET_NAME(mutex) } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_mutex_H_include_area + #endif /* __TBB_mutex_H */ diff --git a/inst/include/tbb/null_mutex.h b/inst/include/tbb/null_mutex.h index 85c660e3..3c7bad1a 100644 --- a/inst/include/tbb/null_mutex.h +++ b/inst/include/tbb/null_mutex.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_null_mutex_H diff --git a/inst/include/tbb/null_rw_mutex.h b/inst/include/tbb/null_rw_mutex.h index fa0c8035..f1ea4df6 100644 --- a/inst/include/tbb/null_rw_mutex.h +++ b/inst/include/tbb/null_rw_mutex.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_null_rw_mutex_H diff --git a/inst/include/tbb/parallel_do.h b/inst/include/tbb/parallel_do.h index 15275682..1b63e279 100644 --- a/inst/include/tbb/parallel_do.h +++ b/inst/include/tbb/parallel_do.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,15 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_parallel_do_H #define __TBB_parallel_do_H +#define __TBB_parallel_do_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "internal/_range_iterator.h" #include "internal/_template_helpers.h" #include "task.h" @@ -500,7 +499,7 @@ void parallel_do( Iterator first, Iterator last, const Body& body ) if ( first == last ) return; #if __TBB_TASK_GROUP_CONTEXT - task_group_context context; + task_group_context context(internal::PARALLEL_DO); #endif interface9::internal::select_parallel_do( first, last, body, &Body::operator() #if __TBB_TASK_GROUP_CONTEXT @@ -548,4 +547,7 @@ using interface9::parallel_do_feeder; } // namespace +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_parallel_do_H_include_area + #endif /* __TBB_parallel_do_H */ diff --git a/inst/include/tbb/parallel_for.h b/inst/include/tbb/parallel_for.h index 2cab6586..0b4861f4 100644 --- a/inst/include/tbb/parallel_for.h +++ b/inst/include/tbb/parallel_for.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,20 +12,20 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_parallel_for_H #define __TBB_parallel_for_H +#define __TBB_parallel_for_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include #include "task.h" #include "partitioner.h" #include "blocked_range.h" #include "tbb_exception.h" +#include "internal/_tbb_trace_impl.h" namespace tbb { @@ -57,6 +57,7 @@ namespace internal { my_body(body), my_partition(partitioner) { + tbb::internal::fgt_algorithm(tbb::internal::PARALLEL_FOR_TASK, this, NULL); } //! Splitting constructor used to generate children. /** parent_ becomes left child. Newly constructed object is right child. */ @@ -66,6 +67,7 @@ namespace internal { my_partition(parent_.my_partition, split_obj) { my_partition.set_affinity(*this); + tbb::internal::fgt_algorithm(tbb::internal::PARALLEL_FOR_TASK, this, (void *)&parent_); } //! Construct right child from the given range as response to the demand. /** parent_ remains left child. Newly constructed object is right child. */ @@ -76,6 +78,7 @@ namespace internal { { my_partition.set_affinity(*this); my_partition.align_depth( d ); + tbb::internal::fgt_algorithm(tbb::internal::PARALLEL_FOR_TASK, this, (void *)&parent_); } static void run( const Range& range, const Body& body, Partitioner& partitioner ) { if( !range.empty() ) { @@ -84,22 +87,34 @@ namespace internal { #else // Bound context prevents exceptions from body to affect nesting or sibling algorithms, // and allows users to handle exceptions safely by wrapping parallel_for in the try-block. 
- task_group_context context; + task_group_context context(PARALLEL_FOR); start_for& a = *new(task::allocate_root(context)) start_for(range,body,partitioner); #endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */ + // REGION BEGIN + fgt_begin_algorithm( tbb::internal::PARALLEL_FOR_TASK, (void*)&context ); task::spawn_root_and_wait(a); + fgt_end_algorithm( (void*)&context ); + // REGION END } } #if __TBB_TASK_GROUP_CONTEXT static void run( const Range& range, const Body& body, Partitioner& partitioner, task_group_context& context ) { if( !range.empty() ) { start_for& a = *new(task::allocate_root(context)) start_for(range,body,partitioner); + // REGION BEGIN + fgt_begin_algorithm( tbb::internal::PARALLEL_FOR_TASK, (void*)&context ); task::spawn_root_and_wait(a); + fgt_end_algorithm( (void*)&context ); + // END REGION } } #endif /* __TBB_TASK_GROUP_CONTEXT */ //! Run body for range, serves as callback for partitioner - void run_body( Range &r ) { my_body( r ); } + void run_body( Range &r ) { + fgt_alg_begin_body( tbb::internal::PARALLEL_FOR_TASK, (void *)const_cast(&(this->my_body)), (void*)this ); + my_body( r ); + fgt_alg_end_body( (void *)const_cast(&(this->my_body)) ); + } //! spawn right task, serves as callback for partitioner void offer_work(typename Partitioner::split_type& split_obj) { @@ -404,4 +419,7 @@ using strict_ppl::parallel_for; #undef __TBB_NORMAL_EXECUTION #endif +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_parallel_for_H_include_area + #endif /* __TBB_parallel_for_H */ diff --git a/inst/include/tbb/parallel_for_each.h b/inst/include/tbb/parallel_for_each.h index 6c2ec9f0..e1da1bbd 100644 --- a/inst/include/tbb/parallel_for_each.h +++ b/inst/include/tbb/parallel_for_each.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_parallel_for_each_H diff --git a/inst/include/tbb/parallel_invoke.h b/inst/include/tbb/parallel_invoke.h index 0dd7590e..4be4bdb7 100644 --- a/inst/include/tbb/parallel_invoke.h +++ b/inst/include/tbb/parallel_invoke.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,16 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_parallel_invoke_H #define __TBB_parallel_invoke_H +#define __TBB_parallel_invoke_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "task.h" +#include "tbb_profiling.h" #if __TBB_VARIADIC_PARALLEL_INVOKE #include // std::forward @@ -31,7 +31,9 @@ namespace tbb { #if !__TBB_TASK_GROUP_CONTEXT /** Dummy to avoid cluttering the bulk of the header with enormous amount of ifdefs. **/ - struct task_group_context {}; + struct task_group_context { + task_group_context(tbb::internal::string_index){} + }; #endif /* __TBB_TASK_GROUP_CONTEXT */ //! @cond INTERNAL @@ -223,7 +225,7 @@ namespace internal { // task_group_context is not in the pack, needs to be added template void parallel_invoke_impl(false_type, F0&& f0, F1&& f1, F&&... 
f) { - tbb::task_group_context context; + tbb::task_group_context context(PARALLEL_INVOKE); // Add context to the arguments, and redirect to the other overload parallel_invoke_impl(true_type(), std::forward(f0), std::forward(f1), std::forward(f)..., context); } @@ -385,31 +387,31 @@ void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, con // two arguments template void parallel_invoke(const F0& f0, const F1& f1) { - task_group_context context; + task_group_context context(internal::PARALLEL_INVOKE); parallel_invoke(f0, f1, context); } // three arguments template void parallel_invoke(const F0& f0, const F1& f1, const F2& f2) { - task_group_context context; + task_group_context context(internal::PARALLEL_INVOKE); parallel_invoke(f0, f1, f2, context); } // four arguments template void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3) { - task_group_context context; + task_group_context context(internal::PARALLEL_INVOKE); parallel_invoke(f0, f1, f2, f3, context); } // five arguments template void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4) { - task_group_context context; + task_group_context context(internal::PARALLEL_INVOKE); parallel_invoke(f0, f1, f2, f3, f4, context); } // six arguments template void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, const F5& f5) { - task_group_context context; + task_group_context context(internal::PARALLEL_INVOKE); parallel_invoke(f0, f1, f2, f3, f4, f5, context); } // seven arguments @@ -417,7 +419,7 @@ template(f0, f1, f2, f3, f4, f5, f6, context); } // eight arguments @@ -426,7 +428,7 @@ template(f0, f1, f2, f3, f4, f5, f6, f7, context); } // nine arguments @@ -435,7 +437,7 @@ template(f0, f1, f2, f3, f4, f5, f6, f7, f8, context); } // ten arguments @@ -444,7 +446,7 @@ template(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, context); } #endif // __TBB_VARIADIC_PARALLEL_INVOKE @@ -452,4 +454,7 @@ void 
parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, con } // namespace +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_parallel_invoke_H_include_area + #endif /* __TBB_parallel_invoke_H */ diff --git a/inst/include/tbb/parallel_reduce.h b/inst/include/tbb/parallel_reduce.h index 0596ae03..da2e2f8d 100644 --- a/inst/include/tbb/parallel_reduce.h +++ b/inst/include/tbb/parallel_reduce.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_parallel_reduce_H #define __TBB_parallel_reduce_H +#define __TBB_parallel_reduce_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include #include "task.h" #include "aligned_space.h" @@ -136,7 +135,7 @@ namespace internal { #else // Bound context prevents exceptions from body to affect nesting or sibling algorithms, // and allows users to handle exceptions safely by wrapping parallel_for in the try-block. - task_group_context context; + task_group_context context(PARALLEL_REDUCE); task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) ); #endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */ } @@ -213,67 +212,71 @@ namespace internal { my_left_body.join( my_right_body ); return NULL; } - template + template friend class start_deterministic_reduce; }; //! Task type used to split the work of parallel_deterministic_reduce. 
/** @ingroup algorithms */ - template + template class start_deterministic_reduce: public task { typedef finish_deterministic_reduce finish_type; Body &my_body; Range my_range; + typename Partitioner::task_partition_type my_partition; task* execute() __TBB_override; //! Constructor used for root task - start_deterministic_reduce( const Range& range, Body& body ) : + start_deterministic_reduce( const Range& range, Body& body, Partitioner& partitioner ) : my_body( body ), - my_range( range ) + my_range( range ), + my_partition( partitioner ) { } //! Splitting constructor used to generate children. /** parent_ becomes left child. Newly constructed object is right child. */ - start_deterministic_reduce( start_deterministic_reduce& parent_, finish_type& c ) : + start_deterministic_reduce( start_deterministic_reduce& parent_, finish_type& c, typename Partitioner::split_type& split_obj ) : my_body( c.my_right_body ), - my_range( parent_.my_range, split() ) + my_range( parent_.my_range, split_obj ), + my_partition( parent_.my_partition, split_obj ) { } public: - static void run( const Range& range, Body& body ) { + static void run( const Range& range, Body& body, Partitioner& partitioner ) { if( !range.empty() ) { #if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP - task::spawn_root_and_wait( *new(task::allocate_root()) start_deterministic_reduce(range,&body) ); + task::spawn_root_and_wait( *new(task::allocate_root()) start_deterministic_reduce(range,&body,partitioner) ); #else // Bound context prevents exceptions from body to affect nesting or sibling algorithms, // and allows users to handle exceptions safely by wrapping parallel_for in the try-block. 
- task_group_context context; - task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body) ); + task_group_context context(PARALLEL_REDUCE); + task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body,partitioner) ); #endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */ } } #if __TBB_TASK_GROUP_CONTEXT - static void run( const Range& range, Body& body, task_group_context& context ) { + static void run( const Range& range, Body& body, Partitioner& partitioner, task_group_context& context ) { if( !range.empty() ) - task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body) ); + task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body,partitioner) ); } #endif /* __TBB_TASK_GROUP_CONTEXT */ - }; - template - task* start_deterministic_reduce::execute() { - if( !my_range.is_divisible() ) { - my_body( my_range ); - return NULL; - } else { - finish_type& c = *new( allocate_continuation() ) finish_type( my_body ); - recycle_as_child_of(c); - c.set_ref_count(2); - start_deterministic_reduce& b = *new( c.allocate_child() ) start_deterministic_reduce( *this, c ); - task::spawn(b); - return this; + void offer_work( typename Partitioner::split_type& split_obj) { + task* tasks[2]; + allocate_sibling(static_cast(this), tasks, sizeof(start_deterministic_reduce), sizeof(finish_type)); + new((void*)tasks[0]) finish_type(my_body); + new((void*)tasks[1]) start_deterministic_reduce(*this, *static_cast(tasks[0]), split_obj); + spawn(*tasks[1]); } + + void run_body( Range &r ) { my_body(r); } + }; + + template + task* start_deterministic_reduce::execute() { + my_partition.execute(*this, my_range); + return NULL; } } // namespace internal //! @endcond @@ -389,6 +392,13 @@ void parallel_reduce( const Range& range, Body& body, affinity_partitioner& part } #if __TBB_TASK_GROUP_CONTEXT +//! 
Parallel iteration with reduction, default partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +void parallel_reduce( const Range& range, Body& body, task_group_context& context ) { + internal::start_reduce::run( range, body, __TBB_DEFAULT_PARTITIONER(), context ); +} + //! Parallel iteration with reduction, simple partitioner and user-supplied context. /** @ingroup algorithms **/ template @@ -476,6 +486,17 @@ Value parallel_reduce( const Range& range, const Value& identity, const RealBody } #if __TBB_TASK_GROUP_CONTEXT +//! Parallel iteration with reduction, default partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + task_group_context& context ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_reduce,const __TBB_DEFAULT_PARTITIONER> + ::run( range, body, __TBB_DEFAULT_PARTITIONER(), context ); + return body.result(); +} + //! Parallel iteration with reduction, simple partitioner and user-supplied context. /** @ingroup algorithms **/ template @@ -521,44 +542,108 @@ Value parallel_reduce( const Range& range, const Value& identity, const RealBody } #endif /* __TBB_TASK_GROUP_CONTEXT */ -//! Parallel iteration with deterministic reduction and default partitioner. +//! Parallel iteration with deterministic reduction and default simple partitioner. /** @ingroup algorithms **/ template void parallel_deterministic_reduce( const Range& range, Body& body ) { - internal::start_deterministic_reduce::run( range, body ); + internal::start_deterministic_reduce::run(range, body, simple_partitioner()); +} + +//! Parallel iteration with deterministic reduction and simple partitioner. 
+/** @ingroup algorithms **/ +template +void parallel_deterministic_reduce( const Range& range, Body& body, const simple_partitioner& partitioner ) { + internal::start_deterministic_reduce::run(range, body, partitioner); +} + +//! Parallel iteration with deterministic reduction and static partitioner. +/** @ingroup algorithms **/ +template +void parallel_deterministic_reduce( const Range& range, Body& body, const static_partitioner& partitioner ) { + internal::start_deterministic_reduce::run(range, body, partitioner); } #if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration with deterministic reduction, simple partitioner and user-supplied context. +//! Parallel iteration with deterministic reduction, default simple partitioner and user-supplied context. /** @ingroup algorithms **/ template void parallel_deterministic_reduce( const Range& range, Body& body, task_group_context& context ) { - internal::start_deterministic_reduce::run( range, body, context ); + internal::start_deterministic_reduce::run( range, body, simple_partitioner(), context ); +} + +//! Parallel iteration with deterministic reduction, simple partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +void parallel_deterministic_reduce( const Range& range, Body& body, const simple_partitioner& partitioner, task_group_context& context ) { + internal::start_deterministic_reduce::run(range, body, partitioner, context); +} + +//! Parallel iteration with deterministic reduction, static partitioner and user-supplied context. 
+/** @ingroup algorithms **/ +template +void parallel_deterministic_reduce( const Range& range, Body& body, const static_partitioner& partitioner, task_group_context& context ) { + internal::start_deterministic_reduce::run(range, body, partitioner, context); } #endif /* __TBB_TASK_GROUP_CONTEXT */ /** parallel_reduce overloads that work with anonymous function objects (see also \ref parallel_reduce_lambda_req "requirements on parallel_reduce anonymous function objects"). **/ -//! Parallel iteration with deterministic reduction and default partitioner. +//! Parallel iteration with deterministic reduction and default simple partitioner. +// TODO: consider making static_partitioner the default /** @ingroup algorithms **/ template Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) { + return parallel_deterministic_reduce(range, identity, real_body, reduction, simple_partitioner()); +} + +//! Parallel iteration with deterministic reduction and simple partitioner. +/** @ingroup algorithms **/ +template +Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, const simple_partitioner& partitioner ) { internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_deterministic_reduce > - ::run(range, body); + internal::start_deterministic_reduce, const simple_partitioner> + ::run(range, body, partitioner); return body.result(); } +//! Parallel iteration with deterministic reduction and static partitioner. 
+/** @ingroup algorithms **/ +template +Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, const static_partitioner& partitioner ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_deterministic_reduce, const static_partitioner> + ::run(range, body, partitioner); + return body.result(); +} #if __TBB_TASK_GROUP_CONTEXT +//! Parallel iteration with deterministic reduction, default simple partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + task_group_context& context ) { + return parallel_deterministic_reduce(range, identity, real_body, reduction, simple_partitioner(), context); +} + //! Parallel iteration with deterministic reduction, simple partitioner and user-supplied context. /** @ingroup algorithms **/ template Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - task_group_context& context ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_deterministic_reduce > - ::run( range, body, context ); + const simple_partitioner& partitioner, task_group_context& context ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_deterministic_reduce, const simple_partitioner> + ::run(range, body, partitioner, context); + return body.result(); +} + +//! Parallel iteration with deterministic reduction, static partitioner and user-supplied context. 
+/** @ingroup algorithms **/ +template +Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + const static_partitioner& partitioner, task_group_context& context ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_deterministic_reduce, const static_partitioner> + ::run(range, body, partitioner, context); return body.result(); } #endif /* __TBB_TASK_GROUP_CONTEXT */ @@ -566,4 +651,7 @@ Value parallel_deterministic_reduce( const Range& range, const Value& identity, } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_parallel_reduce_H_include_area + #endif /* __TBB_parallel_reduce_H */ diff --git a/inst/include/tbb/parallel_scan.h b/inst/include/tbb/parallel_scan.h index faf6b316..7930b5c4 100644 --- a/inst/include/tbb/parallel_scan.h +++ b/inst/include/tbb/parallel_scan.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_parallel_scan_H #define __TBB_parallel_scan_H +#define __TBB_parallel_scan_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "task.h" #include "aligned_space.h" #include @@ -32,12 +31,14 @@ namespace tbb { /** @ingroup algorithms */ struct pre_scan_tag { static bool is_final_scan() {return false;} + operator bool() {return is_final_scan();} }; //! Used to indicate that the final scan is being performed. /** @ingroup algorithms */ struct final_scan_tag { static bool is_final_scan() {return true;} + operator bool() {return is_final_scan();} }; //! 
@cond INTERNAL @@ -227,7 +228,6 @@ namespace internal { if( !range_.empty() ) { typedef internal::start_scan start_pass1_type; internal::sum_node* root = NULL; - typedef internal::final_sum final_sum_type; final_sum_type* temp_body = new(task::allocate_root()) final_sum_type( body_ ); start_pass1_type& pass1 = *new(task::allocate_root()) start_pass1_type( /*my_return_slot=*/root, @@ -297,6 +297,43 @@ namespace internal { } return next_task; } + + template + class lambda_scan_body : no_assign { + Value my_sum; + const Value& identity_element; + const Scan& my_scan; + const ReverseJoin& my_reverse_join; + public: + lambda_scan_body( const Value& identity, const Scan& scan, const ReverseJoin& rev_join) + : my_sum(identity) + , identity_element(identity) + , my_scan(scan) + , my_reverse_join(rev_join) {} + + lambda_scan_body( lambda_scan_body& b, split ) + : my_sum(b.identity_element) + , identity_element(b.identity_element) + , my_scan(b.my_scan) + , my_reverse_join(b.my_reverse_join) {} + + template + void operator()( const Range& r, Tag tag ) { + my_sum = my_scan(r, my_sum, tag); + } + + void reverse_join( lambda_scan_body& a ) { + my_sum = my_reverse_join(a.my_sum, my_sum); + } + + void assign( lambda_scan_body& b ) { + my_sum = b.my_sum; + } + + Value result() const { + return my_sum; + } + }; } // namespace internal //! @endcond @@ -340,9 +377,40 @@ template void parallel_scan( const Range& range, Body& body, const auto_partitioner& partitioner ) { internal::start_scan::run(range,body,partitioner); } + +//! Parallel prefix with default partitioner +/** @ingroup algorithms **/ +template +Value parallel_scan( const Range& range, const Value& identity, const Scan& scan, const ReverseJoin& reverse_join ) { + internal::lambda_scan_body body(identity, scan, reverse_join); + tbb::parallel_scan(range,body,__TBB_DEFAULT_PARTITIONER()); + return body.result(); +} + +//! 
Parallel prefix with simple_partitioner +/** @ingroup algorithms **/ +template +Value parallel_scan( const Range& range, const Value& identity, const Scan& scan, const ReverseJoin& reverse_join, const simple_partitioner& partitioner ) { + internal::lambda_scan_body body(identity, scan, reverse_join); + tbb::parallel_scan(range,body,partitioner); + return body.result(); +} + +//! Parallel prefix with auto_partitioner +/** @ingroup algorithms **/ +template +Value parallel_scan( const Range& range, const Value& identity, const Scan& scan, const ReverseJoin& reverse_join, const auto_partitioner& partitioner ) { + internal::lambda_scan_body body(identity, scan, reverse_join); + tbb::parallel_scan(range,body,partitioner); + return body.result(); +} + //@} } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_parallel_scan_H_include_area + #endif /* __TBB_parallel_scan_H */ diff --git a/inst/include/tbb/parallel_sort.h b/inst/include/tbb/parallel_sort.h index 07d3907b..b865b2ee 100644 --- a/inst/include/tbb/parallel_sort.h +++ b/inst/include/tbb/parallel_sort.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,21 +12,23 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_parallel_sort_H #define __TBB_parallel_sort_H +#define __TBB_parallel_sort_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "parallel_for.h" #include "blocked_range.h" #include "internal/_range_iterator.h" #include #include #include +#if __TBB_TASK_GROUP_CONTEXT + #include "tbb_profiling.h" +#endif namespace tbb { @@ -159,7 +161,7 @@ struct quick_sort_body { template void parallel_quick_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp ) { #if __TBB_TASK_GROUP_CONTEXT - task_group_context my_context; + task_group_context my_context(PARALLEL_SORT); const int serial_cutoff = 9; __TBB_ASSERT( begin + serial_cutoff < end, "min_parallel_size is smaller than serial cutoff?" ); @@ -230,13 +232,6 @@ void parallel_sort(Range& rng, const Compare& comp) { parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng), comp); } -//! Sorts the data in const rng using the given comparator -/** @ingroup algorithms **/ -template -void parallel_sort(const Range& rng, const Compare& comp) { - parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng), comp); -} - //! Sorts the data in rng with a default comparator \c std::less /** @ingroup algorithms **/ template @@ -244,13 +239,6 @@ void parallel_sort(Range& rng) { parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng)); } -//! Sorts the data in const rng with a default comparator \c std::less -/** @ingroup algorithms **/ -template -void parallel_sort(const Range& rng) { - parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng)); -} - //! 
Sorts the data in the range \c [begin,end) with a default comparator \c std::less /** @ingroup algorithms **/ template @@ -262,5 +250,8 @@ inline void parallel_sort( T * begin, T * end ) { } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_parallel_sort_H_include_area + #endif diff --git a/inst/include/tbb/parallel_while.h b/inst/include/tbb/parallel_while.h index 2f37a41a..65984af5 100644 --- a/inst/include/tbb/parallel_while.h +++ b/inst/include/tbb/parallel_while.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_parallel_while #define __TBB_parallel_while +#define __TBB_parallel_while_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "task.h" #include @@ -183,4 +182,7 @@ void parallel_while::add( const value_type& item ) { } // namespace +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_parallel_while_H_include_area + #endif /* __TBB_parallel_while */ diff --git a/inst/include/tbb/partitioner.h b/inst/include/tbb/partitioner.h index 96a0b757..23990868 100644 --- a/inst/include/tbb/partitioner.h +++ b/inst/include/tbb/partitioner.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_partitioner_H #define __TBB_partitioner_H +#define __TBB_partitioner_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #ifndef __TBB_INITIAL_CHUNKS // initial task divisions per thread #define __TBB_INITIAL_CHUNKS 2 @@ -50,6 +49,7 @@ #endif // __TBB_DEFINE_MIC #include "task.h" +#include "task_arena.h" #include "aligned_space.h" #include "atomic.h" #include "internal/_template_helpers.h" @@ -122,6 +122,7 @@ namespace internal { using namespace tbb::internal; template class start_for; template class start_reduce; +template class start_deterministic_reduce; //! Join task node that contains shared flag for stealing feedback class flag_task: public task { @@ -260,7 +261,6 @@ struct partition_type_base { template struct adaptive_mode : partition_type_base { typedef Partition my_partition; - using partition_type_base::self; // CRTP helper to get access to derived classes size_t my_divisor; // For affinity_partitioner, my_divisor indicates the number of affinity array indices the task reserves. // A task which has only one index must produce the right split without reserved index in order to avoid @@ -269,12 +269,44 @@ struct adaptive_mode : partition_type_base { static const unsigned factor = 1; adaptive_mode() : my_divisor(tbb::internal::get_initial_auto_partitioner_divisor() / 4 * my_partition::factor) {} adaptive_mode(adaptive_mode &src, split) : my_divisor(do_split(src, split())) {} - adaptive_mode(adaptive_mode &src, const proportional_split& split_obj) : my_divisor(do_split(src, split_obj)) {} /*! Override do_split methods in order to specify splitting strategy */ size_t do_split(adaptive_mode &src, split) { return src.my_divisor /= 2u; } - size_t do_split(adaptive_mode &src, const proportional_split& split_obj) { +}; + +//! A helper class to create a proportional_split object for a given type of Range. 
+/** If the Range has static boolean constant 'is_splittable_in_proportion' set to 'true', + the created object splits a provided value in an implemenation-defined proportion; + otherwise it represents equal-size split. */ +// TODO: check if this helper can be a nested class of proportional_mode. +template +struct proportion_helper { + static proportional_split get_split(size_t) { return proportional_split(1,1); } +}; +template +struct proportion_helper::type> { + static proportional_split get_split(size_t n) { +#if __TBB_NONUNIFORM_TASK_CREATION + size_t right = (n + 2) / 3; +#else + size_t right = n / 2; +#endif + size_t left = n - right; + return proportional_split(left, right); + } +}; + +//! Provides proportional splitting strategy for partition objects +template +struct proportional_mode : adaptive_mode { + typedef Partition my_partition; + using partition_type_base::self; // CRTP helper to get access to derived classes + + proportional_mode() : adaptive_mode() {} + proportional_mode(proportional_mode &src, split) : adaptive_mode(src, split()) {} + proportional_mode(proportional_mode &src, const proportional_split& split_obj) { self().my_divisor = do_split(src, split_obj); } + size_t do_split(proportional_mode &src, const proportional_split& split_obj) { #if __TBB_ENABLE_RANGE_FEEDBACK size_t portion = size_t(float(src.my_divisor) * float(split_obj.right()) / float(split_obj.left() + split_obj.right()) + 0.5f); @@ -293,81 +325,43 @@ struct adaptive_mode : partition_type_base { return portion; } bool is_divisible() { // part of old should_execute_range() - return my_divisor > my_partition::factor; + return self().my_divisor > my_partition::factor; + } + template + proportional_split get_split() { + // Create a proportion for the number of threads expected to handle "this" subrange + return proportion_helper::get_split( self().my_divisor / my_partition::factor ); } }; +static size_t get_initial_partition_head() { + int current_index = 
tbb::this_task_arena::current_thread_index(); + if (current_index == tbb::task_arena::not_initialized) + current_index = 0; + return size_t(current_index); +} + //! Provides default linear indexing of partitioner's sequence template -struct linear_affinity_mode : adaptive_mode { - using adaptive_mode::my_divisor; +struct linear_affinity_mode : proportional_mode { size_t my_head; - using adaptive_mode::self; - linear_affinity_mode() : adaptive_mode(), my_head(0) {} - linear_affinity_mode(linear_affinity_mode &src, split) : adaptive_mode(src, split()) - , my_head(src.my_head + src.my_divisor) {} - linear_affinity_mode(linear_affinity_mode &src, const proportional_split& split_obj) : adaptive_mode(src, split_obj) - , my_head(src.my_head + src.my_divisor) {} + size_t my_max_affinity; + using proportional_mode::self; + linear_affinity_mode() : proportional_mode(), my_head(get_initial_partition_head()), + my_max_affinity(self().my_divisor) {} + linear_affinity_mode(linear_affinity_mode &src, split) : proportional_mode(src, split()) + , my_head((src.my_head + src.my_divisor) % src.my_max_affinity), my_max_affinity(src.my_max_affinity) {} + linear_affinity_mode(linear_affinity_mode &src, const proportional_split& split_obj) : proportional_mode(src, split_obj) + , my_head((src.my_head + src.my_divisor) % src.my_max_affinity), my_max_affinity(src.my_max_affinity) {} void set_affinity( task &t ) { - if( my_divisor ) + if( self().my_divisor ) t.set_affinity( affinity_id(my_head) + 1 ); } }; -//! Class determines whether template parameter has static boolean constant -//! 'is_splittable_in_proportion' initialized with value of 'true' or not. 
-/** If template parameter has such field that has been initialized with non-zero - * value then class field will be set to 'true', otherwise - 'false' - */ -template -class is_splittable_in_proportion { -private: - typedef char yes[1]; - typedef char no [2]; - - template static yes& decide(typename enable_if::type *); - template static no& decide(...); -public: - // equals to 'true' if and only if static const variable 'is_splittable_in_proportion' of template parameter - // initialized with the value of 'true' - static const bool value = (sizeof(decide(0)) == sizeof(yes)); -}; - -//! Provides default methods for non-balancing partition objects. -template -struct unbalancing_partition_type : Mode { - using Mode::self; - unbalancing_partition_type() : Mode() {} - unbalancing_partition_type(unbalancing_partition_type& p, split) : Mode(p, split()) {} - unbalancing_partition_type(unbalancing_partition_type& p, const proportional_split& split_obj) : Mode(p, split_obj) {} -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Suppress "conditional expression is constant" warning. - #pragma warning( push ) - #pragma warning( disable: 4127 ) -#endif - template - proportional_split get_split() { - if (is_splittable_in_proportion::value) { - size_t size = self().my_divisor / Mode::my_partition::factor; -#if __TBB_NONUNIFORM_TASK_CREATION - size_t right = (size + 2) / 3; -#else - size_t right = size / 2; -#endif - size_t left = size - right; - return proportional_split(left, right); - } else { - return proportional_split(1, 1); - } - } -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning( pop ) -#endif // warning 4127 is back -}; - /*! 
Determine work-balance phase implementing splitting & stealing actions */ template -struct balancing_partition_type : unbalancing_partition_type { +struct dynamic_grainsize_mode : Mode { using Mode::self; #ifdef __TBB_USE_MACHINE_TIME_STAMPS tbb::internal::machine_tsc_t my_dst_tsc; @@ -379,27 +373,27 @@ struct balancing_partition_type : unbalancing_partition_type { } my_delay; depth_t my_max_depth; static const unsigned range_pool_size = __TBB_RANGE_POOL_CAPACITY; - balancing_partition_type(): unbalancing_partition_type() + dynamic_grainsize_mode(): Mode() #ifdef __TBB_USE_MACHINE_TIME_STAMPS , my_dst_tsc(0) #endif , my_delay(begin) , my_max_depth(__TBB_INIT_DEPTH) {} - balancing_partition_type(balancing_partition_type& p, split) - : unbalancing_partition_type(p, split()) + dynamic_grainsize_mode(dynamic_grainsize_mode& p, split) + : Mode(p, split()) #ifdef __TBB_USE_MACHINE_TIME_STAMPS , my_dst_tsc(0) #endif , my_delay(pass) , my_max_depth(p.my_max_depth) {} - balancing_partition_type(balancing_partition_type& p, const proportional_split& split_obj) - : unbalancing_partition_type(p, split_obj) + dynamic_grainsize_mode(dynamic_grainsize_mode& p, const proportional_split& split_obj) + : Mode(p, split_obj) #ifdef __TBB_USE_MACHINE_TIME_STAMPS , my_dst_tsc(0) #endif , my_delay(begin) , my_max_depth(p.my_max_depth) {} - bool check_being_stolen( task &t) { // part of old should_execute_range() + bool check_being_stolen(task &t) { // part of old should_execute_range() if( !(self().my_divisor / Mode::my_partition::factor) ) { // if not from the top P tasks of binary tree self().my_divisor = 1; // TODO: replace by on-stack flag (partition_state's member)? 
if( t.is_stolen_task() && t.parent()->ref_count() >= 2 ) { // runs concurrently with the left task @@ -478,14 +472,14 @@ struct balancing_partition_type : unbalancing_partition_type { } }; -class auto_partition_type: public balancing_partition_type > { +class auto_partition_type: public dynamic_grainsize_mode > { public: auto_partition_type( const auto_partitioner& ) - : balancing_partition_type >() { + : dynamic_grainsize_mode >() { my_divisor *= __TBB_INITIAL_CHUNKS; } auto_partition_type( auto_partition_type& src, split) - : balancing_partition_type >(src, split()) {} + : dynamic_grainsize_mode >(src, split()) {} bool is_divisible() { // part of old should_execute_range() if( my_divisor > 1 ) return true; if( my_divisor && my_max_depth ) { // can split the task. TODO: on-stack flag instead @@ -517,25 +511,25 @@ class simple_partition_type: public partition_type_base { } }; -class static_partition_type : public unbalancing_partition_type > { +class static_partition_type : public linear_affinity_mode { public: typedef proportional_split split_type; static_partition_type( const static_partitioner& ) - : unbalancing_partition_type >() {} + : linear_affinity_mode() {} static_partition_type( static_partition_type& p, split ) - : unbalancing_partition_type >(p, split()) {} + : linear_affinity_mode(p, split()) {} static_partition_type( static_partition_type& p, const proportional_split& split_obj ) - : unbalancing_partition_type >(p, split_obj) {} + : linear_affinity_mode(p, split_obj) {} }; -class affinity_partition_type : public balancing_partition_type > { +class affinity_partition_type : public dynamic_grainsize_mode > { static const unsigned factor_power = 4; // TODO: get a unified formula based on number of computing units tbb::internal::affinity_id* my_array; public: static const unsigned factor = 1 << factor_power; // number of slots in affinity array per task typedef proportional_split split_type; affinity_partition_type( 
tbb::internal::affinity_partitioner_base_v3& ap ) - : balancing_partition_type >() { + : dynamic_grainsize_mode >() { __TBB_ASSERT( (factor&(factor-1))==0, "factor must be power of two" ); ap.resize(factor); my_array = ap.my_array; @@ -543,10 +537,10 @@ class affinity_partition_type : public balancing_partition_type >(p, split()) + : dynamic_grainsize_mode >(p, split()) , my_array(p.my_array) {} affinity_partition_type(affinity_partition_type& p, const proportional_split& split_obj) - : balancing_partition_type >(p, split_obj) + : dynamic_grainsize_mode >(p, split_obj) , my_array(p.my_array) {} void set_affinity( task &t ) { if( my_divisor ) { @@ -596,6 +590,7 @@ class simple_partitioner { template friend class serial::interface9::start_for; template friend class interface9::internal::start_for; template friend class interface9::internal::start_reduce; + template friend class interface9::internal::start_deterministic_reduce; template friend class internal::start_scan; // backward compatibility class partition_type: public internal::partition_type_base { @@ -641,6 +636,7 @@ class static_partitioner { template friend class serial::interface9::start_for; template friend class interface9::internal::start_for; template friend class interface9::internal::start_reduce; + template friend class interface9::internal::start_deterministic_reduce; template friend class internal::start_scan; // backward compatibility typedef interface9::internal::old_auto_partition_type partition_type; @@ -678,4 +674,8 @@ class affinity_partitioner: internal::affinity_partitioner_base_v3 { #undef __TBB_INITIAL_CHUNKS #undef __TBB_RANGE_POOL_CAPACITY #undef __TBB_INIT_DEPTH + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_partitioner_H_include_area + #endif /* __TBB_partitioner_H */ diff --git a/inst/include/tbb/pipeline.h b/inst/include/tbb/pipeline.h index 20a8ec9b..13bf4e33 100644 --- a/inst/include/tbb/pipeline.h +++ b/inst/include/tbb/pipeline.h @@ -1,5 +1,5 @@ /* - 
Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,21 +12,20 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_pipeline_H #define __TBB_pipeline_H +#define __TBB_pipeline_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "atomic.h" #include "task.h" #include "tbb_allocator.h" #include -#if __TBB_CPP11_TYPE_PROPERTIES_PRESENT || __TBB_TR1_TYPE_PROPERTIES_IN_STD_PRESENT +#if __TBB_CPP11_TYPE_PROPERTIES_PRESENT #include #endif @@ -65,7 +64,7 @@ namespace interface6 { class filter: internal::no_copy { private: //! Value used to mark "not in pipeline" - static filter* not_in_pipeline() {return reinterpret_cast(intptr_t(-1));} + static filter* not_in_pipeline() { return reinterpret_cast(intptr_t(-1)); } protected: //! The lowest bit 0 is for parallel vs. serial static const unsigned char filter_is_serial = 0x1; @@ -157,7 +156,7 @@ class filter: internal::no_copy { //! Destroys item if pipeline was cancelled. /** Required to prevent memory leaks. Note it can be called concurrently even for serial filters.*/ - virtual void finalize( void* /*item*/ ) {}; + virtual void finalize( void* /*item*/ ) {} #endif private: @@ -233,7 +232,7 @@ class thread_bound_filter: public filter { //! A processing pipeline that applies filters to items. /** @ingroup algorithms */ -class pipeline { +class __TBB_DEPRECATED_MSG("tbb::pipeline is deprecated, use tbb::parallel_pipeline") pipeline { public: //! Construct empty pipeline. __TBB_EXPORTED_METHOD pipeline(); @@ -319,43 +318,58 @@ class flow_control { //! 
@cond INTERNAL namespace internal { -template struct tbb_large_object {enum { value = sizeof(T) > sizeof(void *) }; }; - -// Obtain type properties in one or another way +// Emulate std::is_trivially_copyable (false positives not allowed, false negatives suboptimal but safe). #if __TBB_CPP11_TYPE_PROPERTIES_PRESENT template struct tbb_trivially_copyable { enum { value = std::is_trivially_copyable::value }; }; -#elif __TBB_TR1_TYPE_PROPERTIES_IN_STD_PRESENT -template struct tbb_trivially_copyable { enum { value = std::has_trivial_copy_constructor::value }; }; #else -// Explicitly list the types we wish to be placed as-is in the pipeline input_buffers. -template struct tbb_trivially_copyable { enum { value = false }; }; -template struct tbb_trivially_copyable { enum { value = true }; }; -template<> struct tbb_trivially_copyable { enum { value = true }; }; -template<> struct tbb_trivially_copyable { enum { value = true }; }; -template<> struct tbb_trivially_copyable { enum { value = !tbb_large_object::value }; }; -template<> struct tbb_trivially_copyable { enum { value = !tbb_large_object::value }; }; -template<> struct tbb_trivially_copyable { enum { value = !tbb_large_object::value }; }; -template<> struct tbb_trivially_copyable { enum { value = !tbb_large_object::value }; }; -template<> struct tbb_trivially_copyable { enum { value = !tbb_large_object::value }; }; -template<> struct tbb_trivially_copyable { enum { value = !tbb_large_object::value }; }; -#endif // Obtaining type properties - -template struct is_large_object {enum { value = tbb_large_object::value || !tbb_trivially_copyable::value }; }; - -template class token_helper; - -// large object helper (uses tbb_allocator) +template struct tbb_trivially_copyable { enum { value = false }; }; +template struct tbb_trivially_copyable < T* > { enum { value = true }; }; +template<> struct tbb_trivially_copyable < bool > { enum { value = true }; }; +template<> struct tbb_trivially_copyable < char > { enum { value = 
true }; }; +template<> struct tbb_trivially_copyable < signed char > { enum { value = true }; }; +template<> struct tbb_trivially_copyable { enum { value = true }; }; +template<> struct tbb_trivially_copyable < short > { enum { value = true }; }; +template<> struct tbb_trivially_copyable { enum { value = true }; }; +template<> struct tbb_trivially_copyable < int > { enum { value = true }; }; +template<> struct tbb_trivially_copyable { enum { value = true }; }; +template<> struct tbb_trivially_copyable < long > { enum { value = true }; }; +template<> struct tbb_trivially_copyable { enum { value = true }; }; +template<> struct tbb_trivially_copyable < long long> { enum { value = true }; }; +template<> struct tbb_trivially_copyable { enum { value = true }; }; +template<> struct tbb_trivially_copyable < float > { enum { value = true }; }; +template<> struct tbb_trivially_copyable < double > { enum { value = true }; }; +template<> struct tbb_trivially_copyable < long double > { enum { value = true }; }; +#if !_MSC_VER || defined(_NATIVE_WCHAR_T_DEFINED) +template<> struct tbb_trivially_copyable < wchar_t > { enum { value = true }; }; +#endif /* _MSC_VER||!defined(_NATIVE_WCHAR_T_DEFINED) */ +#endif // tbb_trivially_copyable + +template +struct use_allocator { + enum { value = sizeof(T) > sizeof(void *) || !tbb_trivially_copyable::value }; +}; + +// A helper class to customize how a type is passed between filters. 
+// Usage: token_helper::value> +template class token_helper; + +// using tbb_allocator template class token_helper { - public: +public: typedef typename tbb::tbb_allocator allocator; typedef T* pointer; typedef T value_type; - static pointer create_token(const value_type & source) { +#if __TBB_CPP11_RVALUE_REF_PRESENT + static pointer create_token(value_type && source) +#else + static pointer create_token(const value_type & source) +#endif + { pointer output_t = allocator().allocate(1); - return new (output_t) T(source); + return new (output_t) T(tbb::internal::move(source)); } - static value_type & token(pointer & t) { return *t;} + static value_type & token(pointer & t) { return *t; } static void * cast_to_void_ptr(pointer ref) { return (void *) ref; } static pointer cast_from_void_ptr(void * ref) { return (pointer)ref; } static void destroy_token(pointer token) { @@ -366,30 +380,29 @@ class token_helper { // pointer specialization template -class token_helper { - public: +class token_helper { +public: typedef T* pointer; typedef T* value_type; static pointer create_token(const value_type & source) { return source; } - static value_type & token(pointer & t) { return t;} + static value_type & token(pointer & t) { return t; } static void * cast_to_void_ptr(pointer ref) { return (void *)ref; } static pointer cast_from_void_ptr(void * ref) { return (pointer)ref; } static void destroy_token( pointer /*token*/) {} }; -// small object specialization (converts void* to the correct type, passes objects directly.) +// converting type to and from void*, passing objects directly template class token_helper { typedef union { T actual_value; void * void_overlay; } type_to_void_ptr_map; - public: +public: typedef T pointer; // not really a pointer in this case. 
typedef T value_type; - static pointer create_token(const value_type & source) { - return source; } - static value_type & token(pointer & t) { return t;} + static pointer create_token(const value_type & source) { return source; } + static value_type & token(pointer & t) { return t; } static void * cast_to_void_ptr(pointer ref) { type_to_void_ptr_map mymap; mymap.void_overlay = NULL; @@ -404,17 +417,18 @@ class token_helper { static void destroy_token( pointer /*token*/) {} }; +// intermediate template class concrete_filter: public tbb::filter { const Body& my_body; - typedef token_helper::value > t_helper; + typedef token_helper::value> t_helper; typedef typename t_helper::pointer t_pointer; - typedef token_helper::value > u_helper; + typedef token_helper::value> u_helper; typedef typename u_helper::pointer u_pointer; void* operator()(void* input) __TBB_override { t_pointer temp_input = t_helper::cast_from_void_ptr(input); - u_pointer output_u = u_helper::create_token(my_body(t_helper::token(temp_input))); + u_pointer output_u = u_helper::create_token(my_body(tbb::internal::move(t_helper::token(temp_input)))); t_helper::destroy_token(temp_input); return u_helper::cast_to_void_ptr(output_u); } @@ -432,7 +446,7 @@ class concrete_filter: public tbb::filter { template class concrete_filter: public filter { const Body& my_body; - typedef token_helper::value > u_helper; + typedef token_helper::value> u_helper; typedef typename u_helper::pointer u_pointer; void* operator()(void*) __TBB_override { @@ -453,15 +467,16 @@ class concrete_filter: public filter { {} }; +// output template class concrete_filter: public filter { const Body& my_body; - typedef token_helper::value > t_helper; + typedef token_helper::value> t_helper; typedef typename t_helper::pointer t_pointer; void* operator()(void* input) __TBB_override { t_pointer temp_input = t_helper::cast_from_void_ptr(input); - my_body(t_helper::token(temp_input)); + my_body(tbb::internal::move(t_helper::token(temp_input))); 
t_helper::destroy_token(temp_input); return NULL; } @@ -478,7 +493,6 @@ template class concrete_filter: public filter { const Body& my_body; - /** Override privately because it is always called virtually */ void* operator()(void*) __TBB_override { flow_control control; my_body(control); @@ -518,7 +532,7 @@ class filter_node: tbb::internal::no_copy { //! Add concrete_filter to pipeline virtual void add_to( pipeline& ) = 0; //! Increment reference count - void add_ref() {++ref_count;} + void add_ref() { ++ref_count; } //! Decrement reference count and delete if it becomes zero. void remove_ref() { __TBB_ASSERT(ref_count>0,"ref_count underflow"); @@ -534,7 +548,7 @@ class filter_node: tbb::internal::no_copy { //! Node in parse tree representing result of make_filter. template -class filter_node_leaf: public filter_node { +class filter_node_leaf: public filter_node { const tbb::filter::mode mode; const Body body; void add_to( pipeline& p ) __TBB_override { @@ -662,4 +676,7 @@ using interface6::parallel_pipeline; } // tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_pipeline_H_include_area + #endif /* __TBB_pipeline_H */ diff --git a/inst/include/tbb/queuing_mutex.h b/inst/include/tbb/queuing_mutex.h index 0fe4b3ea..c5c64993 100644 --- a/inst/include/tbb/queuing_mutex.h +++ b/inst/include/tbb/queuing_mutex.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,29 +12,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_queuing_mutex_H #define __TBB_queuing_mutex_H -#include "tbb_config.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif +#define __TBB_queuing_mutex_H_include_area +#include "internal/_warning_suppress_enable_notice.h" #include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - #include "atomic.h" #include "tbb_profiling.h" @@ -59,6 +45,7 @@ class queuing_mutex : internal::mutex_copy_deprecated_and_disabled { //! Initialize fields to mean "no lock held". void initialize() { mutex = NULL; + going = 0; #if TBB_USE_ASSERT internal::poison_pointer(next); #endif /* TBB_USE_ASSERT */ @@ -120,4 +107,7 @@ __TBB_DEFINE_PROFILING_SET_NAME(queuing_mutex) } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_queuing_mutex_H_include_area + #endif /* __TBB_queuing_mutex_H */ diff --git a/inst/include/tbb/queuing_rw_mutex.h b/inst/include/tbb/queuing_rw_mutex.h index e0224ed5..b264141c 100644 --- a/inst/include/tbb/queuing_rw_mutex.h +++ b/inst/include/tbb/queuing_rw_mutex.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,29 +12,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_queuing_rw_mutex_H #define __TBB_queuing_rw_mutex_H -#include "tbb_config.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif +#define __TBB_queuing_rw_mutex_H_include_area +#include "internal/_warning_suppress_enable_notice.h" #include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - #include "atomic.h" #include "tbb_profiling.h" @@ -68,6 +54,8 @@ class queuing_rw_mutex : internal::mutex_copy_deprecated_and_disabled { //! Initialize fields to mean "no lock held". void initialize() { my_mutex = NULL; + my_internal_lock = 0; + my_going = 0; #if TBB_USE_ASSERT my_state = 0xFF; // Set to invalid state internal::poison_pointer(my_next); @@ -160,4 +148,7 @@ __TBB_DEFINE_PROFILING_SET_NAME(queuing_rw_mutex) } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_queuing_rw_mutex_H_include_area + #endif /* __TBB_queuing_rw_mutex_H */ diff --git a/inst/include/tbb/reader_writer_lock.h b/inst/include/tbb/reader_writer_lock.h index 353beec5..e55e8a8d 100644 --- a/inst/include/tbb/reader_writer_lock.h +++ b/inst/include/tbb/reader_writer_lock.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,25 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+*/ +#include "internal/_deprecated_header_message_guard.h" +#if !defined(__TBB_show_deprecation_message_reader_writer_lock_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_reader_writer_lock_H +#pragma message("TBB Warning: tbb/reader_writer_lock.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif - -*/ +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif #ifndef __TBB_reader_writer_lock_H #define __TBB_reader_writer_lock_H +#define __TBB_reader_writer_lock_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "tbb_thread.h" #include "tbb_allocator.h" #include "atomic.h" @@ -31,7 +41,8 @@ namespace interface5 { /** Loosely adapted from Mellor-Crummey and Scott pseudocode at http://www.cs.rochester.edu/research/synchronization/pseudocode/rw.html#s_wp @ingroup synchronization */ - class reader_writer_lock : tbb::internal::no_copy { + class __TBB_DEPRECATED_VERBOSE_MSG("tbb::reader_writer_lock is deprecated, use std::shared_mutex") + reader_writer_lock : tbb::internal::no_copy { public: friend class scoped_lock; friend class scoped_lock_read; @@ -229,4 +240,7 @@ using interface5::reader_writer_lock; } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_reader_writer_lock_H_include_area + #endif /* __TBB_reader_writer_lock_H */ diff --git a/inst/include/tbb/recursive_mutex.h b/inst/include/tbb/recursive_mutex.h index 5a23c097..6ac88198 100644 --- a/inst/include/tbb/recursive_mutex.h +++ b/inst/include/tbb/recursive_mutex.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,25 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. +*/ +#include "internal/_deprecated_header_message_guard.h" +#if !defined(__TBB_show_deprecation_message_recursive_mutex_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_recursive_mutex_H +#pragma message("TBB Warning: tbb/recursive_mutex.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif - -*/ +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif #ifndef __TBB_recursive_mutex_H #define __TBB_recursive_mutex_H +#define __TBB_recursive_mutex_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #if _WIN32||_WIN64 #include "machine/windows_api.h" #else @@ -36,7 +46,8 @@ namespace tbb { //! Mutex that allows recursive mutex acquisition. /** Mutex that allows recursive mutex acquisition. @ingroup synchronization */ -class recursive_mutex : internal::mutex_copy_deprecated_and_disabled { +class __TBB_DEPRECATED_VERBOSE_MSG("tbb::recursive_mutex is deprecated, use std::recursive_mutex") +recursive_mutex : internal::mutex_copy_deprecated_and_disabled { public: //! Construct unacquired recursive_mutex. recursive_mutex() { @@ -231,4 +242,7 @@ __TBB_DEFINE_PROFILING_SET_NAME(recursive_mutex) } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_recursive_mutex_H_include_area + #endif /* __TBB_recursive_mutex_H */ diff --git a/inst/include/tbb/runtime_loader.h b/inst/include/tbb/runtime_loader.h index df284645..e7906031 100644 --- a/inst/include/tbb/runtime_loader.h +++ b/inst/include/tbb/runtime_loader.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,15 +12,25 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +*/ +#include "internal/_deprecated_header_message_guard.h" +#if !defined(__TBB_show_deprecation_message_runtime_loader_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_runtime_loader_H +#pragma message("TBB Warning: tbb/runtime_loader.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif - -*/ +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif #ifndef __TBB_runtime_loader_H #define __TBB_runtime_loader_H +#define __TBB_runtime_loader_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #if ! TBB_PREVIEW_RUNTIME_LOADER #error Set TBB_PREVIEW_RUNTIME_LOADER to include runtime_loader.h #endif @@ -82,7 +92,7 @@ There are some implications: */ -class runtime_loader : tbb::internal::no_copy { +class __TBB_DEPRECATED_VERBOSE runtime_loader : tbb::internal::no_copy { public: @@ -176,5 +186,8 @@ using interface6::runtime_loader; } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_runtime_loader_H_include_area + #endif /* __TBB_runtime_loader_H */ diff --git a/inst/include/tbb/scalable_allocator.h b/inst/include/tbb/scalable_allocator.h index c2a81493..f1fc98ed 100644 --- a/inst/include/tbb/scalable_allocator.h +++ b/inst/include/tbb/scalable_allocator.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_scalable_allocator_H @@ -95,9 +91,12 @@ typedef enum { TBBMALLOC_USE_HUGE_PAGES, /* value turns using huge pages on and off */ /* deprecated, kept for backward compatibility only */ USE_HUGE_PAGES = TBBMALLOC_USE_HUGE_PAGES, - /* try to limit memory consumption value Bytes, clean internal buffers + /* try to limit memory consumption value (Bytes), clean internal buffers if limit is exceeded, but not prevents from requesting memory from OS */ - TBBMALLOC_SET_SOFT_HEAP_LIMIT + TBBMALLOC_SET_SOFT_HEAP_LIMIT, + /* Lower bound for the size (Bytes), that is interpreted as huge + * and not released during regular cleanup operations. */ + TBBMALLOC_SET_HUGE_SIZE_THRESHOLD } AllocationModeParam; /** Set TBB allocator-specific allocation modes. @@ -193,7 +192,9 @@ void *pool_aligned_realloc(MemoryPool* mPool, void *ptr, size_t size, size_t ali bool pool_reset(MemoryPool* memPool); bool pool_free(MemoryPool *memPool, void *object); MemoryPool *pool_identify(void *object); -} +size_t pool_msize(MemoryPool *memPool, void *object); + +} // namespace rml #include /* To use new with the placement argument */ @@ -207,7 +208,11 @@ MemoryPool *pool_identify(void *object); #endif #if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC - #include // std::forward +#include // std::forward +#endif + +#if __TBB_CPP17_MEMORY_RESOURCE_PRESENT +#include #endif namespace tbb { @@ -314,6 +319,48 @@ inline bool operator==( const scalable_allocator&, const scalable_allocator inline bool operator!=( const scalable_allocator&, const scalable_allocator& ) {return false;} +#if __TBB_CPP17_MEMORY_RESOURCE_PRESENT + +namespace internal { + +//! C++17 memory resource implementation for scalable allocator +//! 
ISO C++ Section 23.12.2 +class scalable_resource_impl : public std::pmr::memory_resource { +private: + void* do_allocate(size_t bytes, size_t alignment) override { + void* ptr = scalable_aligned_malloc( bytes, alignment ); + if (!ptr) { + throw_exception(std::bad_alloc()); + } + return ptr; + } + + void do_deallocate(void* ptr, size_t /*bytes*/, size_t /*alignment*/) override { + scalable_free(ptr); + } + + //! Memory allocated by one instance of scalable_resource_impl could be deallocated by any + //! other instance of this class + bool do_is_equal(const std::pmr::memory_resource& other) const noexcept override { + return this == &other || +#if __TBB_USE_OPTIONAL_RTTI + dynamic_cast(&other) != NULL; +#else + false; +#endif + } +}; + +} // namespace internal + +//! Global scalable allocator memory resource provider +inline std::pmr::memory_resource* scalable_memory_resource() noexcept { + static tbb::internal::scalable_resource_impl scalable_res; + return &scalable_res; +} + +#endif /* __TBB_CPP17_MEMORY_RESOURCE_PRESENT */ + } // namespace tbb #if _MSC_VER diff --git a/inst/include/tbb/spin_mutex.h b/inst/include/tbb/spin_mutex.h index 99ef15c2..56348c9d 100644 --- a/inst/include/tbb/spin_mutex.h +++ b/inst/include/tbb/spin_mutex.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_spin_mutex_H #define __TBB_spin_mutex_H +#define __TBB_spin_mutex_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include #include #include "aligned_space.h" @@ -209,4 +208,7 @@ __TBB_DEFINE_PROFILING_SET_NAME(speculative_spin_mutex) } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_spin_mutex_H_include_area + #endif /* __TBB_spin_mutex_H */ diff --git a/inst/include/tbb/spin_rw_mutex.h b/inst/include/tbb/spin_rw_mutex.h index b20f4ebd..57a4ce2f 100644 --- a/inst/include/tbb/spin_rw_mutex.h +++ b/inst/include/tbb/spin_rw_mutex.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_spin_rw_mutex_H @@ -90,9 +86,6 @@ class spin_rw_mutex_v3 : internal::mutex_copy_deprecated_and_disabled { class scoped_lock : internal::no_copy { #if __TBB_TSX_AVAILABLE friend class tbb::interface8::internal::x86_rtm_rw_mutex; - // helper methods for x86_rtm_rw_mutex - spin_rw_mutex *internal_get_mutex() const { return mutex; } - void internal_set_mutex(spin_rw_mutex* m) { mutex = m; } #endif public: //! Construct lock that has not acquired a mutex. @@ -121,15 +114,15 @@ class spin_rw_mutex_v3 : internal::mutex_copy_deprecated_and_disabled { //! Upgrade reader to become a writer. 
/** Returns whether the upgrade happened without releasing and re-acquiring the lock */ bool upgrade_to_writer() { - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( !is_writer, "not a reader" ); + __TBB_ASSERT( mutex, "mutex is not acquired" ); + if (is_writer) return true; // Already a writer is_writer = true; return mutex->internal_upgrade(); } //! Release lock. void release() { - __TBB_ASSERT( mutex, "lock is not acquired" ); + __TBB_ASSERT( mutex, "mutex is not acquired" ); spin_rw_mutex *m = mutex; mutex = NULL; #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT @@ -143,8 +136,8 @@ class spin_rw_mutex_v3 : internal::mutex_copy_deprecated_and_disabled { //! Downgrade writer to become a reader. bool downgrade_to_reader() { - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( is_writer, "not a writer" ); + __TBB_ASSERT( mutex, "mutex is not acquired" ); + if (!is_writer) return true; // Already a reader #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT mutex->internal_downgrade(); #else diff --git a/inst/include/tbb/task.h b/inst/include/tbb/task.h index 246684ab..085f30e8 100644 --- a/inst/include/tbb/task.h +++ b/inst/include/tbb/task.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_task_H #define __TBB_task_H +#define __TBB_task_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "tbb_stddef.h" #include "tbb_machine.h" #include "tbb_profiling.h" @@ -49,7 +48,9 @@ namespace internal { //< @cond INTERNAL task* self; task& parent; public: - explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {} + explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) { + suppress_unused_warning( self ); + } task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; void __TBB_EXPORTED_METHOD free( task& ) const; }; @@ -118,6 +119,11 @@ namespace internal { /** Should always be non-negative. A signed type is used so that underflow can be detected. */ typedef intptr_t reference_count; +#if __TBB_PREVIEW_RESUMABLE_TASKS + //! The flag to indicate that the wait task has been abandoned. + static const reference_count abandon_flag = reference_count(1) << (sizeof(reference_count)*CHAR_BIT - 2); +#endif + //! An id as used for specifying affinity. typedef unsigned short affinity_id; @@ -162,18 +168,25 @@ namespace internal { void __TBB_EXPORTED_METHOD free( task& ) const; }; +#if __TBB_PREVIEW_CRITICAL_TASKS + // TODO: move to class methods when critical task API becomes public + void make_critical( task& t ); + bool is_critical( task& t ); +#endif + //! Memory prefix to a task object. /** This class is internal to the library. Do not reference it directly, except within the library itself. - Fields are ordered in way that preserves backwards compatibility and yields - good packing on typical 32-bit and 64-bit platforms. New fields should be - added at the beginning for backward compatibility with accesses to the task - prefix inlined into application code. + Fields are ordered in way that preserves backwards compatibility and yields good packing on + typical 32-bit and 64-bit platforms. 
New fields should be added at the beginning for + backward compatibility with accesses to the task prefix inlined into application code. To + prevent ODR violation, the class shall have the same layout in all application translation + units. If some fields are conditional (e.g. enabled by preview macros) and might get + skipped, use reserved fields to adjust the layout. - In case task prefix size exceeds 32 or 64 bytes on IA32 and Intel64 - architectures correspondingly, consider dynamic setting of task_alignment - and task_prefix_reservation_size based on the maximal operand size supported - by the current CPU. + In case task prefix size exceeds 32 or 64 bytes on IA32 and Intel64 architectures + correspondingly, consider dynamic setting of task_alignment and task_prefix_reservation_size + based on the maximal operand size supported by the current CPU. @ingroup task_scheduling */ class task_prefix { @@ -186,10 +199,16 @@ namespace internal { friend class internal::allocate_child_proxy; friend class internal::allocate_continuation_proxy; friend class internal::allocate_additional_child_of_proxy; +#if __TBB_PREVIEW_CRITICAL_TASKS + friend void make_critical( task& ); + friend bool is_critical( task& ); +#endif #if __TBB_TASK_ISOLATION //! The tag used for task isolation. isolation_tag isolation; +#else + intptr_t reserved_space_for_task_isolation_tag; #endif /* __TBB_TASK_ISOLATION */ #if __TBB_TASK_GROUP_CONTEXT @@ -208,7 +227,7 @@ namespace internal { thread-specific pools. */ scheduler* origin; -#if __TBB_TASK_PRIORITY +#if __TBB_TASK_PRIORITY || __TBB_PREVIEW_RESUMABLE_TASKS union { #endif /* __TBB_TASK_PRIORITY */ //! Obsolete. The scheduler that owns the task. @@ -220,8 +239,15 @@ namespace internal { //! Pointer to the next offloaded lower priority task. /** Used to maintain a list of offloaded tasks inside the scheduler. **/ task* next_offloaded; +#endif + +#if __TBB_PREVIEW_RESUMABLE_TASKS + //! 
Pointer to the abandoned scheduler where the current task is waited for. + scheduler* abandoned_scheduler; +#endif +#if __TBB_TASK_PRIORITY || __TBB_PREVIEW_RESUMABLE_TASKS }; -#endif /* __TBB_TASK_PRIORITY */ +#endif /* __TBB_TASK_PRIORITY || __TBB_PREVIEW_RESUMABLE_TASKS */ //! The task whose reference count includes me. /** In the "blocking style" of programming, this field points to the parent task. @@ -271,6 +297,10 @@ namespace internal { #if __TBB_TASK_PRIORITY namespace internal { static const int priority_stride_v4 = INT_MAX / 4; +#if __TBB_PREVIEW_CRITICAL_TASKS + // TODO: move into priority_t enum when critical tasks become public feature + static const int priority_critical = priority_stride_v4 * 3 + priority_stride_v4 / 3 * 2; +#endif } enum priority_t { @@ -291,6 +321,7 @@ enum priority_t { class task_scheduler_init; namespace interface7 { class task_arena; } +using interface7::task_arena; //! Used to form groups of tasks /** @ingroup task_scheduling @@ -317,7 +348,7 @@ class task_group_context : internal::no_copy { private: friend class internal::generic_scheduler; friend class task_scheduler_init; - friend class interface7::task_arena; + friend class task_arena; #if TBB_USE_CAPTURED_EXCEPTION typedef tbb_exception exception_container_type; @@ -417,12 +448,16 @@ class task_group_context : internal::no_copy { intptr_t my_priority; #endif /* __TBB_TASK_PRIORITY */ + //! Description of algorithm for scheduler based instrumentation. + internal::string_index my_name; + //! 
Trailing padding protecting accesses to frequently used members from false sharing /** \sa _leading_padding **/ char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t) - 2 * sizeof(void*) #if __TBB_TASK_PRIORITY - - sizeof(intptr_t) + - sizeof(intptr_t) #endif /* __TBB_TASK_PRIORITY */ + - sizeof(internal::string_index) ]; public: @@ -458,7 +493,17 @@ class task_group_context : internal::no_copy { task_group_context ( kind_type relation_with_parent = bound, uintptr_t t = default_traits ) : my_kind(relation_with_parent) - , my_version_and_traits(2 | t) + , my_version_and_traits(3 | t) + , my_name(internal::CUSTOM_CTX) + { + init(); + } + + // Custom constructor for instrumentation of tbb algorithm + task_group_context ( internal::string_index name ) + : my_kind(bound) + , my_version_and_traits(3 | default_traits) + , my_name(name) { init(); } @@ -512,10 +557,10 @@ class task_group_context : internal::no_copy { #if __TBB_TASK_PRIORITY //! Changes priority of the task group - void set_priority ( priority_t ); + __TBB_DEPRECATED void set_priority ( priority_t ); //! Retrieves current priority of the current task group - priority_t priority () const; + __TBB_DEPRECATED priority_t priority () const; #endif /* __TBB_TASK_PRIORITY */ //! Returns the context's trait @@ -592,6 +637,10 @@ class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base { #if __TBB_RECYCLE_TO_ENQUEUE //! task to be scheduled for starvation-resistant execution ,to_enqueue +#endif +#if __TBB_PREVIEW_RESUMABLE_TASKS + //! a special task used to resume a scheduler. + ,to_resume #endif }; @@ -779,12 +828,25 @@ class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base { #if __TBB_TASK_PRIORITY //! Enqueue task for starvation-resistant execution on the specified priority level. 
- static void enqueue( task& t, priority_t p ) { - __TBB_ASSERT( p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value" ); + __TBB_DEPRECATED static void enqueue( task& t, priority_t p ) { +#if __TBB_PREVIEW_CRITICAL_TASKS + __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high + || p == internal::priority_critical, "Invalid priority level value"); +#else + __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value"); +#endif t.prefix().owner->enqueue( t, (void*)p ); } #endif /* __TBB_TASK_PRIORITY */ + //! Enqueue task in task_arena + //! The implementation is in task_arena.h +#if __TBB_TASK_PRIORITY + __TBB_DEPRECATED inline static void enqueue( task& t, task_arena& arena, priority_t p = priority_t(0) ); +#else + inline static void enqueue( task& t, task_arena& arena); +#endif + //! The innermost task being executed or destroyed by the current thread at the moment. static task& __TBB_EXPORTED_FUNC self(); @@ -813,6 +875,24 @@ class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base { return (prefix().extra_state & 0x80)!=0; } + //! True if the task was enqueued + bool is_enqueued_task() const { + // es_task_enqueued = 0x10 + return (prefix().extra_state & 0x10)!=0; + } + +#if __TBB_PREVIEW_RESUMABLE_TASKS + //! Type that defines suspension point + typedef void* suspend_point; + + //! Suspend current task execution + template + static void suspend(F f); + + //! Resume specific suspend point + static void resume(suspend_point tag); +#endif + //------------------------------------------------------------------------ // Debugging //------------------------------------------------------------------------ @@ -823,10 +903,18 @@ class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base { //! The internal reference count. 
int ref_count() const { #if TBB_USE_ASSERT +#if __TBB_PREVIEW_RESUMABLE_TASKS + internal::reference_count ref_count_ = prefix().ref_count & ~internal::abandon_flag; +#else internal::reference_count ref_count_ = prefix().ref_count; +#endif __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error"); #endif +#if __TBB_PREVIEW_RESUMABLE_TASKS + return int(prefix().ref_count & ~internal::abandon_flag); +#else return int(prefix().ref_count); +#endif } //! Obsolete, and only retained for the sake of backward compatibility. Always returns true. @@ -879,10 +967,10 @@ class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base { #if __TBB_TASK_PRIORITY //! Changes priority of the task group this task belongs to. - void set_group_priority ( priority_t p ) { prefix().context->set_priority(p); } + __TBB_DEPRECATED void set_group_priority ( priority_t p ) { prefix().context->set_priority(p); } //! Retrieves current priority of the task group this task belongs to. - priority_t group_priority () const { return prefix().context->priority(); } + __TBB_DEPRECATED priority_t group_priority () const { return prefix().context->priority(); } #endif /* __TBB_TASK_PRIORITY */ @@ -903,8 +991,41 @@ class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base { internal::task_prefix& prefix( internal::version_tag* = NULL ) const { return reinterpret_cast(const_cast(this))[-1]; } +#if __TBB_PREVIEW_CRITICAL_TASKS + friend void internal::make_critical( task& ); + friend bool internal::is_critical( task& ); +#endif }; // class task +#if __TBB_PREVIEW_CRITICAL_TASKS +namespace internal { +inline void make_critical( task& t ) { t.prefix().extra_state |= 0x8; } +inline bool is_critical( task& t ) { return bool((t.prefix().extra_state & 0x8) != 0); } +} // namespace internal +#endif /* __TBB_PREVIEW_CRITICAL_TASKS */ + +#if __TBB_PREVIEW_RESUMABLE_TASKS +namespace internal { + template + static void suspend_callback(void* user_callback, task::suspend_point tag) { + // Copy 
user function to a new stack to avoid a race when the previous scheduler is resumed. + F user_callback_copy = *static_cast(user_callback); + user_callback_copy(tag); + } + void __TBB_EXPORTED_FUNC internal_suspend(void* suspend_callback, void* user_callback); + void __TBB_EXPORTED_FUNC internal_resume(task::suspend_point); + task::suspend_point __TBB_EXPORTED_FUNC internal_current_suspend_point(); +} + +template +inline void task::suspend(F f) { + internal::internal_suspend((void*)internal::suspend_callback, &f); +} +inline void task::resume(suspend_point tag) { + internal::internal_resume(tag); +} +#endif + //! task that does nothing. Useful for synchronization. /** @ingroup task_scheduling */ class empty_task: public task { @@ -918,6 +1039,7 @@ namespace internal { template class function_task : public task { #if __TBB_ALLOW_MUTABLE_FUNCTORS + // TODO: deprecated behavior, remove F my_func; #else const F my_func; @@ -928,6 +1050,9 @@ namespace internal { } public: function_task( const F& f ) : my_func(f) {} +#if __TBB_CPP11_RVALUE_REF_PRESENT + function_task( F&& f ) : my_func( std::move(f) ) {} +#endif }; } // namespace internal //! @endcond @@ -948,7 +1073,7 @@ class task_list: internal::no_copy { //! Destroys the list, but does not destroy the task objects. ~task_list() {} - //! True if list if empty; false otherwise. + //! True if list is empty; false otherwise. bool empty() const {return !first;} //! Push task onto back of list. 
@@ -1047,4 +1172,7 @@ inline void operator delete( void* task, const tbb::internal::allocate_additiona p.free( *static_cast(task) ); } +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_task_H_include_area + #endif /* __TBB_task_H */ diff --git a/inst/include/tbb/task_arena.h b/inst/include/tbb/task_arena.h index f33135b6..46eb4959 100644 --- a/inst/include/tbb/task_arena.h +++ b/inst/include/tbb/task_arena.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,20 +12,24 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_task_arena_H #define __TBB_task_arena_H +#define __TBB_task_arena_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "task.h" #include "tbb_exception.h" +#include "internal/_template_helpers.h" +#if __TBB_NUMA_SUPPORT +#include "info.h" +#endif /*__TBB_NUMA_SUPPORT*/ #if TBB_USE_THREADING_TOOLS #include "atomic.h" // for as_atomic #endif +#include "aligned_space.h" namespace tbb { @@ -55,17 +59,62 @@ class delegate_base : no_assign { virtual ~delegate_base() {} }; -template +// If decltype is available, the helper detects the return type of functor of specified type, +// otherwise it defines the void type. +template +struct return_type_or_void { +#if __TBB_CPP11_DECLTYPE_PRESENT && !__TBB_CPP11_DECLTYPE_OF_FUNCTION_RETURN_TYPE_BROKEN + typedef decltype(declval()()) type; +#else + typedef void type; +#endif +}; + +template class delegated_function : public delegate_base { + F &my_func; + tbb::aligned_space my_return_storage; + // The function should be called only once. 
+ void operator()() const __TBB_override { + new (my_return_storage.begin()) R(my_func()); + } +public: + delegated_function(F& f) : my_func(f) {} + // The function can be called only after operator() and only once. + R consume_result() const { + return tbb::internal::move(*(my_return_storage.begin())); + } + ~delegated_function() { + my_return_storage.begin()->~R(); + } +}; + +template +class delegated_function : public delegate_base { F &my_func; void operator()() const __TBB_override { my_func(); } public: - delegated_function ( F& f ) : my_func(f) {} + delegated_function(F& f) : my_func(f) {} + void consume_result() const {} + + friend class task_arena_base; }; class task_arena_base { +#if __TBB_NUMA_SUPPORT +public: + // TODO: consider version approach to resolve backward compatibility potential issues. + struct constraints { + constraints(numa_node_id id = automatic, int maximal_concurrency = automatic) + : numa_id(id) + , max_concurrency(maximal_concurrency) + {} + numa_node_id numa_id; + int max_concurrency; + }; +#endif /*__TBB_NUMA_SUPPORT*/ protected: //! NULL if not currently initialized. internal::arena* my_arena; @@ -84,11 +133,30 @@ class task_arena_base { //! Special settings intptr_t my_version_and_traits; + bool my_initialized; + +#if __TBB_NUMA_SUPPORT + //! The NUMA node index to which the arena will be attached + numa_node_id my_numa_id; + + // Do not access my_numa_id without the following runtime check. + // Despite my_numa_id is accesible, it does not exist in task_arena_base on user side + // if TBB_PREVIEW_NUMA_SUPPORT macro is not defined by the user. To be sure that + // my_numa_id exists in task_arena_base layout we check the traits. + // TODO: Consider increasing interface version for task_arena_base instead of this runtime check. + numa_node_id numa_id() { + return (my_version_and_traits & numa_support_flag) == numa_support_flag ? 
my_numa_id : automatic; + } +#endif + enum { default_flags = 0 #if __TBB_TASK_GROUP_CONTEXT | (task_group_context::default_traits & task_group_context::exact_exception) // 0 or 1 << 16 , exact_exception_flag = task_group_context::exact_exception // used to specify flag for context directly +#endif +#if __TBB_NUMA_SUPPORT + , numa_support_flag = 1 #endif }; @@ -99,8 +167,30 @@ class task_arena_base { #endif , my_max_concurrency(max_concurrency) , my_master_slots(reserved_for_masters) +#if __TBB_NUMA_SUPPORT + , my_version_and_traits(default_flags | numa_support_flag) +#else , my_version_and_traits(default_flags) +#endif + , my_initialized(false) +#if __TBB_NUMA_SUPPORT + , my_numa_id(automatic) +#endif + {} + +#if __TBB_NUMA_SUPPORT + task_arena_base(const constraints& constraints_, unsigned reserved_for_masters) + : my_arena(0) +#if __TBB_TASK_GROUP_CONTEXT + , my_context(0) +#endif + , my_max_concurrency(constraints_.max_concurrency) + , my_master_slots(reserved_for_masters) + , my_version_and_traits(default_flags | numa_support_flag) + , my_initialized(false) + , my_numa_id(constraints_.numa_id ) {} +#endif /*__TBB_NUMA_SUPPORT*/ void __TBB_EXPORTED_METHOD internal_initialize(); void __TBB_EXPORTED_METHOD internal_terminate(); @@ -118,7 +208,14 @@ class task_arena_base { }; #if __TBB_TASK_ISOLATION -void __TBB_EXPORTED_FUNC isolate_within_arena( delegate_base& d, intptr_t reserved = 0 ); +void __TBB_EXPORTED_FUNC isolate_within_arena( delegate_base& d, intptr_t isolation = 0 ); + +template +R isolate_impl(F& f) { + delegated_function d(f); + isolate_within_arena(d); + return d.consume_result(); +} #endif /* __TBB_TASK_ISOLATION */ } // namespace internal //! 
@endcond @@ -130,8 +227,12 @@ void __TBB_EXPORTED_FUNC isolate_within_arena( delegate_base& d, intptr_t reserv */ class task_arena : public internal::task_arena_base { friend class tbb::internal::task_scheduler_observer_v3; + friend void task::enqueue(task&, task_arena& +#if __TBB_TASK_PRIORITY + , priority_t +#endif + ); friend int tbb::this_task_arena::max_concurrency(); - bool my_initialized; void mark_initialized() { __TBB_ASSERT( my_arena, "task_arena initialization is incomplete" ); #if __TBB_TASK_GROUP_CONTEXT @@ -146,6 +247,31 @@ class task_arena : public internal::task_arena_base { #endif } + template + void enqueue_impl( __TBB_FORWARDING_REF(F) f +#if __TBB_TASK_PRIORITY + , priority_t p = priority_t(0) +#endif + ) { +#if !__TBB_TASK_PRIORITY + intptr_t p = 0; +#endif + initialize(); +#if __TBB_TASK_GROUP_CONTEXT + internal_enqueue(*new(task::allocate_root(*my_context)) internal::function_task< typename internal::strip::type >(internal::forward(f)), p); +#else + internal_enqueue(*new(task::allocate_root()) internal::function_task< typename internal::strip::type >(internal::forward(f)), p); +#endif /* __TBB_TASK_GROUP_CONTEXT */ + } + + template + R execute_impl(F& f) { + initialize(); + internal::delegated_function d(f); + internal_execute(d); + return d.consume_result(); + } + public: //! Creates task_arena with certain concurrency limits /** Sets up settings only, real construction is deferred till the first method invocation @@ -155,14 +281,24 @@ class task_arena : public internal::task_arena_base { **/ task_arena(int max_concurrency_ = automatic, unsigned reserved_for_masters = 1) : task_arena_base(max_concurrency_, reserved_for_masters) - , my_initialized(false) {} +#if __TBB_NUMA_SUPPORT + //! Creates task arena pinned to certain NUMA node + task_arena(const constraints& constraints_, unsigned reserved_for_masters = 1) + : task_arena_base(constraints_, reserved_for_masters) + {} + + //! 
Copies settings from another task_arena + task_arena(const task_arena &s) // copy settings but not the reference or instance + : task_arena_base(constraints(s.my_numa_id, s.my_max_concurrency), s.my_master_slots) + {} +#else //! Copies settings from another task_arena task_arena(const task_arena &s) // copy settings but not the reference or instance : task_arena_base(s.my_max_concurrency, s.my_master_slots) - , my_initialized(false) {} +#endif /*__TBB_NUMA_SUPPORT*/ //! Tag class used to indicate the "attaching" constructor struct attach {}; @@ -170,7 +306,6 @@ class task_arena : public internal::task_arena_base { //! Creates an instance of task_arena attached to the current arena of the thread explicit task_arena( attach ) : task_arena_base(automatic, 1) // use default settings if attach fails - , my_initialized(false) { internal_attach(); if( my_arena ) my_initialized = true; @@ -187,7 +322,7 @@ class task_arena : public internal::task_arena_base { //! Overrides concurrency level and forces initialization of internal representation inline void initialize(int max_concurrency_, unsigned reserved_for_masters = 1) { // TODO: decide if this call must be thread-safe - __TBB_ASSERT( !my_arena, "Impossible to modify settings of an already initialized task_arena"); + __TBB_ASSERT(!my_arena, "Impossible to modify settings of an already initialized task_arena"); if( !my_initialized ) { my_max_concurrency = max_concurrency_; my_master_slots = reserved_for_masters; @@ -195,13 +330,26 @@ class task_arena : public internal::task_arena_base { } } +#if __TBB_NUMA_SUPPORT + inline void initialize(constraints constraints_, unsigned reserved_for_masters = 1) { + // TODO: decide if this call must be thread-safe + __TBB_ASSERT(!my_arena, "Impossible to modify settings of an already initialized task_arena"); + if( !my_initialized ) { + my_numa_id = constraints_.numa_id; + my_max_concurrency = constraints_.max_concurrency; + my_master_slots = reserved_for_masters; + initialize(); + } + 
} +#endif /*__TBB_NUMA_SUPPORT*/ + //! Attaches this instance to the current arena of the thread inline void initialize(attach) { // TODO: decide if this call must be thread-safe - __TBB_ASSERT( !my_arena, "Impossible to modify settings of an already initialized task_arena"); + __TBB_ASSERT(!my_arena, "Impossible to modify settings of an already initialized task_arena"); if( !my_initialized ) { internal_attach(); - if( !my_arena ) internal_initialize(); + if ( !my_arena ) internal_initialize(); mark_initialized(); } } @@ -227,49 +375,62 @@ class task_arena : public internal::task_arena_base { //! Enqueues a task into the arena to process a functor, and immediately returns. //! Does not require the calling thread to join the arena + +#if __TBB_CPP11_RVALUE_REF_PRESENT template - void enqueue( const F& f ) { - initialize(); -#if __TBB_TASK_GROUP_CONTEXT - internal_enqueue( *new( task::allocate_root(*my_context) ) internal::function_task(f), 0 ); + void enqueue( F&& f ) { + enqueue_impl(std::forward(f)); + } #else - internal_enqueue( *new( task::allocate_root() ) internal::function_task(f), 0 ); -#endif + template + void enqueue( const F& f ) { + enqueue_impl(f); } +#endif #if __TBB_TASK_PRIORITY //! Enqueues a task with priority p into the arena to process a functor f, and immediately returns. //! 
Does not require the calling thread to join the arena template - void enqueue( const F& f, priority_t p ) { - __TBB_ASSERT( p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value" ); - initialize(); -#if __TBB_TASK_GROUP_CONTEXT - internal_enqueue( *new( task::allocate_root(*my_context) ) internal::function_task(f), (intptr_t)p ); +#if __TBB_CPP11_RVALUE_REF_PRESENT + __TBB_DEPRECATED void enqueue( F&& f, priority_t p ) { +#if __TBB_PREVIEW_CRITICAL_TASKS + __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high + || p == internal::priority_critical, "Invalid priority level value"); #else - internal_enqueue( *new( task::allocate_root() ) internal::function_task(f), (intptr_t)p ); + __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value"); #endif + enqueue_impl(std::forward(f), p); } +#else + __TBB_DEPRECATED void enqueue( const F& f, priority_t p ) { +#if __TBB_PREVIEW_CRITICAL_TASKS + __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high + || p == internal::priority_critical, "Invalid priority level value"); +#else + __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value"); +#endif + enqueue_impl(f,p); + } +#endif #endif// __TBB_TASK_PRIORITY - //! Joins the arena and executes a functor, then returns + //! Joins the arena and executes a mutable functor, then returns //! If not possible to join, wraps the functor into a task, enqueues it and waits for task completion //! Can decrement the arena demand for workers, causing a worker to leave and free a slot to the calling thread + //! Since C++11, the method returns the value returned by functor (prior to C++11 it returns void). 
template - void execute(F& f) { - initialize(); - internal::delegated_function d(f); - internal_execute( d ); + typename internal::return_type_or_void::type execute(F& f) { + return execute_impl::type>(f); } - //! Joins the arena and executes a functor, then returns + //! Joins the arena and executes a constant functor, then returns //! If not possible to join, wraps the functor into a task, enqueues it and waits for task completion //! Can decrement the arena demand for workers, causing a worker to leave and free a slot to the calling thread + //! Since C++11, the method returns the value returned by functor (prior to C++11 it returns void). template - void execute(const F& f) { - initialize(); - internal::delegated_function d(f); - internal_execute( d ); + typename internal::return_type_or_void::type execute(const F& f) { + return execute_impl::type>(f); } #if __TBB_EXTRA_DEBUG @@ -295,26 +456,30 @@ class task_arena : public internal::task_arena_base { } }; -#if __TBB_TASK_ISOLATION namespace this_task_arena { +#if __TBB_TASK_ISOLATION + //! Executes a mutable functor in isolation within the current task arena. + //! Since C++11, the method returns the value returned by functor (prior to C++11 it returns void). template - void isolate( const F& f ) { - internal::delegated_function d(f); - internal::isolate_within_arena( d ); + typename internal::return_type_or_void::type isolate(F& f) { + return internal::isolate_impl::type>(f); } -} -#endif /* __TBB_TASK_ISOLATION */ + //! Executes a constant functor in isolation within the current task arena. + //! Since C++11, the method returns the value returned by functor (prior to C++11 it returns void). 
+ template + typename internal::return_type_or_void::type isolate(const F& f) { + return internal::isolate_impl::type>(f); + } +#endif /* __TBB_TASK_ISOLATION */ +} // namespace this_task_arena } // namespace interfaceX using interface7::task_arena; -#if __TBB_TASK_ISOLATION + namespace this_task_arena { using namespace interface7::this_task_arena; -} -#endif /* __TBB_TASK_ISOLATION */ -namespace this_task_arena { //! Returns the index, aka slot number, of the calling thread in its current arena inline int current_thread_index() { int idx = tbb::task_arena::current_thread_index(); @@ -325,9 +490,22 @@ namespace this_task_arena { inline int max_concurrency() { return tbb::task_arena::internal_max_concurrency(NULL); } - } // namespace this_task_arena +//! Enqueue task in task_arena +#if __TBB_TASK_PRIORITY +void task::enqueue( task& t, task_arena& arena, priority_t p ) { +#else +void task::enqueue( task& t, task_arena& arena ) { + intptr_t p = 0; +#endif + arena.initialize(); + //! Note: the context of the task may differ from the context instantiated by task_arena + arena.internal_enqueue(t, p); +} } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_task_arena_H_include_area + #endif /* __TBB_task_arena_H */ diff --git a/inst/include/tbb/task_group.h b/inst/include/tbb/task_group.h index bf6922b9..f090d3a2 100644 --- a/inst/include/tbb/task_group.h +++ b/inst/include/tbb/task_group.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,17 +12,20 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_task_group_H #define __TBB_task_group_H +#define __TBB_task_group_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "task.h" #include "tbb_exception.h" +#include "internal/_template_helpers.h" +#if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION +#include "task_arena.h" +#endif #if __TBB_TASK_GROUP_CONTEXT @@ -34,12 +37,18 @@ namespace internal { class task_group; class structured_task_group; +#if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION +class isolated_task_group; +#endif template class task_handle : internal::no_assign { template friend class internal::task_handle_task; friend class task_group; friend class structured_task_group; +#if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION + friend class isolated_task_group; +#endif static const intptr_t scheduled = 0x1; @@ -54,6 +63,9 @@ class task_handle : internal::no_assign { } public: task_handle( const F& f ) : my_func(f), my_state(0) {} +#if __TBB_CPP11_RVALUE_REF_PRESENT + task_handle( F&& f ) : my_func( std::move(f)), my_state(0) {} +#endif void operator() () const { my_func(); } }; @@ -78,26 +90,38 @@ class task_handle_task : public task { }; class task_group_base : internal::no_copy { + class ref_count_guard : internal::no_copy { + task& my_task; + public: + ref_count_guard(task& t) : my_task(t) { + my_task.increment_ref_count(); + } + ~ref_count_guard() { + my_task.decrement_ref_count(); + } + }; protected: empty_task* my_root; task_group_context my_context; - task& owner () { return *my_root; } - template task_group_status internal_run_and_wait( F& f ) { __TBB_TRY { - if ( !my_context.is_group_execution_cancelled() ) + if ( !my_context.is_group_execution_cancelled() ) { + // We need to increase the reference count of the root task to notify waiters that + // this task group has some work in progress. + ref_count_guard guard(*my_root); f(); + } } __TBB_CATCH( ... 
) { my_context.register_pending_exception(); } return wait(); } - template - void internal_run( F& f ) { - owner().spawn( *new( owner().allocate_additional_child_of(*my_root) ) Task(f) ); + template + task* prepare_task( __TBB_FORWARDING_REF(F) f ) { + return new( task::allocate_additional_child_of(*my_root) ) Task( internal::forward(f) ); } public: @@ -110,7 +134,11 @@ class task_group_base : internal::no_copy { ~task_group_base() __TBB_NOEXCEPT(false) { if( my_root->ref_count() > 1 ) { +#if __TBB_CPP17_UNCAUGHT_EXCEPTIONS_PRESENT + bool stack_unwinding_in_progress = std::uncaught_exceptions() > 0; +#else bool stack_unwinding_in_progress = std::uncaught_exception(); +#endif // Always attempt to do proper cleanup to avoid inevitable memory corruption // in case of missing wait (for the sake of better testability & debuggability) if ( !is_canceling() ) @@ -132,7 +160,7 @@ class task_group_base : internal::no_copy { template void run( task_handle& h ) { - internal_run< task_handle, internal::task_handle_task >( h ); + task::spawn( *prepare_task< internal::task_handle_task >(h) ); } task_group_status wait() { @@ -168,31 +196,40 @@ class task_group : public internal::task_group_base { #if __SUNPRO_CC template void run( task_handle& h ) { - internal_run< task_handle, internal::task_handle_task >( h ); + internal_run< internal::task_handle_task >( h ); } #else using task_group_base::run; #endif +#if __TBB_CPP11_RVALUE_REF_PRESENT template - void run( const F& f ) { - internal_run< const F, internal::function_task >( f ); + void run( F&& f ) { + task::spawn( *prepare_task< internal::function_task< typename internal::strip::type > >(std::forward(f)) ); } +#else + template + void run(const F& f) { + task::spawn( *prepare_task< internal::function_task >(f) ); + } +#endif template task_group_status run_and_wait( const F& f ) { return internal_run_and_wait( f ); } + // TODO: add task_handle rvalues support template task_group_status run_and_wait( task_handle& h ) { - 
h.mark_scheduled(); - return internal_run_and_wait< task_handle >( h ); + h.mark_scheduled(); + return internal_run_and_wait< task_handle >( h ); } }; // class task_group -class structured_task_group : public internal::task_group_base { +class __TBB_DEPRECATED structured_task_group : public internal::task_group_base { public: + // TODO: add task_handle rvalues support template task_group_status run_and_wait ( task_handle& h ) { h.mark_scheduled(); @@ -206,18 +243,124 @@ class structured_task_group : public internal::task_group_base { } }; // class structured_task_group +#if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION +namespace internal { + using interface7::internal::delegate_base; + using interface7::internal::isolate_within_arena; + + class spawn_delegate : public delegate_base { + task* task_to_spawn; + void operator()() const __TBB_override { + task::spawn(*task_to_spawn); + } + public: + spawn_delegate(task* a_task) : task_to_spawn(a_task) {} + }; + + class wait_delegate : public delegate_base { + void operator()() const __TBB_override { + status = tg.wait(); + } + protected: + task_group& tg; + task_group_status& status; + public: + wait_delegate(task_group& a_group, task_group_status& tgs) + : tg(a_group), status(tgs) {} + }; + + template + class run_wait_delegate : public wait_delegate { + F& func; + void operator()() const __TBB_override { + status = tg.run_and_wait( func ); + } + public: + run_wait_delegate(task_group& a_group, F& a_func, task_group_status& tgs) + : wait_delegate(a_group, tgs), func(a_func) {} + }; +} // namespace internal + +class isolated_task_group : public task_group { + intptr_t this_isolation() { + return reinterpret_cast(this); + } +public: + isolated_task_group () : task_group() {} + +#if __TBB_CPP11_RVALUE_REF_PRESENT + template + void run( F&& f ) { + internal::spawn_delegate sd( + prepare_task< internal::function_task< typename internal::strip::type > >(std::forward(f)) + ); + internal::isolate_within_arena( sd, 
this_isolation() ); + } +#else + template + void run(const F& f) { + internal::spawn_delegate sd( prepare_task< internal::function_task >(f) ); + internal::isolate_within_arena( sd, this_isolation() ); + } +#endif + + template + task_group_status run_and_wait( const F& f ) { + task_group_status result = not_complete; + internal::run_wait_delegate< const F > rwd( *this, f, result ); + internal::isolate_within_arena( rwd, this_isolation() ); + __TBB_ASSERT( result!=not_complete, "premature exit from wait?" ); + return result; + } + + // TODO: add task_handle rvalues support + template + void run( task_handle& h ) { + internal::spawn_delegate sd( prepare_task< internal::task_handle_task >(h) ); + internal::isolate_within_arena( sd, this_isolation() ); + } + + template + task_group_status run_and_wait ( task_handle& h ) { + task_group_status result = not_complete; + internal::run_wait_delegate< task_handle > rwd( *this, h, result ); + internal::isolate_within_arena( rwd, this_isolation() ); + __TBB_ASSERT( result!=not_complete, "premature exit from wait?" ); + return result; + } + + task_group_status wait() { + task_group_status result = not_complete; + internal::wait_delegate wd( *this, result ); + internal::isolate_within_arena( wd, this_isolation() ); + __TBB_ASSERT( result!=not_complete, "premature exit from wait?" 
); + return result; + } +}; // class isolated_task_group +#endif // TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION + inline bool is_current_task_group_canceling() { return task::self().is_cancelled(); } +#if __TBB_CPP11_RVALUE_REF_PRESENT +template +task_handle< typename internal::strip::type > make_task( F&& f ) { + return task_handle< typename internal::strip::type >( std::forward(f) ); +} +#else template task_handle make_task( const F& f ) { return task_handle( f ); } +#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ } // namespace tbb #endif /* __TBB_TASK_GROUP_CONTEXT */ +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_task_group_H_include_area + #endif /* __TBB_task_group_H */ diff --git a/inst/include/tbb/task_scheduler_init.h b/inst/include/tbb/task_scheduler_init.h index 928e7a4e..04837049 100644 --- a/inst/include/tbb/task_scheduler_init.h +++ b/inst/include/tbb/task_scheduler_init.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,25 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +*/ +#include "internal/_deprecated_header_message_guard.h" +#if !defined(__TBB_show_deprecation_message_task_scheduler_init_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_task_scheduler_init_H +#pragma message("TBB Warning: tbb/task_scheduler_init.h is deprecated. 
For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif - -*/ +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif #ifndef __TBB_task_scheduler_init_H #define __TBB_task_scheduler_init_H +#define __TBB_task_scheduler_init_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "tbb_stddef.h" #include "limits.h" #if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE @@ -53,7 +63,7 @@ namespace internal { and will persist until this thread exits. Default concurrency level is defined as described in task_scheduler_init::initialize(). @ingroup task_scheduling */ -class task_scheduler_init: internal::no_copy { +class __TBB_DEPRECATED_VERBOSE task_scheduler_init: internal::no_copy { enum ExceptionPropagationMode { propagation_mode_exact = 1u, propagation_mode_captured = 2u, @@ -158,4 +168,7 @@ class task_scheduler_init: internal::no_copy { } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_task_scheduler_init_H_include_area + #endif /* __TBB_task_scheduler_init_H */ diff --git a/inst/include/tbb/task_scheduler_observer.h b/inst/include/tbb/task_scheduler_observer.h index 5586ad4f..1bb93636 100644 --- a/inst/include/tbb/task_scheduler_observer.h +++ b/inst/include/tbb/task_scheduler_observer.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,17 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_task_scheduler_observer_H #define __TBB_task_scheduler_observer_H +#define __TBB_task_scheduler_observer_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "atomic.h" -#if __TBB_ARENA_OBSERVER || __TBB_SLEEP_PERMISSION +#if __TBB_ARENA_OBSERVER #include "task_arena.h" #endif @@ -88,7 +87,7 @@ class task_scheduler_observer_v3 { } // namespace internal -#if __TBB_ARENA_OBSERVER || __TBB_SLEEP_PERMISSION +#if __TBB_ARENA_OBSERVER namespace interface6 { class task_scheduler_observer : public internal::task_scheduler_observer_v3 { friend class internal::task_scheduler_observer_v3; @@ -149,26 +148,19 @@ class task_scheduler_observer : public internal::task_scheduler_observer_v3 { } internal::task_scheduler_observer_v3::observe(state); } - -#if __TBB_SLEEP_PERMISSION - //! Return commands for may_sleep() - enum { keep_awake = false, allow_sleep = true }; - - //! The callback can be invoked by a worker thread before it goes to sleep. - /** If it returns false ('keep_awake'), the thread will keep spinning and looking for work. - It will not be called for master threads. 
**/ - virtual bool may_sleep() { return allow_sleep; } -#endif /*__TBB_SLEEP_PERMISSION*/ }; } //namespace interface6 using interface6::task_scheduler_observer; -#else /*__TBB_ARENA_OBSERVER || __TBB_SLEEP_PERMISSION*/ +#else /*__TBB_ARENA_OBSERVER*/ typedef tbb::internal::task_scheduler_observer_v3 task_scheduler_observer; -#endif /*__TBB_ARENA_OBSERVER || __TBB_SLEEP_PERMISSION*/ +#endif /*__TBB_ARENA_OBSERVER*/ } // namespace tbb #endif /* __TBB_SCHEDULER_OBSERVER */ +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_task_scheduler_observer_H_include_area + #endif /* __TBB_task_scheduler_observer_H */ diff --git a/inst/include/tbb/tbb.h b/inst/include/tbb/tbb.h index 5e385ea2..f06ec5a3 100644 --- a/inst/include/tbb/tbb.h +++ b/inst/include/tbb/tbb.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,18 +12,21 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_tbb_H #define __TBB_tbb_H +#if (!defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) || (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0)) && !defined(__TBB_INTERNAL_INCLUDES_DEPRECATION_MESSAGE) +#pragma message("TBB Warning: tbb.h contains deprecated functionality. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif + +#define __TBB_tbb_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + /** This header bulk-includes declarations or definitions of all the functionality - provided by TBB (save for malloc dependent headers). + provided by TBB (save for tbbmalloc and 3rd party dependent headers). If you use only a few TBB constructs, consider including specific headers only. 
Any header listed below can be included independently of others. @@ -37,6 +40,9 @@ #include "blocked_range.h" #include "blocked_range2d.h" #include "blocked_range3d.h" +#if TBB_PREVIEW_BLOCKED_RANGE_ND +#include "blocked_rangeNd.h" +#endif #include "cache_aligned_allocator.h" #include "combinable.h" #include "concurrent_hash_map.h" @@ -47,13 +53,16 @@ #include "concurrent_queue.h" #include "concurrent_unordered_map.h" #include "concurrent_unordered_set.h" +#if TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS +#include "concurrent_map.h" +#include "concurrent_set.h" +#endif #include "concurrent_vector.h" #include "critical_section.h" #include "enumerable_thread_specific.h" #include "flow_graph.h" -#if TBB_PREVIEW_GLOBAL_CONTROL #include "global_control.h" -#endif +#include "iterators.h" #include "mutex.h" #include "null_mutex.h" #include "null_rw_mutex.h" @@ -82,4 +91,7 @@ #include "tbb_thread.h" #include "tick_count.h" +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_tbb_H_include_area + #endif /* __TBB_tbb_H */ diff --git a/inst/include/tbb/tbb_allocator.h b/inst/include/tbb/tbb_allocator.h index a3f1ef0c..ac0d1a6b 100644 --- a/inst/include/tbb/tbb_allocator.h +++ b/inst/include/tbb/tbb_allocator.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_tbb_allocator_H @@ -26,19 +22,8 @@ #if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC #include // std::forward #endif - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - #include -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - namespace tbb { //! @cond INTERNAL @@ -185,7 +170,7 @@ class zero_allocator : public Allocator pointer allocate(const size_type n, const void *hint = 0 ) { pointer ptr = base_allocator_type::allocate( n, hint ); - std::memset( (void*) ptr, 0, n * sizeof(value_type) ); + std::memset( static_cast(ptr), 0, n * sizeof(value_type) ); return ptr; } }; diff --git a/inst/include/tbb/tbb_config.h b/inst/include/tbb/tbb_config.h index bdc8dcb9..f469b4b6 100644 --- a/inst/include/tbb/tbb_config.h +++ b/inst/include/tbb/tbb_config.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_tbb_config_H @@ -34,29 +30,32 @@ */ #define __TBB_TODO 0 -/*Check which standard library we use on macOS*.*/ -/*__TBB_SYMBOL is defined only while processing exported symbols list where C++ is not allowed.*/ -#if !defined(__TBB_SYMBOL) && (__APPLE__ || __ANDROID__) +/* Check which standard library we use. */ +/* __TBB_SYMBOL is defined only while processing exported symbols list where C++ is not allowed. 
*/ +#if !defined(__TBB_SYMBOL) && !__TBB_CONFIG_PREPROC_ONLY #include #endif -// note that when ICC or Clang is in use, __TBB_GCC_VERSION might not fully match +// Note that when ICC or Clang is in use, __TBB_GCC_VERSION might not fully match // the actual GCC version on the system. #define __TBB_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) -// Since GNU libstdc++ does not have a convenient macro for its version, -// we rely on the version of GCC or the user-specified macro below. -// The format of TBB_USE_GLIBCXX_VERSION should match the __TBB_GCC_VERSION above, -// e.g. it should be set to 40902 for libstdc++ coming with GCC 4.9.2. -#ifdef TBB_USE_GLIBCXX_VERSION +// Prior to GCC 7, GNU libstdc++ did not have a convenient version macro. +// Therefore we use different ways to detect its version. +#if defined(TBB_USE_GLIBCXX_VERSION) && !defined(_GLIBCXX_RELEASE) +// The version is explicitly specified in our public TBB_USE_GLIBCXX_VERSION macro. +// Its format should match the __TBB_GCC_VERSION above, e.g. 70301 for libstdc++ coming with GCC 7.3.1. #define __TBB_GLIBCXX_VERSION TBB_USE_GLIBCXX_VERSION -#else +#elif _GLIBCXX_RELEASE && _GLIBCXX_RELEASE != __GNUC__ +// Reported versions of GCC and libstdc++ do not match; trust the latter +#define __TBB_GLIBCXX_VERSION (_GLIBCXX_RELEASE*10000) +#elif __GLIBCPP__ || __GLIBCXX__ +// The version macro is not defined or matches the GCC version; use __TBB_GCC_VERSION #define __TBB_GLIBCXX_VERSION __TBB_GCC_VERSION -//TODO: analyze __GLIBCXX__ instead of __TBB_GCC_VERSION ? 
#endif #if __clang__ - /**according to clang documentation version can be vendor specific **/ + // according to clang documentation, version can be vendor specific #define __TBB_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) #endif @@ -65,6 +64,16 @@ #define __TBB_IOS 1 #endif +#if __APPLE__ + #if __INTEL_COMPILER && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ > 1099 \ + && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101000 + // ICC does not correctly set the macro if -mmacosx-min-version is not specified + #define __TBB_MACOS_TARGET_VERSION (100000 + 10*(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ - 1000)) + #else + #define __TBB_MACOS_TARGET_VERSION __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ + #endif +#endif + /** Preprocessor symbols to determine HW architecture **/ #if _WIN32||_WIN64 @@ -108,6 +117,14 @@ #define __INTEL_COMPILER 1210 #endif +#if __clang__ && !__INTEL_COMPILER +#define __TBB_USE_OPTIONAL_RTTI __has_feature(cxx_rtti) +#elif defined(_CPPRTTI) +#define __TBB_USE_OPTIONAL_RTTI 1 +#else +#define __TBB_USE_OPTIONAL_RTTI (__GXX_RTTI || __RTTI || __INTEL_RTTI__) +#endif + #if __TBB_GCC_VERSION >= 40400 && !defined(__INTEL_COMPILER) /** warning suppression pragmas available in GCC since 4.4 **/ #define __TBB_GCC_WARNING_SUPPRESSION_PRESENT 1 @@ -124,7 +141,17 @@ support added. */ -/** C++11 mode detection macros for Intel(R) C++ compiler (enabled by -std=c++XY option): +/** + __TBB_CPP11_PRESENT macro indicates that the compiler supports vast majority of C++11 features. + Depending on the compiler, some features might still be unsupported or work incorrectly. + Use it when enabling C++11 features individually is not practical, and be aware that + some "good enough" compilers might be excluded. 
**/ +#define __TBB_CPP11_PRESENT (__cplusplus >= 201103L || _MSC_VER >= 1900) + +#define __TBB_CPP17_FALLTHROUGH_PRESENT (__cplusplus >= 201703L) +#define __TBB_FALLTHROUGH_PRESENT (__TBB_GCC_VERSION >= 70000 && !__INTEL_COMPILER) + +/** C++11 mode detection macros for Intel(R) C++ Compiler (enabled by -std=c++XY option): __INTEL_CXX11_MODE__ for version >=13.0 (not available for ICC 15.0 if -std=c++14 is used), __STDC_HOSTED__ for version >=12.0 (useful only on Windows), __GXX_EXPERIMENTAL_CXX0X__ for version >=12.0 on Linux and macOS. **/ @@ -133,16 +160,9 @@ #define __INTEL_CXX11_MODE__ (__GXX_EXPERIMENTAL_CXX0X__ || (_MSC_VER && __STDC_HOSTED__)) #endif -// Intel(R) C++ Compiler offloading API to the Intel(R) Graphics Technology presence macro -// TODO: add support for ICC 15.00 _GFX_enqueue API and then decrease Intel compiler supported version -// TODO: add linux support and restict it with (__linux__ && __TBB_x86_64 && !__ANDROID__) macro -#if __INTEL_COMPILER >= 1600 && _WIN32 -#define __TBB_GFX_PRESENT 1 -#endif - #if __INTEL_COMPILER && (!_MSC_VER || __INTEL_CXX11_MODE__) // On Windows, C++11 features supported by Visual Studio 2010 and higher are enabled by default, - // so in absence of /Qstd= use MSVC branch for __TBB_CPP11_* detection. + // so in absence of /Qstd= use MSVC branch for feature detection. // On other platforms, no -std= means C++03. 
#define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (__INTEL_CXX11_MODE__ && __VARIADIC_TEMPLATES) @@ -169,14 +189,7 @@ #endif #define __TBB_STATIC_ASSERT_PRESENT (__INTEL_CXX11_MODE__ || _MSC_VER >= 1600) #define __TBB_CPP11_TUPLE_PRESENT (_MSC_VER >= 1600 || __GXX_EXPERIMENTAL_CXX0X__ && (__TBB_GLIBCXX_VERSION >= 40300 || _LIBCPP_VERSION)) - #if (__clang__ && __INTEL_COMPILER > 1400) - /* Older versions of Intel Compiler do not have __has_include */ - #if (__has_feature(__cxx_generalized_initializers__) && __has_include()) - #define __TBB_INITIALIZER_LISTS_PRESENT 1 - #endif - #else - #define __TBB_INITIALIZER_LISTS_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1400 && (_MSC_VER >= 1800 || __TBB_GLIBCXX_VERSION >= 40400 || _LIBCPP_VERSION)) - #endif + #define __TBB_INITIALIZER_LISTS_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1400 && (_MSC_VER >= 1800 || __TBB_GLIBCXX_VERSION >= 40400 || _LIBCPP_VERSION)) #define __TBB_CONSTEXPR_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1400) #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1200) /** ICC seems to disable support of noexcept event in c++11 when compiling in compatibility mode for gcc <4.6 **/ @@ -189,19 +202,19 @@ #define __TBB_OVERRIDE_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1400) #define __TBB_ALIGNAS_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1500) #define __TBB_CPP11_TEMPLATE_ALIASES_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1210) + #define __TBB_CPP14_INTEGER_SEQUENCE_PRESENT (__cplusplus >= 201402L) + #define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__cplusplus >= 201402L) + #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__INTEL_COMPILER > 1910) // a future version + #define __TBB_CPP17_INVOKE_RESULT_PRESENT (__cplusplus >= 201703L) #elif __clang__ /** TODO: these options need to be rechecked **/ -/** on macOS the only way to get C++11 is to use clang. For library features (e.g. 
exception_ptr) libc++ is also - * required. So there is no need to check GCC version for clang**/ #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __has_feature(__cxx_variadic_templates__) #define __TBB_CPP11_RVALUE_REF_PRESENT (__has_feature(__cxx_rvalue_references__) && (_LIBCPP_VERSION || __TBB_GLIBCXX_VERSION >= 40500)) #define __TBB_IMPLICIT_MOVE_PRESENT __has_feature(cxx_implicit_moves) /** TODO: extend exception_ptr related conditions to cover libstdc++ **/ #define __TBB_EXCEPTION_PTR_PRESENT (__cplusplus >= 201103L && (_LIBCPP_VERSION || __TBB_GLIBCXX_VERSION >= 40600)) #define __TBB_STATIC_ASSERT_PRESENT __has_feature(__cxx_static_assert__) - /**Clang (preprocessor) has problems with dealing with expression having __has_include in #ifs - * used inside C++ code. (At least version that comes with OS X 10.8 : Apple LLVM version 4.2 (clang-425.0.28) (based on LLVM 3.2svn)) **/ - #if (__GXX_EXPERIMENTAL_CXX0X__ && __has_include()) + #if (__cplusplus >= 201103L && __has_include()) #define __TBB_CPP11_TUPLE_PRESENT 1 #endif #if (__has_feature(__cxx_generalized_initializers__) && __has_include()) @@ -219,6 +232,10 @@ #define __TBB_OVERRIDE_PRESENT __has_feature(cxx_override_control) #define __TBB_ALIGNAS_PRESENT __has_feature(cxx_alignas) #define __TBB_CPP11_TEMPLATE_ALIASES_PRESENT __has_feature(cxx_alias_templates) + #define __TBB_CPP14_INTEGER_SEQUENCE_PRESENT (__cplusplus >= 201402L) + #define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__has_feature(cxx_variable_templates)) + #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__has_feature(__cpp_deduction_guides)) + #define __TBB_CPP17_INVOKE_RESULT_PRESENT (__has_feature(__cpp_lib_is_invocable)) #elif __GNUC__ #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __GXX_EXPERIMENTAL_CXX0X__ #define __TBB_CPP11_VARIADIC_FIXED_LENGTH_EXP_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40700) @@ -243,8 +260,13 @@ #define __TBB_OVERRIDE_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40700) #define 
__TBB_ALIGNAS_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40800) #define __TBB_CPP11_TEMPLATE_ALIASES_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40700) + #define __TBB_CPP14_INTEGER_SEQUENCE_PRESENT (__cplusplus >= 201402L && __TBB_GCC_VERSION >= 50000) + #define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__cplusplus >= 201402L && __TBB_GCC_VERSION >= 50000) + #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__cpp_deduction_guides >= 201606L) + #define __TBB_CPP17_INVOKE_RESULT_PRESENT (__cplusplus >= 201703L && __TBB_GCC_VERSION >= 70000) #elif _MSC_VER - // These definitions are also used with Intel Compiler in "default" mode; see a comment above. + // These definitions are also used with Intel C++ Compiler in "default" mode (__INTEL_CXX11_MODE__ == 0); + // see a comment in "__INTEL_COMPILER" section above. #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (_MSC_VER >= 1800) // Contains a workaround for ICC 13 @@ -265,47 +287,71 @@ #define __TBB_OVERRIDE_PRESENT (_MSC_VER >= 1700) #define __TBB_ALIGNAS_PRESENT (_MSC_VER >= 1900) #define __TBB_CPP11_TEMPLATE_ALIASES_PRESENT (_MSC_VER >= 1800) + #define __TBB_CPP14_INTEGER_SEQUENCE_PRESENT (_MSC_VER >= 1900) + /* Variable templates are supported in VS2015 Update 2 or later */ + #define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (_MSC_FULL_VER >= 190023918 && (!__INTEL_COMPILER || __INTEL_COMPILER >= 1700)) + #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (_MSVC_LANG >= 201703L && _MSC_VER >= 1914) + #define __TBB_CPP17_INVOKE_RESULT_PRESENT (_MSVC_LANG >= 201703L && _MSC_VER >= 1911) #else - #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT 0 - #define __TBB_CPP11_RVALUE_REF_PRESENT 0 - #define __TBB_IMPLICIT_MOVE_PRESENT 0 - #define __TBB_EXCEPTION_PTR_PRESENT 0 - #define __TBB_STATIC_ASSERT_PRESENT 0 - #define __TBB_CPP11_TUPLE_PRESENT 0 - #define __TBB_INITIALIZER_LISTS_PRESENT 0 - #define __TBB_CONSTEXPR_PRESENT 0 - #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT 0 - #define 
__TBB_NOEXCEPT_PRESENT 0 - #define __TBB_CPP11_STD_BEGIN_END_PRESENT 0 - #define __TBB_CPP11_AUTO_PRESENT 0 - #define __TBB_CPP11_DECLTYPE_PRESENT 0 - #define __TBB_CPP11_LAMBDAS_PRESENT 0 - #define __TBB_CPP11_DEFAULT_FUNC_TEMPLATE_ARGS_PRESENT 0 - #define __TBB_OVERRIDE_PRESENT 0 - #define __TBB_ALIGNAS_PRESENT 0 - #define __TBB_CPP11_TEMPLATE_ALIASES_PRESENT 0 + #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP11_RVALUE_REF_PRESENT __TBB_CPP11_PRESENT + #define __TBB_IMPLICIT_MOVE_PRESENT __TBB_CPP11_PRESENT + #define __TBB_EXCEPTION_PTR_PRESENT __TBB_CPP11_PRESENT + #define __TBB_STATIC_ASSERT_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP11_TUPLE_PRESENT __TBB_CPP11_PRESENT + #define __TBB_INITIALIZER_LISTS_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CONSTEXPR_PRESENT __TBB_CPP11_PRESENT + #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT __TBB_CPP11_PRESENT + #define __TBB_NOEXCEPT_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP11_STD_BEGIN_END_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP11_AUTO_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP11_DECLTYPE_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP11_LAMBDAS_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP11_DEFAULT_FUNC_TEMPLATE_ARGS_PRESENT __TBB_CPP11_PRESENT + #define __TBB_OVERRIDE_PRESENT __TBB_CPP11_PRESENT + #define __TBB_ALIGNAS_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP11_TEMPLATE_ALIASES_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP14_INTEGER_SEQUENCE_PRESENT (__cplusplus >= 201402L) + #define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__cplusplus >= 201402L) + #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__cplusplus >= 201703L) + #define __TBB_CPP17_INVOKE_RESULT_PRESENT (__cplusplus >= 201703L) #endif // C++11 standard library features +#define __TBB_CPP11_ARRAY_PRESENT (_MSC_VER >= 1700 || _LIBCPP_VERSION || __GXX_EXPERIMENTAL_CXX0X__ && __TBB_GLIBCXX_VERSION >= 40300) + #ifndef __TBB_CPP11_VARIADIC_FIXED_LENGTH_EXP_PRESENT #define 
__TBB_CPP11_VARIADIC_FIXED_LENGTH_EXP_PRESENT __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT #endif -#define __TBB_CPP11_VARIADIC_TUPLE_PRESENT (!_MSC_VER || _MSC_VER >=1800) +#define __TBB_CPP11_VARIADIC_TUPLE_PRESENT (!_MSC_VER || _MSC_VER >= 1800) -#define __TBB_CPP11_TYPE_PROPERTIES_PRESENT (_LIBCPP_VERSION || _MSC_VER >= 1700 || (__TBB_GLIBCXX_VERSION >= 50000 && (__GXX_EXPERIMENTAL_CXX0X__ || __cplusplus >= 201103L))) -#define __TBB_TR1_TYPE_PROPERTIES_IN_STD_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GLIBCXX_VERSION >= 40300 || _MSC_VER >= 1600) +#define __TBB_CPP11_TYPE_PROPERTIES_PRESENT (_LIBCPP_VERSION || _MSC_VER >= 1700 || (__TBB_GLIBCXX_VERSION >= 50000 && __GXX_EXPERIMENTAL_CXX0X__)) // GCC supported some of type properties since 4.7 -#define __TBB_CPP11_IS_COPY_CONSTRUCTIBLE_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GLIBCXX_VERSION >= 40700 || __TBB_CPP11_TYPE_PROPERTIES_PRESENT) +#define __TBB_CPP11_IS_COPY_CONSTRUCTIBLE_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GLIBCXX_VERSION >= 40700 || __TBB_CPP11_TYPE_PROPERTIES_PRESENT) // In GCC, std::move_if_noexcept appeared later than noexcept -#define __TBB_MOVE_IF_NOEXCEPT_PRESENT (__TBB_NOEXCEPT_PRESENT && (__TBB_GLIBCXX_VERSION >= 40700 || _MSC_VER >= 1900 || _LIBCPP_VERSION)) -#define __TBB_ALLOCATOR_TRAITS_PRESENT (__cplusplus >= 201103L && _LIBCPP_VERSION || _MSC_VER >= 1700 || \ - __GXX_EXPERIMENTAL_CXX0X__ && __TBB_GLIBCXX_VERSION >= 40700 && !(__TBB_GLIBCXX_VERSION == 40700 && __TBB_DEFINE_MIC)) -#define __TBB_MAKE_EXCEPTION_PTR_PRESENT (__TBB_EXCEPTION_PTR_PRESENT && (_MSC_VER >= 1700 || __TBB_GLIBCXX_VERSION >= 40600 || _LIBCPP_VERSION)) - -#define __TBB_CPP11_FUTURE_PRESENT (_MSC_VER >= 1700 || __TBB_GLIBCXX_VERSION >= 40600 && _GXX_EXPERIMENTAL_CXX0X__ || _LIBCPP_VERSION) - +#define __TBB_MOVE_IF_NOEXCEPT_PRESENT (__TBB_NOEXCEPT_PRESENT && (__TBB_GLIBCXX_VERSION >= 40700 || _MSC_VER >= 1900 || _LIBCPP_VERSION)) +#define __TBB_ALLOCATOR_TRAITS_PRESENT (__cplusplus >= 201103L && 
_LIBCPP_VERSION || _MSC_VER >= 1800 || \ + __GXX_EXPERIMENTAL_CXX0X__ && __TBB_GLIBCXX_VERSION >= 40700 && !(__TBB_GLIBCXX_VERSION == 40700 && __TBB_DEFINE_MIC)) +#define __TBB_MAKE_EXCEPTION_PTR_PRESENT (__TBB_EXCEPTION_PTR_PRESENT && (_MSC_VER >= 1700 || __TBB_GLIBCXX_VERSION >= 40600 || _LIBCPP_VERSION || __SUNPRO_CC)) + +// Due to libc++ limitations in C++03 mode, do not pass rvalues to std::make_shared() +#define __TBB_CPP11_SMART_POINTERS_PRESENT ( _MSC_VER >= 1600 || _LIBCPP_VERSION \ + || ((__cplusplus >= 201103L || __GXX_EXPERIMENTAL_CXX0X__) \ + && (__TBB_GLIBCXX_VERSION >= 40500 || __TBB_GLIBCXX_VERSION >= 40400 && __TBB_USE_OPTIONAL_RTTI)) ) + +#define __TBB_CPP11_FUTURE_PRESENT (_MSC_VER >= 1700 || __TBB_GLIBCXX_VERSION >= 40600 && __GXX_EXPERIMENTAL_CXX0X__ || _LIBCPP_VERSION) + +#define __TBB_CPP11_GET_NEW_HANDLER_PRESENT (_MSC_VER >= 1900 || __TBB_GLIBCXX_VERSION >= 40900 && __GXX_EXPERIMENTAL_CXX0X__ || _LIBCPP_VERSION) + +#define __TBB_CPP17_UNCAUGHT_EXCEPTIONS_PRESENT (_MSC_VER >= 1900 || __GLIBCXX__ && __cpp_lib_uncaught_exceptions \ + || _LIBCPP_VERSION >= 3700 && (!__TBB_MACOS_TARGET_VERSION || __TBB_MACOS_TARGET_VERSION >= 101200)) +// TODO: wait when memory_resource will be fully supported in clang and define the right macro +// Currently it is in experimental stage since 6 version. +#define __TBB_CPP17_MEMORY_RESOURCE_PRESENT (_MSC_VER >= 1913 && (_MSVC_LANG > 201402L || __cplusplus > 201402L) || \ + __GLIBCXX__ && __cpp_lib_memory_resource >= 201603) +#define __TBB_CPP17_HW_INTERFERENCE_SIZE_PRESENT (_MSC_VER >= 1911) // std::swap is in only since C++11, though MSVC had it at least since VS2005 #if _MSC_VER>=1400 || _LIBCPP_VERSION || __GXX_EXPERIMENTAL_CXX0X__ #define __TBB_STD_SWAP_HEADER @@ -340,11 +386,21 @@ #define __TBB_GCC_BUILTIN_ATOMICS_PRESENT 1 #endif +#if __TBB_GCC_VERSION >= 70000 && !__INTEL_COMPILER && !__clang__ + // After GCC7 there was possible reordering problem in generic atomic load/store operations. 
+ // So always using builtins. + #define TBB_USE_GCC_BUILTINS 1 +#endif + #if __INTEL_COMPILER >= 1200 /** built-in C++11 style atomics available in ICC since 12.0 **/ #define __TBB_ICC_BUILTIN_ATOMICS_PRESENT 1 #endif +#if _MSC_VER>=1600 && (!__INTEL_COMPILER || __INTEL_COMPILER>=1310) + #define __TBB_MSVC_PART_WORD_INTERLOCKED_INTRINSICS_PRESENT 1 +#endif + #define __TBB_TSX_INTRINSICS_PRESENT ((__RTM__ || _MSC_VER>=1700 || __INTEL_COMPILER>=1300) && !__TBB_DEFINE_MIC && !__ANDROID__) /** Macro helpers **/ @@ -361,8 +417,8 @@ /* There are four cases that are supported: 1. "_DEBUG is undefined" means "no debug"; - 2. "_DEBUG defined to something that is evaluated to 0 (the "garbage" is also evaluated to 0 [cpp.cond])" means "no debug"; - 3. "_DEBUG defined to something that is evaluated to non-zero value" means "debug"; + 2. "_DEBUG defined to something that is evaluated to 0" (including "garbage", as per [cpp.cond]) means "no debug"; + 3. "_DEBUG defined to something that is evaluated to a non-zero value" means "debug"; 4. "_DEBUG defined to nothing (empty)" means "debug". 
*/ #ifdef _DEBUG @@ -391,7 +447,7 @@ There are four cases that are supported: #define TBB_USE_PERFORMANCE_WARNINGS TBB_PERFORMANCE_WARNINGS #else #define TBB_USE_PERFORMANCE_WARNINGS TBB_USE_DEBUG -#endif /* TBB_PEFORMANCE_WARNINGS */ +#endif /* TBB_PERFORMANCE_WARNINGS */ #endif /* TBB_USE_PERFORMANCE_WARNINGS */ #if __TBB_DEFINE_MIC @@ -410,18 +466,10 @@ There are four cases that are supported: #define TBB_USE_EXCEPTIONS 1 #endif -#if __clang__ && !__INTEL_COMPILER -#define __TBB_USE_OPTIONAL_RTTI __has_feature(cxx_rtti) -#elif defined(_CPPRTTI) -#define __TBB_USE_OPTIONAL_RTTI 1 -#else -#define __TBB_USE_OPTIONAL_RTTI (__GXX_RTTI || __RTTI || __INTEL_RTTI__) -#endif - #ifndef TBB_IMPLEMENT_CPP0X /** By default, use C++11 classes if available **/ #if __clang__ - /* Old versions of Intel Compiler do not have __has_include or cannot use it in #define */ + /* Old versions of Intel C++ Compiler do not have __has_include or cannot use it in #define */ #if (__INTEL_COMPILER && (__INTEL_COMPILER < 1500 || __INTEL_COMPILER == 1500 && __INTEL_COMPILER_UPDATE <= 1)) #define TBB_IMPLEMENT_CPP0X (__cplusplus < 201103L || !_LIBCPP_VERSION) #else @@ -458,6 +506,15 @@ There are four cases that are supported: /** Internal TBB features & modes **/ +/** __TBB_CONCURRENT_ORDERED_CONTAINERS indicates that all conditions of use + * concurrent_map and concurrent_set are met. 
**/ +// TODO: Add cpp11 random generation macro +#ifndef __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT + #define __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT ( __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT \ + && __TBB_IMPLICIT_MOVE_PRESENT && __TBB_CPP11_AUTO_PRESENT && __TBB_CPP11_LAMBDAS_PRESENT && __TBB_CPP11_ARRAY_PRESENT \ + && __TBB_INITIALIZER_LISTS_PRESENT ) +#endif + /** __TBB_WEAK_SYMBOLS_PRESENT denotes that the system supports the weak symbol mechanism **/ #ifndef __TBB_WEAK_SYMBOLS_PRESENT #define __TBB_WEAK_SYMBOLS_PRESENT ( !_WIN32 && !__APPLE__ && !__sun && (__TBB_GCC_VERSION >= 40000 || __INTEL_COMPILER ) ) @@ -499,29 +556,13 @@ There are four cases that are supported: #define __TBB_RECYCLE_TO_ENQUEUE __TBB_BUILD // keep non-official #ifndef __TBB_ARENA_OBSERVER - #define __TBB_ARENA_OBSERVER ((__TBB_BUILD||TBB_PREVIEW_LOCAL_OBSERVER)&& __TBB_SCHEDULER_OBSERVER) + #define __TBB_ARENA_OBSERVER __TBB_SCHEDULER_OBSERVER #endif /* __TBB_ARENA_OBSERVER */ -#ifndef __TBB_SLEEP_PERMISSION - #define __TBB_SLEEP_PERMISSION ((__TBB_CPF_BUILD||TBB_PREVIEW_LOCAL_OBSERVER)&& __TBB_SCHEDULER_OBSERVER) -#endif /* __TBB_SLEEP_PERMISSION */ - #ifndef __TBB_TASK_ISOLATION - #define __TBB_TASK_ISOLATION (__TBB_CPF_BUILD||TBB_PREVIEW_TASK_ISOLATION) + #define __TBB_TASK_ISOLATION 1 #endif /* __TBB_TASK_ISOLATION */ -#if TBB_PREVIEW_FLOW_GRAPH_TRACE -// Users of flow-graph trace need to explicitly link against the preview library. This -// prevents the linker from implicitly linking an application with a preview version of -// TBB and unexpectedly bringing in other community preview features, which might change -// the behavior of the application. 
-#define __TBB_NO_IMPLICIT_LINKAGE 1 -#endif /* TBB_PREVIEW_FLOW_GRAPH_TRACE */ - -#ifndef __TBB_ITT_STRUCTURE_API -#define __TBB_ITT_STRUCTURE_API ( !__TBB_DEFINE_MIC && (__TBB_CPF_BUILD || TBB_PREVIEW_FLOW_GRAPH_TRACE) ) -#endif - #if TBB_USE_EXCEPTIONS && !__TBB_TASK_GROUP_CONTEXT #error TBB_USE_EXCEPTIONS requires __TBB_TASK_GROUP_CONTEXT to be enabled #endif @@ -534,6 +575,10 @@ There are four cases that are supported: #error __TBB_TASK_PRIORITY requires __TBB_TASK_GROUP_CONTEXT to be enabled #endif +#if TBB_PREVIEW_NUMA_SUPPORT || __TBB_BUILD + #define __TBB_NUMA_SUPPORT 1 +#endif + #if TBB_PREVIEW_WAITING_FOR_WORKERS || __TBB_BUILD #define __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE 1 #endif @@ -548,14 +593,8 @@ There are four cases that are supported: #endif /* __TBB_SURVIVE_THREAD_SWITCH */ #ifndef __TBB_DEFAULT_PARTITIONER -#if TBB_DEPRECATED -/** Default partitioner for parallel loop templates in TBB 1.0-2.1 */ -#define __TBB_DEFAULT_PARTITIONER tbb::simple_partitioner -#else -/** Default partitioner for parallel loop templates since TBB 2.2 */ #define __TBB_DEFAULT_PARTITIONER tbb::auto_partitioner -#endif /* TBB_DEPRECATED */ -#endif /* !defined(__TBB_DEFAULT_PARTITIONER */ +#endif #ifndef __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES #define __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES 1 @@ -577,8 +616,45 @@ There are four cases that are supported: #endif #endif -/** __TBB_WIN8UI_SUPPORT enables support of New Windows*8 Store Apps and limit a possibility to load +// Intel C++ Compiler starts analyzing usages of the deprecated content at the template +// instantiation site, which is too late for suppression of the corresponding messages for internal +// stuff. 
+#if !defined(__INTEL_COMPILER) && (!defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) || (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0)) + #if (__cplusplus >= 201402L) + #define __TBB_DEPRECATED [[deprecated]] + #define __TBB_DEPRECATED_MSG(msg) [[deprecated(msg)]] + #elif _MSC_VER + #define __TBB_DEPRECATED __declspec(deprecated) + #define __TBB_DEPRECATED_MSG(msg) __declspec(deprecated(msg)) + #elif (__GNUC__ && __TBB_GCC_VERSION >= 40805) || __clang__ + #define __TBB_DEPRECATED __attribute__((deprecated)) + #define __TBB_DEPRECATED_MSG(msg) __attribute__((deprecated(msg))) + #endif +#endif // !defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) || (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0) + +#if !defined(__TBB_DEPRECATED) + #define __TBB_DEPRECATED + #define __TBB_DEPRECATED_MSG(msg) +#elif !defined(__TBB_SUPPRESS_INTERNAL_DEPRECATED_MESSAGES) + // Suppress deprecated messages from self + #define __TBB_SUPPRESS_INTERNAL_DEPRECATED_MESSAGES 1 +#endif + +#if defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) && (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0) + #define __TBB_DEPRECATED_VERBOSE __TBB_DEPRECATED + #define __TBB_DEPRECATED_VERBOSE_MSG(msg) __TBB_DEPRECATED_MSG(msg) +#else + #define __TBB_DEPRECATED_VERBOSE + #define __TBB_DEPRECATED_VERBOSE_MSG(msg) +#endif // (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0) + +#if (!defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) || (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0)) && !__TBB_CPP11_PRESENT + #pragma message("TBB Warning: Support for C++98/03 is deprecated. Please use the compiler that supports C++11 features at least.") +#endif + +/** __TBB_WIN8UI_SUPPORT enables support of Windows* Store Apps and limit a possibility to load shared libraries at run time only from application container **/ +// TODO: Separate this single macro into two for Windows 8 Store* (win8ui mode) and UWP/UWD modes. 
#if defined(WINAPI_FAMILY) && WINAPI_FAMILY == WINAPI_FAMILY_APP #define __TBB_WIN8UI_SUPPORT 1 #else @@ -627,7 +703,7 @@ There are four cases that are supported: #endif #if (_WIN32||_WIN64) && __INTEL_COMPILER == 1110 - /** That's a bug in Intel(R) C++ Compiler 11.1.044/IA-32 architecture/Windows* OS, that leads to a worker thread crash on the thread's startup. **/ + /** That's a bug in Intel C++ Compiler 11.1.044/IA-32 architecture/Windows* OS, that leads to a worker thread crash on the thread's startup. **/ #define __TBB_ICL_11_1_CODE_GEN_BROKEN 1 #endif @@ -658,12 +734,12 @@ There are four cases that are supported: #endif /* __FreeBSD__ */ #if (__linux__ || __APPLE__) && __i386__ && defined(__INTEL_COMPILER) - /** The Intel(R) C++ Compiler for IA-32 architecture (Linux* OS|macOS) crashes or generates + /** The Intel C++ Compiler for IA-32 architecture (Linux* OS|macOS) crashes or generates incorrect code when __asm__ arguments have a cast to volatile. **/ #define __TBB_ICC_ASM_VOLATILE_BROKEN 1 #endif -#if !__INTEL_COMPILER && (_MSC_VER || __GNUC__==3 && __GNUC_MINOR__<=2) +#if !__INTEL_COMPILER && (_MSC_VER && _MSC_VER < 1700 || __GNUC__==3 && __GNUC_MINOR__<=2) /** Bug in GCC 3.2 and MSVC compilers that sometimes return 0 for __alignof(T) when T has not yet been instantiated. **/ #define __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN 1 @@ -686,11 +762,7 @@ There are four cases that are supported: #endif #if __INTEL_COMPILER==1300 && __TBB_GLIBCXX_VERSION>=40700 && defined(__GXX_EXPERIMENTAL_CXX0X__) -/* Some C++11 features used inside libstdc++ are not supported by Intel compiler. - * Checking version of gcc instead of libstdc++ because - * - they are directly connected, - * - for now it is not possible to check version of any standard library in this file - */ +/* Some C++11 features used inside libstdc++ are not supported by Intel C++ Compiler. 
*/ #define __TBB_ICC_13_0_CPP11_STDLIB_SUPPORT_BROKEN 1 #endif @@ -703,7 +775,7 @@ There are four cases that are supported: #endif #endif -/*In a PIC mode some versions of GCC 4.1.2 generate incorrect inlined code for 8 byte __sync_val_compare_and_swap intrinsic */ +/* In a PIC mode some versions of GCC 4.1.2 generate incorrect inlined code for 8 byte __sync_val_compare_and_swap intrinsic */ #if __TBB_GCC_VERSION == 40102 && __PIC__ && !defined(__INTEL_COMPILER) && !defined(__clang__) #define __TBB_GCC_CAS8_BUILTIN_INLINING_BROKEN 1 #endif @@ -734,16 +806,23 @@ There are four cases that are supported: #define __TBB_IF_NO_COPY_CTOR_MOVE_SEMANTICS_BROKEN (_MSC_VER && (__INTEL_COMPILER >= 1300 && __INTEL_COMPILER <= 1310) && !__INTEL_CXX11_MODE__) #define __TBB_CPP11_DECLVAL_BROKEN (_MSC_VER == 1600 || (__GNUC__ && __TBB_GCC_VERSION < 40500) ) - -// Intel C++ compiler has difficulties with copying std::pair with VC11 std::reference_wrapper being a const member +// Intel C++ Compiler has difficulties with copying std::pair with VC11 std::reference_wrapper being a const member #define __TBB_COPY_FROM_NON_CONST_REF_BROKEN (_MSC_VER == 1700 && __INTEL_COMPILER && __INTEL_COMPILER < 1600) // The implicit upcasting of the tuple of a reference of a derived class to a base class fails on icc 13.X if the system's gcc environment is 4.8 // Also in gcc 4.4 standard library the implementation of the tuple<&> conversion (tuple a = tuple, B is inherited from A) is broken. -#if __GXX_EXPERIMENTAL_CXX0X__ && ((__INTEL_COMPILER >=1300 && __INTEL_COMPILER <=1310 && __TBB_GLIBCXX_VERSION>=40700) || (__TBB_GLIBCXX_VERSION < 40500)) +#if __GXX_EXPERIMENTAL_CXX0X__ && __GLIBCXX__ && ((__INTEL_COMPILER >=1300 && __INTEL_COMPILER <=1310 && __TBB_GLIBCXX_VERSION>=40700) || (__TBB_GLIBCXX_VERSION < 40500)) #define __TBB_UPCAST_OF_TUPLE_OF_REF_BROKEN 1 #endif +// In some cases decltype of a function adds a reference to a return type. 
+#define __TBB_CPP11_DECLTYPE_OF_FUNCTION_RETURN_TYPE_BROKEN (_MSC_VER == 1600 && !__INTEL_COMPILER) + +// Visual Studio 2013 does not delete the copy constructor when a user-defined move constructor is provided +#if _MSC_VER && _MSC_VER <= 1800 + #define __TBB_IMPLICIT_COPY_DELETION_BROKEN 1 +#endif + /** End of __TBB_XXX_BROKEN macro section **/ #if defined(_MSC_VER) && _MSC_VER>=1500 && !defined(__INTEL_COMPILER) @@ -764,7 +843,7 @@ There are four cases that are supported: #define __TBB_VARIADIC_PARALLEL_INVOKE (TBB_PREVIEW_VARIADIC_PARALLEL_INVOKE && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT) #define __TBB_FLOW_GRAPH_CPP11_FEATURES (__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT \ - && __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_AUTO_PRESENT) \ + && __TBB_CPP11_SMART_POINTERS_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_AUTO_PRESENT) \ && __TBB_CPP11_VARIADIC_TUPLE_PRESENT && __TBB_CPP11_DEFAULT_FUNC_TEMPLATE_ARGS_PRESENT \ && !__TBB_UPCAST_OF_TUPLE_OF_REF_BROKEN #define __TBB_PREVIEW_STREAMING_NODE (__TBB_CPP11_VARIADIC_FIXED_LENGTH_EXP_PRESENT && __TBB_FLOW_GRAPH_CPP11_FEATURES \ @@ -773,7 +852,22 @@ There are four cases that are supported: #define __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING (TBB_PREVIEW_FLOW_GRAPH_FEATURES || __TBB_PREVIEW_OPENCL_NODE) #define __TBB_PREVIEW_ASYNC_MSG (TBB_PREVIEW_FLOW_GRAPH_FEATURES && __TBB_FLOW_GRAPH_CPP11_FEATURES) -#define __TBB_PREVIEW_GFX_FACTORY (__TBB_GFX_PRESENT && TBB_PREVIEW_FLOW_GRAPH_FEATURES && !__TBB_MIC_OFFLOAD \ - && __TBB_FLOW_GRAPH_CPP11_FEATURES && __TBB_CPP11_TEMPLATE_ALIASES_PRESENT \ - && __TBB_CPP11_FUTURE_PRESENT) + +#ifndef __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +#define __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES TBB_PREVIEW_FLOW_GRAPH_FEATURES +#endif + +// This feature works only in combination with critical tasks (__TBB_PREVIEW_CRITICAL_TASKS) +#ifndef __TBB_PREVIEW_RESUMABLE_TASKS +#define __TBB_PREVIEW_RESUMABLE_TASKS ((__TBB_CPF_BUILD || TBB_PREVIEW_RESUMABLE_TASKS) && 
!__TBB_WIN8UI_SUPPORT && !__ANDROID__ && !__TBB_ipf) +#endif + +#ifndef __TBB_PREVIEW_CRITICAL_TASKS +#define __TBB_PREVIEW_CRITICAL_TASKS (__TBB_CPF_BUILD || __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES || __TBB_PREVIEW_RESUMABLE_TASKS) +#endif + +#ifndef __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +#define __TBB_PREVIEW_FLOW_GRAPH_NODE_SET (TBB_PREVIEW_FLOW_GRAPH_FEATURES && __TBB_CPP11_PRESENT && __TBB_FLOW_GRAPH_CPP11_FEATURES) +#endif + #endif /* __TBB_tbb_config_H */ diff --git a/inst/include/tbb/tbb_disable_exceptions.h b/inst/include/tbb/tbb_disable_exceptions.h index a1d28a60..69ef5c57 100644 --- a/inst/include/tbb/tbb_disable_exceptions.h +++ b/inst/include/tbb/tbb_disable_exceptions.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ //! To disable use of exceptions, include this header before any other header file from the library. diff --git a/inst/include/tbb/tbb_exception.h b/inst/include/tbb/tbb_exception.h index 1c843309..668ee564 100644 --- a/inst/include/tbb/tbb_exception.h +++ b/inst/include/tbb/tbb_exception.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,31 +12,19 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_exception_H #define __TBB_exception_H -#include "tbb_stddef.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif +#define __TBB_tbb_exception_H_include_area +#include "internal/_warning_suppress_enable_notice.h" +#include "tbb_stddef.h" #include -#include //required for bad_alloc definition, operators new +#include // required for bad_alloc definition, operators new #include // required to construct std exception classes -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - namespace tbb { //! Exception for concurrent containers @@ -49,7 +37,7 @@ class bad_last_alloc : public std::bad_alloc { }; //! Exception for PPL locks -class improper_lock : public std::exception { +class __TBB_DEPRECATED improper_lock : public std::exception { public: const char* what() const throw() __TBB_override; }; @@ -146,7 +134,7 @@ namespace tbb { TBB provides two implementations of this interface: tbb::captured_exception and template class tbb::movable_exception. See their declarations for more info. **/ -class tbb_exception : public std::exception +class __TBB_DEPRECATED tbb_exception : public std::exception { /** No operator new is provided because the TBB usage model assumes dynamic creation of the TBB exception objects only by means of applying move() @@ -164,19 +152,19 @@ class tbb_exception : public std::exception //! Creates and returns pointer to the deep copy of this exception object. /** Move semantics is allowed. **/ - virtual tbb_exception* move () throw() = 0; + virtual tbb_exception* move() throw() = 0; //! Destroys objects created by the move() method. /** Frees memory and calls destructor for this exception object. Can and must be used only on objects created by the move method. **/ - virtual void destroy () throw() = 0; + virtual void destroy() throw() = 0; //! 
Throws this exception object. /** Make sure that if you have several levels of derivation from this interface you implement or override this method on the most derived level. The implementation is as simple as "throw *this;". Failure to do this will result in exception of a base class type being thrown. **/ - virtual void throw_self () = 0; + virtual void throw_self() = 0; //! Returns RTTI name of the originally intercepted exception virtual const char* name() const throw() = 0; @@ -200,22 +188,22 @@ class tbb_exception : public std::exception algorithm ) if an unhandled exception was intercepted during the algorithm execution in one of the workers. \sa tbb::tbb_exception **/ -class captured_exception : public tbb_exception +class __TBB_DEPRECATED captured_exception : public tbb_exception { public: - captured_exception ( const captured_exception& src ) + captured_exception( const captured_exception& src ) : tbb_exception(src), my_dynamic(false) { set(src.my_exception_name, src.my_exception_info); } - captured_exception ( const char* name_, const char* info ) + captured_exception( const char* name_, const char* info ) : my_dynamic(false) { set(name_, info); } - __TBB_EXPORTED_METHOD ~captured_exception () throw(); + __TBB_EXPORTED_METHOD ~captured_exception() throw(); captured_exception& operator= ( const captured_exception& src ) { if ( this != &src ) { @@ -225,25 +213,25 @@ class captured_exception : public tbb_exception return *this; } - captured_exception* __TBB_EXPORTED_METHOD move () throw() __TBB_override; + captured_exception* __TBB_EXPORTED_METHOD move() throw() __TBB_override; - void __TBB_EXPORTED_METHOD destroy () throw() __TBB_override; + void __TBB_EXPORTED_METHOD destroy() throw() __TBB_override; - void throw_self () __TBB_override { __TBB_THROW(*this); } + void throw_self() __TBB_override { __TBB_THROW(*this); } const char* __TBB_EXPORTED_METHOD name() const throw() __TBB_override; const char* __TBB_EXPORTED_METHOD what() const throw() 
__TBB_override; - void __TBB_EXPORTED_METHOD set ( const char* name, const char* info ) throw(); - void __TBB_EXPORTED_METHOD clear () throw(); + void __TBB_EXPORTED_METHOD set( const char* name, const char* info ) throw(); + void __TBB_EXPORTED_METHOD clear() throw(); private: - //! Used only by method clone(). - captured_exception() {} + //! Used only by method move(). + captured_exception() : my_dynamic(), my_exception_name(), my_exception_info() {} - //! Functionally equivalent to {captured_exception e(name,info); return e.clone();} - static captured_exception* allocate ( const char* name, const char* info ); + //! Functionally equivalent to {captured_exception e(name,info); return e.move();} + static captured_exception* allocate( const char* name, const char* info ); bool my_dynamic; const char* my_exception_name; @@ -256,12 +244,12 @@ class captured_exception : public tbb_exception and delivered to the root thread (). \sa tbb::tbb_exception **/ template -class movable_exception : public tbb_exception +class __TBB_DEPRECATED movable_exception : public tbb_exception { typedef movable_exception self_type; public: - movable_exception ( const ExceptionData& data_ ) + movable_exception( const ExceptionData& data_ ) : my_exception_data(data_) , my_dynamic(false) , my_exception_name( @@ -273,14 +261,14 @@ class movable_exception : public tbb_exception ) {} - movable_exception ( const movable_exception& src ) throw () + movable_exception( const movable_exception& src ) throw () : tbb_exception(src) , my_exception_data(src.my_exception_data) , my_dynamic(false) , my_exception_name(src.my_exception_name) {} - ~movable_exception () throw() {} + ~movable_exception() throw() {} const movable_exception& operator= ( const movable_exception& src ) { if ( this != &src ) { @@ -290,15 +278,15 @@ class movable_exception : public tbb_exception return *this; } - ExceptionData& data () throw() { return my_exception_data; } + ExceptionData& data() throw() { return my_exception_data; 
} - const ExceptionData& data () const throw() { return my_exception_data; } + const ExceptionData& data() const throw() { return my_exception_data; } - const char* name () const throw() __TBB_override { return my_exception_name; } + const char* name() const throw() __TBB_override { return my_exception_name; } - const char* what () const throw() __TBB_override { return "tbb::movable_exception"; } + const char* what() const throw() __TBB_override { return "tbb::movable_exception"; } - movable_exception* move () throw() __TBB_override { + movable_exception* move() throw() __TBB_override { void* e = internal::allocate_via_handler_v3(sizeof(movable_exception)); if ( e ) { ::new (e) movable_exception(*this); @@ -306,14 +294,14 @@ class movable_exception : public tbb_exception } return (movable_exception*)e; } - void destroy () throw() __TBB_override { + void destroy() throw() __TBB_override { __TBB_ASSERT ( my_dynamic, "Method destroy can be called only on dynamically allocated movable_exceptions" ); if ( my_dynamic ) { this->~movable_exception(); internal::deallocate_via_handler_v3(this); } } - void throw_self () __TBB_override { __TBB_THROW( *this ); } + void throw_self() __TBB_override { __TBB_THROW( *this ); } protected: //! User data @@ -333,26 +321,26 @@ namespace internal { //! Exception container that preserves the exact copy of the original exception /** This class can be used only when the appropriate runtime support (mandated - by C++0x) is present **/ + by C++11) is present **/ class tbb_exception_ptr { std::exception_ptr my_ptr; public: - static tbb_exception_ptr* allocate (); - static tbb_exception_ptr* allocate ( const tbb_exception& tag ); + static tbb_exception_ptr* allocate(); + static tbb_exception_ptr* allocate( const tbb_exception& tag ); //! This overload uses move semantics (i.e. it empties src) - static tbb_exception_ptr* allocate ( captured_exception& src ); + static tbb_exception_ptr* allocate( captured_exception& src ); //! 
Destroys this objects /** Note that objects of this type can be created only by the allocate() method. **/ - void destroy () throw(); + void destroy() throw(); //! Throws the contained exception . - void throw_self () { std::rethrow_exception(my_ptr); } + void throw_self() { std::rethrow_exception(my_ptr); } private: - tbb_exception_ptr ( const std::exception_ptr& src ) : my_ptr(src) {} - tbb_exception_ptr ( const captured_exception& src ) : + tbb_exception_ptr( const std::exception_ptr& src ) : my_ptr(src) {} + tbb_exception_ptr( const captured_exception& src ) : #if __TBB_MAKE_EXCEPTION_PTR_PRESENT my_ptr(std::make_exception_ptr(src)) // the final function name in C++11 #else @@ -368,4 +356,7 @@ class tbb_exception_ptr { #endif /* __TBB_TASK_GROUP_CONTEXT */ +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_tbb_exception_H_include_area + #endif /* __TBB_exception_H */ diff --git a/inst/include/tbb/tbb_machine.h b/inst/include/tbb/tbb_machine.h index 68d1d5d0..9752be58 100644 --- a/inst/include/tbb/tbb_machine.h +++ b/inst/include/tbb/tbb_machine.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_machine_H @@ -227,7 +223,7 @@ template<> struct atomic_selector<8> { #include "machine/linux_intel64.h" #endif -#elif __linux__ || __FreeBSD__ || __NetBSD__ +#elif __linux__ || __FreeBSD__ || __NetBSD__ || __OpenBSD__ #if (TBB_USE_GCC_BUILTINS && __TBB_GCC_BUILTIN_ATOMICS_PRESENT) #include "machine/gcc_generic.h" @@ -241,8 +237,8 @@ template<> struct atomic_selector<8> { #include "machine/linux_ia64.h" #elif __powerpc__ #include "machine/mac_ppc.h" - #elif __ARM_ARCH_7A__ - #include "machine/gcc_armv7.h" + #elif __ARM_ARCH_7A__ || __aarch64__ + #include "machine/gcc_arm.h" #elif __TBB_GCC_BUILTIN_ATOMICS_PRESENT #include "machine/gcc_generic.h" #endif @@ -663,7 +659,15 @@ struct machine_load_store_seq_cst { return __TBB_machine_cmpswp8( (volatile void*)const_cast(&location), anyvalue, anyvalue ); } static void store ( volatile T &location, T value ) { +#if __TBB_GCC_VERSION >= 40702 +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + // An atomic initialization leads to reading of uninitialized memory int64_t result = (volatile int64_t&)location; +#if __TBB_GCC_VERSION >= 40702 +#pragma GCC diagnostic pop +#endif while ( __TBB_machine_cmpswp8((volatile void*)&location, (int64_t)value, result) != result ) result = (volatile int64_t&)location; } @@ -780,7 +784,7 @@ struct __TBB_machine_type_with_alignment_##PowerOf2 { \ #endif /* Now declare types aligned to useful powers of two */ -// TODO: Is __TBB_DefineTypeWithAlignment(8) needed on 32 bit platforms? 
+__TBB_DefineTypeWithAlignment(8) // i386 ABI says that uint64_t is aligned on 4 bytes __TBB_DefineTypeWithAlignment(16) __TBB_DefineTypeWithAlignment(32) __TBB_DefineTypeWithAlignment(64) @@ -794,7 +798,7 @@ template struct type_with_alignment; template<> struct type_with_alignment<1> { char member; }; template<> struct type_with_alignment<2> { uint16_t member; }; template<> struct type_with_alignment<4> { uint32_t member; }; -template<> struct type_with_alignment<8> { uint64_t member; }; +template<> struct type_with_alignment<8> { __TBB_machine_type_with_alignment_8 member; }; template<> struct type_with_alignment<16> {__TBB_machine_type_with_alignment_16 member; }; template<> struct type_with_alignment<32> {__TBB_machine_type_with_alignment_32 member; }; template<> struct type_with_alignment<64> {__TBB_machine_type_with_alignment_64 member; }; diff --git a/inst/include/tbb/tbb_profiling.h b/inst/include/tbb/tbb_profiling.h index ffaf98b9..20f8f512 100644 --- a/inst/include/tbb/tbb_profiling.h +++ b/inst/include/tbb/tbb_profiling.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,22 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_profiling_H #define __TBB_profiling_H +#define __TBB_tbb_profiling_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + namespace tbb { namespace internal { - // - // This is not under __TBB_ITT_STRUCTURE_API because these values are used directly in flow_graph.h. 
- // - // include list of index names #define TBB_STRING_RESOURCE(index_name,str) index_name, enum string_index { @@ -126,6 +121,7 @@ namespace tbb { #endif /* no tools support */ #include "atomic.h" + // Need these to work regardless of tools support namespace tbb { namespace internal { @@ -138,13 +134,14 @@ namespace tbb { void __TBB_EXPORTED_FUNC itt_store_pointer_with_release_v3(void *dst, void *src); void* __TBB_EXPORTED_FUNC itt_load_pointer_with_acquire_v3(const void *src); void* __TBB_EXPORTED_FUNC itt_load_pointer_v3( const void* src ); -#if __TBB_ITT_STRUCTURE_API - enum itt_domain_enum { ITT_DOMAIN_FLOW=0 }; + enum itt_domain_enum { ITT_DOMAIN_FLOW=0, ITT_DOMAIN_MAIN=1, ITT_DOMAIN_ALGO=2, ITT_NUM_DOMAINS }; void __TBB_EXPORTED_FUNC itt_make_task_group_v7( itt_domain_enum domain, void *group, unsigned long long group_extra, void *parent, unsigned long long parent_extra, string_index name_index ); void __TBB_EXPORTED_FUNC itt_metadata_str_add_v7( itt_domain_enum domain, void *addr, unsigned long long addr_extra, string_index key, const char *value ); + void __TBB_EXPORTED_FUNC itt_metadata_ptr_add_v11( itt_domain_enum domain, void *addr, unsigned long long addr_extra, + string_index key, void* value ); void __TBB_EXPORTED_FUNC itt_relation_add_v7( itt_domain_enum domain, void *addr0, unsigned long long addr0_extra, itt_relation relation, void *addr1, unsigned long long addr1_extra ); void __TBB_EXPORTED_FUNC itt_task_begin_v7( itt_domain_enum domain, void *task, unsigned long long task_extra, @@ -154,7 +151,6 @@ namespace tbb { void __TBB_EXPORTED_FUNC itt_region_begin_v9( itt_domain_enum domain, void *region, unsigned long long region_extra, void *parent, unsigned long long parent_extra, string_index name_index ); void __TBB_EXPORTED_FUNC itt_region_end_v9( itt_domain_enum domain, void *region, unsigned long long region_extra ); -#endif // __TBB_ITT_STRUCTURE_API // two template arguments are to workaround /Wp64 warning with tbb::atomic specialized for 
unsigned type template @@ -238,12 +234,6 @@ namespace tbb { call_itt_notify_v5((int)t, ptr); } -#else - inline void call_itt_notify(notify_type /*t*/, void * /*ptr*/) {} - -#endif // TBB_USE_THREADING_TOOLS - -#if __TBB_ITT_STRUCTURE_API inline void itt_make_task_group( itt_domain_enum domain, void *group, unsigned long long group_extra, void *parent, unsigned long long parent_extra, string_index name_index ) { itt_make_task_group_v7( domain, group, group_extra, parent, parent_extra, name_index ); @@ -253,6 +243,11 @@ namespace tbb { string_index key, const char *value ) { itt_metadata_str_add_v7( domain, addr, addr_extra, key, value ); } + + inline void register_node_addr(itt_domain_enum domain, void *addr, unsigned long long addr_extra, + string_index key, void *value) { + itt_metadata_ptr_add_v11(domain, addr, addr_extra, key, value); + } inline void itt_relation_add( itt_domain_enum domain, void *addr0, unsigned long long addr0_extra, itt_relation relation, void *addr1, unsigned long long addr1_extra ) { @@ -276,9 +271,85 @@ namespace tbb { inline void itt_region_end( itt_domain_enum domain, void *region, unsigned long long region_extra ) { itt_region_end_v9( domain, region, region_extra ); } -#endif // __TBB_ITT_STRUCTURE_API +#else + inline void register_node_addr( itt_domain_enum /*domain*/, void* /*addr*/, unsigned long long /*addr_extra*/, string_index /*key*/, void* /*value*/ ) {} + inline void call_itt_notify(notify_type /*t*/, void* /*ptr*/) {} + + inline void itt_make_task_group( itt_domain_enum /*domain*/, void* /*group*/, unsigned long long /*group_extra*/, + void* /*parent*/, unsigned long long /*parent_extra*/, string_index /*name_index*/ ) {} + + inline void itt_metadata_str_add( itt_domain_enum /*domain*/, void* /*addr*/, unsigned long long /*addr_extra*/, + string_index /*key*/, const char* /*value*/ ) {} + + inline void itt_relation_add( itt_domain_enum /*domain*/, void* /*addr0*/, unsigned long long /*addr0_extra*/, + itt_relation 
/*relation*/, void* /*addr1*/, unsigned long long /*addr1_extra*/ ) {} + + inline void itt_task_begin( itt_domain_enum /*domain*/, void* /*task*/, unsigned long long /*task_extra*/, + void* /*parent*/, unsigned long long /*parent_extra*/, string_index /*name_index*/ ) {} + + inline void itt_task_end( itt_domain_enum /*domain*/ ) {} + + inline void itt_region_begin( itt_domain_enum /*domain*/, void* /*region*/, unsigned long long /*region_extra*/, + void* /*parent*/, unsigned long long /*parent_extra*/, string_index /*name_index*/ ) {} + + inline void itt_region_end( itt_domain_enum /*domain*/, void* /*region*/, unsigned long long /*region_extra*/ ) {} +#endif // TBB_USE_THREADING_TOOLS } // namespace internal } // namespace tbb +#if TBB_PREVIEW_FLOW_GRAPH_TRACE +#include + +namespace tbb { +namespace profiling { +namespace interface10 { + +#if TBB_USE_THREADING_TOOLS && !(TBB_USE_THREADING_TOOLS == 2) +class event { +/** This class supports user event traces through itt. + Common use-case is tagging data flow graph tasks (data-id) + and visualization by Intel Advisor Flow Graph Analyzer (FGA) **/ +// TODO: Replace implementation by itt user event api. 
+ + const std::string my_name; + + static void emit_trace(const std::string &input) { + itt_metadata_str_add( tbb::internal::ITT_DOMAIN_FLOW, NULL, tbb::internal::FLOW_NULL, tbb::internal::USER_EVENT, ( "FGA::DATAID::" + input ).c_str() ); + } + +public: + event(const std::string &input) + : my_name( input ) + { } + + void emit() { + emit_trace(my_name); + } + + static void emit(const std::string &description) { + emit_trace(description); + } + +}; +#else // TBB_USE_THREADING_TOOLS && !(TBB_USE_THREADING_TOOLS == 2) +// Using empty struct if user event tracing is disabled: +struct event { + event(const std::string &) { } + + void emit() { } + + static void emit(const std::string &) { } +}; +#endif // TBB_USE_THREADING_TOOLS && !(TBB_USE_THREADING_TOOLS == 2) + +} // interfaceX +using interface10::event; +} // namespace profiling +} // namespace tbb +#endif // TBB_PREVIEW_FLOW_GRAPH_TRACE + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_tbb_profiling_H_include_area + #endif /* __TBB_profiling_H */ diff --git a/inst/include/tbb/tbb_stddef.h b/inst/include/tbb/tbb_stddef.h index 236f3d83..1523a551 100644 --- a/inst/include/tbb/tbb_stddef.h +++ b/inst/include/tbb/tbb_stddef.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,21 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - - - */ #ifndef __TBB_tbb_stddef_H #define __TBB_tbb_stddef_H // Marketing-driven product version -#define TBB_VERSION_MAJOR 2017 -#define TBB_VERSION_MINOR 0 +#define TBB_VERSION_MAJOR 2020 +#define TBB_VERSION_MINOR 1 // Engineering-focused interface version -#define TBB_INTERFACE_VERSION 9107 +#define TBB_INTERFACE_VERSION 11101 #define TBB_INTERFACE_VERSION_MAJOR TBB_INTERFACE_VERSION/1000 // The oldest major interface version still supported @@ -155,8 +151,8 @@ namespace tbb { #if TBB_USE_ASSERT - //! Assert that x is true. - /** If x is false, print assertion failure message. + //! Assert that predicate is true. + /** If predicate is false, print assertion failure message. If the comment argument is not NULL, it is printed as part of the failure message. The comment argument has no other effect. */ #define __TBB_ASSERT(predicate,message) __TBB_ASSERT_RELEASE(predicate,message) @@ -246,6 +242,14 @@ const size_t NFS_MaxLineSize = 128; #define __TBB_override // formal comment only #endif +#if __TBB_CPP17_FALLTHROUGH_PRESENT +#define __TBB_fallthrough [[fallthrough]] +#elif __TBB_FALLTHROUGH_PRESENT +#define __TBB_fallthrough __attribute__ ((fallthrough)) +#else +#define __TBB_fallthrough +#endif + template struct padded_base : T { char pad[S - R]; @@ -278,7 +282,7 @@ void __TBB_EXPORTED_FUNC handle_perror( int error_code, const char* aux_info ); inline bool __TBB_false() { return false; } #define __TBB_TRY #define __TBB_CATCH(e) if ( tbb::internal::__TBB_false() ) - #define __TBB_THROW(e) ((void)0) + #define __TBB_THROW(e) tbb::internal::suppress_unused_warning(e) #define __TBB_RETHROW() ((void)0) #endif /* !TBB_USE_EXCEPTIONS */ @@ -312,6 +316,25 @@ inline T punned_cast( U* ptr ) { return reinterpret_cast(x); } +#if __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT + +//! Base class for types that should not be assigned. 
+class no_assign { +public: + void operator=( const no_assign& ) = delete; + no_assign( const no_assign& ) = default; + no_assign() = default; +}; + +//! Base class for types that should not be copied or assigned. +class no_copy: no_assign { +public: + no_copy( const no_copy& ) = delete; + no_copy() = default; +}; + +#else /*__TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT*/ + //! Base class for types that should not be assigned. class no_assign { // Deny assignment @@ -332,6 +355,8 @@ class no_copy: no_assign { no_copy() {} }; +#endif /*__TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT*/ + #if TBB_DEPRECATED_MUTEX_COPYING class mutex_copy_deprecated_and_disabled {}; #else @@ -423,17 +448,24 @@ class proportional_split: internal::no_assign { // Following is a set of classes and functions typically used in compile-time "metaprogramming". // TODO: move all that to a separate header -#if __TBB_ALLOCATOR_TRAITS_PRESENT -#include //for allocator_traits +#if __TBB_CPP11_SMART_POINTERS_PRESENT +#include // for unique_ptr #endif -#if __TBB_CPP11_RVALUE_REF_PRESENT || _LIBCPP_VERSION -#include // for std::move +#if __TBB_CPP11_RVALUE_REF_PRESENT || __TBB_CPP11_DECLTYPE_PRESENT || _LIBCPP_VERSION +#include // for std::move, std::forward, std::declval #endif namespace tbb { namespace internal { +#if __TBB_CPP11_SMART_POINTERS_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + template + std::unique_ptr make_unique(Args&&... args) { + return std::unique_ptr(new T(std::forward(args)...)); + } +#endif + //! Class for determining type of std::allocator::value_type. template struct allocator_type { @@ -457,15 +489,6 @@ struct bool_constant { typedef bool_constant true_type; typedef bool_constant false_type; -#if __TBB_ALLOCATOR_TRAITS_PRESENT -using std::allocator_traits; -#else -template -struct allocator_traits{ - typedef tbb::internal::false_type propagate_on_container_move_assignment; -}; -#endif - //! 
A template to select either 32-bit or 64-bit constant as compile time, depending on machine word size. template struct select_size_t_constant { @@ -526,7 +549,7 @@ struct STATIC_ASSERTION_FAILED; //intentionally left undefined to cause co //! @endcond }} // namespace tbb::internal -#if __TBB_STATIC_ASSERT_PRESENT +#if __TBB_STATIC_ASSERT_PRESENT #define __TBB_STATIC_ASSERT(condition,msg) static_assert(condition,msg) #else //please note condition is intentionally inverted to get a bit more understandable error msg diff --git a/inst/include/tbb/tbb_thread.h b/inst/include/tbb/tbb_thread.h index d1cafd65..17e87d96 100644 --- a/inst/include/tbb/tbb_thread.h +++ b/inst/include/tbb/tbb_thread.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,15 +12,25 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +*/ +#include "internal/_deprecated_header_message_guard.h" +#if !defined(__TBB_show_deprecation_message_tbb_thread_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_tbb_thread_H +#pragma message("TBB Warning: tbb/tbb_thread.h is deprecated. 
For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif - -*/ +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif #ifndef __TBB_tbb_thread_H #define __TBB_tbb_thread_H +#define __TBB_tbb_thread_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + #include "tbb_stddef.h" #if _WIN32||_WIN64 @@ -47,19 +57,9 @@ namespace tbb { namespace internal { #include "internal/_tbb_hash_compare_impl.h" #include "tick_count.h" -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - #include __TBB_STD_SWAP_HEADER #include -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - namespace tbb { namespace internal { @@ -256,7 +256,7 @@ namespace internal { friend tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3(); friend inline size_t tbb_hasher( const tbb_thread_v3::id& id ) { - __TBB_STATIC_ASSERT(sizeof(id.my_id) <= sizeof(size_t), "Implementaion assumes that thread_id_type fits into machine word"); + __TBB_STATIC_ASSERT(sizeof(id.my_id) <= sizeof(size_t), "Implementation assumes that thread_id_type fits into machine word"); return tbb::tbb_hasher(id.my_id); } @@ -307,7 +307,7 @@ namespace internal { } // namespace internal; //! Users reference thread class by name tbb_thread -typedef internal::tbb_thread_v3 tbb_thread; +__TBB_DEPRECATED_VERBOSE_MSG("tbb::thread is deprecated, use std::thread") typedef internal::tbb_thread_v3 tbb_thread; using internal::operator==; using internal::operator!=; @@ -328,15 +328,18 @@ inline void swap( internal::tbb_thread_v3& t1, internal::tbb_thread_v3& t2 ) __ } namespace this_tbb_thread { - inline tbb_thread::id get_id() { return internal::thread_get_id_v3(); } + __TBB_DEPRECATED_VERBOSE inline tbb_thread::id get_id() { return internal::thread_get_id_v3(); } //! 
Offers the operating system the opportunity to schedule another thread. - inline void yield() { internal::thread_yield_v3(); } + __TBB_DEPRECATED_VERBOSE inline void yield() { internal::thread_yield_v3(); } //! The current thread blocks at least until the time specified. - inline void sleep(const tick_count::interval_t &i) { + __TBB_DEPRECATED_VERBOSE inline void sleep(const tick_count::interval_t &i) { internal::thread_sleep_v3(i); } } // namespace this_tbb_thread } // namespace tbb +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_tbb_thread_H_include_area + #endif /* __TBB_tbb_thread_H */ diff --git a/inst/include/tbb/tbbmalloc_proxy.h b/inst/include/tbb/tbbmalloc_proxy.h index 76cbd6d7..28f6a405 100644 --- a/inst/include/tbb/tbbmalloc_proxy.h +++ b/inst/include/tbb/tbbmalloc_proxy.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - - */ /* @@ -63,4 +59,7 @@ struct __TBB_malloc_proxy_caller { #endif // _MSC_VER +/* Public Windows API */ +extern "C" int TBB_malloc_replacement_log(char *** function_replacement_log_ptr); + #endif //__TBB_tbbmalloc_proxy_H diff --git a/inst/include/tbb/tick_count.h b/inst/include/tbb/tick_count.h index a7f4e0f3..bbc92476 100644 --- a/inst/include/tbb/tick_count.h +++ b/inst/include/tbb/tick_count.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2017 Intel Corporation + Copyright (c) 2005-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,10 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. - - - - */ #ifndef __TBB_tick_count_H diff --git a/src/tbb/.gitattributes b/src/tbb/.gitattributes new file mode 100644 index 00000000..039edb3a --- /dev/null +++ b/src/tbb/.gitattributes @@ -0,0 +1,45 @@ +# Set the default behavior, in case people don't have core.autocrlf set. +* text=auto + +# Explicitly declare text files you want to always be normalized and converted +# to native line endings on checkout. +*.c text +*.h text +*.cpp text +*.def text +*.rc text +*.i text +*.sh text +*.csh text +*.mk text +*.java text +*.csv text +*.lst text +*.asm text +*.cfg text +*.css text +*.inc text +*.js text +*.rb text +*.strings text +*.txt text +*export.lst text +*.xml text +*.py text +*.md text +*.classpath text +*.cproject text +*.project text +*.properties text +*.java text +*.gradle text + +# Declare files that will always have CRLF line endings on checkout. +*.sln text eol=crlf +*.bat text eol=crlf + +# Denote all files that are truly binary and should not be modified. 
+*.png binary +*.jpg binary +*.ico binary +*.spir binary diff --git a/src/tbb/.gitignore b/src/tbb/.gitignore new file mode 100644 index 00000000..9b2411ec --- /dev/null +++ b/src/tbb/.gitignore @@ -0,0 +1,88 @@ +# Ignore the debug and release directories created with Makefile builds # +######################################################################### +build/*_debug/ +build/*_release/ + +# Compiled source # +################### +*.com +*.class +*.dll +*.lib +*.pdb +*.exe +*.o +*.so +*.so.1 +*.so.2 +*.dylib +*.a +*.obj +*.pyc + +*.orig +*.raw +*.sample +*.slo +*.swp +*.config +*.la +*.lai +*.lo +*.nhdr +*.nii.gz +*.nrrd + +# Packages # +############ +# it's better to unpack these files and commit the raw source +# git has its own built in compression methods +*.7z +*.dmg +*.gz +*.iso +*.jar +*.rar +*.tar +*.tgz +*.zip + +# Logs and databases # +###################### +*.log +*.sql +*.sqlite + +# OS generated files # +###################### +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +ehthumbs.db +Thumbs.db + +# IDE generated files # +###################### +/.ninja_deps +/.ninja_log +/build.ninja +/rules.ninja +*~ +.emacs.desktop +.tags + +# Build system generated files # +################################ +CMakeCache.txt +CMakeFiles/ + +# Other # +######### +.clang_complete +.idea +.svn +crash* +*.tmp +/.vs diff --git a/src/tbb/CHANGES b/src/tbb/CHANGES index aa7f126f..eb40d4d7 100644 --- a/src/tbb/CHANGES +++ b/src/tbb/CHANGES @@ -2,6 +2,437 @@ The list of most significant changes made over time in Intel(R) Threading Building Blocks (Intel(R) TBB). +Intel TBB 2020 Update 1 +TBB_INTERFACE_VERSION == 11101 + +Changes (w.r.t. Intel TBB 2020): + +Preview features: + +- The NUMA support library (tbbbind) no more depends on the main + TBB library. + +Bugs fixed: + +- Fixed the issue of task_arena constraints not propagated on + copy construction. 
+- Fixed TBBGet.cmake script broken by TBB package name changes + (https://github.com/intel/tbb/issues/209). + +------------------------------------------------------------------------ +Intel TBB 2020 +TBB_INTERFACE_VERSION == 11100 + +Changes (w.r.t. Intel TBB 2019 Update 9): + +- Extended task_arena interface to simplify development of NUMA-aware + applications. +- Added warning notifications when the deprecated functionality is used. + +Open-source contributions integrated: + +- Fixed various build warnings + (https://github.com/intel/tbb/pull/179) by Raf Schietekat. + +------------------------------------------------------------------------ +Intel TBB 2019 Update 9 +TBB_INTERFACE_VERSION == 11009 + +Changes (w.r.t. Intel TBB 2019 Update 8): + +- Multiple APIs are deprecated. For details, please see + Deprecated Features appendix in the TBB reference manual. +- Added C++17 deduction guides for flow graph nodes. + +Preview Features: + +- Added isolated_task_group class that allows multiple threads to add + and execute tasks sharing the same isolation. +- Extended the flow graph API to simplify connecting nodes. +- Added erase() by heterogeneous keys for concurrent ordered containers. +- Added a possibility to suspend task execution at a specific point + and resume it later. + +Bugs fixed: + +- Fixed the emplace() method of concurrent unordered containers to + destroy a temporary element that was not inserted. +- Fixed a bug in the merge() method of concurrent unordered + containers. +- Fixed behavior of a continue_node that follows buffering nodes. +- Fixed compilation error caused by missed stdlib.h when CMake + integration is used (https://github.com/intel/tbb/issues/195). + Inspired by Andrew Penkrat. + +Open-source contributions integrated: + +- Added support for move-only types to tbb::parallel_pipeline + (https://github.com/intel/tbb/pull/159) by Raf Schietekat. 
+- Fixed detection of clang version when CUDA toolkit is installed + (https://github.com/intel/tbb/pull/150) by Guilherme Amadio. + +------------------------------------------------------------------------ +Intel TBB 2019 Update 8 +TBB_INTERFACE_VERSION == 11008 + +Changes (w.r.t. Intel TBB 2019 Update 7): + +Bugs fixed: + +- Fixed a bug in TBB 2019 Update 7 that could lead to incorrect memory + reallocation on Linux (https://github.com/intel/tbb/issues/148). +- Fixed enqueuing tbb::task into tbb::task_arena not to fail on threads + with no task scheduler initialized + (https://github.com/intel/tbb/issues/116). + +------------------------------------------------------------------------ +Intel TBB 2019 Update 7 +TBB_INTERFACE_VERSION == 11007 + +Changes (w.r.t. Intel TBB 2019 Update 6): + +- Added TBBMALLOC_SET_HUGE_SIZE_THRESHOLD parameter to set the lower + bound for allocations that are not released back to OS unless + a cleanup is explicitly requested. +- Added zip_iterator::base() method to get the tuple of underlying + iterators. +- Improved async_node to never block a thread that sends a message + through its gateway. +- Extended decrement port of the tbb::flow::limiter_node to accept + messages of integral types. +- Added support of Windows* to the CMake module TBBInstallConfig. +- Added packaging of CMake configuration files to TBB packages built + using build/build.py script + (https://github.com/intel/tbb/issues/141). + +Changes affecting backward compatibility: + +- Removed the number_of_decrement_predecessors parameter from the + constructor of flow::limiter_node. To allow its usage, set + TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR macro to 1. + +Preview Features: + +- Added ordered associative containers: + concurrent_{map,multimap,set,multiset} (requires C++11). + +Open-source contributions integrated: + +- Fixed makefiles to properly obtain the GCC version for GCC 7 + and later (https://github.com/intel/tbb/pull/147) by Timmmm. 
+ +------------------------------------------------------------------------ +Intel TBB 2019 Update 6 +TBB_INTERFACE_VERSION == 11006 + +Changes (w.r.t. Intel TBB 2019 Update 5): + +- Added support for Microsoft* Visual Studio* 2019. +- Added support for enqueuing tbb::task into tbb::task_arena + (https://github.com/01org/tbb/issues/116). +- Improved support for allocator propagation on concurrent_hash_map + assigning and swapping. +- Improved scalable_allocation_command cleanup operations to release + more memory buffered by the calling thread. +- Separated allocation of small and large objects into distinct memory + regions, which helps to reduce excessive memory caching inside the + TBB allocator. + +Preview Features: + +- Removed template class gfx_factory from the flow graph API. + +------------------------------------------------------------------------ +Intel TBB 2019 Update 5 +TBB_INTERFACE_VERSION == 11005 + +Changes (w.r.t. Intel TBB 2019 Update 4): + +- Associating a task_scheduler_observer with an implicit or explicit + task arena is now a fully supported feature. +- Added a CMake module TBBInstallConfig that allows to generate and + install CMake configuration files for TBB packages. + Inspired by Hans Johnson (https://github.com/01org/tbb/pull/119). +- Added node handles, methods merge() and unsafe_extract() to concurrent + unordered containers. +- Added constructors with Compare argument to concurrent_priority_queue + (https://github.com/01org/tbb/issues/109). +- Controlling the stack size of worker threads is now supported for + Universal Windows Platform. +- Improved tbb::zip_iterator to work with algorithms that swap values + via iterators. +- Improved support for user-specified allocators in concurrent_hash_map, + including construction of allocator-aware data types. +- For ReaderWriterMutex types, upgrades and downgrades now succeed if + the mutex is already in the requested state. + Inspired by Niadb (https://github.com/01org/tbb/pull/122). 
+ +Preview Features: + +- The task_scheduler_observer::may_sleep() method has been removed. + +Bugs fixed: + +- Fixed the issue with a pipeline parallel filter executing serially if + it follows a thread-bound filter. +- Fixed a performance regression observed when multiple parallel + algorithms start simultaneously. + +------------------------------------------------------------------------ +Intel TBB 2019 Update 4 +TBB_INTERFACE_VERSION == 11004 + +Changes (w.r.t. Intel TBB 2019 Update 3): + +- global_control class is now a fully supported feature. +- Added deduction guides for tbb containers: concurrent_hash_map, + concurrent_unordered_map, concurrent_unordered_set. +- Added tbb::scalable_memory_resource function returning + std::pmr::memory_resource interface to the TBB memory allocator. +- Added tbb::cache_aligned_resource class that implements + std::pmr::memory_resource with cache alignment and no false sharing. +- Added rml::pool_msize function returning the usable size of a memory + block allocated from a given memory pool. +- Added default and copy constructors for tbb::counting_iterator + and tbb::zip_iterator. +- Added TBB_malloc_replacement_log function to obtain the status of + dynamic memory allocation replacement (Windows* only). +- CMake configuration file now supports release-only and debug-only + configurations (https://github.com/01org/tbb/issues/113). +- TBBBuild CMake module takes the C++ version from CMAKE_CXX_STANDARD. + +Bugs fixed: + +- Fixed compilation for tbb::concurrent_vector when used with + std::pmr::polymorphic_allocator. + +Open-source contributions integrated: + +- TBB_INTERFACE_VERSION is included into TBB version in CMake + configuration (https://github.com/01org/tbb/pull/100) + by Hans Johnson. +- Fixed detection of C++17 deduction guides for Visual C++* + (https://github.com/01org/tbb/pull/112) by Marian Klymov. 
+ +------------------------------------------------------------------------ +Intel TBB 2019 Update 3 +TBB_INTERFACE_VERSION == 11003 + +Changes (w.r.t. Intel TBB 2019 Update 2): + +- Added tbb::transform_iterator. +- Added new Makefile target 'profile' to flow graph examples enabling + additional support for Intel(R) Parallel Studio XE tools. +- Added TBB_MALLOC_DISABLE_REPLACEMENT environment variable to switch off + dynamic memory allocation replacement on Windows*. Inspired by + a contribution from Edward Lam. + +Preview Features: + +- Extended flow graph API to support relative priorities for functional + nodes, specified as an optional parameter to the node constructors. + +Open-source contributions integrated: + +- Enabled using process-local futex operations + (https://github.com/01org/tbb/pull/58) by Andrey Semashev. + +------------------------------------------------------------------------ +Intel TBB 2019 Update 2 +TBB_INTERFACE_VERSION == 11002 + +Changes (w.r.t. Intel TBB 2019 Update 1): + +- Added overloads for parallel_reduce with default partitioner and + user-supplied context. +- Added deduction guides for tbb containers: concurrent_vector, + concurrent_queue, concurrent_bounded_queue, + concurrent_priority_queue. +- Reallocation of memory objects >1MB now copies and frees memory if + the size is decreased twice or more, trading performance off for + reduced memory usage. +- After a period of sleep, TBB worker threads now prefer returning to + their last used task arena. + +Bugs fixed: + +- Fixed compilation of task_group.h when targeting macOS* 10.11 or + earlier (https://github.com/conda-forge/tbb-feedstock/issues/42). + +Open-source contributions integrated: + +- Added constructors with HashCompare argument to concurrent_hash_map + (https://github.com/01org/tbb/pull/63) by arewedancer. + +------------------------------------------------------------------------ +Intel TBB 2019 Update 1 +TBB_INTERFACE_VERSION == 11001 + +Changes (w.r.t. 
Intel TBB 2019): + +- Doxygen documentation could be built with 'make doxygen' command now. + +Changes affecting backward compatibility: + +- Enforced 8 byte alignment for tbb::atomic<long long> and + tbb::atomic<unsigned long long>. On IA-32 architecture it may cause layout + changes in structures that use these types. + +Bugs fixed: + +- Fixed an issue with dynamic memory allocation replacement on Windows* + occurred for some versions of ucrtbase.dll. +- Fixed possible deadlock in tbbmalloc cleanup procedure during process + shutdown. Inspired by a contribution from Edward Lam. +- Fixed usage of std::uncaught_exception() deprecated in C++17 + (https://github.com/01org/tbb/issues/67). +- Fixed a crash when a local observer is activated after an arena + observer. +- Fixed compilation of task_group.h by Visual C++* 15.7 with + /permissive- option (https://github.com/01org/tbb/issues/53). +- Fixed tbb4py to avoid dependency on Intel(R) C++ Compiler shared + libraries. +- Fixed compilation for Anaconda environment with GCC 7.3 and higher. + +Open-source contributions integrated: + +- Fix various warnings when building with Visual C++ + (https://github.com/01org/tbb/pull/70) by Edward Lam. + +------------------------------------------------------------------------ +Intel TBB 2019 +TBB_INTERFACE_VERSION == 11000 + +Changes (w.r.t. Intel TBB 2018 Update 5): + +- Lightweight policy for functional nodes in the flow graph is now + a fully supported feature. +- Reservation support in flow::write_once_node and flow::overwrite_node + is now a fully supported feature. +- Support for Flow Graph Analyzer and improvements for + Intel(R) VTune(TM) Amplifier become a regular feature enabled by + TBB_USE_THREADING_TOOLS macro. +- Added support for std::new_handler in the replacement functions for + global operator new. +- Added C++14 constructors to concurrent unordered containers. +- Added tbb::counting_iterator and tbb::zip_iterator. +- Fixed multiple -Wextra warnings in TBB source files. 
+ +Preview Features: + +- Extracting nodes from a flow graph is deprecated and disabled by + default. To enable, use TBB_DEPRECATED_FLOW_NODE_EXTRACTION macro. + +Changes affecting backward compatibility: + +- Due to internal changes in the flow graph classes, recompilation is + recommended for all binaries that use the flow graph. + +Open-source contributions integrated: + +- Added support for OpenBSD by Anthony J. Bentley. + +------------------------------------------------------------------------ +Intel TBB 2018 Update 6 +TBB_INTERFACE_VERSION == 10006 + +Changes (w.r.t. Intel TBB 2018 Update 5): + +Bugs fixed: + +- Fixed an issue with dynamic memory allocation replacement on Windows* + occurred for some versions of ucrtbase.dll. + +------------------------------------------------------------------------ +Intel TBB 2018 Update 5 +TBB_INTERFACE_VERSION == 10005 + +Changes (w.r.t. Intel TBB 2018 Update 4): + +Preview Features: + +- Added user event tracing API for Intel(R) VTune(TM) Amplifier and + Flow Graph Analyzer. + +Bugs fixed: + +- Fixed the memory allocator to properly support transparent huge pages. +- Removed dynamic exception specifications in tbbmalloc_proxy for C++11 + and later (https://github.com/01org/tbb/issues/41). +- Added -flifetime-dse=1 option when building with GCC on macOS* + (https://github.com/01org/tbb/issues/60). + +Open-source contributions integrated: + +- Added ARMv8 support by Siddhesh Poyarekar. +- Avoid GCC warnings for clearing an object of non-trivial type + (https://github.com/01org/tbb/issues/54) by Daniel Arndt. + +------------------------------------------------------------------------ +Intel TBB 2018 Update 4 +TBB_INTERFACE_VERSION == 10004 + +Changes (w.r.t. Intel TBB 2018 Update 3): + +Preview Features: + +- Improved support for Flow Graph Analyzer and Intel(R) VTune(TM) + Amplifier in the task scheduler and generic parallel algorithms. 
+- Default device set for opencl_node now includes all the devices from + the first available OpenCL* platform. +- Added lightweight policy for functional nodes in the flow graph. It + indicates that the node body has little work and should, if possible, + be executed immediately upon receiving a message, avoiding task + scheduling overhead. + +------------------------------------------------------------------------ +Intel TBB 2018 Update 3 +TBB_INTERFACE_VERSION == 10003 + +Changes (w.r.t. Intel TBB 2018 Update 2): + +Preview Features: + +- Added template class blocked_rangeNd for a generic multi-dimensional + range (requires C++11). Inspired by a contribution from Jeff Hammond. + +Bugs fixed: + +- Fixed a crash with dynamic memory allocation replacement on + Windows* for applications using system() function. +- Fixed parallel_deterministic_reduce to split range correctly when used + with static_partitioner. +- Fixed a synchronization issue in task_group::run_and_wait() which + caused a simultaneous call to task_group::wait() to return + prematurely. + +------------------------------------------------------------------------ +Intel TBB 2018 Update 2 +TBB_INTERFACE_VERSION == 10002 + +Changes (w.r.t. Intel TBB 2018 Update 1): + +- Added support for Android* NDK r16, macOS* 10.13, Fedora* 26. +- Binaries for Universal Windows Driver (vc14_uwd) now link with static + Microsoft* runtime libraries, and are only available in commercial + releases. +- Extended flow graph documentation with more code samples. + +Preview Features: + +- Added a Python* module for multi-processing computations in numeric + Python* libraries. + +Bugs fixed: + +- Fixed constructors of concurrent_hash_map to be exception-safe. +- Fixed auto-initialization in the main thread to be cleaned up at + shutdown. +- Fixed a crash when tbbmalloc_proxy is used together with dbghelp. +- Fixed static_partitioner to assign tasks properly in case of nested + parallelism. 
+ +------------------------------------------------------------------------ Intel TBB 2018 Update 1 TBB_INTERFACE_VERSION == 10001 @@ -235,7 +666,7 @@ Changes (w.r.t. Intel TBB 4.4 Update 5): - Added TBB_USE_GLIBCXX_VERSION macro to specify the version of GNU libstdc++ when it cannot be properly recognized, e.g. when used with Clang on Linux* OS. Inspired by a contribution from David A. -- Added graph/stereo example to demostrate tbb::flow::async_msg. +- Added graph/stereo example to demonstrate tbb::flow::async_msg. - Removed a few cases of excessive user data copying in the flow graph. - Reworked split_node to eliminate unnecessary overheads. - Added support for C++11 move semantics to the argument of @@ -946,7 +1377,7 @@ Changes (w.r.t. Intel TBB 4.1 Update 4): were added on OS X*. - For OS X* exact exception propagation is supported with Clang; it requires use of libc++ and corresponding Intel TBB binaries. -- Support for C++11 initializer lists in constructor and assigment +- Support for C++11 initializer lists in constructor and assignment has been added to concurrent_hash_map, concurrent_unordered_set, concurrent_unordered_multiset, concurrent_unordered_map, concurrent_unordered_multimap. @@ -1542,7 +1973,7 @@ Changes (w.r.t. 20100310 open-source release): - Reworked enumerable_thread_specific to use a custom implementation of hash map that is more efficient for ETS usage models. - Added example for class task_group; see examples/task_group/sudoku. -- Removed two examples, as they were long outdated and superceded: +- Removed two examples, as they were long outdated and superseded: pipeline/text_filter (use pipeline/square); parallel_while/parallel_preorder (use parallel_do/parallel_preorder). - PDF documentation updated. @@ -1760,7 +2191,7 @@ Changes (w.r.t. Intel TBB 2.1 U3 commercial-aligned release): - Added tests for aligned memory allocations and malloc replacement. - Several improvements for better bundling with Intel(R) C++ Compiler. 
-- A few other small changes in code and documentaion. +- A few other small changes in code and documentation. Bugs fixed: diff --git a/src/tbb/Doxyfile b/src/tbb/Doxyfile new file mode 100644 index 00000000..3c7727f8 --- /dev/null +++ b/src/tbb/Doxyfile @@ -0,0 +1,1325 @@ +# Doxyfile 1.4.7 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project +# +# All text after a hash (#) is considered a comment and will be ignored +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" ") + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded +# by quotes) that should identify the project. + +PROJECT_NAME = "Intel(R) Threading Building Blocks Doxygen Documentation" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. + +PROJECT_NUMBER = "version 4.2.3" + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. + +OUTPUT_DIRECTORY = + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. 
+# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, +# Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese, +# Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian, +# Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, +# Swedish, and Ukrainian. + +OUTPUT_LANGUAGE = English + +# This tag can be used to specify the encoding used in the generated output. +# The encoding is not always determined by the language that is chosen, +# but also whether or not the output is meant for Windows or non-Windows users. +# In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES +# forces the Windows encoding (this is the default for the Windows binary), +# whereas setting the tag to NO uses a Unix-style encoding (the default for +# all platforms other than Windows). + +USE_WINDOWS_ENCODING = NO + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. 
+ +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. +# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = NO + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. 
+ +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful if your file system +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = YES + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like the Qt-style comments (thus requiring an +# explicit @brief command for a brief description). + +JAVADOC_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the DETAILS_AT_TOP tag is set to YES then Doxygen +# will output the detailed description near the top, like JavaDoc. +# If set to NO, the detailed description appears after the member +# documentation. + +DETAILS_AT_TOP = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. 
If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for Java. +# For instance, namespaces will be presented as packages, qualified scopes +# will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = NO + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. 
+ +MARKDOWN_SUPPORT = YES + +# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up +# to that level are automatically included in the table of contents, even if +# they do not have an id attribute. +# Note: This feature currently applies only to Markdown headings. +# Minimum value: 0, maximum value: 99, default value: 0. +# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. + +TOC_INCLUDE_HEADINGS = 0 + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want to +# include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. 
+ +SUBGROUPING = YES + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = YES + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. 
+ +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = YES + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. 
If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = NO + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = NO + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = NO + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if sectionname ... \endif. + +ENABLED_SECTIONS = INTERNAL + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or define consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. 
Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and defines in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. + +SHOW_USED_FILES = YES + +# If the sources in your project are distributed over multiple directories +# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy +# in the documentation. The default is NO. + +SHOW_DIRECTORIES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from the +# version control system). Doxygen will invoke the program by executing (via +# popen()) the command <command> <input-file>, where <command> is the value of +# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file +# provided by doxygen. Whatever the program writes to standard output +# is used as the file version. See the manual for examples. + +FILE_VERSION_FILTER = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. 
If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. 
+ +INPUT = include/ src/tbb/ + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx +# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py + +FILE_PATTERNS = + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used select whether or not files or +# directories that are symbolic links (a Unix filesystem feature) are excluded +# from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. 
+ +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain images that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command <filter> <input-file>, where +# <filter> is the value of the INPUT_FILTER tag, and <input-file> is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. If FILTER_PATTERNS is specified, this tag will be +# ignored. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER +# is applied to all files. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated.
Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = YES + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. + +INLINE_SOURCES = YES + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C and C++ comments will always remain visible. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES (the default) +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = YES + +# If the REFERENCES_RELATION tag is set to YES (the default) +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = YES + +# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) +# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from +# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will +# link to the source code. Otherwise they will link to the documentation. + +REFERENCES_LINK_SOURCE = NO + +# If the USE_HTAGS tag is set to YES then the references to source code +# will point to the HTML generated by the htags(1) tool instead of doxygen +# built-in source browser. The htags tool is part of GNU's global source +# tagging system (see http://www.gnu.org/software/global/global.html). You +# will need version 4.8.6 or higher. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this.
+ +VERBATIM_HEADERS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. + +ALPHABETICAL_INDEX = YES + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. 
If it is left blank doxygen will generate a +# standard header. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = doc/copyright_brand_disclaimer_doxygen.txt + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If the tag is left blank doxygen +# will generate a default style sheet. Note that doxygen will try to copy +# the style sheet file to the HTML output directory, so don't put your own +# stylesheet in the HTML output directory as well, or it will be erased! + +HTML_STYLESHEET = + +# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, +# files or namespaces will be aligned in HTML using tables. If set to +# NO a bullet list will be used. + +HTML_ALIGN_MEMBERS = YES + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. 
+ +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). + +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = NO + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index at +# top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. + +DISABLE_INDEX = NO + +# This tag can be used to set the number of enum values (range [1..20]) +# that doxygen will group on one line in the generated HTML documentation. + +ENUM_VALUES_PER_LINE = 4 + +# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be +# generated containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, +# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are +# probably better off using the HTML help feature. + +GENERATE_TREEVIEW = YES + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 250 + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. 
+ +GENERATE_LATEX = NO + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, a4wide, letter, legal and +# executive. If left blank a4wide will be used. + +PAPER_TYPE = a4wide + +# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = NO + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile.
Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = NO + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = NO + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. 
You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. + +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = NO + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. 
+ +XML_OUTPUT = xml + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = NO + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. 
+ +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. This is useful +# if you want to understand what is going on. On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = NO + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES (the default) the include files +# in the INCLUDE_PATH (see below) will be searched if a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor.
+ +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. + +PREDEFINED = TBB_PREVIEW_FLOW_GRAPH_FEATURES \ + TBB_PREVIEW_FLOW_GRAPH_NODES \ + __TBB_PREVIEW_OPENCL_NODE \ + __TBB_CPP11_RVALUE_REF_PRESENT \ + __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT \ + __TBB_IMPLICIT_MOVE_PRESENT \ + __TBB_EXCEPTION_PTR_PRESENT \ + __TBB_STATIC_ASSERT_PRESENT \ + __TBB_CPP11_TUPLE_PRESENT \ + __TBB_INITIALIZER_LISTS_PRESENT \ + __TBB_CONSTEXPR_PRESENT \ + __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT \ + __TBB_NOEXCEPT_PRESENT \ + __TBB_CPP11_STD_BEGIN_END_PRESENT \ + __TBB_CPP11_AUTO_PRESENT \ + __TBB_CPP11_DECLTYPE_PRESENT \ + __TBB_CPP11_LAMBDAS_PRESENT \ + __TBB_CPP11_DEFAULT_FUNC_TEMPLATE_ARGS_PRESENT \ + __TBB_OVERRIDE_PRESENT \ + __TBB_ALIGNAS_PRESENT \ + __TBB_CPP11_TEMPLATE_ALIASES_PRESENT \ + __TBB_FLOW_GRAPH_CPP11_FEATURES \ + __TBB_PREVIEW_STREAMING_NODE + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition. 
+ +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all function-like macros that are alone +# on a line, have an all uppercase name, and do not end with a semicolon. Such +# function macros are typically used for boiler-plate code, and will confuse +# the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. +# Optionally an initial location of the external documentation +# can be added for each tagfile. The format of a tag file without +# this location is as follows: +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths or +# URLs. If a location is present for each tag, the installdox tool +# does not have to be run to correct the links. +# Note that each tag file must have a unique name +# (where the name does NOT include the path) +# If a tag file is not located in the directory in which doxygen +# is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. 
+ +EXTERNAL_GROUPS = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of `which perl'). + +PERL_PATH = /usr/bin/perl + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option is superseded by the HAVE_DOT option below. This is only a +# fallback. It is recommended to install and use dot, since it yields more +# powerful graphs. + +CLASS_DIAGRAMS = YES + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default) + +HAVE_DOT = YES + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the +# CLASS_DIAGRAMS tag to NO. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes.
+ +COLLABORATION_GRAPH = YES + +# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for groups, showing the direct groups dependencies + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. + +UML_LOOK = YES + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. + +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will +# generate a call dependency graph for every global function or class method. +# Note that enabling this option will significantly increase the time of a run. +# So in most cases it will be better to enable call graphs for selected +# functions only using the \callgraph command. + +CALL_GRAPH = YES + +# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then doxygen will +# generate a caller dependency graph for every global function or class method. +# Note that enabling this option will significantly increase the time of a run. +# So in most cases it will be better to enable caller graphs for selected +# functions only using the \callergraph command. 
+ +CALLER_GRAPH = YES + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will show a graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES +# then doxygen will show the dependencies a directory has on other directories +# in a graphical way. The dependency relations are determined by the #include +# relations between the files in the directories. + +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are png, jpg, or gif +# If left blank png will be used. + +DOT_IMAGE_FORMAT = svg + +# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to +# enable generation of interactive SVG images that allow zooming and panning. +# +# Note that this requires a modern browser other than Internet Explorer. Tested +# and working are Firefox, Chrome, Safari, and Opera. +# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make +# the SVG files visible. Older versions of IE do not have SVG support. +# The default value is: NO. +# This tag requires that the tag HAVE_DOT is set to YES. + +INTERACTIVE_SVG = YES + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. + +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = + +# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width +# (in pixels) of the graphs generated by dot. If a graph becomes larger than +# this value, doxygen will try to truncate the graph, so that it fits within +# the specified constraint. Beware that most browsers cannot cope with very +# large images.
+ +MAX_DOT_GRAPH_WIDTH = 1024 + +# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height +# (in pixels) of the graphs generated by dot. If a graph becomes larger than +# this value, doxygen will try to truncate the graph, so that it fits within +# the specified constraint. Beware that most browsers cannot cope with very +# large images. + +MAX_DOT_GRAPH_HEIGHT = 1024 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lay further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that a graph may be further truncated if the graph's +# image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH +# and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default), +# the graph is not depth-constrained. + +# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes +# that will be shown in the graph. If the number of nodes in a graph becomes +# larger than this value, doxygen will truncate the graph, which is visualized +# by representing a node as a red box. Note that if the number of direct +# children of the root node in a graph is already larger than +# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that +# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. +# Minimum value: 0, maximum value: 10000, default value: 50. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_GRAPH_MAX_NODES = 200 + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, which results in a white background.
+# Warning: Depending on the platform used, enabling this option may lead to +# badly anti-aliased labels on the edges of a graph (i.e. they become hard to +# read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. + +DOT_MULTI_TARGETS = YES + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. + +DOT_CLEANUP = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to the search engine +#--------------------------------------------------------------------------- + +# The SEARCHENGINE tag specifies whether or not a search engine should be +# used. If set to NO the values of all tags below this one will be ignored. + +SEARCHENGINE = YES diff --git a/src/tbb/COPYING b/src/tbb/LICENSE similarity index 100% rename from src/tbb/COPYING rename to src/tbb/LICENSE diff --git a/src/tbb/Makefile b/src/tbb/Makefile index d7dd56f9..f0641893 100644 --- a/src/tbb/Makefile +++ b/src/tbb/Makefile @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,12 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# tbb_root?=. 
+cfg?=release include $(tbb_root)/build/common.inc .PHONY: default all tbb tbbmalloc tbbproxy test examples @@ -29,38 +26,38 @@ default: tbb tbbmalloc $(if $(use_proxy),tbbproxy) all: tbb tbbmalloc tbbproxy test examples tbb: mkdir - $(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.tbb cfg=debug $(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.tbb cfg=release tbbmalloc: mkdir - $(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=debug malloc $(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=release malloc tbbproxy: mkdir - $(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.tbbproxy cfg=debug tbbproxy $(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.tbbproxy cfg=release tbbproxy +tbbbind: mkdir + $(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.tbbbind cfg=release tbbbind + test: tbb tbbmalloc $(if $(use_proxy),tbbproxy) - -$(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=debug malloc_test - -$(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.test cfg=debug -$(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=release malloc_test -$(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.test cfg=release rml: mkdir - $(MAKE) -C "$(work_dir)_debug" -r -f $(tbb_root)/build/Makefile.rml cfg=debug $(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.rml cfg=release - examples: tbb tbbmalloc $(MAKE) -C examples -r -f Makefile tbb_root=.. 
release test +python: tbb + $(MAKE) -C "$(work_dir)_release" -rf $(tbb_root)/python/Makefile install + +doxygen: + doxygen Doxyfile + .PHONY: clean clean_examples mkdir info clean: clean_examples $(shell $(RM) $(work_dir)_release$(SLASH)*.* >$(NUL) 2>$(NUL)) $(shell $(RD) $(work_dir)_release >$(NUL) 2>$(NUL)) - $(shell $(RM) $(work_dir)_debug$(SLASH)*.* >$(NUL) 2>$(NUL)) - $(shell $(RD) $(work_dir)_debug >$(NUL) 2>$(NUL)) @echo clean done clean_examples: @@ -68,8 +65,7 @@ clean_examples: mkdir: $(shell $(MD) "$(work_dir)_release" >$(NUL) 2>$(NUL)) - $(shell $(MD) "$(work_dir)_debug" >$(NUL) 2>$(NUL)) - @echo Created $(work_dir)_release and ..._debug directories + @echo Created the $(work_dir)_release directory info: @echo OS: $(tbb_os) @@ -77,4 +73,3 @@ info: @echo compiler=$(compiler) @echo runtime=$(runtime) @echo tbb_build_prefix=$(tbb_build_prefix) - diff --git a/src/tbb/README.md b/src/tbb/README.md new file mode 100644 index 00000000..909be027 --- /dev/null +++ b/src/tbb/README.md @@ -0,0 +1,33 @@ +# Threading Building Blocks 2020 +[![Stable release](https://img.shields.io/badge/version-2020.1-green.svg)](https://github.com/intel/tbb/releases/tag/v2020.1) +[![Apache License Version 2.0](https://img.shields.io/badge/license-Apache_2.0-green.svg)](LICENSE) + +Threading Building Blocks (TBB) lets you easily write parallel C++ programs that take +full advantage of multicore performance, that are portable, composable and have future-proof scalability. + +## Release Information +Here are the latest [Changes](CHANGES) and [Release Notes](doc/Release_Notes.txt) (contains system requirements and known issues). + +Since [2018 U5](https://github.com/intel/tbb/releases/tag/2018_U5) TBB binary packages include [Parallel STL](https://github.com/intel/parallelstl) as a high-level component. + +## Documentation +* TBB [tutorial](https://software.intel.com/en-us/tbb-tutorial) +* TBB general documentation: [stable](https://software.intel.com/en-us/tbb-documentation). 
For latest documentation please refer to the [latest](https://github.com/intel/tbb/releases/latest) release assets. + +## Support +Please report issues and suggestions via +[GitHub issues](https://github.com/intel/tbb/issues) or start a topic on the +[TBB forum](http://software.intel.com/en-us/forums/intel-threading-building-blocks/). + +## How to Contribute +To contribute to TBB, please open a GitHub pull request (preferred) or send us a patch by e-mail. +Threading Building Blocks is licensed under [Apache License, Version 2.0](LICENSE). +By its terms, contributions submitted to the project are also done under that license. + +## Engineering team contacts +* [E-mail us.](mailto:inteltbbdevelopers@intel.com) + +------------------------------------------------------------------------ +Intel and the Intel logo are trademarks of Intel Corporation or its subsidiaries in the U.S. and/or other countries. + +\* Other names and brands may be claimed as the property of others. diff --git a/src/tbb/build/.gitignore b/src/tbb/build/.gitignore deleted file mode 100644 index 53dc09fc..00000000 --- a/src/tbb/build/.gitignore +++ /dev/null @@ -1 +0,0 @@ -lib_*/ diff --git a/src/tbb/build/AIX.gcc.inc b/src/tbb/build/AIX.gcc.inc index 18e6e6ad..2f45d739 100644 --- a/src/tbb/build/AIX.gcc.inc +++ b/src/tbb/build/AIX.gcc.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-# -# -# -# COMPILE_ONLY = -c -MMD PREPROC_ONLY = -E -x c++ diff --git a/src/tbb/build/AIX.inc b/src/tbb/build/AIX.inc index abe12d52..e02a6d31 100644 --- a/src/tbb/build/AIX.inc +++ b/src/tbb/build/AIX.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# ifndef arch arch:=$(shell uname -p) @@ -22,7 +18,7 @@ ifndef arch endif ifndef runtime - gcc_version:=$(shell gcc -dumpversion) + gcc_version:=$(shell gcc -dumpfullversion -dumpversion) os_version:=$(shell uname -r) os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//') export runtime:=cc$(gcc_version)_kernel$(os_kernel_version) diff --git a/src/tbb/build/BSD.clang.inc b/src/tbb/build/BSD.clang.inc new file mode 100644 index 00000000..7e9e4ebc --- /dev/null +++ b/src/tbb/build/BSD.clang.inc @@ -0,0 +1,106 @@ +# Copyright (c) 2005-2020 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +COMPILE_ONLY = -c -MMD +PREPROC_ONLY = -E -x c++ +INCLUDE_KEY = -I +DEFINE_KEY = -D +OUTPUT_KEY = -o # +OUTPUTOBJ_KEY = -o # +PIC_KEY = -fPIC +WARNING_AS_ERROR_KEY = -Werror +WARNING_KEY = -Wall +TEST_WARNING_KEY = -Wextra -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor +WARNING_SUPPRESS = -Wno-parentheses -Wno-non-virtual-dtor -Wno-dangling-else +DYLIB_KEY = -shared +EXPORT_KEY = -Wl,--version-script, +LIBDL = + +CPLUS = clang++ +CONLY = clang +LIB_LINK_FLAGS = $(DYLIB_KEY) -Wl,-soname=$(BUILDING_LIBRARY) +LIBS += -lpthread +LINK_FLAGS = -Wl,-rpath-link=. -Wl,-rpath=. -rdynamic +C_FLAGS = $(CPLUS_FLAGS) + +ifeq ($(cfg), release) + CPLUS_FLAGS = $(ITT_NOTIFY) -g -O2 -DUSE_PTHREAD +endif +ifeq ($(cfg), debug) + CPLUS_FLAGS = -DTBB_USE_DEBUG $(ITT_NOTIFY) -g -O0 -DUSE_PTHREAD +endif + +ifneq (,$(stdlib)) + CPLUS_FLAGS += -stdlib=$(stdlib) + LIB_LINK_FLAGS += -stdlib=$(stdlib) +endif + +TBB_ASM.OBJ= +MALLOC_ASM.OBJ= + +ifeq (intel64,$(arch)) + ITT_NOTIFY = -DDO_ITT_NOTIFY + CPLUS_FLAGS += -m64 + LIB_LINK_FLAGS += -m64 +endif + +ifeq (ia32,$(arch)) + ITT_NOTIFY = -DDO_ITT_NOTIFY + CPLUS_FLAGS += -m32 -march=pentium4 + LIB_LINK_FLAGS += -m32 +endif + +ifeq (ppc64,$(arch)) + CPLUS_FLAGS += -m64 + LIB_LINK_FLAGS += -m64 +endif + +ifeq (ppc32,$(arch)) + CPLUS_FLAGS += -m32 + LIB_LINK_FLAGS += -m32 +endif + +ifeq (bg,$(arch)) + CPLUS = bgclang++ + CONLY = bgclang +endif + +#------------------------------------------------------------------------------ +# Setting assembler data. +#------------------------------------------------------------------------------ +ASM = as +ifeq (intel64,$(arch)) + ASM_FLAGS += --64 +endif +ifeq (ia32,$(arch)) + ASM_FLAGS += --32 +endif +ifeq ($(cfg),debug) + ASM_FLAGS += -g +endif + +ASSEMBLY_SOURCE=$(arch)-gas +#------------------------------------------------------------------------------ +# End of setting assembler data. 
+#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# Setting tbbmalloc data. +#------------------------------------------------------------------------------ + +M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions + +#------------------------------------------------------------------------------ +# End of setting tbbmalloc data. +#------------------------------------------------------------------------------ diff --git a/src/tbb/build/BSD.inc b/src/tbb/build/BSD.inc new file mode 100644 index 00000000..e5ea784d --- /dev/null +++ b/src/tbb/build/BSD.inc @@ -0,0 +1,70 @@ +# Copyright (c) 2005-2020 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +ifndef arch + ifeq ($(shell uname -m),i386) + export arch:=ia32 + endif + ifeq ($(shell uname -m),ia64) + export arch:=ia64 + endif + ifeq ($(shell uname -m),amd64) + export arch:=intel64 + endif +endif + +ifndef runtime + clang_version:=$(shell clang --version | sed -n "1s/.*version \(.*[0-9]\) .*/\1/p") + os_version:=$(shell uname -r) + os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//') + export runtime:=cc$(clang_version)_kernel$(os_kernel_version) +endif + +native_compiler := clang +export compiler ?= clang +debugger ?= gdb + +CMD=$(SHELL) -c +CWD=$(shell pwd) +RM?=rm -f +RD?=rmdir +MD?=mkdir -p +NUL= /dev/null +SLASH=/ +MAKE_VERSIONS=sh $(tbb_root)/build/version_info_linux.sh $(VERSION_FLAGS) >version_string.ver +MAKE_TBBVARS=sh $(tbb_root)/build/generate_tbbvars.sh + +ifdef LD_LIBRARY_PATH + export LD_LIBRARY_PATH := .:$(LD_LIBRARY_PATH) +else + export LD_LIBRARY_PATH := . +endif + +####### Build settings ######################################################## + +OBJ = o +DLL = so +LIBEXT=so + +TBB.LST = +TBB.DEF = +TBB.DLL = libtbb$(CPF_SUFFIX)$(DEBUG_SUFFIX).$(DLL) +TBB.LIB = $(TBB.DLL) +LINK_TBB.LIB = $(TBB.LIB) + +MALLOC.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL) +MALLOC.LIB = $(MALLOC.DLL) +LINK_MALLOC.LIB = $(MALLOC.LIB) + +TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh $(largs) diff --git a/src/tbb/build/FreeBSD.clang.inc b/src/tbb/build/FreeBSD.clang.inc index 3579603d..f4cdf128 100644 --- a/src/tbb/build/FreeBSD.clang.inc +++ b/src/tbb/build/FreeBSD.clang.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,100 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-# -# -# -# - -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c++ -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -Wall -TEST_WARNING_KEY = -Wextra -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor -WARNING_SUPPRESS = -Wno-parentheses -Wno-non-virtual-dtor -Wno-dangling-else -DYLIB_KEY = -shared -EXPORT_KEY = -Wl,--version-script, -LIBDL = - -CPLUS = clang++ -CONLY = clang -LIB_LINK_FLAGS = $(DYLIB_KEY) -Wl,-soname=$(BUILDING_LIBRARY) -LIBS += -lpthread -lrt -LINK_FLAGS = -Wl,-rpath-link=. -Wl,-rpath=. -rdynamic -C_FLAGS = $(CPLUS_FLAGS) - -ifeq ($(cfg), release) - CPLUS_FLAGS = $(ITT_NOTIFY) -g -O2 -DUSE_PTHREAD -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = -DTBB_USE_DEBUG $(ITT_NOTIFY) -g -O0 -DUSE_PTHREAD -endif - -ifneq (,$(stdlib)) - CPLUS_FLAGS += -stdlib=$(stdlib) - LIB_LINK_FLAGS += -stdlib=$(stdlib) -endif - -TBB_ASM.OBJ= -MALLOC_ASM.OBJ= - -ifeq (intel64,$(arch)) - ITT_NOTIFY = -DDO_ITT_NOTIFY - CPLUS_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ia32,$(arch)) - ITT_NOTIFY = -DDO_ITT_NOTIFY - CPLUS_FLAGS += -m32 -march=pentium4 - LIB_LINK_FLAGS += -m32 -endif - -ifeq (ppc64,$(arch)) - CPLUS_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ppc32,$(arch)) - CPLUS_FLAGS += -m32 - LIB_LINK_FLAGS += -m32 -endif - -ifeq (bg,$(arch)) - CPLUS = bgclang++ - CONLY = bgclang -endif - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ -ASM = as -ifeq (intel64,$(arch)) - ASM_FLAGS += --64 -endif -ifeq (ia32,$(arch)) - ASM_FLAGS += --32 -endif -ifeq ($(cfg),debug) - ASM_FLAGS += -g -endif - -ASSEMBLY_SOURCE=$(arch)-gas -#------------------------------------------------------------------------------ -# End of setting assembler data. 
-#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions +include $(tbb_root)/build/BSD.clang.inc -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ +LIBS += -lrt diff --git a/src/tbb/build/FreeBSD.gcc.inc b/src/tbb/build/FreeBSD.gcc.inc index 794cb7c4..7bd8b073 100644 --- a/src/tbb/build/FreeBSD.gcc.inc +++ b/src/tbb/build/FreeBSD.gcc.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-# -# -# -# COMPILE_ONLY = -c -MMD PREPROC_ONLY = -E -x c++ @@ -36,7 +32,7 @@ C_FLAGS = $(CPLUS_FLAGS) # gcc 6.0 and later have -flifetime-dse option that controls # elimination of stores done outside the object lifetime -ifneq (,$(shell gcc -dumpversion | egrep "^([6-9])")) +ifneq (,$(shell gcc -dumpfullversion -dumpversion | egrep "^([6-9]|1[0-9])")) # keep pre-contruction stores for zero initialization DSE_KEY = -flifetime-dse=1 endif diff --git a/src/tbb/build/FreeBSD.inc b/src/tbb/build/FreeBSD.inc index 3bd6a064..8b85bf02 100644 --- a/src/tbb/build/FreeBSD.inc +++ b/src/tbb/build/FreeBSD.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,64 +11,5 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# - -ifndef arch - ifeq ($(shell uname -m),i386) - export arch:=ia32 - endif - ifeq ($(shell uname -m),ia64) - export arch:=ia64 - endif - ifeq ($(shell uname -m),amd64) - export arch:=intel64 - endif -endif - -ifndef runtime - clang_version:=$(shell clang -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/") - os_version:=$(shell uname -r) - os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//') - export runtime:=cc$(clang_version)_kernel$(os_kernel_version) -endif - -native_compiler := clang -export compiler ?= clang -debugger ?= gdb - -CMD=$(SHELL) -c -CWD=$(shell pwd) -RM?=rm -f -RD?=rmdir -MD?=mkdir -p -NUL= /dev/null -SLASH=/ -MAKE_VERSIONS=sh $(tbb_root)/build/version_info_linux.sh $(VERSION_FLAGS) >version_string.ver -MAKE_TBBVARS=sh $(tbb_root)/build/generate_tbbvars.sh - -ifdef LD_LIBRARY_PATH - export LD_LIBRARY_PATH := .:$(LD_LIBRARY_PATH) -else - export LD_LIBRARY_PATH := . 
-endif - -####### Build settings ######################################################## - -OBJ = o -DLL = so -LIBEXT=so - -TBB.LST = -TBB.DEF = -TBB.DLL = libtbb$(CPF_SUFFIX)$(DEBUG_SUFFIX).$(DLL) -TBB.LIB = $(TBB.DLL) -LINK_TBB.LIB = $(TBB.LIB) - -MALLOC.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL) -MALLOC.LIB = $(MALLOC.DLL) -LINK_MALLOC.LIB = $(MALLOC.LIB) -TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh $(largs) +include $(tbb_root)/build/BSD.inc diff --git a/src/tbb/build/Makefile.rml b/src/tbb/build/Makefile.rml index 241d4bfb..fc0cb774 100644 --- a/src/tbb/build/Makefile.rml +++ b/src/tbb/build/Makefile.rml @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# # TODO: investigate why version_string.ver is not complete when $(RML_SERVER.OBJ) is being compiled. 
.NOTPARALLEL: @@ -64,6 +60,13 @@ TBB_DEP_NON_RML_TEST?= cache_aligned_allocator_rml.$(OBJ) dynamic_link_rml.$(OBJ ifeq ($(cfg),debug) RML_TBB_DEP+= spin_mutex_rml.$(OBJ) TBB_DEP_RML_TEST?= $(RML_ASM.OBJ) tbb_misc_rml.$(OBJ) + +ifeq (windows icl,$(tbb_os) $(compiler_name)) +# Some versions of ICC link to the wrong version of the vc runtime +# libcpmtd.lib should be used instead of libcpmt.lib +LIB_LINK_FLAGS += /nodefaultlib:libcpmt.lib +endif + else TBB_DEP_RML_TEST?= $(RML_ASM.OBJ) endif diff --git a/src/tbb/build/Makefile.tbb b/src/tbb/build/Makefile.tbb index b66c309b..15568998 100644 --- a/src/tbb/build/Makefile.tbb +++ b/src/tbb/build/Makefile.tbb @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# #------------------------------------------------------------------------------ # Define rules for making the TBB shared library. @@ -75,7 +71,7 @@ TBB_CPLUS.OBJ = concurrent_hash_map.$(OBJ) \ tbb_main.$(OBJ) # OLD/Legacy object files for backward binary compatibility -ifeq (,$(findstring $(DEFINE_KEY)TBB_NO_LEGACY,$(CXXFLAGS))) +ifeq (,$(findstring $(DEFINE_KEY)TBB_NO_LEGACY,$(CPLUS_FLAGS))) TBB_CPLUS_OLD.OBJ = \ concurrent_vector_v2.$(OBJ) \ concurrent_queue_v2.$(OBJ) \ diff --git a/src/tbb/build/Makefile.tbbbind b/src/tbb/build/Makefile.tbbbind new file mode 100644 index 00000000..821009a3 --- /dev/null +++ b/src/tbb/build/Makefile.tbbbind @@ -0,0 +1,69 @@ +# Copyright (c) 2005-2020 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#------------------------------------------------------------------------------ +# Define rules for making the tbbbind shared library. +#------------------------------------------------------------------------------ + +tbb_root ?= "$(TBBROOT)" +BUILDING_PHASE=1 +include $(tbb_root)/build/common.inc +CPLUS_FLAGS += $(SDL_FLAGS) +DEBUG_SUFFIX=$(findstring _debug,_$(cfg)) + +#------------------------------------------------------------ +# Define static pattern rules dealing with .cpp source files +#------------------------------------------------------------ +$(warning CONFIG: cfg=$(cfg) arch=$(arch) compiler=$(compiler) target=$(target) runtime=$(runtime)) + +.PHONY: tbbbind +.PRECIOUS: %.$(OBJ) + +VPATH = $(tbb_root)/src/tbb/$(ASSEMBLY_SOURCE) $(tbb_root)/src/tbb $(tbb_root)/src/old $(tbb_root)/src/rml/client + +CPLUS_FLAGS += $(PIC_KEY) $(DSE_KEY) $(DEFINE_KEY)__TBBBIND_BUILD=1 + +# Suppress superfluous warnings for tbbbind compilation +WARNING_KEY += $(WARNING_SUPPRESS) + +include $(tbb_root)/build/common_rules.inc + +TBBBIND.OBJ = tbb_bind.$(OBJ) + +ifneq (,$(TBBBIND.DEF)) +tbbbind.def: $(TBBBIND.DEF) + $(CPLUS) $(PREPROC_ONLY) $< $(CPLUS_FLAGS) $(INCLUDES) > $@ + +LIB_LINK_FLAGS += $(EXPORT_KEY)tbbbind.def +$(TBBBIND.DLL): tbbbind.def +endif + +ifneq (,$(TBBBIND.DLL)) +$(TBBBIND.DLL): BUILDING_LIBRARY = $(TBBBIND.DLL) +$(TBBBIND.DLL): $(TBBBIND.OBJ) $(TBBBIND_NO_VERSION.DLL) + $(LIB_LINK_CMD) $(LIB_OUTPUT_KEY)$(TBBBIND.DLL) $(TBBBIND.OBJ) $(HWLOC.LIB) $(LIB_LINK_FLAGS) +endif + +ifneq (,$(TBBBIND_NO_VERSION.DLL)) +$(TBBBIND_NO_VERSION.DLL): + echo 
"INPUT ($(TBBBIND.DLL))" > $(TBBBIND_NO_VERSION.DLL) +endif + +tbbbind: $(TBBBIND.DLL) + +#clean: +# $(RM) *.$(OBJ) *.$(DLL) *.res *.map *.ilk *.pdb *.exp *.manifest *.tmp *.d core core.*[0-9][0-9] *.ver + +# Include automatically generated dependencies +-include *.d diff --git a/src/tbb/build/Makefile.tbbmalloc b/src/tbb/build/Makefile.tbbmalloc index dedcfa56..4ba504f8 100644 --- a/src/tbb/build/Makefile.tbbmalloc +++ b/src/tbb/build/Makefile.tbbmalloc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# # default target default_malloc: malloc malloc_test @@ -51,11 +47,11 @@ ORIG_LINK_MALLOC.LIB:=$(LINK_MALLOC.LIB) MALLOC_CPLUS.OBJ = backend.$(OBJ) large_objects.$(OBJ) backref.$(OBJ) tbbmalloc.$(OBJ) MALLOC.OBJ := $(MALLOC_CPLUS.OBJ) $(MALLOC_ASM.OBJ) itt_notify_malloc.$(OBJ) frontend.$(OBJ) PROXY.OBJ := proxy.$(OBJ) tbb_function_replacement.$(OBJ) -M_CPLUS_FLAGS := $(subst $(WARNING_KEY),,$(M_CPLUS_FLAGS)) $(DEFINE_KEY)__TBBMALLOC_BUILD=1 +M_CPLUS_FLAGS += $(DEFINE_KEY)__TBBMALLOC_BUILD=1 M_INCLUDES := $(INCLUDES) $(INCLUDE_KEY)$(MALLOC_ROOT) $(INCLUDE_KEY)$(MALLOC_SOURCE_ROOT) # Suppress superfluous warnings for TBBMalloc compilation -$(MALLOC.OBJ): M_CPLUS_FLAGS += $(WARNING_SUPPRESS) +$(MALLOC.OBJ): M_CPLUS_FLAGS := $(subst $(WARNING_KEY),,$(M_CPLUS_FLAGS)) $(WARNING_SUPPRESS) # Suppress superfluous warnings for TBBMalloc proxy compilation $(PROXY.OBJ): CPLUS_FLAGS += $(WARNING_SUPPRESS) @@ -138,11 +134,14 @@ MALLOC_TESTS = test_ScalableAllocator.$(TEST_EXT) \ test_malloc_pure_c.$(TEST_EXT) \ test_malloc_whitebox.$(TEST_EXT) \ test_malloc_used_by_lib.$(TEST_EXT) \ - 
test_malloc_lib_unload.$(TEST_EXT) + test_malloc_lib_unload.$(TEST_EXT) \ + test_malloc_shutdown_hang.$(TEST_EXT) ifneq (,$(MALLOCPROXY.DLL)) MALLOC_TESTS += test_malloc_overload.$(TEST_EXT) \ test_malloc_overload_proxy.$(TEST_EXT) \ - test_malloc_atexit.$(TEST_EXT) + test_malloc_overload_disable.$(TEST_EXT) \ + test_malloc_atexit.$(TEST_EXT) \ + test_malloc_new_handler.$(TEST_EXT) endif # ----------------------------------------------------- @@ -161,11 +160,14 @@ MALLOC_M_CPLUS_TESTS = test_malloc_whitebox.$(TEST_EXT) test_malloc_lib_unloa test_malloc_used_by_lib.$(TEST_EXT) MALLOC_NO_LIB_TESTS = test_malloc_whitebox.$(TEST_EXT) test_malloc_lib_unload.$(TEST_EXT) \ test_malloc_used_by_lib.$(TEST_EXT) test_malloc_overload.$(TEST_EXT) -MALLOC_LINK_PROXY_TESTS = test_malloc_overload_proxy.$(TEST_EXT) +MALLOC_LINK_PROXY_TESTS = test_malloc_overload_proxy.$(TEST_EXT) test_malloc_new_handler.$(TEST_EXT) MALLOC_ADD_DLL_TESTS = test_malloc_lib_unload.$(TEST_EXT) test_malloc_used_by_lib.$(TEST_EXT) \ test_malloc_atexit.$(TEST_EXT) +MALLOC_SUPPRESS_WARNINGS = test_malloc_whitebox.$(TEST_EXT) test_malloc_pure_c.$(TEST_EXT) -$(MALLOC_M_CPLUS_TESTS): CPLUS_FLAGS=$(M_CPLUS_FLAGS) +$(MALLOC_SUPPRESS_WARNINGS): WARNING_KEY= +$(MALLOC_SUPPRESS_WARNINGS): TEST_WARNING_KEY= +$(MALLOC_M_CPLUS_TESTS): CPLUS_FLAGS:=$(M_CPLUS_FLAGS) $(MALLOC_M_CPLUS_TESTS): INCLUDES=$(M_INCLUDES) $(MALLOC_NO_LIB_TESTS): LINK_MALLOC.LIB= $(MALLOC_NO_LIB_TESTS): LINK_FLAGS+=$(LIBDL) @@ -175,12 +177,12 @@ $(MALLOC_ADD_DLL_TESTS): %.$(TEST_EXT): %_dll.$(DLL) $(MALLOC_ADD_DLL_TESTS): TEST_LIBS+=$(@:.$(TEST_EXT)=_dll.$(LIBEXT)) endif -test_malloc_over%.$(TEST_EXT): CPLUS_FLAGS=$(subst /MT,/MD,$(M_CPLUS_FLAGS)) +test_malloc_over%.$(TEST_EXT): CPLUS_FLAGS:=$(subst /MT,/MD,$(M_CPLUS_FLAGS)) test_malloc_over%.$(TEST_EXT): INCLUDES=$(M_INCLUDES) test_malloc_overload_proxy.$(TEST_EXT): LINK_FLAGS+=$(LIBDL) -test_malloc_atexit_dll.$(DLL): CPLUS_FLAGS=$(subst /MT,/MD,$(M_CPLUS_FLAGS)) -test_malloc_atexit.$(TEST_EXT): 
CPLUS_FLAGS=$(subst /MT,/MD,$(M_CPLUS_FLAGS)) +test_malloc_atexit_dll.$(DLL): CPLUS_FLAGS:=$(subst /MT,/MD,$(M_CPLUS_FLAGS)) +test_malloc_atexit.$(TEST_EXT): CPLUS_FLAGS:=$(subst /MT,/MD,$(M_CPLUS_FLAGS)) test_malloc_atexit.$(TEST_EXT): LINK_FLAGS+=$(LIBDL) # on Ubuntu 11.10 linker called with --as-needed, so dependency on libtbbmalloc_proxy # is not created, and malloc overload via linking with -ltbbmalloc_proxy is not working. @@ -205,9 +207,20 @@ test_malloc_whitebox.$(TEST_EXT): LINK_FILES+=$(MALLOC_ASM.OBJ) test_malloc_lib_unload_dll.$(DLL): CPLUS_FLAGS=$(ORIG_CPLUS_FLAGS) $(if $(no_exceptions),$(DEFINE_KEY)__TBB_TEST_NO_EXCEPTIONS=1) test_malloc_lib_unload_dll.$(DLL): INCLUDES=$(ORIG_INCLUDES) $(INCLUDE_TEST_HEADERS) -test_malloc_used_by_lib_dll.$(DLL): CPLUS_FLAGS=$(subst /MT,/LD,$(M_CPLUS_FLAGS)) +test_malloc_used_by_lib_dll.$(DLL): CPLUS_FLAGS:=$(subst /MT,/LD,$(M_CPLUS_FLAGS)) test_malloc_used_by_lib_dll.$(DLL): LINK_FILES+=$(ORIG_LINK_MALLOC.LIB) test_malloc_used_by_lib_dll.$(DLL): LIBDL= + +# The test needs both tbb and tbbmalloc. +# For static build LINK_TBB.LIB is resolved in tbb.a static lib name (Linux), which cannot be found (dynamic tbb is used only). +# In order to link properly, have to define LINK_TBB.LIB ourselves except for Windows where linkage with *.lib file expected. 
+ifdef extra_inc +ifneq ($(tbb_os),windows) +DYNAMIC_TBB_LIB=$(LIBPREF)tbb$(CPF_SUFFIX)$(DEBUG_SUFFIX).$(DLL) +endif +endif +test_malloc_shutdown_hang.$(TEST_EXT): LINK_FILES += $(if $(DYNAMIC_TBB_LIB), $(DYNAMIC_TBB_LIB), $(LINK_TBB.LIB)) + # ----------------------------------------------------- # ---- The list of TBBMalloc test running commands ---- @@ -220,6 +233,8 @@ ifneq (,$(MALLOCPROXY.DLL)) $(run_cmd) ./test_malloc_atexit.$(TEST_EXT) $(args) $(run_cmd) $(TEST_LAUNCHER) -l $(MALLOCPROXY.DLL) ./test_malloc_overload.$(TEST_EXT) $(args) $(run_cmd) $(TEST_LAUNCHER) ./test_malloc_overload_proxy.$(TEST_EXT) $(args) + $(run_cmd) ./test_malloc_overload_disable.$(TEST_EXT) $(args) + $(run_cmd) $(TEST_LAUNCHER) ./test_malloc_new_handler.$(TEST_EXT) $(args) endif $(run_cmd) $(TEST_LAUNCHER) ./test_malloc_lib_unload.$(TEST_EXT) $(args) $(run_cmd) $(TEST_LAUNCHER) ./test_malloc_used_by_lib.$(TEST_EXT) @@ -230,6 +245,7 @@ endif $(run_cmd) ./test_malloc_regression.$(TEST_EXT) $(args) $(run_cmd) ./test_malloc_init_shutdown.$(TEST_EXT) $(args) $(run_cmd) ./test_malloc_pure_c.$(TEST_EXT) $(args) + $(run_cmd) ./test_malloc_shutdown_hang.$(TEST_EXT) # ----------------------------------------------------- #------------------------------------------------------ diff --git a/src/tbb/build/Makefile.tbbproxy b/src/tbb/build/Makefile.tbbproxy index eb2e0fd6..54f5e9cd 100644 --- a/src/tbb/build/Makefile.tbbproxy +++ b/src/tbb/build/Makefile.tbbproxy @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-# -# -# -# # default target default_tbbproxy: tbbproxy tbbproxy_test diff --git a/src/tbb/build/Makefile.test b/src/tbb/build/Makefile.test index 9de7860d..9ae464c1 100644 --- a/src/tbb/build/Makefile.test +++ b/src/tbb/build/Makefile.test @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# #------------------------------------------------------------------------------ # Define rules for making the TBB tests. @@ -107,6 +103,9 @@ test_dynamic_link.$(TEST_EXT): LIBS += $(LIBDL) ifneq (,$(BIGOBJ_KEY)) TEST_BIGOBJ = test_opencl_node.$(TEST_EXT) \ test_atomic.$(TEST_EXT) \ + test_concurrent_hash_map.$(TEST_EXT) \ + test_concurrent_set.$(TEST_EXT) \ + test_concurrent_map.$(TEST_EXT) \ test_concurrent_unordered_set.$(TEST_EXT) \ test_concurrent_unordered_map.$(TEST_EXT) \ test_join_node_key_matching.$(TEST_EXT) \ @@ -115,7 +114,7 @@ TEST_BIGOBJ = test_opencl_node.$(TEST_EXT) \ $(TEST_BIGOBJ): override CXXFLAGS += $(BIGOBJ_KEY) endif -# TODO: remove repetition of .$(TEST_EXT) in the list bellow +# TODO: remove repetition of .$(TEST_EXT) in the list below # The main list of TBB tests TEST_TBB_PLAIN.EXE = test_assembly.$(TEST_EXT) \ test_global_control.$(TEST_EXT) \ @@ -128,11 +127,14 @@ TEST_TBB_PLAIN.EXE = test_assembly.$(TEST_EXT) \ test_blocked_range.$(TEST_EXT) \ test_blocked_range2d.$(TEST_EXT) \ test_blocked_range3d.$(TEST_EXT) \ + test_blocked_rangeNd.$(TEST_EXT) \ test_concurrent_queue.$(TEST_EXT) \ test_concurrent_vector.$(TEST_EXT) \ test_concurrent_unordered_set.$(TEST_EXT) \ test_concurrent_unordered_map.$(TEST_EXT) \ test_concurrent_hash_map.$(TEST_EXT) \ + 
test_concurrent_set.$(TEST_EXT) \ + test_concurrent_map.$(TEST_EXT) \ test_enumerable_thread_specific.$(TEST_EXT) \ test_handle_perror.$(TEST_EXT) \ test_halt.$(TEST_EXT) \ @@ -214,12 +216,12 @@ TEST_TBB_PLAIN.EXE = test_assembly.$(TEST_EXT) \ test_composite_node.$(TEST_EXT) \ test_async_node.$(TEST_EXT) \ test_async_msg.$(TEST_EXT) \ + test_resumable_tasks.$(TEST_EXT) \ test_tbb_version.$(TEST_EXT) # insert new files right above # These tests depend on other technologies TEST_TBB_SPECIAL.EXE = test_openmp.$(TEST_EXT) \ test_cilk_interop.$(TEST_EXT) \ - test_gfx_factory.$(TEST_EXT) \ test_opencl_node.$(TEST_EXT) # skip mode_plugin for now @@ -240,7 +242,6 @@ ifdef CILK_AVAILABLE # The issue reveals itself if a version of binutils is prior to 2.17 ifeq (linux_icc,$(tbb_os)_$(compiler)) test_cilk_interop.$(TEST_EXT): LIBS += -lcilkrts -test_gfx_factory.$(TEST_EXT): LIBS += -lcilkrts endif test_tbb_cilk: test_cilk_interop.$(TEST_EXT) $(run_cmd) ./test_cilk_interop.$(TEST_EXT) $(args) @@ -251,6 +252,8 @@ endif test_opencl_node.$(TEST_EXT): LIBS += $(OPENCL.LIB) +test_arena_constraints_hwloc.$(TEST_EXT): LIBS += $(HWLOC.LIB) + $(TEST_TBB_PLAIN.EXE) $(TEST_TBB_SPECIAL.EXE): WARNING_KEY += $(TEST_WARNING_KEY) # Run tests that are in SCHEDULER_DIRECTLY_INCLUDED and TEST_TBB_PLAIN.EXE but not in skip_tests (which is specified by user) @@ -272,7 +275,7 @@ test_tbb_plain: $(TEST_PREREQUISITE) $(TESTS_TO_RUN) # For deprecated files, we don't mind warnings etc., thus compilation rules are most relaxed CPLUS_FLAGS_DEPRECATED = $(DEFINE_KEY)__TBB_TEST_DEPRECATED=1 $(subst $(WARNING_KEY),,$(CPLUS_FLAGS)) $(WARNING_SUPPRESS) $(INCLUDE_KEY)$(tbb_root)/src/test -TEST_TBB_OLD.OBJ = test_concurrent_vector_v2.$(OBJ) test_concurrent_queue_v2.$(OBJ) test_mutex_v2.$(OBJ) test_task_scheduler_observer_v3.$(OBJ) +TEST_TBB_OLD.OBJ = test_concurrent_vector_v2.$(OBJ) test_concurrent_queue_v2.$(OBJ) test_mutex_v2.$(OBJ) test_task_scheduler_observer_v3.$(OBJ) $(TEST_TBB_OLD.OBJ): CPLUS_FLAGS := 
$(CPLUS_FLAGS_DEPRECATED) diff --git a/src/tbb/build/OpenBSD.clang.inc b/src/tbb/build/OpenBSD.clang.inc new file mode 100644 index 00000000..0acc5eb2 --- /dev/null +++ b/src/tbb/build/OpenBSD.clang.inc @@ -0,0 +1,15 @@ +# Copyright (c) 2005-2020 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +include $(tbb_root)/build/BSD.clang.inc diff --git a/src/tbb/build/OpenBSD.inc b/src/tbb/build/OpenBSD.inc new file mode 100644 index 00000000..8b85bf02 --- /dev/null +++ b/src/tbb/build/OpenBSD.inc @@ -0,0 +1,15 @@ +# Copyright (c) 2005-2020 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +include $(tbb_root)/build/BSD.inc diff --git a/src/tbb/build/SunOS.gcc.inc b/src/tbb/build/SunOS.gcc.inc index 2ebed62f..2af7a682 100644 --- a/src/tbb/build/SunOS.gcc.inc +++ b/src/tbb/build/SunOS.gcc.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# COMPILE_ONLY = -c -MMD PREPROC_ONLY = -E -x c++ @@ -25,8 +21,8 @@ OUTPUTOBJ_KEY = -o # PIC_KEY = -fPIC WARNING_AS_ERROR_KEY = -Werror WARNING_KEY = -Wall -TEST_WARNING_KEY = -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor $(if $(findstring cc4., $(runtime)),-Wextra) -WARNING_SUPPRESS = -Wno-parentheses -Wno-non-virtual-dtor +TEST_WARNING_KEY = -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor -Wextra +WARNING_SUPPRESS = -Wno-parentheses -Wno-non-virtual-dtor DYLIB_KEY = -shared LIBDL = -ldl @@ -64,12 +60,9 @@ ifeq (ia32,$(arch)) endif # for some gcc versions on Solaris, -m64 may imply V9, but perhaps not everywhere (TODO: verify) -# RcppParallel: CRAN uses 32-bit SPARC so we force that here ifeq (sparc,$(arch)) - CPLUS_FLAGS += -m32 - LIB_LINK_FLAGS += -m32 -# CPLUS_FLAGS += -mcpu=v9 -m64 -# LIB_LINK_FLAGS += -mcpu=v9 -m64 + CPLUS_FLAGS += -mcpu=v9 -m64 + LIB_LINK_FLAGS += -mcpu=v9 -m64 endif #------------------------------------------------------------------------------ diff --git a/src/tbb/build/SunOS.inc b/src/tbb/build/SunOS.inc index 447e059e..30a2e684 100644 --- a/src/tbb/build/SunOS.inc +++ b/src/tbb/build/SunOS.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # 
you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# ifndef arch arch:=$(shell uname -p) @@ -31,7 +27,7 @@ ifndef arch endif ifndef runtime - gcc_version:=$(shell gcc -dumpversion) + gcc_version:=$(shell gcc -dumpfullversion -dumpversion) os_version:=$(shell uname -r) os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//') export runtime:=cc$(gcc_version)_kernel$(os_kernel_version) diff --git a/src/tbb/build/SunOS.suncc.inc b/src/tbb/build/SunOS.suncc.inc index b9cc946e..b0dfa484 100644 --- a/src/tbb/build/SunOS.suncc.inc +++ b/src/tbb/build/SunOS.suncc.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-# -# -# -# COMPILE_ONLY = -c -xMMD -errtags PREPROC_ONLY = -E -xMMD @@ -31,8 +27,7 @@ WARNING_AS_ERROR_KEY = Warning as error WARNING_SUPPRESS = -erroff=unassigned,attrskipunsup,badargtype2w,badbinaryopw,wbadasg,wvarhidemem,inlasmpnu tbb_strict=0 -# RcppParallel patch: -library=stlport4 -CPLUS = CC -library=stlport4 +CPLUS = CC CONLY = cc OPENMP_FLAG = -xopenmp diff --git a/src/tbb/build/android.clang.inc b/src/tbb/build/android.clang.inc index a935968c..6edc48f7 100644 --- a/src/tbb/build/android.clang.inc +++ b/src/tbb/build/android.clang.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# COMPILE_ONLY = -c -MMD @@ -68,8 +64,20 @@ ifeq (0, $(dynamic_load)) endif # Paths to the NDK prebuilt tools and libraries -CPLUS_FLAGS += --sysroot=$(SYSROOT) +ifeq (,$(findstring $(ndk_version), $(foreach v, 7 8 9 10 11 12 13 14 15,r$(v) r$(v)b r$(v)c r$(v)d r$(v)e))) + # Since Android* NDK r16 another sysroot and isystem paths have to be specified + CPLUS_FLAGS += --sysroot=$(NDK_ROOT)/sysroot -isystem $(NDK_ROOT)/sysroot/usr/include/$(TRIPLE) + # Android* version flag required since r16 + CPLUS_FLAGS += -D__ANDROID_API__=$(API_LEVEL) +else + CPLUS_FLAGS += --sysroot=$(SYSROOT) +endif + +# Library sysroot flag LIB_LINK_FLAGS += --sysroot=$(SYSROOT) +# Flag for test executables +LINK_FLAGS += --sysroot=$(SYSROOT) + LIBS = -L$(CPLUS_LIB_PATH) -lc++_shared ifeq (,$(findstring $(ndk_version),$(foreach v, 7 8 9 10 11,r$(v) r$(v)b r$(v)c r$(v)d r$(v)e))) LIBS += -lc++abi diff --git a/src/tbb/build/android.gcc.inc b/src/tbb/build/android.gcc.inc index 973ca3f0..980a8cac 100644 --- 
a/src/tbb/build/android.gcc.inc +++ b/src/tbb/build/android.gcc.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# COMPILE_ONLY = -c -MMD diff --git a/src/tbb/build/android.icc.inc b/src/tbb/build/android.icc.inc index 64da9fd1..6ba64d19 100644 --- a/src/tbb/build/android.icc.inc +++ b/src/tbb/build/android.icc.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# COMPILE_ONLY = -c -MMD diff --git a/src/tbb/build/android.inc b/src/tbb/build/android.inc index aeeb68c5..3832ee53 100644 --- a/src/tbb/build/android.inc +++ b/src/tbb/build/android.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-# -# -# -# # # Extra gmake command-line parameters for use with Android: diff --git a/src/tbb/build/android.linux.inc b/src/tbb/build/android.linux.inc index 7d022434..a7d2b183 100644 --- a/src/tbb/build/android.linux.inc +++ b/src/tbb/build/android.linux.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# ####### Detections and Commands ############################################### diff --git a/src/tbb/build/android.linux.launcher.sh b/src/tbb/build/android.linux.launcher.sh index b3cf8764..2643d3e0 100644 --- a/src/tbb/build/android.linux.launcher.sh +++ b/src/tbb/build/android.linux.launcher.sh @@ -1,6 +1,6 @@ #!/bin/sh # -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,10 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# # Usage: # android.linux.launcher.sh [-v] [-q] [-s] [-r ] [-u] [-l ] diff --git a/src/tbb/build/android.macos.inc b/src/tbb/build/android.macos.inc index bf84578a..a48ee32b 100644 --- a/src/tbb/build/android.macos.inc +++ b/src/tbb/build/android.macos.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# ####### Detections and Commands ############################################### diff --git a/src/tbb/build/android.windows.inc b/src/tbb/build/android.windows.inc index f58e5d52..a56f9a98 100644 --- a/src/tbb/build/android.windows.inc +++ b/src/tbb/build/android.windows.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# ####### Detections and Commands ############################################### diff --git a/src/tbb/build/big_iron.inc b/src/tbb/build/big_iron.inc index efed2125..abe6accc 100644 --- a/src/tbb/build/big_iron.inc +++ b/src/tbb/build/big_iron.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# #------------------------------------------------------------------------------ # Defines settings for building the TBB run-time as a static library. 
diff --git a/src/tbb/build/build.py b/src/tbb/build/build.py new file mode 100644 index 00000000..c0ab1519 --- /dev/null +++ b/src/tbb/build/build.py @@ -0,0 +1,204 @@ +#!/usr/bin/env python +# +# Copyright (c) 2017-2020 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Provides unified tool for preparing TBB for packaging + +from __future__ import print_function +import os +import re +import sys +import shutil +import platform +import argparse +from glob import glob +from collections import OrderedDict + +jp = os.path.join +is_win = (platform.system() == 'Windows') +is_lin = (platform.system() == 'Linux') +is_mac = (platform.system() == 'Darwin') + +default_prefix = os.getenv('PREFIX', 'install_prefix') +if is_win: + default_prefix = jp(default_prefix, 'Library') # conda-specific by default on Windows + +parser = argparse.ArgumentParser() +parser.add_argument('--tbbroot', default='.', help='Take Intel TBB from here') +parser.add_argument('--prefix', default=default_prefix, help='Prefix') +parser.add_argument('--prebuilt', default=[], action='append', help='Directories to find prebuilt files') +parser.add_argument('--no-rebuild', default=False, action='store_true', help='do not rebuild') +parser.add_argument('--install', default=False, action='store_true', help='install all') +parser.add_argument('--install-libs', default=False, action='store_true', help='install libs') +parser.add_argument('--install-devel', default=False, action='store_true', 
help='install devel') +parser.add_argument('--install-docs', default=False, action='store_true', help='install docs') +parser.add_argument('--install-python', default=False, action='store_true', help='install python module') +parser.add_argument('--make-tool', default='make', help='Use different make command instead') +parser.add_argument('--copy-tool', default=None, help='Use this command for copying ($ tool file dest-dir)') +parser.add_argument('--build-args', default="", help='specify extra build args') +parser.add_argument('--build-prefix', default='local', help='build dir prefix') +parser.add_argument('--cmake-dir', help='directory to install CMake configuration files. Default: /lib/cmake/tbb') +if is_win: + parser.add_argument('--msbuild', default=False, action='store_true', help='Use msbuild') + parser.add_argument('--vs', default="2012", help='select VS version for build') + parser.add_argument('--vs-platform', default="x64", help='select VS platform for build') +parser.add_argument('ignore', nargs='?', help="workaround conda-build issue #2512") + +args = parser.parse_args() + +if args.install: + args.install_libs = True + args.install_devel = True + args.install_docs = True + args.install_python= True + +def custom_cp(src, dst): + assert os.system(' '.join([args.copy_tool, src, dst])) == 0 + +if args.copy_tool: + install_cp = custom_cp # e.g. 
to use install -p -D -m 755 on Linux +else: + install_cp = shutil.copy + +bin_dir = jp(args.prefix, "bin") +lib_dir = jp(args.prefix, "lib") +inc_dir = jp(args.prefix, 'include') +doc_dir = jp(args.prefix, 'share', 'doc', 'tbb') +cmake_dir = jp(args.prefix, "lib", "cmake", "tbb") if args.cmake_dir is None else args.cmake_dir + +if is_win: + os.environ["OS"] = "Windows_NT" # make sure TBB will interpret it correctly + libext = '.dll' + libpref = '' + dll_dir = bin_dir +else: + libext = '.dylib' if is_mac else '.so.2' + libpref = 'lib' + dll_dir = lib_dir + +tbb_names = ["tbb", "tbbmalloc", "tbbmalloc_proxy"] + +############################################################## + +def system(arg): + print('$ ', arg) + return os.system(arg) + +def run_make(arg): + if system('%s -j %s'% (args.make_tool, arg)) != 0: + print("\nBummer. Running serial build in order to recover the log and have a chance to fix the build") + assert system('%s %s'% (args.make_tool, arg)) == 0 + +os.chdir(args.tbbroot) +if args.prebuilt: + release_dirs = sum([glob(d) for d in args.prebuilt], []) + print("Using pre-built files from ", release_dirs) +else: + if is_win and args.msbuild: + preview_release_dir = release_dir = jp(args.tbbroot, 'build', 'vs'+args.vs, args.vs_platform, 'Release') + if not args.no_rebuild or not os.path.isdir(release_dir): + assert os.system('msbuild /m /p:Platform=%s /p:Configuration=Release %s build/vs%s/makefile.sln'% \ + (args.vs_platform, args.build_args, args.vs)) == 0 + preview_debug_dir = debug_dir = jp(args.tbbroot, 'build', 'vs'+args.vs, args.vs_platform, 'Debug') + if not args.no_rebuild or not os.path.isdir(debug_dir): + assert os.system('msbuild /m /p:Platform=%s /p:Configuration=Debug %s build/vs%s/makefile.sln'% \ + (args.vs_platform, args.build_args, args.vs)) == 0 + else: + release_dir = jp(args.tbbroot, 'build', args.build_prefix+'_release') + debug_dir = jp(args.tbbroot, 'build', args.build_prefix+'_debug') + if not args.no_rebuild or not 
(os.path.isdir(release_dir) and os.path.isdir(debug_dir)): + run_make('tbb_build_prefix=%s %s'% (args.build_prefix, args.build_args)) + preview_release_dir = jp(args.tbbroot, 'build', args.build_prefix+'_preview_release') + preview_debug_dir = jp(args.tbbroot, 'build', args.build_prefix+'_preview_debug') + if not args.no_rebuild or not (os.path.isdir(preview_release_dir) and os.path.isdir(preview_debug_dir)): + run_make('tbb_build_prefix=%s_preview %s tbb_cpf=1 tbb'% (args.build_prefix, args.build_args)) + release_dirs = [release_dir, debug_dir, preview_release_dir, preview_debug_dir] + +filemap = OrderedDict() +def append_files(names, dst, paths=release_dirs): + global filemap + files = sum([glob(jp(d, f)) for d in paths for f in names], []) + filemap.update(dict(zip(files, [dst]*len(files)))) + + +if args.install_libs: + append_files([libpref+f+libext for f in tbb_names], dll_dir) + +if args.install_devel: + dll_files = [libpref+f+'_debug'+libext for f in tbb_names] # adding debug libraries + if not is_win or not args.msbuild: + dll_files += [libpref+"tbb_preview"+libext, libpref+"tbb_preview_debug"+libext] + if is_win: + dll_files += ['tbb*.pdb'] # copying debug info + if is_lin: + dll_files += ['libtbb*.so'] # copying linker scripts + # symlinks .so -> .so.2 should not be created instead + # since linking with -ltbb when using links can result in + # incorrect dependence upon unversioned .so files + append_files(dll_files, dll_dir) + if is_win: + append_files(['*.lib', '*.def'], lib_dir) # copying linker libs and defs + for rootdir, dirnames, filenames in os.walk(jp(args.tbbroot,'include')): + files = [f for f in filenames if not '.html' in f] + append_files(files, jp(inc_dir, rootdir.split('include')[1][1:]), paths=(rootdir,)) + + # Preparing CMake configuration files + cmake_build_dir = jp(args.tbbroot, 'build', args.build_prefix+'_release', 'cmake_configs') + assert system('cmake -DINSTALL_DIR=%s -DSYSTEM_NAME=%s -DTBB_VERSION_FILE=%s -DINC_REL_PATH=%s 
-DLIB_REL_PATH=%s -DBIN_REL_PATH=%s -P %s' % \ + (cmake_build_dir, + platform.system(), + jp(args.tbbroot, 'include', 'tbb', 'tbb_stddef.h'), + os.path.relpath(inc_dir, cmake_dir), + os.path.relpath(lib_dir, cmake_dir), + os.path.relpath(bin_dir, cmake_dir), + jp(args.tbbroot, 'cmake', 'tbb_config_installer.cmake'))) == 0 + append_files(['TBBConfig.cmake', 'TBBConfigVersion.cmake'], cmake_dir, paths=[cmake_build_dir]) + +if args.install_python: # RML part + irml_dir = jp(args.tbbroot, 'build', args.build_prefix+'_release') + run_make('-C src tbb_build_prefix=%s %s python_rml'% (args.build_prefix, args.build_args)) + if is_lin: + append_files(['libirml.so.1'], dll_dir, paths=[irml_dir]) + +if args.install_docs: + files = [ + 'CHANGES', + 'LICENSE', + 'README', + 'README.md', + 'Release_Notes.txt', + ] + append_files(files, doc_dir, paths=release_dirs+[jp(args.tbbroot, d) for d in ('.', 'doc')]) + +for f in filemap.keys(): + assert os.path.exists(f) + assert os.path.isfile(f) + +if filemap: + print("Copying to prefix =", args.prefix) +for f, dest in filemap.items(): + if not os.path.isdir(dest): + os.makedirs(dest) + print("+ %s to $prefix%s"%(f,dest.replace(args.prefix, ''))) + install_cp(f, dest) + +if args.install_python: # Python part + paths = [os.path.abspath(d) for d in [args.prefix, inc_dir, irml_dir, lib_dir]+release_dirs] + os.environ["TBBROOT"] = paths[0] + # all the paths must be relative to python/ directory or be absolute + assert system('python python/setup.py build -b%s build_ext -I%s -L%s install -f'% \ + (paths[2], paths[1], ':'.join(paths[2:]))) == 0 + +print("done") diff --git a/src/tbb/build/common.inc b/src/tbb/build/common.inc index b2c20de6..815fa682 100644 --- a/src/tbb/build/common.inc +++ b/src/tbb/build/common.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with 
the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# ifndef tbb_os @@ -54,6 +50,14 @@ ifdef cpp0x override cpp0x= endif +# Define C & C++ compilers according to platform defaults or CXX & CC environment variables +ifneq (,$(findstring environment, $(origin CXX))) + CPLUS = $(CXX) +endif +ifneq (,$(findstring environment, $(origin CC))) + CONLY = $(CC) +endif + ifneq (,$(stdver)) ifeq (,$(findstring ++, $(stdver))) $(warning "Warning: unexpected stdver=$(stdver) is used.") @@ -152,8 +156,8 @@ ifndef BUILDING_PHASE .DELETE_ON_ERROR: # Make will delete target if error occurred when building it. -# MAKEOVERRIDES contains the command line variable definitions. Reseting it to -# empty allows propogating all exported overridden variables to nested makes. +# MAKEOVERRIDES contains the command line variable definitions. Resetting it to +# empty allows propagating all exported overridden variables to nested makes. # NOTEs: # 1. All variable set in command line are propagated to nested makes. # 2. All variables declared with the "export" keyword are propagated to diff --git a/src/tbb/build/common_rules.inc b/src/tbb/build/common_rules.inc index faf6546f..64bd74ab 100644 --- a/src/tbb/build/common_rules.inc +++ b/src/tbb/build/common_rules.inc @@ -1,4 +1,4 @@ -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-# -# -# -# ifeq ($(tbb_strict),1) @@ -46,6 +42,10 @@ endif INCLUDES += $(INCLUDE_KEY)$(tbb_root)/src $(INCLUDE_KEY)$(tbb_root)/src/rml/include $(INCLUDE_KEY)$(tbb_root)/include CPLUS_FLAGS += $(WARNING_KEY) $(CXXFLAGS) + +# Suppress warnings about usage of deprecated content +CPLUS_FLAGS += $(DEFINE_KEY)TBB_SUPPRESS_DEPRECATED_MESSAGES=1 + ifeq (1,$(tbb_cpf)) CPLUS_FLAGS += $(DEFINE_KEY)__TBB_CPF_BUILD=1 endif @@ -63,6 +63,7 @@ ifeq ($(origin LIB_LINK_LIBS), undefined) LIB_LINK_LIBS = $(LIBDL) $(LIBS) endif +# some platforms do not provide separate C-only compiler CONLY ?= $(CPLUS) # The most generic rules @@ -88,7 +89,7 @@ $(foreach t,%.$(OBJ) $(TEST_AFFIXES_OBJS),$(eval $(call make-cxx-obj,$(t)))) #$(2) - is the input obj files and libraries define make-test-binary $(CPLUS) $(OUTPUT_KEY)$(strip $1) $(CPLUS_FLAGS) $(2) $(LIBS) $(LINK_FLAGS) -endef +endef # LINK_FILES the list of options to link test specific files (libraries and object files) LINK_FILES+=$(TEST_LIBS) @@ -100,7 +101,7 @@ LINK_FILES+=$(TEST_LIBS) %_dll.$(DLL): LINK_FLAGS += $(PIC_KEY) $(DYLIB_KEY) %_dll.$(DLL): TEST_LIBS := $(subst %_dll.$(DLL),,$(TEST_LIBS)) %_dll.$(DLL): %_dll.$(OBJ) - $(call make-test-binary,$@,$< $(subst %_dll.$(DLL),,$(LINK_FILES))) + $(call make-test-binary,$@,$< $(LINK_FILES)) .PRECIOUS: %_dll.$(OBJ) %_dll.$(DLL) %.$(OBJ): %.c @@ -113,12 +114,6 @@ LINK_FILES+=$(TEST_LIBS) cpp <$< | grep -v '^#' >$*.tmp $(ASM) $(ASM_FLAGS) -o $@ $*.tmp -ifdef rtools -# Line 70 doesn't work with rtool's version of make. 
The symptom being that the asm rule kicks off instead, and these rules are cl only -%.$(OBJ): %.cpp - $(CPLUS) $(OUTPUTOBJ_KEY)$@ $(COMPILE_ONLY) $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(CXX_WARN_SUPPRESS) $(INCLUDES) $< -endif - # Rule for generating .E file if needed for visual inspection # Note that ICL treats an argument after PREPROC_ONLY as a file to open, # so all uses of PREPROC_ONLY should be immediately followed by a file name diff --git a/src/tbb/build/detect.js b/src/tbb/build/detect.js index b2a0943b..ef8ccc15 100644 --- a/src/tbb/build/detect.js +++ b/src/tbb/build/detect.js @@ -1,4 +1,4 @@ -// Copyright (c) 2005-2017 Intel Corporation +// Copyright (c) 2005-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,10 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// -// -// -// function readAllFromFile(fname) { var fso = new ActiveXObject("Scripting.FileSystemObject"); @@ -55,18 +51,24 @@ function doWork() { WScript.Echo("unknown"); } } else { - tmpExec = WshShell.Exec(compilerPath + " -dumpversion"); + tmpExec = WshShell.Exec(compilerPath + " -dumpfullversion -dumpversion"); var gccVersion = tmpExec.StdOut.ReadLine(); if (WScript.Arguments(0) == "/runtime") { WScript.Echo("mingw" + gccVersion); } else if (WScript.Arguments(0) == "/minversion") { - // Comparing strings, not numbers; will not work for two-digit versions - if (gccVersion >= WScript.Arguments(2)) { - WScript.Echo("ok"); - } else { - WScript.Echo("fail"); + for (var i = 0; i < 3; i++) { + v1 = parseInt(gccVersion.split('.')[i]); + v2 = parseInt(WScript.Arguments(2).split('.')[i]); + + if (v1 > v2) { + break; + } else if (v1 < v2) { + WScript.Echo("fail"); + return; + } } + WScript.Echo("ok"); } } return; @@ -125,6 +127,8 @@ function doWork() { } else if (mapContext.match(vc140)) { if (WshShell.ExpandEnvironmentStrings("%VisualStudioVersion%") == "15.0") WScript.Echo("vc14.1"); + else if (WshShell.ExpandEnvironmentStrings("%VisualStudioVersion%") == "16.0") + WScript.Echo("vc14.2"); else WScript.Echo("vc14"); } else { diff --git a/src/tbb/build/generate_tbbvars.bat b/src/tbb/build/generate_tbbvars.bat index 8925f577..12173280 100644 --- a/src/tbb/build/generate_tbbvars.bat +++ b/src/tbb/build/generate_tbbvars.bat @@ -1,6 +1,6 @@ @echo off REM -REM Copyright (c) 2005-2017 Intel Corporation +REM Copyright (c) 2005-2020 Intel Corporation REM REM Licensed under the Apache License, Version 2.0 (the "License"); REM you may not use this file except in compliance with the License. @@ -14,10 +14,6 @@ REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. REM See the License for the specific language governing permissions and REM limitations under the License. 
REM -REM -REM -REM -REM setlocal for %%D in ("%tbb_root%") do set actual_root=%%~fD set fslash_root=%actual_root:\=/% diff --git a/src/tbb/build/generate_tbbvars.sh b/src/tbb/build/generate_tbbvars.sh index 0ca0965a..4106ed3f 100644 --- a/src/tbb/build/generate_tbbvars.sh +++ b/src/tbb/build/generate_tbbvars.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright (c) 2005-2017 Intel Corporation +# Copyright (c) 2005-2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,10 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# -# -# # Script used to generate tbbvars.[c]sh scripts bin_dir="$PWD" # diff --git a/src/tbb/build/index.html b/src/tbb/build/index.html index 4d0ac7c8..0aeafa73 100644 --- a/src/tbb/build/index.html +++ b/src/tbb/build/index.html @@ -18,6 +18,9 @@

Files

Main Makefile to build the Intel TBB scalable memory allocator library as well as its tests. Invoked via 'make tbbmalloc' from top-level Makefile. +
Makefile.tbbbind +
Main Makefile to build the tbbbind library. + Invoked via 'make tbbbind' from top-level Makefile
Makefile.test
Main Makefile to build and run the tests for the Intel TBB library. Invoked via 'make test' from top-level Makefile. @@ -73,7 +76,7 @@

Software prerequisites:

  • Explicitly specify the architecture when invoking GNU make, e.g. make arch=ia32. -

    The default make target will build the release and debug versions of the Intel TBB library.

    +

    The default make target will build the release version of the Intel TBB library.

    Other targets are available in the top-level Makefile. You might find the following targets useful:

  • Makefile.tbbmalloc