partitioner.h

/*
    Copyright (c) 2005-2022 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_partitioner_H
#define __TBB_partitioner_H

#ifndef __TBB_INITIAL_CHUNKS
// initial task divisions per thread
#define __TBB_INITIAL_CHUNKS 2
#endif
#ifndef __TBB_RANGE_POOL_CAPACITY
// maximum number of elements in range pool
#define __TBB_RANGE_POOL_CAPACITY 8
#endif
#ifndef __TBB_INIT_DEPTH
// initial value for depth of range pool
#define __TBB_INIT_DEPTH 5
#endif
#ifndef __TBB_DEMAND_DEPTH_ADD
// when imbalance is detected, the range is split this many additional times
#define __TBB_DEMAND_DEPTH_ADD 1
#endif
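
// Illustrative note (not part of the original header): these tuning macros take effect only if
// they are defined before this header is first included, e.g. from client code or the build system:
//
//     #define __TBB_RANGE_POOL_CAPACITY 16   // hypothetical override: larger per-task range pool
//     #include "oneapi/tbb/partitioner.h"    // exact include path depends on the installation
//
// Otherwise the defaults above are used.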
#include "detail/_config.h"
#include "detail/_namespace_injection.h"
#include "detail/_aligned_space.h"
#include "detail/_utils.h"
#include "detail/_template_helpers.h"
#include "detail/_range_common.h"
#include "detail/_task.h"
#include "detail/_small_object_pool.h"

#include "cache_aligned_allocator.h"
#include "task_group.h" // task_group_context
#include "task_arena.h"

#include <algorithm>
#include <atomic>
#include <type_traits>

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings
    #pragma warning (push)
    #pragma warning (disable: 4244)
#endif

namespace tbb {
namespace detail {

namespace d1 {

class auto_partitioner;
class simple_partitioner;
class static_partitioner;
class affinity_partitioner;
class affinity_partition_type;
class affinity_partitioner_base;

inline std::size_t get_initial_auto_partitioner_divisor() {
    const std::size_t factor = 4;
    return factor * max_concurrency();
}
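
// Worked example (illustrative, not from the original source): with max_concurrency() == 8 the
// initial divisor is 4 * 8 == 32, i.e. roughly four initial chunks per hardware thread, which
// leaves headroom for work stealing to smooth out moderate load imbalance.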

//! Defines entry point for affinity partitioner into oneTBB run-time library.
class affinity_partitioner_base: no_copy {
    friend class affinity_partitioner;
    friend class affinity_partition_type;
    //! Array that remembers affinities of tree positions to affinity_id.
    /** nullptr if my_size==0. */
    slot_id* my_array;
    //! Number of elements in my_array.
    std::size_t my_size;
    //! Zeros the fields.
    affinity_partitioner_base() : my_array(nullptr), my_size(0) {}
    //! Deallocates my_array.
    ~affinity_partitioner_base() { resize(0); }
    //! Resize my_array.
    /** Retains values if resulting size is the same. */
    void resize(unsigned factor) {
        // Check factor to avoid asking for the number of workers while there might be no arena.
        unsigned max_threads_in_arena = max_concurrency();
        std::size_t new_size = factor ? factor * max_threads_in_arena : 0;
        if (new_size != my_size) {
            if (my_array) {
                r1::cache_aligned_deallocate(my_array);
                // The following two assignments must be done here for the sake of exception safety.
                my_array = nullptr;
                my_size = 0;
            }
            if (new_size) {
                my_array = static_cast<slot_id*>(r1::cache_aligned_allocate(new_size * sizeof(slot_id)));
                std::fill_n(my_array, new_size, no_slot);
                my_size = new_size;
            }
        }
    }
};
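
// Sizing note (illustrative, not from the original source): resize(factor) allocates
// factor * max_concurrency() slots, so for affinity_partition_type (factor == 16) on an
// 8-thread machine the affinity array holds 128 slot_id entries, all initialized to no_slot.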

template<typename Range, typename Body, typename Partitioner> struct start_for;
template<typename Range, typename Body, typename Partitioner> struct start_scan;
template<typename Range, typename Body, typename Partitioner> struct start_reduce;
template<typename Range, typename Body, typename Partitioner> struct start_deterministic_reduce;

struct node {
    node* my_parent{};
    std::atomic<int> m_ref_count{};

    node() = default;
    node(node* parent, int ref_count) :
        my_parent{parent}, m_ref_count{ref_count} {
        __TBB_ASSERT(ref_count > 0, "The ref count must be positive");
    }
};

struct wait_node : node {
    wait_node() : node{ nullptr, 1 } {}
    wait_context m_wait{1};
};

//! Join task node that contains shared flag for stealing feedback
struct tree_node : public node {
    small_object_allocator m_allocator;
    std::atomic<bool> m_child_stolen{false};

    tree_node(node* parent, int ref_count, small_object_allocator& alloc)
        : node{parent, ref_count}
        , m_allocator{alloc} {}

    void join(task_group_context*) {/*dummy, required only for reduction algorithms*/};

    template <typename Task>
    static void mark_task_stolen(Task &t) {
        std::atomic<bool> &flag = static_cast<tree_node*>(t.my_parent)->m_child_stolen;
#if TBB_USE_PROFILING_TOOLS
        // Threading tools respect lock prefix but report false-positive data-race via plain store
        flag.exchange(true);
#else
        flag.store(true, std::memory_order_relaxed);
#endif // TBB_USE_PROFILING_TOOLS
    }
    template <typename Task>
    static bool is_peer_stolen(Task &t) {
        return static_cast<tree_node*>(t.my_parent)->m_child_stolen.load(std::memory_order_relaxed);
    }
};

// Context used to check cancellation state during reduction join process
template<typename TreeNodeType>
void fold_tree(node* n, const execution_data& ed) {
    for (;;) {
        __TBB_ASSERT(n->m_ref_count.load(std::memory_order_relaxed) > 0, "The refcount must be positive.");
        call_itt_task_notify(releasing, n);
        if (--n->m_ref_count > 0) {
            return;
        }
        node* parent = n->my_parent;
        if (!parent) {
            break;
        };
        call_itt_task_notify(acquired, n);
        TreeNodeType* self = static_cast<TreeNodeType*>(n);
        self->join(ed.context);
        self->m_allocator.delete_object(self, ed);
        n = parent;
    }
    // Finish parallel for execution when the root (last node) is reached
    static_cast<wait_node*>(n)->m_wait.release();
}

//! Depth is a relative depth of recursive division inside a range pool. Relative depth allows
//! infinite absolute depth of the recursion for heavily unbalanced workloads with range represented
//! by a number that cannot fit into machine word.
typedef unsigned char depth_t;

//! Range pool stores ranges of type T in a circular buffer with MaxCapacity
template <typename T, depth_t MaxCapacity>
class range_vector {
    depth_t my_head;
    depth_t my_tail;
    depth_t my_size;
    depth_t my_depth[MaxCapacity]; // relative depths of stored ranges
    tbb::detail::aligned_space<T, MaxCapacity> my_pool;

public:
    //! initialize via first range in pool
    range_vector(const T& elem) : my_head(0), my_tail(0), my_size(1) {
        my_depth[0] = 0;
        new( static_cast<void *>(my_pool.begin()) ) T(elem); //TODO: std::move?
    }
    ~range_vector() {
        while( !empty() ) pop_back();
    }
    bool empty() const { return my_size == 0; }
    depth_t size() const { return my_size; }
    //! Populates range pool via ranges up to max depth or while divisible
    //! max_depth starts from 0, e.g. value 2 makes 3 ranges in the pool up to two 1/4 pieces
    void split_to_fill(depth_t max_depth) {
        while( my_size < MaxCapacity && is_divisible(max_depth) ) {
            depth_t prev = my_head;
            my_head = (my_head + 1) % MaxCapacity;
            new(my_pool.begin()+my_head) T(my_pool.begin()[prev]); // copy TODO: std::move?
            my_pool.begin()[prev].~T(); // instead of assignment
            new(my_pool.begin()+prev) T(my_pool.begin()[my_head], detail::split()); // do 'inverse' split
            my_depth[my_head] = ++my_depth[prev];
            my_size++;
        }
    }
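    // Worked example (illustrative, not from the original source): starting from a single range at
    // depth 0, split_to_fill(2) first splits it into halves at depth 1, then splits the newer half
    // into quarters at depth 2, stopping with three ranges in the pool (one 1/2 piece and two 1/4
    // pieces), which matches the "value 2 makes 3 ranges" note above.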
    void pop_back() {
        __TBB_ASSERT(my_size > 0, "range_vector::pop_back() with empty size");
        my_pool.begin()[my_head].~T();
        my_size--;
        my_head = (my_head + MaxCapacity - 1) % MaxCapacity;
    }
    void pop_front() {
        __TBB_ASSERT(my_size > 0, "range_vector::pop_front() with empty size");
        my_pool.begin()[my_tail].~T();
        my_size--;
        my_tail = (my_tail + 1) % MaxCapacity;
    }
    T& back() {
        __TBB_ASSERT(my_size > 0, "range_vector::back() with empty size");
        return my_pool.begin()[my_head];
    }
    T& front() {
        __TBB_ASSERT(my_size > 0, "range_vector::front() with empty size");
        return my_pool.begin()[my_tail];
    }
    //! similarly to front(), returns depth of the first range in the pool
    depth_t front_depth() {
        __TBB_ASSERT(my_size > 0, "range_vector::front_depth() with empty size");
        return my_depth[my_tail];
    }
    depth_t back_depth() {
        __TBB_ASSERT(my_size > 0, "range_vector::back_depth() with empty size");
        return my_depth[my_head];
    }
    bool is_divisible(depth_t max_depth) {
        return back_depth() < max_depth && back().is_divisible();
    }
};

//! Provides default methods for partition objects and common algorithm blocks.
template <typename Partition>
struct partition_type_base {
    typedef detail::split split_type;
    // decision makers
    void note_affinity( slot_id ) {}
    template <typename Task>
    bool check_being_stolen(Task&, const execution_data&) { return false; } // part of old should_execute_range()
    template <typename Range> split_type get_split() { return split(); }
    Partition& self() { return *static_cast<Partition*>(this); } // CRTP helper

    template<typename StartType, typename Range>
    void work_balance(StartType &start, Range &range, const execution_data&) {
        start.run_body( range ); // static partitioner goes here
    }
    template<typename StartType, typename Range>
    void execute(StartType &start, Range &range, execution_data& ed) {
        // The algorithm in a few words ([] denotes calls to decision methods of the partitioner):
        // [If this task is stolen, adjust depth and divisions if necessary, set flag].
        // If range is divisible {
        //     Spread the work while [initial divisions left];
        //     Create trap task [if necessary];
        // }
        // If not divisible or [max depth is reached], execute, else do the range pool part.
        if ( range.is_divisible() ) {
            if ( self().is_divisible() ) {
                do { // split while both the range and the partitioner are divisible
                    typename Partition::split_type split_obj = self().template get_split<Range>();
                    start.offer_work( split_obj, ed );
                } while ( range.is_divisible() && self().is_divisible() );
            }
        }
        self().work_balance(start, range, ed);
    }
};

//! Provides default splitting strategy for partition objects.
template <typename Partition>
struct adaptive_mode : partition_type_base<Partition> {
    typedef Partition my_partition;
    std::size_t my_divisor;
    // For affinity_partitioner, my_divisor indicates the number of affinity array indices the task reserves.
    // A task which has only one index must produce the right split without a reserved index, so that the
    // index is not overwritten in note_affinity() of the created (right) task.
    // I.e. a task created deeper than the affinity array can remember must not save its affinity (LIFO order).
    static const unsigned factor = 1;
    adaptive_mode() : my_divisor(get_initial_auto_partitioner_divisor() / 4 * my_partition::factor) {}
    adaptive_mode(adaptive_mode &src, split) : my_divisor(do_split(src, split())) {}
    adaptive_mode(adaptive_mode&, const proportional_split&) : my_divisor(0)
    {
        // left blank as my_divisor gets overridden in the successors' constructors
    }
    /*! Override do_split methods in order to specify splitting strategy */
    std::size_t do_split(adaptive_mode &src, split) {
        return src.my_divisor /= 2u;
    }
};

//! Provides proportional splitting strategy for partition objects
template <typename Partition>
struct proportional_mode : adaptive_mode<Partition> {
    typedef Partition my_partition;
    using partition_type_base<Partition>::self; // CRTP helper to get access to derived classes

    proportional_mode() : adaptive_mode<Partition>() {}
    proportional_mode(proportional_mode &src, split) : adaptive_mode<Partition>(src, split()) {}
    proportional_mode(proportional_mode &src, const proportional_split& split_obj)
        : adaptive_mode<Partition>(src, split_obj)
    {
        self().my_divisor = do_split(src, split_obj);
    }
    std::size_t do_split(proportional_mode &src, const proportional_split& split_obj) {
        std::size_t portion = split_obj.right() * my_partition::factor;
        portion = (portion + my_partition::factor/2) & (0ul - my_partition::factor);
        src.my_divisor -= portion;
        return portion;
    }
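    // Worked example (illustrative, not from the original source): for affinity_partition_type,
    // my_partition::factor == 16, so split_obj.right() == 5 yields a raw portion of 80 slots;
    // (80 + 8) & (0ul - 16) masks to the nearest multiple of 16, which is still 80, so the right
    // task reserves 5 whole 16-slot blocks and src.my_divisor shrinks by the same amount.
    // With factor == 1 (static_partitioner) the masking leaves the portion unchanged.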
    bool is_divisible() { // part of old should_execute_range()
        return self().my_divisor > my_partition::factor;
    }
    template <typename Range>
    proportional_split get_split() {
        // Create the proportion from the partitioner's internal resources (threads) that would be used:
        // - it is passed into the proportional_mode constructor to split the partitioner;
        // - if the Range supports the proportional_split constructor, the proposed proportion is used,
        //   otherwise the tbb::proportional_split object is implicitly (for the Range implementor)
        //   converted to tbb::split.
        std::size_t n = self().my_divisor / my_partition::factor;
        std::size_t right = n / 2;
        std::size_t left = n - right;
        return proportional_split(left, right);
    }
};

static std::size_t get_initial_partition_head() {
    int current_index = tbb::this_task_arena::current_thread_index();
    if (current_index == tbb::task_arena::not_initialized)
        current_index = 0;
    return size_t(current_index);
}

//! Provides default linear indexing of partitioner's sequence
template <typename Partition>
struct linear_affinity_mode : proportional_mode<Partition> {
    std::size_t my_head;
    std::size_t my_max_affinity;
    using proportional_mode<Partition>::self;

    linear_affinity_mode() : proportional_mode<Partition>(), my_head(get_initial_partition_head()),
        my_max_affinity(self().my_divisor) {}
    linear_affinity_mode(linear_affinity_mode &src, split) : proportional_mode<Partition>(src, split())
        , my_head((src.my_head + src.my_divisor) % src.my_max_affinity), my_max_affinity(src.my_max_affinity) {}
    linear_affinity_mode(linear_affinity_mode &src, const proportional_split& split_obj) : proportional_mode<Partition>(src, split_obj)
        , my_head((src.my_head + src.my_divisor) % src.my_max_affinity), my_max_affinity(src.my_max_affinity) {}
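    // Illustrative trace (not from the original source): the base-class constructor runs first and
    // shrinks src.my_divisor, so the new (right) task's slot block starts just past the slots the
    // left task keeps. With my_max_affinity == 8, src.my_head == 0 and an even split of
    // src.my_divisor == 8, the left task keeps slots 0..3 and the right task gets
    // my_head == (0 + 4) % 8 == 4, i.e. slots 4..7.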
    void spawn_task(task& t, task_group_context& ctx) {
        if (self().my_divisor) {
            spawn(t, ctx, slot_id(my_head));
        } else {
            spawn(t, ctx);
        }
    }
};

static bool is_stolen_task(const execution_data& ed) {
    return execution_slot(ed) != original_slot(ed);
}

/*! Determine work-balance phase implementing splitting & stealing actions */
template<class Mode>
struct dynamic_grainsize_mode : Mode {
    using Mode::self;
    enum {
        begin = 0,
        run,
        pass
    } my_delay;
    depth_t my_max_depth;
    static const unsigned range_pool_size = __TBB_RANGE_POOL_CAPACITY;

    dynamic_grainsize_mode(): Mode()
        , my_delay(begin)
        , my_max_depth(__TBB_INIT_DEPTH) {}
    dynamic_grainsize_mode(dynamic_grainsize_mode& p, split)
        : Mode(p, split())
        , my_delay(pass)
        , my_max_depth(p.my_max_depth) {}
    dynamic_grainsize_mode(dynamic_grainsize_mode& p, const proportional_split& split_obj)
        : Mode(p, split_obj)
        , my_delay(begin)
        , my_max_depth(p.my_max_depth) {}

    template <typename Task>
    bool check_being_stolen(Task &t, const execution_data& ed) { // part of old should_execute_range()
        if( !(self().my_divisor / Mode::my_partition::factor) ) { // if not from the top P tasks of the binary tree
            self().my_divisor = 1; // TODO: replace by on-stack flag (partition_state's member)?
            if( is_stolen_task(ed) && t.my_parent->m_ref_count >= 2 ) { // runs concurrently with the left task
#if __TBB_USE_OPTIONAL_RTTI
                // RTTI is available, check whether the cast is valid
                // TODO: TBB_REVAMP_TODO __TBB_ASSERT(dynamic_cast<tree_node*>(t.m_parent), 0);
                // correctness of the cast relies on avoiding the root task for which:
                // - initial value of my_divisor != 0 (protected by separate assertion)
                // - is_stolen_task() always returns false for the root task.
#endif
                tree_node::mark_task_stolen(t);
                if( !my_max_depth ) my_max_depth++;
                my_max_depth += __TBB_DEMAND_DEPTH_ADD;
                return true;
            }
        }
        return false;
    }
    depth_t max_depth() { return my_max_depth; }
    void align_depth(depth_t base) {
        __TBB_ASSERT(base <= my_max_depth, nullptr);
        my_max_depth -= base;
    }
    template<typename StartType, typename Range>
    void work_balance(StartType &start, Range &range, execution_data& ed) {
        if( !range.is_divisible() || !self().max_depth() ) {
            start.run_body( range );
        }
        else { // do range pool
            range_vector<Range, range_pool_size> range_pool(range);
            do {
                range_pool.split_to_fill(self().max_depth()); // fill range pool
                if( self().check_for_demand( start ) ) {
                    if( range_pool.size() > 1 ) {
                        start.offer_work( range_pool.front(), range_pool.front_depth(), ed );
                        range_pool.pop_front();
                        continue;
                    }
                    if( range_pool.is_divisible(self().max_depth()) ) // there was not enough depth to fork a task
                        continue; // note: the next split_to_fill() should split the range at least once
                }
                start.run_body( range_pool.back() );
                range_pool.pop_back();
            } while( !range_pool.empty() && !ed.context->is_group_execution_cancelled() );
        }
    }
    template <typename Task>
    bool check_for_demand(Task& t) {
        if ( pass == my_delay ) {
            if ( self().my_divisor > 1 ) // produce affinitized tasks while they have a slot in the array
                return true; // do not do my_max_depth++ here, but be sure range_pool is splittable once more
            else if ( self().my_divisor && my_max_depth ) { // make balancing task
                self().my_divisor = 0; // once for each task; depth will be decreased in align_depth()
                return true;
            }
            else if ( tree_node::is_peer_stolen(t) ) {
                my_max_depth += __TBB_DEMAND_DEPTH_ADD;
                return true;
            }
        } else if( begin == my_delay ) {
            my_delay = pass;
        }
        return false;
    }
};

class auto_partition_type: public dynamic_grainsize_mode<adaptive_mode<auto_partition_type> > {
public:
    auto_partition_type( const auto_partitioner& ) {
        my_divisor *= __TBB_INITIAL_CHUNKS;
    }
    auto_partition_type( auto_partition_type& src, split)
        : dynamic_grainsize_mode<adaptive_mode<auto_partition_type> >(src, split()) {}
    bool is_divisible() { // part of old should_execute_range()
        if( my_divisor > 1 ) return true;
        if( my_divisor && my_max_depth ) { // can split the task. TODO: on-stack flag instead
            // keep same fragmentation while splitting for the local task pool
            my_max_depth--;
            my_divisor = 0; // decrease max_depth once per task
            return true;
        } else return false;
    }
    template <typename Task>
    bool check_for_demand(Task& t) {
        if (tree_node::is_peer_stolen(t)) {
            my_max_depth += __TBB_DEMAND_DEPTH_ADD;
            return true;
        } else return false;
    }
    void spawn_task(task& t, task_group_context& ctx) {
        spawn(t, ctx);
    }
};

class simple_partition_type: public partition_type_base<simple_partition_type> {
public:
    simple_partition_type( const simple_partitioner& ) {}
    simple_partition_type( const simple_partition_type&, split ) {}
    //! simplified algorithm
    template<typename StartType, typename Range>
    void execute(StartType &start, Range &range, execution_data& ed) {
        split_type split_obj = split(); // start.offer_work accepts split_type as reference
        while( range.is_divisible() )
            start.offer_work( split_obj, ed );
        start.run_body( range );
    }
    void spawn_task(task& t, task_group_context& ctx) {
        spawn(t, ctx);
    }
};

class static_partition_type : public linear_affinity_mode<static_partition_type> {
public:
    typedef detail::proportional_split split_type;
    static_partition_type( const static_partitioner& ) {}
    static_partition_type( static_partition_type& p, const proportional_split& split_obj )
        : linear_affinity_mode<static_partition_type>(p, split_obj) {}
};

class affinity_partition_type : public dynamic_grainsize_mode<linear_affinity_mode<affinity_partition_type> > {
    static const unsigned factor_power = 4; // TODO: get a unified formula based on number of computing units
    slot_id* my_array;
public:
    static const unsigned factor = 1 << factor_power; // number of slots in affinity array per task
    typedef detail::proportional_split split_type;

    affinity_partition_type( affinity_partitioner_base& ap ) {
        __TBB_ASSERT( (factor&(factor-1))==0, "factor must be power of two" );
        ap.resize(factor);
        my_array = ap.my_array;
        my_max_depth = factor_power + 1;
        __TBB_ASSERT( my_max_depth < __TBB_RANGE_POOL_CAPACITY, nullptr );
    }
    affinity_partition_type(affinity_partition_type& p, split)
        : dynamic_grainsize_mode<linear_affinity_mode<affinity_partition_type> >(p, split())
        , my_array(p.my_array) {}
    affinity_partition_type(affinity_partition_type& p, const proportional_split& split_obj)
        : dynamic_grainsize_mode<linear_affinity_mode<affinity_partition_type> >(p, split_obj)
        , my_array(p.my_array) {}
    void note_affinity(slot_id id) {
        if( my_divisor )
            my_array[my_head] = id;
    }
    void spawn_task(task& t, task_group_context& ctx) {
        if (my_divisor) {
            if (!my_array[my_head]) {
                // TODO: consider new ideas with my_array for both affinity and static partitioner's, then code reuse
                spawn(t, ctx, slot_id(my_head / factor));
            } else {
                spawn(t, ctx, my_array[my_head]);
            }
        } else {
            spawn(t, ctx);
        }
    }
};

//! A simple partitioner
/** Divides the range until the range is not divisible.
    @ingroup algorithms */
class simple_partitioner {
public:
    simple_partitioner() {}

private:
    template<typename Range, typename Body, typename Partitioner> friend struct start_for;
    template<typename Range, typename Body, typename Partitioner> friend struct start_reduce;
    template<typename Range, typename Body, typename Partitioner> friend struct start_deterministic_reduce;
    template<typename Range, typename Body, typename Partitioner> friend struct start_scan;
    // new implementation just extends existing interface
    typedef simple_partition_type task_partition_type;
    // TODO: consider making split_type public
    typedef simple_partition_type::split_type split_type;

    // for parallel_scan only
    class partition_type {
    public:
        bool should_execute_range(const execution_data& ) { return false; }
        partition_type( const simple_partitioner& ) {}
        partition_type( const partition_type&, split ) {}
    };
};
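
// Illustrative usage (not part of this header): because simple_partitioner splits until ranges are
// no longer divisible, the blocked_range grainsize is the main tuning knob; the grainsize of 1000
// and process() below are placeholders chosen for the example only:
//
//     tbb::parallel_for(tbb::blocked_range<int>(0, n, /*grainsize*/ 1000),
//                       [&](const tbb::blocked_range<int>& r) {
//                           for (int i = r.begin(); i != r.end(); ++i) process(i);
//                       },
//                       tbb::simple_partitioner{});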

//! An auto partitioner
/** The range is initially divided into several large chunks.
    Chunks are further subdivided into smaller pieces if demand is detected and they are divisible.
    @ingroup algorithms */
class auto_partitioner {
public:
    auto_partitioner() {}

private:
    template<typename Range, typename Body, typename Partitioner> friend struct start_for;
    template<typename Range, typename Body, typename Partitioner> friend struct start_reduce;
    template<typename Range, typename Body, typename Partitioner> friend struct start_deterministic_reduce;
    template<typename Range, typename Body, typename Partitioner> friend struct start_scan;
    // new implementation just extends existing interface
    typedef auto_partition_type task_partition_type;
    // TODO: consider making split_type public
    typedef auto_partition_type::split_type split_type;

    //! Backward-compatible partition for auto and affinity partition objects.
    class partition_type {
        size_t num_chunks;
        static const size_t VICTIM_CHUNKS = 4;
    public:
        bool should_execute_range(const execution_data& ed) {
            if( num_chunks < VICTIM_CHUNKS && is_stolen_task(ed) )
                num_chunks = VICTIM_CHUNKS;
            return num_chunks == 1;
        }
        partition_type( const auto_partitioner& )
            : num_chunks(get_initial_auto_partitioner_divisor()*__TBB_INITIAL_CHUNKS/4) {}
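        // Worked arithmetic (illustrative, not from the original source): with 8 threads the
        // initial value is (4 * 8) * 2 / 4 == 16 chunks, i.e. the two chunks per thread requested
        // by __TBB_INITIAL_CHUNKS.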
        partition_type( partition_type& pt, split ) {
            num_chunks = pt.num_chunks = (pt.num_chunks+1u) / 2u;
        }
    };
};
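
// Illustrative usage (not part of this header): auto_partitioner is the default for the parallel
// algorithms, so passing it explicitly is equivalent to omitting the argument. Here data and f are
// placeholders for the caller's container and per-element function:
//
//     tbb::parallel_for(tbb::blocked_range<std::size_t>(0, data.size()),
//                       [&](const tbb::blocked_range<std::size_t>& r) {
//                           for (std::size_t i = r.begin(); i != r.end(); ++i) data[i] = f(data[i]);
//                       },
//                       tbb::auto_partitioner{});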

//! A static partitioner
class static_partitioner {
public:
    static_partitioner() {}

private:
    template<typename Range, typename Body, typename Partitioner> friend struct start_for;
    template<typename Range, typename Body, typename Partitioner> friend struct start_reduce;
    template<typename Range, typename Body, typename Partitioner> friend struct start_deterministic_reduce;
    template<typename Range, typename Body, typename Partitioner> friend struct start_scan;
    // new implementation just extends existing interface
    typedef static_partition_type task_partition_type;
    // TODO: consider making split_type public
    typedef static_partition_type::split_type split_type;
};
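
// Illustrative usage (not part of this header): static_partitioner distributes the iteration space
// across threads up front in even blocks with minimal stealing, which suits uniform per-iteration
// cost. The range bound n and the loop body are placeholders:
//
//     tbb::static_partitioner sp;
//     tbb::parallel_for(tbb::blocked_range<int>(0, n),
//                       [&](const tbb::blocked_range<int>& r) { /* uniform per-iteration work */ },
//                       sp);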

//! An affinity partitioner
class affinity_partitioner : affinity_partitioner_base {
public:
    affinity_partitioner() {}

private:
    template<typename Range, typename Body, typename Partitioner> friend struct start_for;
    template<typename Range, typename Body, typename Partitioner> friend struct start_reduce;
    template<typename Range, typename Body, typename Partitioner> friend struct start_deterministic_reduce;
    template<typename Range, typename Body, typename Partitioner> friend struct start_scan;
    // new implementation just extends existing interface
    typedef affinity_partition_type task_partition_type;
    // TODO: consider making split_type public
    typedef affinity_partition_type::split_type split_type;
};
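
// Illustrative usage (not part of this header): unlike the other partitioners, an
// affinity_partitioner object carries state (the affinity array above), so the same instance
// should outlive the loop and be passed by reference to repeated calls over the same data.
// Here n_steps, n and body are placeholders for the caller's iteration count, range bound and body:
//
//     static tbb::affinity_partitioner ap;
//     for (int step = 0; step < n_steps; ++step)
//         tbb::parallel_for(tbb::blocked_range<int>(0, n), body, ap);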

} // namespace d1
} // namespace detail

inline namespace v1 {
// Partitioners
using detail::d1::auto_partitioner;
using detail::d1::simple_partitioner;
using detail::d1::static_partitioner;
using detail::d1::affinity_partitioner;
// Split types
using detail::split;
using detail::proportional_split;
} // namespace v1

} // namespace tbb

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    #pragma warning (pop)
#endif // warning 4244 is back

#undef __TBB_INITIAL_CHUNKS
#undef __TBB_RANGE_POOL_CAPACITY
#undef __TBB_INIT_DEPTH

#endif /* __TBB_partitioner_H */