/*
    Copyright 2005-2011 Intel Corporation. All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    As a special exception, you may use this file as part of a free software
    library without restriction. Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License. This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/
#ifndef __TBB_task_H
#define __TBB_task_H

#include "tbb_stddef.h"
#include "tbb_machine.h"

typedef struct ___itt_caller *__itt_caller;
namespace tbb {

class task;
class task_list;

#if __TBB_TASK_GROUP_CONTEXT
class task_group_context;
#endif /* __TBB_TASK_GROUP_CONTEXT */
// MSVC does not allow taking the address of a member that was defined
// privately in task_base and made public in class task via a using declaration.
#if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
    #define __TBB_TASK_BASE_ACCESS public
#else
    #define __TBB_TASK_BASE_ACCESS private
#endif
namespace internal {

    class allocate_additional_child_of_proxy: no_assign {
        //! No longer used, but retained for binary layout compatibility. Always NULL.
        task* self;
        task& parent;
    public:
        explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {}
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

} // namespace internal
namespace interface5 {
    namespace internal {
        //! Base class for methods that became static in TBB 3.0.
        /** TBB's evolution caused the "this" argument for several methods to become obsolete.
            However, for backwards binary compatibility, the new methods need distinct names,
            otherwise the One Definition Rule would be broken. Hence the new methods are
            defined in this private base class, and then exposed in class task via
            using declarations. */
        class task_base: tbb::internal::no_copy {
        __TBB_TASK_BASE_ACCESS:
            friend class tbb::task;
            //! Schedule task for execution when a worker becomes available.
            static void spawn( task& t );

            //! Spawn multiple tasks and clear list.
            static void spawn( task_list& list );
            //! Like allocate_child, except that task's parent becomes "t", not this.
            /** Typically used in conjunction with recycle_to_reexecute to implement while loops.
                Atomically increments the reference count of t.parent(). */
            static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
                return tbb::internal::allocate_additional_child_of_proxy(t);
            }
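            // A hedged sketch of the while-loop idiom this proxy enables (RetryTask is
            // an illustrative name, not part of the library). Inside RetryTask::execute():
            //     task& c = *new( task::allocate_additional_child_of(*parent()) ) RetryTask;
            //     spawn(c);
            // The atomic increment of the parent's reference count guarantees the parent
            // cannot complete before the extra child does.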
            //! Destroy a task.
            /** Usually, calling this method is unnecessary, because a task is
                implicitly deleted after its execute() method runs. However,
                sometimes a task needs to be explicitly deallocated, such as
                when a root task is used as the parent in spawn_and_wait_for_all. */
            static void __TBB_EXPORTED_FUNC destroy( task& victim );
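            // A hedged usage sketch: a root task used only as a wait anchor is not
            // freed automatically and must be destroyed explicitly (ChildTask is an
            // illustrative name):
            //     tbb::empty_task& root = *new( tbb::task::allocate_root() ) tbb::empty_task;
            //     root.set_ref_count(2);          // one child + one for the wait
            //     root.spawn_and_wait_for_all( *new( root.allocate_child() ) ChildTask );
            //     tbb::task::destroy(root);       // root itself never runs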
        }; // class task_base

    } // namespace internal
} // namespace interface5

namespace internal {

    class scheduler: no_copy {
    public:
        //! For internal use only
        virtual void spawn( task& first, task*& next ) = 0;

        //! For internal use only
        virtual void wait_for_all( task& parent, task* child ) = 0;

        //! For internal use only
        virtual void spawn_root_and_wait( task& first, task*& next ) = 0;

        //! Pure virtual destructor.
        //  Have to have it just to shut up overzealous compilation warnings
        virtual ~scheduler() = 0;

        //! For internal use only
        virtual void enqueue( task& t, void* reserved ) = 0;
    };
    //! A reference count
    /** Should always be non-negative. A signed type is used so that underflow can be detected. */
    typedef intptr_t reference_count;

    //! An id as used for specifying affinity.
    typedef unsigned short affinity_id;
#if __TBB_TASK_GROUP_CONTEXT
    class generic_scheduler;

    struct context_list_node_t {
        context_list_node_t *my_prev,
                            *my_next;
    };
    class allocate_root_with_context_proxy: no_assign {
        task_group_context& my_context;
    public:
        allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };
#endif /* __TBB_TASK_GROUP_CONTEXT */
    class allocate_root_proxy: no_assign {
    public:
        static task& __TBB_EXPORTED_FUNC allocate( size_t size );
        static void __TBB_EXPORTED_FUNC free( task& );
    };

    class allocate_continuation_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

    class allocate_child_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };
    //! Memory prefix to a task object.
    /** This class is internal to the library.
        Do not reference it directly, except within the library itself.
        Fields are ordered in a way that preserves backwards compatibility and yields
        good packing on typical 32-bit and 64-bit platforms.

        In case the task prefix size exceeds 32 or 64 bytes on IA32 and Intel64
        architectures respectively, consider dynamic setting of task_alignment
        and task_prefix_reservation_size based on the maximal operand size supported
        by the current CPU.

        @ingroup task_scheduling */
    class task_prefix {
    private:
        friend class tbb::task;
        friend class tbb::interface5::internal::task_base;
        friend class tbb::task_list;
        friend class internal::scheduler;
        friend class internal::allocate_root_proxy;
        friend class internal::allocate_child_proxy;
        friend class internal::allocate_continuation_proxy;
        friend class internal::allocate_additional_child_of_proxy;
#if __TBB_TASK_GROUP_CONTEXT
        //! Shared context that is used to communicate asynchronous state changes
        /** Currently it is used to broadcast cancellation requests generated both
            by users and as the result of unhandled exceptions in the task::execute()
            methods. **/
        task_group_context *context;
#endif /* __TBB_TASK_GROUP_CONTEXT */
        //! The scheduler that allocated the task, or NULL if the task is big.
        /** Small tasks are pooled by the scheduler that allocated the task.
            If a scheduler needs to free a small task allocated by another scheduler,
            it returns the task to that other scheduler. This policy avoids
            memory space blowup issues for memory allocators that allocate from
            thread-specific pools. */
        scheduler* origin;

#if __TBB_TASK_PRIORITY
        union {
#endif /* __TBB_TASK_PRIORITY */
        //! Obsolete. The scheduler that owns the task.
        /** Retained only for the sake of backward binary compatibility.
            Still used by inline methods in the task.h header. **/
        scheduler* owner;

#if __TBB_TASK_PRIORITY
        //! Pointer to the next offloaded lower priority task.
        /** Used to maintain a list of offloaded tasks inside the scheduler. **/
        task* next_offloaded;
        };
#endif /* __TBB_TASK_PRIORITY */
        //! The task whose reference count includes me.
        /** In the "blocking style" of programming, this field points to the parent task.
            In the "continuation-passing style" of programming, this field points to the
            continuation of the parent. */
        tbb::task* parent;
        //! Reference count used for synchronization.
        /** In the "continuation-passing style" of programming, this field is
            the number of allocated children minus the number of children that
            have completed.
            In the "blocking style" of programming, this field is one more than that difference. */
        __TBB_atomic reference_count ref_count;
        //! Obsolete. Used to be scheduling depth before TBB 2.2.
        /** Retained only for the sake of backward binary compatibility.
            Not used by TBB anymore. **/
        int depth;

        //! A task::state_type, stored as a byte for compactness.
        /** This state is exposed to users via method task::state(). */
        unsigned char state;
        //! Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
        /** 0x0 -> version 1.0 task
            0x1 -> version >=2.1 task
            0x40 -> task has live ref_count
            0x80 -> a stolen task */
        unsigned char extra_state;

        affinity_id affinity;
        //! "next" field for list of tasks.
        tbb::task* next;

        //! The task corresponding to this task_prefix.
        tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
    };

} // namespace internal
#if __TBB_TASK_GROUP_CONTEXT

#if __TBB_TASK_PRIORITY
namespace internal {
    static const int priority_stride_v4 = INT_MAX / 4;
}

enum priority_t {
    priority_normal = internal::priority_stride_v4 * 2,
    priority_low = priority_normal - internal::priority_stride_v4,
    priority_high = priority_normal + internal::priority_stride_v4
};
#endif /* __TBB_TASK_PRIORITY */
#if TBB_USE_CAPTURED_EXCEPTION
    class tbb_exception;
#else
    namespace internal {
    class tbb_exception_ptr;
    }
#endif /* !TBB_USE_CAPTURED_EXCEPTION */

class task_scheduler_init;
//! Used to form groups of tasks
/** @ingroup task_scheduling
    The context services explicit cancellation requests from user code, and unhandled
    exceptions intercepted during task execution. Intercepting an exception results
    in generating internal cancellation requests (which are processed in exactly the
    same way as external ones).

    The context is associated with one or more root tasks and defines the cancellation
    group that includes all the descendants of the corresponding root task(s). Association
    is established when a context object is passed as an argument to the task::allocate_root()
    method. See task_group_context::task_group_context for more details.

    The context can be bound to another one, and other contexts can be bound to it,
    forming a tree-like structure: parent -> this -> children. Arrows here designate
    cancellation propagation direction. If a task in a cancellation group is canceled,
    all the other tasks in this group and groups bound to it (as children) get canceled too.

    IMPLEMENTATION NOTE:
    When adding new members to task_group_context or changing types of existing ones,
    update the size of both padding buffers (_leading_padding and _trailing_padding)
    appropriately. See also VERSIONING NOTE at the constructor definition below. **/
class task_group_context : internal::no_copy {
private:
    friend class internal::generic_scheduler;
    friend class task_scheduler_init;

#if TBB_USE_CAPTURED_EXCEPTION
    typedef tbb_exception exception_container_type;
#else
    typedef internal::tbb_exception_ptr exception_container_type;
#endif
    enum version_traits_word_layout {
        traits_offset = 16,
        version_mask = 0xFFFF,
        traits_mask = 0xFFFFul << traits_offset
    };

public:
    enum kind_type {
        isolated,
        bound
    };

    enum traits_type {
        exact_exception = 0x0001ul << traits_offset,
        concurrent_wait = 0x0004ul << traits_offset,
#if TBB_USE_CAPTURED_EXCEPTION
        default_traits = 0
#else
        default_traits = exact_exception
#endif /* !TBB_USE_CAPTURED_EXCEPTION */
    };

private:
    enum state {
        may_have_children = 1
    };
    union {
        //! Flavor of this context: bound or isolated.
        kind_type my_kind;
        uintptr_t _my_kind_aligner;
    };

    //! Pointer to the context of the parent cancellation group. NULL for isolated contexts.
    task_group_context *my_parent;

    //! Used to form the thread specific list of contexts without additional memory allocation.
    /** A context is included into the list of the current thread when its binding to
        its parent happens. Any context can be present in the list of one thread only. **/
    internal::context_list_node_t my_node;
    //! Used to set and maintain stack stitching point for Intel Performance Tools.
    __itt_caller itt_caller;

    //! Leading padding protecting accesses to frequently used members from false sharing.
    /** Read accesses to the field my_cancellation_requested are on the hot path inside
        the scheduler. This padding ensures that this field never shares the same cache
        line with a local variable that is frequently written to. **/
    char _leading_padding[internal::NFS_MaxLineSize
                          - 2 * sizeof(uintptr_t) - sizeof(void*) - sizeof(internal::context_list_node_t)
                          - sizeof(__itt_caller)];
    //! Specifies whether cancellation was requested for this task group.
    uintptr_t my_cancellation_requested;
    //! Version for run-time checks and behavioral traits of the context.
    /** Version occupies the low 16 bits, and traits (zero or more ORed enumerators
        from the traits_type enumeration) take the next 16 bits.
        Original (zeroth) version of the context did not support any traits. **/
    uintptr_t my_version_and_traits;

    //! Pointer to the container storing the exception being propagated across this task group.
    exception_container_type *my_exception;

    //! Scheduler instance that registered this context in its thread specific list.
    internal::generic_scheduler *my_owner;
    //! Internal state (combination of state flags).
    uintptr_t my_state;

#if __TBB_TASK_PRIORITY
    //! Priority level of the task group (in normalized representation)
    intptr_t my_priority;
#endif /* __TBB_TASK_PRIORITY */

    //! Trailing padding protecting accesses to frequently used members from false sharing
    /** \sa _leading_padding **/
    char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t) - 2 * sizeof(void*)
#if __TBB_TASK_PRIORITY
                           - sizeof(intptr_t)
#endif /* __TBB_TASK_PRIORITY */
                          ];

public:
    //! Default & binding constructor.
    /** By default a bound context is created. That is, this context will be bound
        (as a child) to the context of the task that calls the
        task::allocate_root(this_context) method. Cancellation requests passed to
        the parent context are propagated to all the contexts bound to it. Similarly,
        a priority change is propagated from the parent context to its children.

        If task_group_context::isolated is used as the argument, then the tasks associated
        with this context will never be affected by events in any other context.

        Creating an isolated context involves much less overhead, but it has limited
        utility. Normally, when an exception occurs in an algorithm that has nested
        ones running, it is desirable to have all the nested algorithms canceled
        as well. Such behavior requires nested algorithms to use bound contexts.

        There is one case where using an isolated context is beneficial: the master
        thread. That is, if a particular algorithm is invoked directly from the master
        thread (not from a TBB task), supplying it with an explicitly created isolated
        context results in faster algorithm startup.

        VERSIONING NOTE:
        Implementation(s) of task_group_context constructor(s) cannot be made
        entirely out-of-line because the run-time version must be set by the user
        code. This will become critically important for binary compatibility if
        we ever have to change the size of the context object.

        Boosting the runtime version will also be necessary if new data fields are
        introduced in the currently unused padding areas and these fields are updated
        by inline methods. **/
    task_group_context ( kind_type relation_with_parent = bound,
                         uintptr_t traits = default_traits )
        : my_kind(relation_with_parent)
        , my_version_and_traits(1 | traits)
    {
        init();
    }
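    // A hedged usage sketch: associate a context with a root task so the whole
    // task tree can be cancelled as one group (MyRootTask is an illustrative name):
    //     tbb::task_group_context ctx;
    //     tbb::task& root = *new( tbb::task::allocate_root(ctx) ) MyRootTask;
    //     tbb::task::spawn_root_and_wait(root);
    //     // meanwhile, from another thread: ctx.cancel_group_execution();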
    __TBB_EXPORTED_METHOD ~task_group_context ();

    //! Forcefully reinitializes the context after the task tree it was associated with is completed.
    /** Because the method assumes that all the tasks that used to be associated with
        this context have already finished, calling it while the context is still
        in use somewhere in the task hierarchy leads to undefined behavior.

        IMPORTANT: This method is not thread safe!

        The method does not change the context's parent if it is set. **/
    void __TBB_EXPORTED_METHOD reset ();
    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** \return false if cancellation has already been requested, true otherwise.

        Note that canceling never fails. When false is returned, it just means that
        another thread (or this one) has already sent a cancellation request to this
        context or to one of its ancestors (if this context is bound). It is guaranteed
        that when this method is concurrently called on the same not yet cancelled
        context, true will be returned by one and only one invocation. **/
    bool __TBB_EXPORTED_METHOD cancel_group_execution ();
    //! Returns true if the context has received a cancellation request.
    bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;
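    // A hedged sketch of cooperative cancellation: long-running user code can poll
    // the group state and bail out early (has_work and process_chunk are
    // illustrative names):
    //     while( has_work() ) {
    //         if( ctx.is_group_execution_cancelled() ) break;
    //         process_chunk();
    //     }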
    //! Records the pending exception, and cancels the task group.
    /** May be called only from inside a catch-block. If the context is already
        canceled, does nothing.
        The method brings the task group associated with this context exactly into
        the state it would be in, if one of its tasks threw the currently pending
        exception during its execution. In other words, it emulates the actions
        of the scheduler's dispatch loop exception handler. **/
    void __TBB_EXPORTED_METHOD register_pending_exception ();
#if __TBB_TASK_PRIORITY
    //! Changes priority of the task group.
    void set_priority ( priority_t );

    //! Retrieves the current priority of the task group.
    priority_t priority () const;
#endif /* __TBB_TASK_PRIORITY */
protected:
    //! Out-of-line part of the constructor.
    /** Singled out to ensure backward binary compatibility of the future versions. **/
    void __TBB_EXPORTED_METHOD init ();

private:
    friend class task;
    friend class internal::allocate_root_with_context_proxy;
    static const kind_type binding_required = bound;
    static const kind_type binding_completed = kind_type(bound+1);
    static const kind_type detached = kind_type(binding_completed+1);
    static const kind_type dying = kind_type(detached+1);
    //! Propagates state change (if any) from an ancestor
    /** Checks if one of this object's ancestors is in a new state, and propagates
        the new state to all its descendants in this object's heritage line. **/
    template <typename T>
    void propagate_state_from_ancestors ( T task_group_context::*mptr_state, T new_state );

    //! Makes sure that the context is registered with a scheduler instance.
    inline void finish_initialization ( internal::generic_scheduler *local_sched );

    //! Registers this context with the local scheduler and binds it to its parent context.
    void bind_to ( internal::generic_scheduler *local_sched );

    //! Registers this context with the local scheduler.
    void register_with ( internal::generic_scheduler *local_sched );

}; // class task_group_context
#endif /* __TBB_TASK_GROUP_CONTEXT */
//! Base class for user-defined tasks.
/** @ingroup task_scheduling */
class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base {

    //! Set reference count
    void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );

    //! Decrement reference count and return its new value.
    internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();

protected:
    //! Default constructor.
    task() {prefix().extra_state=1;}

public:
    //! Destructor.
    virtual ~task() {}
    //! Should be overridden by derived classes.
    virtual task* execute() = 0;

    //! Enumeration of task states that the scheduler considers.
    enum state_type {
        //! task is running, and will be destroyed after method execute() completes.
        executing,
        //! task to be rescheduled.
        reexecute,
        //! task is in ready pool, or is going to be put there, or was just taken off.
        ready,
        //! task object is freshly allocated or recycled.
        allocated,
        //! task object is on free list, or is going to be put there, or was just taken off.
        freed,
        //! task to be recycled as continuation
        recycle
    };
    //------------------------------------------------------------------------
    // Allocation
    //------------------------------------------------------------------------

    //! Returns proxy for overloaded new that allocates a root task.
    static internal::allocate_root_proxy allocate_root() {
        return internal::allocate_root_proxy();
    }
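    // A hedged allocation sketch (MyTask is an illustrative name): tasks are
    // constructed with placement new on the proxy returned above:
    //     tbb::task& t = *new( tbb::task::allocate_root() ) MyTask(/*args*/);
    //     tbb::task::spawn_root_and_wait(t);   // runs t and frees it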
#if __TBB_TASK_GROUP_CONTEXT
    //! Returns proxy for overloaded new that allocates a root task associated with user supplied context.
    static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
        return internal::allocate_root_with_context_proxy(ctx);
    }
#endif /* __TBB_TASK_GROUP_CONTEXT */
    //! Returns proxy for overloaded new that allocates a continuation task of *this.
    /** The continuation's parent becomes the parent of *this. */
    internal::allocate_continuation_proxy& allocate_continuation() {
        return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
    }

    //! Returns proxy for overloaded new that allocates a child task of *this.
    internal::allocate_child_proxy& allocate_child() {
        return *reinterpret_cast<internal::allocate_child_proxy*>(this);
    }
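    // A hedged continuation-passing sketch (FibTask and FibContinuation are
    // illustrative names, not part of the library):
    //     struct FibContinuation: public tbb::task {
    //         long* const sum; long x, y;
    //         FibContinuation( long* s ) : sum(s), x(0), y(0) {}
    //         /*override*/ tbb::task* execute() { *sum = x + y; return NULL; }
    //     };
    //     // inside FibTask::execute():
    //     FibContinuation& c = *new( allocate_continuation() ) FibContinuation(sum);
    //     FibTask& a = *new( c.allocate_child() ) FibTask(n-1, &c.x);
    //     FibTask& b = *new( c.allocate_child() ) FibTask(n-2, &c.y);
    //     c.set_ref_count(2);
    //     spawn(b);
    //     return &a;   // the scheduler runs a next; c runs when both children finish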
    //! Define recommended static form via import from base class.
    using task_base::allocate_additional_child_of;

#if __TBB_DEPRECATED_TASK_INTERFACE
    //! Destroy a task.
    /** Usually, calling this method is unnecessary, because a task is
        implicitly deleted after its execute() method runs. However,
        sometimes a task needs to be explicitly deallocated, such as
        when a root task is used as the parent in spawn_and_wait_for_all. */
    void __TBB_EXPORTED_METHOD destroy( task& t );
#else /* !__TBB_DEPRECATED_TASK_INTERFACE */
    //! Define recommended static form via import from base class.
    using task_base::destroy;
#endif /* !__TBB_DEPRECATED_TASK_INTERFACE */
    //------------------------------------------------------------------------
    // Recycling of tasks
    //------------------------------------------------------------------------

    //! Change this to be a continuation of its former self.
    /** The caller must guarantee that the task's refcount does not become zero until
        after the method execute() returns. Typically, this is done by having
        method execute() return a pointer to a child of the task. If the guarantee
        cannot be made, use method recycle_as_safe_continuation instead.

        Because of the hazard, this method may be deprecated in the future. */
    void recycle_as_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = allocated;
    }
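    // A hedged sketch (ChildTask is an illustrative name): inside execute(), turn
    // *this into its own continuation and return one child to run next:
    //     recycle_as_continuation();
    //     set_ref_count(2);
    //     spawn( *new( allocate_child() ) ChildTask );
    //     return new( allocate_child() ) ChildTask;
    // Returning the second child (rather than spawning it) keeps the refcount
    // from reaching zero before execute() finishes, as required above.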
    //! Safe variant of recycle_as_continuation; recommended for general use.
    /** For safety, it requires an additional increment of ref_count.
        With no descendants and a ref_count of 1, it has the semantics of recycle_to_reexecute. */
    void recycle_as_safe_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = recycle;
    }
    //! Change this to be a child of new_parent.
    void recycle_as_child_of( task& new_parent ) {
        internal::task_prefix& p = prefix();
        __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
        __TBB_ASSERT( p.parent==NULL, "parent must be null" );
        __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
        __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
        p.state = allocated;
        p.parent = &new_parent;
#if __TBB_TASK_GROUP_CONTEXT
        p.context = new_parent.prefix().context;
#endif /* __TBB_TASK_GROUP_CONTEXT */
    }
    //! Schedule this for reexecution after current execute() returns.
    /** Made obsolete by recycle_as_safe_continuation; may become deprecated. */
    void recycle_to_reexecute() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
        prefix().state = reexecute;
    }
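    // A hedged sketch of a serial "while loop" task (do_iteration and more_work
    // are illustrative names):
    //     /*override*/ tbb::task* execute() {
    //         do_iteration();
    //         if( more_work() ) recycle_to_reexecute();
    //         return NULL;
    //     }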
    // All depth-related methods are obsolete, and are retained for the sake
    // of backward source compatibility only.
    intptr_t depth() const {return 0;}
    void set_depth( intptr_t ) {}
    void add_to_depth( int ) {}
    //------------------------------------------------------------------------
    // Spawning and blocking
    //------------------------------------------------------------------------

    //! Set reference count
    void set_ref_count( int count ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        internal_set_ref_count(count);
#else
        prefix().ref_count = count;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }
    //! Atomically increments reference count.
    /** Has acquire semantics. */
    void increment_ref_count() {
        __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
    }
    //! Atomically decrements reference count and returns its new value.
    /** Has release semantics. */
    int decrement_ref_count() {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        return int(internal_decrement_ref_count());
#else
        return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }
    //! Define recommended static forms via import from base class.
    using task_base::spawn;

    //! Similar to spawn followed by wait_for_all, but more efficient.
    void spawn_and_wait_for_all( task& child ) {
        prefix().owner->wait_for_all( *this, &child );
    }
    //! Similar to spawn followed by wait_for_all, but more efficient.
    void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );
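    // A hedged blocking-style sketch (ChildTask is an illustrative name), written
    // from inside a parent task's execute():
    //     ChildTask& a = *new( allocate_child() ) ChildTask;
    //     ChildTask& b = *new( allocate_child() ) ChildTask;
    //     set_ref_count(3);            // two children + one for the wait itself
    //     spawn(b);
    //     spawn_and_wait_for_all(a);   // returns when both children have completed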
    //! Spawn task allocated by allocate_root, wait for it to complete, and deallocate it.
    static void spawn_root_and_wait( task& root ) {
        root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
    }
    //! Spawn root tasks on list and wait for all of them to finish.
    /** If there are more tasks than worker threads, the tasks are spawned in
        front-to-back order. */
    static void spawn_root_and_wait( task_list& root_list );
    //! Wait for reference count to become one, and set reference count to zero.
    /** Works on tasks while waiting. */
    void wait_for_all() {
        prefix().owner->wait_for_all( *this, NULL );
    }
    //! Enqueue task for starvation-resistant execution.
#if __TBB_TASK_PRIORITY
    /** The task will be enqueued on the normal priority level, disregarding the
        priority of its task group.

        The rationale of such semantics is that the priority of an enqueued task is
        statically fixed at the moment of its enqueuing, while task group priority
        is dynamic. Thus automatic priority inheritance would generally be subject
        to a race, which could result in unexpected behavior.

        Use the enqueue() overload with an explicit priority value and the
        task::group_priority() method to implement such priority inheritance
        when it is really necessary. **/
#endif /* __TBB_TASK_PRIORITY */
    static void enqueue( task& t ) {
        t.prefix().owner->enqueue( t, NULL );
    }
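    // A hedged fire-and-forget sketch (BackgroundTask is an illustrative name):
    //     tbb::task::enqueue( *new( tbb::task::allocate_root() ) BackgroundTask );
    // Unlike spawn(), an enqueued task is eventually executed even if no thread
    // explicitly waits for it.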
#if __TBB_TASK_PRIORITY
    //! Enqueue task for starvation-resistant execution on the specified priority level.
    static void enqueue( task& t, priority_t p ) {
        __TBB_ASSERT( p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value" );
        t.prefix().owner->enqueue( t, (void*)p );
    }
#endif /* __TBB_TASK_PRIORITY */
    //! The innermost task being executed or destroyed by the current thread at the moment.
    static task& __TBB_EXPORTED_FUNC self();

    //! task on whose behalf this task is working, or NULL if this is a root.
    task* parent() const {return prefix().parent;}
    //! Sets the parent task pointer to the specified value.
    void set_parent(task* p) {
#if __TBB_TASK_GROUP_CONTEXT
        __TBB_ASSERT(prefix().context == p->prefix().context, "The tasks must be in the same context");
#endif /* __TBB_TASK_GROUP_CONTEXT */
        prefix().parent = p;
    }
#if __TBB_TASK_GROUP_CONTEXT
    //! This method is deprecated and will be removed in the future.
    /** Use method group() instead. **/
    task_group_context* context() {return prefix().context;}

    //! Pointer to the task group descriptor.
    task_group_context* group () { return prefix().context; }
#endif /* __TBB_TASK_GROUP_CONTEXT */
    //! True if task was stolen from the task pool of another thread.
    bool is_stolen_task() const {
        return (prefix().extra_state & 0x80)!=0;
    }
    //------------------------------------------------------------------------
    // Debugging
    //------------------------------------------------------------------------

    //! Current execution state
    state_type state() const {return state_type(prefix().state);}
    //! The internal reference count.
    int ref_count() const {
#if TBB_USE_ASSERT
        internal::reference_count ref_count_ = prefix().ref_count;
        __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error");
#endif
        return int(prefix().ref_count);
    }
    //! Obsolete, and only retained for the sake of backward compatibility. Always returns true.
    bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;
    //------------------------------------------------------------------------
    // Affinity
    //------------------------------------------------------------------------

    //! An id as used for specifying affinity.
    /** Guaranteed to be an integral type. Value of 0 means no affinity. */
    typedef internal::affinity_id affinity_id;
    //! Set affinity for this task.
    void set_affinity( affinity_id id ) {prefix().affinity = id;}

    //! Current affinity of this task
    affinity_id affinity() const {return prefix().affinity;}
    //! Invoked by scheduler to notify task that it ran on an unexpected thread.
    /** Invoked before method execute() runs, if the task is stolen, or the task has
        affinity but will be executed on another thread.

        The default action does nothing. */
    virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );
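    // A hedged affinity-replay sketch (saved_id is an illustrative name): a task
    // can record the id reported here, and a later task can request the same thread:
    //     /*override*/ void note_affinity( affinity_id id ) { saved_id = id; }
    //     // ...on a subsequent iteration:
    //     next_task.set_affinity( saved_id );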
#if __TBB_TASK_GROUP_CONTEXT
    //! Moves this task from its current group into another one.
    /** Argument ctx specifies the new group.

        The primary purpose of this method is to associate a unique task group context
        with a task allocated for subsequent enqueuing. In contrast to spawned tasks,
        enqueued ones normally outlive the scope where they were created. This makes
        the traditional usage model, where task group contexts are allocated locally
        on the stack, inapplicable. Dynamic allocation of context objects is
        inefficient. Method change_group() allows a task group context object to be
        made a member of the task class, and then associated with its containing task
        object in the latter's constructor. **/
    void __TBB_EXPORTED_METHOD change_group ( task_group_context& ctx );
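    // A hedged sketch of the pattern described above (LongLivedTask is an
    // illustrative name): the context lives inside the task object itself.
    //     struct LongLivedTask : public tbb::task {
    //         tbb::task_group_context my_ctx;
    //         LongLivedTask() { change_group(my_ctx); }
    //         /*override*/ tbb::task* execute();
    //     };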
    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** \return false if cancellation has already been requested, true otherwise. **/
    bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }

    //! Returns true if the context has received a cancellation request.
    bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
#endif /* __TBB_TASK_GROUP_CONTEXT */
#if __TBB_TASK_PRIORITY
    //! Changes priority of the task group this task belongs to.
    void set_group_priority ( priority_t p ) { prefix().context->set_priority(p); }

    //! Retrieves the current priority of the task group this task belongs to.
    priority_t group_priority () const { return prefix().context->priority(); }
#endif /* __TBB_TASK_PRIORITY */
private:
    friend class interface5::internal::task_base;
    friend class task_list;
    friend class internal::scheduler;
    friend class internal::allocate_root_proxy;
#if __TBB_TASK_GROUP_CONTEXT
    friend class internal::allocate_root_with_context_proxy;
#endif /* __TBB_TASK_GROUP_CONTEXT */
    friend class internal::allocate_continuation_proxy;
    friend class internal::allocate_child_proxy;
    friend class internal::allocate_additional_child_of_proxy;
    //! Get reference to corresponding task_prefix.
    /** Version tag prevents loader on Linux from using the wrong symbol in debug builds. **/
    internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
        return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
    }
}; // class task
//! task that does nothing. Useful for synchronization.
/** @ingroup task_scheduling */
class empty_task: public task {
    /*override*/ task* execute() {
        return NULL;
    }
};
//! A list of children.
/** Used for method task::spawn_children
    @ingroup task_scheduling */
class task_list: internal::no_copy {
private:
    task* first;
    task** next_ptr;
    friend class task;
    friend class interface5::internal::task_base;
public:
    //! Construct empty list
    task_list() : first(NULL), next_ptr(&first) {}
    //! Destroys the list, but does not destroy the task objects.
    ~task_list() {}

    //! True if list is empty; false otherwise.
    bool empty() const {return !first;}
    //! Push task onto back of list.
    void push_back( task& task ) {
        task.prefix().next = NULL;
        *next_ptr = &task;
        next_ptr = &task.prefix().next;
    }
    //! Pop the front task from the list.
    task& pop_front() {
        __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
        task* result = first;
        first = result->prefix().next;
        if( !first ) next_ptr = &first;
        return *result;
    }

    //! Clear the list
    void clear() {
        first = NULL;
        next_ptr = &first;
    }
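    // A hedged usage sketch (ChildTask is an illustrative name): collect children
    // in a list, then spawn and wait for all of them from the parent's execute():
    //     tbb::task_list l;
    //     for( int i=0; i<k; ++i )
    //         l.push_back( *new( allocate_child() ) ChildTask(i) );
    //     set_ref_count(k+1);           // k children + one for the wait
    //     spawn_and_wait_for_all(l);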
}; // class task_list

inline void interface5::internal::task_base::spawn( task& t ) {
    t.prefix().owner->spawn( t, t.prefix().next );
}

inline void interface5::internal::task_base::spawn( task_list& list ) {
    if( task* t = list.first ) {
        t->prefix().owner->spawn( *t, *list.next_ptr );
        list.clear();
    }
}
inline void task::spawn_root_and_wait( task_list& root_list ) {
    if( task* t = root_list.first ) {
        t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
        root_list.clear();
    }
}

} // namespace tbb
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
    return &tbb::internal::allocate_root_proxy::allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
    tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
}
#if __TBB_TASK_GROUP_CONTEXT
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
#endif /* __TBB_TASK_GROUP_CONTEXT */
inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
#endif /* __TBB_task_H */