2 Copyright 2005-2010 Intel Corporation. All Rights Reserved.
4 This file is part of Threading Building Blocks.
6 Threading Building Blocks is free software; you can redistribute it
7 and/or modify it under the terms of the GNU General Public License
8 version 2 as published by the Free Software Foundation.
10 Threading Building Blocks is distributed in the hope that it will be
11 useful, but WITHOUT ANY WARRANTY; without even the implied warranty
12 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with Threading Building Blocks; if not, write to the Free Software
17 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 As a special exception, you may use this file as part of a free software
20 library without restriction. Specifically, if other files instantiate
21 templates or use macros or inline functions from this file, or you compile
22 this file and link it with other files to produce an executable, this
23 file does not by itself cause the resulting executable to be covered by
24 the GNU General Public License. This exception does not however
25 invalidate any other reasons why the executable file might be covered by
26 the GNU General Public License.
#include "tbb_stddef.h"
#include "tbb_machine.h"

// Opaque handle for Intel instrumentation/tracing (ITT) stack-stitching APIs;
// used below by task_group_context::itt_caller.
typedef struct ___itt_caller *__itt_caller;

// NOTE(review): the `namespace tbb {` opener appears elided from this excerpt —
// the declarations below are referenced elsewhere as members of namespace tbb.
#if __TBB_TASK_GROUP_CONTEXT
class task_group_context;
#endif /* __TBB_TASK_GROUP_CONTEXT */
// MSVC does not allow taking the address of a member that was defined
// privately in task_base and made public in class task via a using declaration.
// Restored: the #else/#endif of this conditional were elided — without them the
// two contradictory #defines would both be compiled unconditionally.
#if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
    #define __TBB_TASK_BASE_ACCESS public
#else
    #define __TBB_TASK_BASE_ACCESS private
#endif
//! Proxy consumed by an overloaded operator new to allocate an additional child of a given task.
// NOTE(review): this excerpt is elided — the access specifiers and the data
// members referenced by the constructor's initializer list (self, parent) are
// not visible here; restore them from the full header before compiling.
class allocate_additional_child_of_proxy: no_assign {
    //! No longer used, but retained for binary layout compatibility. Always NULL.
    explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {}
    //! Allocates a task of at least `size` bytes parented to the stored task.
    task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
    //! Returns a task obtained via allocate() back to the allocator (used if its constructor throws).
    void __TBB_EXPORTED_METHOD free( task& ) const;
namespace interface5 {
    // NOTE(review): a nested `namespace internal {` opener appears elided here —
    // task_base is referenced elsewhere as tbb::interface5::internal::task_base.

    //! Base class for methods that became static in TBB 3.0.
    /** TBB's evolution caused the "this" argument for several methods to become obsolete.
        However, for backwards binary compatibility, the new methods need distinct names,
        otherwise the One Definition Rule would be broken. Hence the new methods are
        defined in this private base class, and then exposed in class task via
        using declarations. */
    class task_base: tbb::internal::no_copy {
    __TBB_TASK_BASE_ACCESS:
        friend class tbb::task;

        //! Schedule task for execution when a worker becomes available.
        static void spawn( task& t );

        //! Spawn multiple tasks and clear list.
        static void spawn( task_list& list );

        //! Like allocate_child, except that task's parent becomes "t", not this.
        /** Typically used in conjunction with schedule_to_reexecute to implement while loops.
            Atomically increments the reference count of t.parent() */
        static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
            return tbb::internal::allocate_additional_child_of_proxy(t);
        // NOTE(review): the closing brace of allocate_additional_child_of and the
        // start of destroy()'s documentation are elided from this excerpt.

        //! Destroy a task.
        /** Usually, calling this method is unnecessary, because a task is
            implicitly deleted after its execute() method runs. However,
            sometimes a task needs to be explicitly deallocated, such as
            when a root task is used as the parent in spawn_and_wait_for_all. */
        static void __TBB_EXPORTED_FUNC destroy( task& victim );
//! Interface of the scheduler as seen by a task.
// NOTE(review): excerpt is elided — the access specifier before these virtual
// methods and the class's closing brace are not visible here.
class scheduler: no_copy {
    //! For internal use only
    virtual void spawn( task& first, task*& next ) = 0;

    //! For internal use only
    /** See task::wait_for_all: works on tasks until parent's reference count permits return. */
    virtual void wait_for_all( task& parent, task* child ) = 0;

    //! For internal use only
    virtual void spawn_root_and_wait( task& first, task*& next ) = 0;

    //! Pure virtual destructor;
    //  Have to have it just to shut up overzealous compilation warnings
    virtual ~scheduler() = 0;

#if __TBB_ARENA_PER_MASTER
    //! For internal use only
    /** See task::enqueue: starvation-resistant execution. */
    virtual void enqueue( task& t, void* reserved ) = 0;
#endif /* __TBB_ARENA_PER_MASTER */
//! A reference count
/** Should always be non-negative. A signed type is used so that underflow can be detected. */
typedef intptr_t reference_count;

//! An id as used for specifying affinity.
typedef unsigned short affinity_id;

#if __TBB_TASK_GROUP_CONTEXT
//! Node of the thread-specific doubly linked list of contexts (see task_group_context::my_node).
struct context_list_node_t {
    context_list_node_t *my_prev,
    // NOTE(review): the rest of this declaration (presumably *my_next;) and the
    // struct's closing brace are elided from this excerpt.
//! Proxy for overloaded new that allocates a root task bound to a user-supplied context
//! (see task::allocate_root(task_group_context&)).
// NOTE(review): access specifiers and the closing braces of the proxy classes
// in this excerpt are elided.
class allocate_root_with_context_proxy: no_assign {
    //! Cancellation group the allocated root task will be associated with.
    task_group_context& my_context;
    allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}
    task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
    void __TBB_EXPORTED_METHOD free( task& ) const;
#endif /* __TBB_TASK_GROUP_CONTEXT */

//! Proxy for overloaded new that allocates a root task (see task::allocate_root()).
class allocate_root_proxy: no_assign {
    static task& __TBB_EXPORTED_FUNC allocate( size_t size );
    static void __TBB_EXPORTED_FUNC free( task& );

//! Proxy for overloaded new that allocates a continuation task (see task::allocate_continuation()).
class allocate_continuation_proxy: no_assign {
    task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
    void __TBB_EXPORTED_METHOD free( task& ) const;

//! Proxy for overloaded new that allocates a child task (see task::allocate_child()).
class allocate_child_proxy: no_assign {
    task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
    void __TBB_EXPORTED_METHOD free( task& ) const;
//! Memory prefix to a task object.
/** This class is internal to the library.
    Do not reference it directly, except within the library itself.
    Fields are ordered in way that preserves backwards compatibility and yields
    good packing on typical 32-bit and 64-bit platforms.
    @ingroup task_scheduling */
// NOTE(review): the `class task_prefix {` opener and several member
// declarations are elided from this excerpt; only their documentation and a
// subset of the fields remain visible.
    friend class tbb::task;
    friend class tbb::interface5::internal::task_base;
    friend class tbb::task_list;
    friend class internal::scheduler;
    friend class internal::allocate_root_proxy;
    friend class internal::allocate_child_proxy;
    friend class internal::allocate_continuation_proxy;
    friend class internal::allocate_additional_child_of_proxy;

#if __TBB_TASK_GROUP_CONTEXT
    //! Shared context that is used to communicate asynchronous state changes
    /** Currently it is used to broadcast cancellation requests generated both
        by users and as the result of unhandled exceptions in the task::execute()
        methods. */
    task_group_context *context;
#endif /* __TBB_TASK_GROUP_CONTEXT */

    //! The scheduler that allocated the task, or NULL if the task is big.
    /** Small tasks are pooled by the scheduler that allocated the task.
        If a scheduler needs to free a small task allocated by another scheduler,
        it returns the task to that other scheduler. This policy avoids
        memory space blowup issues for memory allocators that allocate from
        thread-specific pools. */
    // NOTE(review): the field declaration itself is elided from this excerpt.

    //! The scheduler that owns the task.
    // NOTE(review): field declaration elided (referenced elsewhere as `owner`).

    //! The task whose reference count includes me.
    /** In the "blocking style" of programming, this field points to the parent task.
        In the "continuation-passing style" of programming, this field points to the
        continuation of the parent. */
    // NOTE(review): field declaration elided (referenced elsewhere as `parent`).

    //! Reference count used for synchronization.
    /** In the "continuation-passing style" of programming, this field is
        the difference of the number of allocated children minus the
        number of children that have completed.
        In the "blocking style" of programming, this field is one more than the difference. */
    reference_count ref_count;

    //! Obsolete. Used to be scheduling depth before TBB 2.2
    /** Retained only for the sake of backward binary compatibility. **/
    // NOTE(review): field declaration elided.

    //! A task::state_type, stored as a byte for compactness.
    /** This state is exposed to users via method task::state(). */
    // NOTE(review): field declaration elided (referenced elsewhere as `state`).

    //! Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
    /** 0x0 -> version 1.0 task
        0x1 -> version >=2.1 task
        0x40 -> task has live ref_count
        0x80 -> a stolen task */
    unsigned char extra_state;

    //! Affinity id of the task; per task::affinity_id, 0 means no affinity.
    affinity_id affinity;

    //! "next" field for list of task
    // NOTE(review): field declaration elided (referenced elsewhere as `next`).

    //! The task corresponding to this task_prefix.
    /** The prefix is laid out immediately before the task object itself. */
    tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
// NOTE(review): the class's closing `};` appears elided before this namespace close.
} // namespace internal
#if __TBB_TASK_GROUP_CONTEXT
#if TBB_USE_CAPTURED_EXCEPTION
    // NOTE(review): the forward declarations in this conditional are elided —
    // only the one below survives; the #else arm and any namespace wrapper
    // around tbb_exception_ptr must be restored from the full header.
    class tbb_exception_ptr;
#endif /* !TBB_USE_CAPTURED_EXCEPTION */

//! Used to form groups of tasks
/** @ingroup task_scheduling
    The context services explicit cancellation requests from user code, and unhandled
    exceptions intercepted during tasks execution. Intercepting an exception results
    in generating internal cancellation requests (which is processed in exactly the
    same way as external ones).

    The context is associated with one or more root tasks and defines the cancellation
    group that includes all the descendants of the corresponding root task(s). Association
    is established when a context object is passed as an argument to the task::allocate_root()
    method. See task_group_context::task_group_context for more details.

    The context can be bound to another one, and other contexts can be bound to it,
    forming a tree-like structure: parent -> this -> children. Arrows here designate
    cancellation propagation direction. If a task in a cancellation group is canceled
    all the other tasks in this group and groups bound to it (as children) get canceled too.

    IMPLEMENTATION NOTE:
    When adding new members to task_group_context or changing types of existing ones,
    update the size of both padding buffers (_leading_padding and _trailing_padding)
    appropriately. See also VERSIONING NOTE at the constructor definition below. **/
class task_group_context : internal::no_copy {
    // NOTE(review): the public kind_type enumeration (providing at least `bound`,
    // referenced by the constructor default and the constants below) and several
    // access specifiers are elided from this excerpt.
#if TBB_USE_CAPTURED_EXCEPTION
    typedef tbb_exception exception_container_type;
    // NOTE(review): the #else separating the two alternative typedefs is elided.
    typedef internal::tbb_exception_ptr exception_container_type;

    //! Layout of the combined version-and-traits word (see my_version_and_traits).
    enum version_traits_word_layout {
        // NOTE(review): the traits_offset enumerator (used below) is elided here.
        version_mask = 0xFFFF,
        traits_mask = 0xFFFFul << traits_offset

    // Behavioral traits, ORed into the high half of my_version_and_traits.
    // NOTE(review): the enum's opening line is elided here.
        exact_exception = 0x0001ul << traits_offset,
        concurrent_wait = 0x0004ul << traits_offset,
#if TBB_USE_CAPTURED_EXCEPTION
        // NOTE(review): the captured-exception arm of default_traits and the
        // #else are elided here.
        default_traits = exact_exception
#endif /* !TBB_USE_CAPTURED_EXCEPTION */

    //! Flavor of this context: bound or isolated.
    // NOTE(review): the my_kind member itself (and any union wrapping it with
    // the aligner below) is elided; the constructor initializes my_kind.
    uintptr_t _my_kind_aligner;

    //! Pointer to the context of the parent cancellation group. NULL for isolated contexts.
    task_group_context *my_parent;

    //! Used to form the thread specific list of contexts without additional memory allocation.
    /** A context is included into the list of the current thread when its binding to
        its parent happens. Any context can be present in the list of one thread only. **/
    internal::context_list_node_t my_node;

    //! Used to set and maintain stack stitching point for Intel Performance Tools.
    __itt_caller itt_caller;

    //! Leading padding protecting accesses to frequently used members from false sharing.
    /** Read accesses to the field my_cancellation_requested are on the hot path inside
        the scheduler. This padding ensures that this field never shares the same cache
        line with a local variable that is frequently written to. **/
    char _leading_padding[internal::NFS_MaxLineSize -
                          2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)
                          - sizeof(__itt_caller)];

    //! Specifies whether cancellation was request for this task group.
    uintptr_t my_cancellation_requested;

    //! Version for run-time checks and behavioral traits of the context.
    /** Version occupies low 16 bits, and traits (zero or more ORed enumerators
        from the traits_type enumerations) take the next 16 bits.
        Original (zeroth) version of the context did not support any traits. **/
    uintptr_t my_version_and_traits;

    //! Pointer to the container storing exception being propagated across this task group.
    exception_container_type *my_exception;

    //! Scheduler that registered this context in its thread specific list.
    /** This field is not terribly necessary, but it allows to get a small performance
        benefit by getting us rid of using thread local storage. We do not care
        about extra memory it takes since this data structure is excessively padded anyway. **/
    // NOTE(review): the member declaration itself is elided from this excerpt.

    //! Trailing padding protecting accesses to frequently used members from false sharing
    /** \sa _leading_padding **/
    char _trailing_padding[internal::NFS_MaxLineSize - sizeof(intptr_t) - 2 * sizeof(void*)];

    //! Default & binding constructor.
    /** By default a bound context is created. That is this context will be bound
        (as child) to the context of the task calling task::allocate_root(this_context)
        method. Cancellation requests passed to the parent context are propagated
        to all the contexts bound to it.

        If task_group_context::isolated is used as the argument, then the tasks associated
        with this context will never be affected by events in any other context.

        Creating isolated contexts involve much less overhead, but they have limited
        utility. Normally when an exception occurs in an algorithm that has nested
        ones running, it is desirably to have all the nested algorithms canceled
        as well. Such a behavior requires nested algorithms to use bound contexts.

        There is one good place where using isolated algorithms is beneficial. It is
        a master thread. That is if a particular algorithm is invoked directly from
        the master thread (not from a TBB task), supplying it with explicitly
        created isolated context will result in a faster algorithm startup.

        VERSIONING NOTE:
        Implementation(s) of task_group_context constructor(s) cannot be made
        entirely out-of-line because the run-time version must be set by the user
        code. This will become critically important for binary compatibility, if
        we ever have to change the size of the context object.

        Boosting the runtime version will also be necessary whenever new fields
        are introduced in the currently unused padding areas or the meaning of
        the existing fields is changed or extended. **/
    task_group_context ( kind_type relation_with_parent = bound,
                         uintptr_t traits = default_traits )
        : my_kind(relation_with_parent)
        , my_version_and_traits(1 | traits)
    // NOTE(review): the constructor body is elided here; per the documentation
    // of init() below, it presumably calls init() — verify against the full header.

    __TBB_EXPORTED_METHOD ~task_group_context ();

    //! Forcefully reinitializes the context after the task tree it was associated with is completed.
    /** Because the method assumes that all the tasks that used to be associated with
        this context have already finished, calling it while the context is still
        in use somewhere in the task hierarchy leads to undefined behavior.

        IMPORTANT: This method is not thread safe!

        The method does not change the context's parent if it is set. **/
    void __TBB_EXPORTED_METHOD reset ();

    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** \return false if cancellation has already been requested, true otherwise.

        Note that canceling never fails. When false is returned, it just means that
        another thread (or this one) has already sent cancellation request to this
        context or to one of its ancestors (if this context is bound). It is guaranteed
        that when this method is concurrently called on the same not yet cancelled
        context, true will be returned by one and only one invocation. **/
    bool __TBB_EXPORTED_METHOD cancel_group_execution ();

    //! Returns true if the context received cancellation request.
    bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;

    //! Records the pending exception, and cancels the task group.
    /** May be called only from inside a catch-block. If the context is already
        canceled, does nothing.
        The method brings the task group associated with this context exactly into
        the state it would be in, if one of its tasks threw the currently pending
        exception during its execution. In other words, it emulates the actions
        of the scheduler's dispatch loop exception handler. **/
    void __TBB_EXPORTED_METHOD register_pending_exception ();

    //! Out-of-line part of the constructor.
    /** Singled out to ensure backward binary compatibility of the future versions. **/
    void __TBB_EXPORTED_METHOD init ();

    friend class internal::allocate_root_with_context_proxy;

    // Life-cycle stages of a context, expressed as successive kind_type values.
    static const kind_type binding_required = bound;
    static const kind_type binding_completed = kind_type(bound+1);
    static const kind_type detached = kind_type(binding_completed+1);
    static const kind_type dying = kind_type(detached+1);

    //! Checks if any of the ancestors has a cancellation request outstanding,
    //! and propagates it back to descendants.
    void propagate_cancellation_from_ancestors ();

    //! For debugging purposes only.
    // NOTE(review): the signature of this debug helper and its guarding
    // #if TBB_USE_DEBUG line are elided; only the body line remains visible.
        return my_version_and_traits != 0xDeadBeef;
#endif /* TBB_USE_DEBUG */

}; // class task_group_context

#endif /* __TBB_TASK_GROUP_CONTEXT */
//! Base class for user-defined tasks.
/** @ingroup task_scheduling */
class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base {

    //! Set reference count
    /** Out-of-line helper used by set_ref_count() when TBB_USE_THREADING_TOOLS
        or TBB_USE_ASSERT is enabled. */
    void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );

    //! Decrement reference count and return its new value.
    /** Out-of-line helper used by decrement_ref_count() when TBB_USE_THREADING_TOOLS
        or TBB_USE_ASSERT is enabled. */
    internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();

    // NOTE(review): access specifiers (and, presumably, the virtual destructor)
    // are elided between the helpers above and the constructor below.

    //! Default constructor.
    /** Tags the task as a version >=2.1 task (see task_prefix::extra_state bit 0x1). */
    task() {prefix().extra_state=1;}

    //! Should be overridden by derived classes.
    virtual task* execute() = 0;

    //! Enumeration of task states that the scheduler considers.
    // NOTE(review): the enum's opening/closing lines and enumerator names are
    // elided; the methods below reference executing, reexecute, allocated,
    // freed and recycle, which presumably correspond to the comments here.
    //! task is running, and will be destroyed after method execute() completes.
    //! task to be rescheduled.
    //! task is in ready pool, or is going to be put there, or was just taken off.
    //! task object is freshly allocated or recycled.
    //! task object is on free list, or is going to be put there, or was just taken off.
    //! task to be recycled as continuation
    //------------------------------------------------------------------------
    // Allocation (each method returns a proxy consumed by an overloaded
    // operator new defined at the bottom of this header).
    //------------------------------------------------------------------------

    //! Returns proxy for overloaded new that allocates a root task.
    static internal::allocate_root_proxy allocate_root() {
        return internal::allocate_root_proxy();
    // NOTE(review): the closing braces of the inline methods in this section
    // are elided from this excerpt.

#if __TBB_TASK_GROUP_CONTEXT
    //! Returns proxy for overloaded new that allocates a root task associated with user supplied context.
    static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
        return internal::allocate_root_with_context_proxy(ctx);
#endif /* __TBB_TASK_GROUP_CONTEXT */

    //! Returns proxy for overloaded new that allocates a continuation task of *this.
    /** The continuation's parent becomes the parent of *this. */
    internal::allocate_continuation_proxy& allocate_continuation() {
        return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);

    //! Returns proxy for overloaded new that allocates a child task of *this.
    internal::allocate_child_proxy& allocate_child() {
        return *reinterpret_cast<internal::allocate_child_proxy*>(this);

    //! Define recommended static form via import from base class.
    using task_base::allocate_additional_child_of;

#if __TBB_DEPRECATED_TASK_INTERFACE
    //! Destroy a task.
    /** Usually, calling this method is unnecessary, because a task is
        implicitly deleted after its execute() method runs. However,
        sometimes a task needs to be explicitly deallocated, such as
        when a root task is used as the parent in spawn_and_wait_for_all. */
    void __TBB_EXPORTED_METHOD destroy( task& t );
#else /* !__TBB_DEPRECATED_TASK_INTERFACE */
    //! Define recommended static form via import from base class.
    using task_base::destroy;
#endif /* !__TBB_DEPRECATED_TASK_INTERFACE */
533 //------------------------------------------------------------------------
534 // Recycling of tasks
535 //------------------------------------------------------------------------
537 //! Change this to be a continuation of its former self.
538 /** The caller must guarantee that the task's refcount does not become zero until
539 after the method execute() returns. Typically, this is done by having
540 method execute() return a pointer to a child of the task. If the guarantee
541 cannot be made, use method recycle_as_safe_continuation instead.
543 Because of the hazard, this method may be deprecated in the future. */
544 void recycle_as_continuation() {
545 __TBB_ASSERT( prefix().state==executing, "execute not running?" );
546 prefix().state = allocated;
549 //! Recommended to use, safe variant of recycle_as_continuation
550 /** For safety, it requires additional increment of ref_count.
551 With no decendants and ref_count of 1, it has the semantics of recycle_to_reexecute. */
552 void recycle_as_safe_continuation() {
553 __TBB_ASSERT( prefix().state==executing, "execute not running?" );
554 prefix().state = recycle;
    //! Change this to be a child of new_parent.
    void recycle_as_child_of( task& new_parent ) {
        internal::task_prefix& p = prefix();
        __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
        __TBB_ASSERT( p.parent==NULL, "parent must be null" );
        __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
        __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
        // NOTE(review): a statement appears elided here (likely the state
        // transition, e.g. p.state = allocated;) — verify against the full header.
        p.parent = &new_parent;
#if __TBB_TASK_GROUP_CONTEXT
        // The recycled child joins its new parent's cancellation group.
        p.context = new_parent.prefix().context;
#endif /* __TBB_TASK_GROUP_CONTEXT */
        // NOTE(review): the method's closing brace is elided from this excerpt.
572 //! Schedule this for reexecution after current execute() returns.
573 /** Made obsolete by recycle_as_safe_continuation; may become deprecated. */
574 void recycle_to_reexecute() {
575 __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
576 __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
577 prefix().state = reexecute;
580 // All depth-related methods are obsolete, and are retained for the sake
581 // of backward source compatibility only
582 intptr_t depth() const {return 0;}
583 void set_depth( intptr_t ) {}
584 void add_to_depth( int ) {}
587 //------------------------------------------------------------------------
588 // Spawning and blocking
589 //------------------------------------------------------------------------
591 //! Set reference count
592 void set_ref_count( int count ) {
593 #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
594 internal_set_ref_count(count);
596 prefix().ref_count = count;
597 #endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
600 //! Atomically increment reference count.
601 /** Has acquire semantics */
602 void increment_ref_count() {
603 __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
606 //! Atomically decrement reference count.
607 /** Has release semantics. */
608 int decrement_ref_count() {
609 #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
610 return int(internal_decrement_ref_count());
612 return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
613 #endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
616 //! Define recommended static forms via import from base class.
617 using task_base::spawn;
619 //! Similar to spawn followed by wait_for_all, but more efficient.
620 void spawn_and_wait_for_all( task& child ) {
621 prefix().owner->wait_for_all( *this, &child );
624 //! Similar to spawn followed by wait_for_all, but more efficient.
625 void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );
627 //! Spawn task allocated by allocate_root, wait for it to complete, and deallocate it.
628 static void spawn_root_and_wait( task& root ) {
629 root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
632 //! Spawn root tasks on list and wait for all of them to finish.
633 /** If there are more tasks than worker threads, the tasks are spawned in
634 order of front to back. */
635 static void spawn_root_and_wait( task_list& root_list );
637 //! Wait for reference count to become one, and set reference count to zero.
638 /** Works on tasks while waiting. */
639 void wait_for_all() {
640 prefix().owner->wait_for_all( *this, NULL );
643 #if __TBB_ARENA_PER_MASTER
644 //! Enqueue task for starvation-resistant execution.
645 static void enqueue( task& t ) {
646 t.prefix().owner->enqueue( t, NULL );
649 #endif /* __TBB_ARENA_PER_MASTER */
650 //! The innermost task being executed or destroyed by the current thread at the moment.
651 static task& __TBB_EXPORTED_FUNC self();
653 //! task on whose behalf this task is working, or NULL if this is a root.
654 task* parent() const {return prefix().parent;}
656 #if __TBB_TASK_GROUP_CONTEXT
657 //! Shared context that is used to communicate asynchronous state changes
658 task_group_context* context() {return prefix().context;}
659 #endif /* __TBB_TASK_GROUP_CONTEXT */
661 //! True if task was stolen from the task pool of another thread.
662 bool is_stolen_task() const {
663 return (prefix().extra_state & 0x80)!=0;
666 //------------------------------------------------------------------------
668 //------------------------------------------------------------------------
670 //! Current execution state
671 state_type state() const {return state_type(prefix().state);}
673 //! The internal reference count.
674 int ref_count() const {
676 internal::reference_count ref_count_ = prefix().ref_count;
677 __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error");
679 return int(prefix().ref_count);
682 //! Obsolete, and only retained for the sake of backward compatibility. Always returns true.
683 bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;
    //------------------------------------------------------------------------
    // Affinity
    //------------------------------------------------------------------------

    //! An id as used for specifying affinity.
    /** Guaranteed to be integral type. Value of 0 means no affinity. */
    typedef internal::affinity_id affinity_id;

    //! Set affinity for this task.
    void set_affinity( affinity_id id ) {prefix().affinity = id;}

    //! Current affinity of this task
    affinity_id affinity() const {return prefix().affinity;}

    //! Invoked by scheduler to notify task that it ran on unexpected thread.
    /** Invoked before method execute() runs, if task is stolen, or task has
        affinity but will be executed on another thread.

        The default action does nothing. */
    virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );

#if __TBB_TASK_GROUP_CONTEXT
    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** \return false if cancellation has already been requested, true otherwise. **/
    bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }

    //! Returns true if the context received cancellation request.
    bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
#endif /* __TBB_TASK_GROUP_CONTEXT */
716 friend class interface5::internal::task_base;
717 friend class task_list;
718 friend class internal::scheduler;
719 friend class internal::allocate_root_proxy;
720 #if __TBB_TASK_GROUP_CONTEXT
721 friend class internal::allocate_root_with_context_proxy;
722 #endif /* __TBB_TASK_GROUP_CONTEXT */
723 friend class internal::allocate_continuation_proxy;
724 friend class internal::allocate_child_proxy;
725 friend class internal::allocate_additional_child_of_proxy;
727 //! Get reference to corresponding task_prefix.
728 /** Version tag prevents loader on Linux from using the wrong symbol in debug builds. **/
729 internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
730 return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
//! task that does nothing. Useful for synchronization.
/** @ingroup task_scheduling */
class empty_task: public task {
    /*override*/ task* execute() {
    // NOTE(review): the body of execute() (presumably returning NULL — verify
    // against the full header) and the class's closing brace are elided here.
//! A list of children.
/** Used for method task::spawn_children
    @ingroup task_scheduling */
class task_list: internal::no_copy {
    // NOTE(review): the data member declarations (judging by their uses below,
    // a task* head `first` and a task** tail pointer `next_ptr`), an access
    // specifier, and likely a `friend class task;` are elided from this excerpt.
    friend class interface5::internal::task_base;

    //! Construct empty list
    task_list() : first(NULL), next_ptr(&first) {}

    //! Destroys the list, but does not destroy the task objects.
    // NOTE(review): the destructor's declaration itself is elided here.

    //! True if list if empty; false otherwise.
    bool empty() const {return !first;}

    //! Push task onto back of list.
    void push_back( task& task ) {
        task.prefix().next = NULL;
        // NOTE(review): a statement appears elided here (the new task must be
        // linked into the tail, e.g. *next_ptr = &task;) — verify against the
        // full header.
        next_ptr = &task.prefix().next;

    //! Pop the front task from the list.
    // NOTE(review): pop_front's signature line is elided; only its body remains.
        __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
        task* result = first;
        first = result->prefix().next;
        // Popping the last element must reset the tail pointer to the head slot.
        if( !first ) next_ptr = &first;
        // NOTE(review): the return statement and closing braces are elided.
784 inline void interface5::internal::task_base::spawn( task& t ) {
785 t.prefix().owner->spawn( t, t.prefix().next );
//! Spawn every task on the list (the scheduler consumes the chain first..*next_ptr).
inline void interface5::internal::task_base::spawn( task_list& list ) {
    if( task* t = list.first ) {
        t->prefix().owner->spawn( *t, *list.next_ptr );
        // NOTE(review): the tail of this function is elided from this excerpt
        // (per the "Spawn multiple tasks and clear list" contract it should
        // also clear the list; closing braces are missing too).

//! Spawn every task on the list as a root and wait for all of them to finish.
inline void task::spawn_root_and_wait( task_list& root_list ) {
    if( task* t = root_list.first ) {
        t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
        // NOTE(review): the function's tail (list clearing and closing braces)
        // is elided from this excerpt.
804 inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
805 return &tbb::internal::allocate_root_proxy::allocate(bytes);
808 inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
809 tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
812 #if __TBB_TASK_GROUP_CONTEXT
813 inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
814 return &p.allocate(bytes);
817 inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
818 p.free( *static_cast<tbb::task*>(task) );
820 #endif /* __TBB_TASK_GROUP_CONTEXT */
822 inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
823 return &p.allocate(bytes);
826 inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
827 p.free( *static_cast<tbb::task*>(task) );
830 inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
831 return &p.allocate(bytes);
834 inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
835 p.free( *static_cast<tbb::task*>(task) );
838 inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
839 return &p.allocate(bytes);
842 inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
843 p.free( *static_cast<tbb::task*>(task) );
846 #endif /* __TBB_task_H */