/*
    Copyright 2005-2011 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    As a special exception, you may use this file as part of a free software
    library without restriction.  Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License.  This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/
29 #ifndef __TBB_task_group_H
30 #define __TBB_task_group_H
33 #include "tbb_exception.h"
35 #if __TBB_TASK_GROUP_CONTEXT
40 template<typename F> class task_handle_task;
44 class task_handle : internal::no_assign {
45 template<typename _F> friend class internal::task_handle_task;
47 static const intptr_t scheduled = 0x1;
52 void mark_scheduled () {
53 // The check here is intentionally lax to avoid the impact of interlocked operation
54 if ( my_state & scheduled )
55 internal::throw_exception( internal::eid_invalid_multiple_scheduling );
56 my_state |= scheduled;
59 task_handle( const F& f ) : my_func(f), my_state(0) {}
61 void operator() () const { my_func(); }
64 enum task_group_status {
72 // Suppress gratuitous warnings from icc 11.0 when lambda expressions are used in instances of function_task.
73 //#pragma warning(disable: 588)
76 class function_task : public task {
78 /*override*/ task* execute() {
83 function_task( const F& f ) : my_func(f) {}
87 class task_handle_task : public task {
88 task_handle<F>& my_handle;
89 /*override*/ task* execute() {
94 task_handle_task( task_handle<F>& h ) : my_handle(h) { h.mark_scheduled(); }
97 class task_group_base : internal::no_copy {
100 task_group_context my_context;
102 task& owner () { return *my_root; }
105 task_group_status internal_run_and_wait( F& f ) {
107 if ( !my_context.is_group_execution_cancelled() )
109 } __TBB_CATCH( ... ) {
110 my_context.register_pending_exception();
115 template<typename F, typename Task>
116 void internal_run( F& f ) {
117 owner().spawn( *new( owner().allocate_additional_child_of(*my_root) ) Task(f) );
121 task_group_base( uintptr_t traits = 0 )
122 : my_context(task_group_context::bound, task_group_context::default_traits | traits)
124 my_root = new( task::allocate_root(my_context) ) empty_task;
125 my_root->set_ref_count(1);
129 if( my_root->ref_count() > 1 ) {
130 bool stack_unwinding_in_progress = std::uncaught_exception();
131 // Always attempt to do proper cleanup to avoid inevitable memory corruption
132 // in case of missing wait (for the sake of better testability & debuggability)
133 if ( !is_canceling() )
136 my_root->wait_for_all();
137 } __TBB_CATCH (...) {
138 task::destroy(*my_root);
141 task::destroy(*my_root);
142 if ( !stack_unwinding_in_progress )
143 internal::throw_exception( internal::eid_missing_wait );
146 task::destroy(*my_root);
151 void run( task_handle<F>& h ) {
152 internal_run< task_handle<F>, internal::task_handle_task<F> >( h );
155 task_group_status wait() {
157 my_root->wait_for_all();
158 } __TBB_CATCH( ... ) {
162 if ( my_context.is_group_execution_cancelled() ) {
169 bool is_canceling() {
170 return my_context.is_group_execution_cancelled();
174 my_context.cancel_group_execution();
176 }; // class task_group_base
178 } // namespace internal
180 class task_group : public internal::task_group_base {
182 task_group () : task_group_base( task_group_context::concurrent_wait ) {}
185 ~task_group() __TBB_TRY {
186 __TBB_ASSERT( my_root->ref_count() != 0, NULL );
187 if( my_root->ref_count() > 1 )
188 my_root->wait_for_all();
190 #if TBB_USE_EXCEPTIONS
192 // Have to destroy my_root here as the base class destructor won't be called
193 task::destroy(*my_root);
196 #endif /* TBB_USE_EXCEPTIONS */
197 #endif /* TBB_DEPRECATED */
201 void run( task_handle<F>& h ) {
202 internal_run< task_handle<F>, internal::task_handle_task<F> >( h );
205 using task_group_base::run;
209 void run( const F& f ) {
210 internal_run< const F, internal::function_task<F> >( f );
214 task_group_status run_and_wait( const F& f ) {
215 return internal_run_and_wait<const F>( f );
219 task_group_status run_and_wait( task_handle<F>& h ) {
220 return internal_run_and_wait< task_handle<F> >( h );
222 }; // class task_group
224 class structured_task_group : public internal::task_group_base {
227 task_group_status run_and_wait ( task_handle<F>& h ) {
228 return internal_run_and_wait< task_handle<F> >( h );
231 task_group_status wait() {
232 task_group_status res = task_group_base::wait();
233 my_root->set_ref_count(1);
236 }; // class structured_task_group
239 bool is_current_task_group_canceling() {
240 return task::self().is_cancelled();
244 task_handle<F> make_task( const F& f ) {
245 return task_handle<F>( f );
250 #endif /* __TBB_TASK_GROUP_CONTEXT */
252 #endif /* __TBB_task_group_H */