2 Copyright 2005-2011 Intel Corporation. All Rights Reserved.
4 This file is part of Threading Building Blocks.
6 Threading Building Blocks is free software; you can redistribute it
7 and/or modify it under the terms of the GNU General Public License
8 version 2 as published by the Free Software Foundation.
10 Threading Building Blocks is distributed in the hope that it will be
11 useful, but WITHOUT ANY WARRANTY; without even the implied warranty
12 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with Threading Building Blocks; if not, write to the Free Software
17 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 As a special exception, you may use this file as part of a free software
20 library without restriction. Specifically, if other files instantiate
21 templates or use macros or inline functions from this file, or you compile
22 this file and link it with other files to produce an executable, this
23 file does not by itself cause the resulting executable to be covered by
24 the GNU General Public License. This exception does not however
25 invalidate any other reasons why the executable file might be covered by
26 the GNU General Public License.
29 #ifndef __TBB_task_group_H
30 #define __TBB_task_group_H
33 #include "tbb_exception.h"
38 template<typename F> class task_handle_task;
// task_handle<F>: wraps a copy of a callable F so it can be scheduled in a
// task_group / structured_task_group at most once. no_assign disables
// assignment. (NOTE(review): this dump is elided — access-specifier lines,
// the my_func member declaration, and the closing brace are missing.)
42 class task_handle : internal::no_assign {
43 template<typename _F> friend class internal::task_handle_task;
// Bit flag OR-ed into my_state once the handle has been scheduled.
45 static const intptr_t scheduled = 0x1;
// Marks the handle as scheduled; throws eid_invalid_multiple_scheduling if
// it was already scheduled. The read-then-write below is plain (non-atomic)
// on purpose — see the comment — so a concurrent double-schedule race is
// only detected on a best-effort basis.
50 void mark_scheduled () {
51 // The check here is intentionally lax to avoid the impact of interlocked operation
52 if ( my_state & scheduled )
53 internal::throw_exception( internal::eid_invalid_multiple_scheduling );
54 my_state |= scheduled;
// Stores a copy of the callable; state starts as "not yet scheduled" (0).
57 task_handle( const F& f ) : my_func(f), my_state(0) {}
// Invokes the stored callable.
59 void operator() () const { my_func(); }
// Status reported by the wait operations of task_group and
// structured_task_group. (NOTE(review): the enumerators themselves —
// presumably not_complete / complete / canceled — are on elided lines;
// confirm against the full header.)
62 enum task_group_status {
70 // Suppress gratuitous warnings from icc 11.0 when lambda expressions are used in instances of function_task.
71 //#pragma warning(disable: 588)
// function_task<F>: a tbb::task that runs a private copy of a callable F.
// (NOTE(review): elided dump — the my_func member declaration, the body of
// execute(), and the closing brace are missing.)
74 class function_task : public task {
// Task entry point: invokes the stored callable; the returned continuation
// task (presumably NULL) is on an elided line.
76 /*override*/ task* execute() {
// Copies the callable into the task.
81 function_task( const F& f ) : my_func(f) {}
// task_handle_task<F>: a tbb::task that executes the callable held by a
// user-owned task_handle<F>. Holds the handle by reference — the handle must
// outlive the task. (NOTE(review): elided dump — the execute() body and the
// closing brace are missing.)
85 class task_handle_task : public task {
86 task_handle<F>& my_handle;
87 /*override*/ task* execute() {
// The constructor marks the handle "scheduled" eagerly, so a second attempt
// to schedule the same handle throws before any task runs.
92 task_handle_task( task_handle<F>& h ) : my_handle(h) { h.mark_scheduled(); }
// task_group_base: common implementation of task_group and
// structured_task_group. Owns a root empty_task whose reference count tracks
// outstanding children, plus the task_group_context they all run in.
// no_copy disables copying. (NOTE(review): elided dump — access specifiers,
// template headers, some statements, and several closing braces are missing.)
95 class task_group_base : internal::no_copy {
// Cancellation/exception-propagation context shared by every task spawned
// through this group.
98 task_group_context my_context;
// The root task; all spawned tasks are made additional children of it.
100 task& owner () { return *my_root; }
// Runs f synchronously on the calling thread (unless the group is already
// cancelled), then waits for the whole group. Exceptions escaping f are
// registered on the context rather than propagated directly; the mapping to
// a task_group_status happens on elided lines — confirm against full source.
103 task_group_status internal_run_and_wait( F& f ) {
105 if ( !my_context.is_group_execution_cancelled() )
107 } __TBB_CATCH( ... ) {
108 my_context.register_pending_exception();
// Allocates a Task wrapping f as an additional child of the root task and
// spawns it; Task is function_task<F> or task_handle_task<F>.
113 template<typename F, typename Task>
114 void internal_run( F& f ) {
115 owner().spawn( *new( owner().allocate_additional_child_of(*my_root) ) Task(f) );
// Creates a bound context (default traits OR-ed with the caller's extra
// traits) and the root task; ref_count 1 is the guard reference consumed by
// wait_for_all.
119 task_group_base( uintptr_t traits = 0 )
120 : my_context(task_group_context::bound, task_group_context::default_traits | traits)
122 my_root = new( task::allocate_root(my_context) ) empty_task;
123 my_root->set_ref_count(1);
// Destructor. ref_count > 1 means tasks are still outstanding, i.e. the user
// forgot to call wait().
127 if( my_root->ref_count() > 1 ) {
128 bool stack_unwinding_in_progress = std::uncaught_exception();
129 // Always attempt to do proper cleanup to avoid inevitable memory corruption
130 // in case of missing wait (for the sake of better testability & debuggability)
131 if ( !is_canceling() )
// (the cancel_group_execution() call preceding this wait is presumably on an
// elided line — confirm against full source)
134 my_root->wait_for_all();
135 } __TBB_CATCH (...) {
136 task::destroy(*my_root);
139 task::destroy(*my_root);
// Report the missing wait() — but never throw while another exception is
// already unwinding the stack (that would terminate()).
140 if ( !stack_unwinding_in_progress )
141 internal::throw_exception( internal::eid_missing_wait );
// Normal path: wait() already completed; just reclaim the root task.
144 task::destroy(*my_root);
// Spawns the task wrapped by handle h; throws via mark_scheduled() if h was
// already scheduled.
149 void run( task_handle<F>& h ) {
150 internal_run< task_handle<F>, internal::task_handle_task<F> >( h );
// Blocks until every task spawned through this group finishes or the group
// is cancelled; exception capture and the returned status are on elided
// lines.
153 task_group_status wait() {
155 my_root->wait_for_all();
156 } __TBB_CATCH( ... ) {
160 if ( my_context.is_group_execution_cancelled() ) {
// True if cancellation of this group has been requested.
167 bool is_canceling() {
168 return my_context.is_group_execution_cancelled();
// Requests cancellation of every task in the group.
172 my_context.cancel_group_execution();
174 }; // class task_group_base
176 } // namespace internal
// task_group: the user-facing group. Constructed with concurrent_wait
// context semantics, so run() calls may overlap with wait().
178 class task_group : public internal::task_group_base {
180 task_group () : task_group_base( task_group_context::concurrent_wait ) {}
// Legacy destructor: waits for outstanding tasks if the user skipped wait().
// Enclosed in conditional compilation — the matching #if TBB_DEPRECATED /
// TBB_USE_EXCEPTIONS openers are on elided lines; confirm against full
// source.
183 ~task_group() __TBB_TRY {
184 __TBB_ASSERT( my_root->ref_count() != 0, NULL );
185 if( my_root->ref_count() > 1 )
186 my_root->wait_for_all();
188 #if TBB_USE_EXCEPTIONS
190 // Have to destroy my_root here as the base class destructor won't be called
191 task::destroy(*my_root);
194 #endif /* TBB_USE_EXCEPTIONS */
195 #endif /* TBB_DEPRECATED */
// Schedules the callable wrapped in h as part of this group (throws if h
// was already scheduled).
199 void run( task_handle<F>& h ) {
200 internal_run< task_handle<F>, internal::task_handle_task<F> >( h );
// Keep the base-class run overload(s) visible alongside the ones below.
203 using task_group_base::run;
// Schedules a copy of f as part of this group.
207 void run( const F& f ) {
208 internal_run< const F, internal::function_task<F> >( f );
// Runs f on the calling thread, then waits for the whole group.
212 task_group_status run_and_wait( const F& f ) {
213 return internal_run_and_wait<const F>( f );
// Same, but for a prebuilt task_handle (throws if h was already scheduled).
217 task_group_status run_and_wait( task_handle<F>& h ) {
218 return internal_run_and_wait< task_handle<F> >( h );
220 }; // class task_group
// structured_task_group: restricted variant of task_group for structured
// (fork-join style) usage; work is submitted only via task_handle objects.
222 class structured_task_group : public internal::task_group_base {
// Runs the handle's callable on the calling thread, then waits for the
// group (throws if h was already scheduled).
225 task_group_status run_and_wait ( task_handle<F>& h ) {
226 return internal_run_and_wait< task_handle<F> >( h );
// Waits for the group, then restores the root's ref_count to 1 so this
// group object can be reused for another batch of tasks. (The `return res;`
// is presumably on an elided line.)
229 task_group_status wait() {
230 task_group_status res = task_group_base::wait();
231 my_root->set_ref_count(1);
234 }; // class structured_task_group
// Returns true if the group the calling task belongs to is being cancelled
// (queries the innermost currently executing task's cancellation state).
237 bool is_current_task_group_canceling() {
238 return task::self().is_cancelled();
// Convenience factory: wraps callable f in a task_handle<F>, deducing F.
// (The `template<typename F>` header is on an elided line.)
242 task_handle<F> make_task( const F& f ) {
243 return task_handle<F>( f );
248 #endif /* __TBB_task_group_H */