* Code changes in separate commit.
<HR>
<A HREF="../index.html">Up to parent directory</A>
<p></p>
-Copyright © 2005-2014 Intel Corporation. All Rights Reserved.
+Copyright © 2005-2015 Intel Corporation. All Rights Reserved.
<P></P>
Intel is a registered trademark or trademark of Intel Corporation
or its subsidiaries in the United States and other countries.
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
it uses tbb::tick_count::interval_t to specify the time duration. */
unique_lock(mutex_type& m, const tick_count::interval_t &i) : pm(&m) {owns = try_lock_for( i );}
+#if __TBB_CPP11_RVALUE_REF_PRESENT
+ //! Move constructor
+ /** postconditions: pm == src_p.pm and owns == src_p.owns (where src_p is the state of src just prior to this
+ construction), src.pm == 0 and src.owns == false. */
+ unique_lock(unique_lock && src): pm(NULL), owns(false) {this->swap(src);}
+
+ //! Move assignment
+ /** effects: If owns calls pm->unlock().
+ Postconditions: pm == src_p.pm and owns == src_p.owns (where src_p is the state of src just prior to this
+ assignment), src.pm == 0 and src.owns == false. */
+ unique_lock& operator=(unique_lock && src) {
+ if (owns)
+ this->unlock();
+ pm = NULL;
+ this->swap(src);
+ return *this;
+ }
+#endif // __TBB_CPP11_RVALUE_REF_PRESENT
+
//! Destructor
~unique_lock() { if( owns ) pm->unlock(); }
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#ifndef __TBB_thread_H
#define __TBB_thread_H
-#include "../tbb_thread.h"
+#include "../tbb_config.h"
#if TBB_IMPLEMENT_CPP0X
+#include "../tbb_thread.h"
+
namespace std {
typedef tbb::tbb_thread thread;
inline void sleep_for(const tbb::tick_count::interval_t& rel_time) {
tbb::internal::thread_sleep_v3( rel_time );
}
-
}
-}
+} // namespace std
+
+#else /* TBB_IMPLEMENT_CPP0X */
+
+#define __TBB_COMPAT_THREAD_RECURSION_PROTECTOR 1
+#include <thread>
+#undef __TBB_COMPAT_THREAD_RECURSION_PROTECTOR
#endif /* TBB_IMPLEMENT_CPP0X */
+#else /* __TBB_thread_H */
+
+#if __TBB_COMPAT_THREAD_RECURSION_PROTECTOR
+#error The tbb/compat/thread header attempts to include itself. \
+ Please make sure that {TBBROOT}/include/tbb/compat is NOT in include paths.
+#endif
+
#endif /* __TBB_thread_H */
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#include "atomic.h"
#include "tbb_exception.h"
#include "tbb_profiling.h"
-#include "internal/_concurrent_unordered_impl.h" // Need tbb_hasher
+#include "internal/_tbb_hash_compare_impl.h"
#if __TBB_INITIALIZER_LISTS_PRESENT
#include <initializer_list>
#endif
namespace tbb {
-//! hash_compare that is default argument for concurrent_hash_map
-template<typename Key>
-struct tbb_hash_compare {
- static size_t hash( const Key& a ) { return tbb_hasher(a); }
- static bool equal( const Key& a, const Key& b ) { return a == b; }
-};
-
namespace interface5 {
template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<Key, T> > >
node( const Key &key ) : item(key, T()) {}
node( const Key &key, const T &t ) : item(key, t) {}
#if __TBB_CPP11_RVALUE_REF_PRESENT
+ node( const Key &key, T &&t ) : item(key, std::move(t)) {}
node( value_type&& i ) : item(std::move(i)){}
+#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+ template<typename... Args>
+ node( Args&&... args ) : item(std::forward<Args>(args)...) {}
+#if __TBB_COPY_FROM_NON_CONST_REF_BROKEN
+ node( value_type& i ) : item(const_cast<const value_type&>(i)) {}
+#endif //__TBB_COPY_FROM_NON_CONST_REF_BROKEN
+#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
#endif //__TBB_CPP11_RVALUE_REF_PRESENT
node( const value_type& i ) : item(i) {}
return new( allocator ) node(key, *t);
}
+#if __TBB_CPP11_RVALUE_REF_PRESENT
+ static node* allocate_node_move_construct(node_allocator_type& allocator, const Key &key, const T * t){
+ return new( allocator ) node(key, std::move(*const_cast<T*>(t)));
+ }
+#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+ template<typename... Args>
+ static node* allocate_node_emplace_construct(node_allocator_type& allocator, Args&&... args){
+ return new( allocator ) node(std::forward<Args>(args)...);
+ }
+#endif //#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+#endif
+
static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){
return new( allocator ) node(key);
}
return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false, &allocate_node_copy_construct );
}
+#if __TBB_CPP11_RVALUE_REF_PRESENT
+ //! Insert item by copying if there is no such key present already and acquire a read lock on the item.
+ /** Returns true if item is new. */
+ bool insert( const_accessor &result, value_type && value ) {
+ return generic_move_insert(result, std::move(value));
+ }
+
+ //! Insert item by copying if there is no such key present already and acquire a write lock on the item.
+ /** Returns true if item is new. */
+ bool insert( accessor &result, value_type && value ) {
+ return generic_move_insert(result, std::move(value));
+ }
+
+ //! Insert item by copying if there is no such key present already
+ /** Returns true if item is inserted. */
+ bool insert( value_type && value ) {
+ return generic_move_insert(accessor_not_used(), std::move(value));
+ }
+
+#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+ //! Insert item by copying if there is no such key present already and acquire a read lock on the item.
+ /** Returns true if item is new. */
+ template<typename... Args>
+ bool emplace( const_accessor &result, Args&&... args ) {
+ return generic_emplace(result, std::forward<Args>(args)...);
+ }
+
+ //! Insert item by copying if there is no such key present already and acquire a write lock on the item.
+ /** Returns true if item is new. */
+ template<typename... Args>
+ bool emplace( accessor &result, Args&&... args ) {
+ return generic_emplace(result, std::forward<Args>(args)...);
+ }
+
+ //! Insert item by copying if there is no such key present already
+ /** Returns true if item is inserted. */
+ template<typename... Args>
+ bool emplace( Args&&... args ) {
+ return generic_emplace(accessor_not_used(), std::forward<Args>(args)...);
+ }
+#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+#endif //__TBB_CPP11_RVALUE_REF_PRESENT
+
//! Insert range [first, last)
template<typename I>
void insert( I first, I last ) {
protected:
//! Insert or find item and optionally acquire a lock on the item.
- bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key &, const T * ) ) ;
+ bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key &, const T * ), node *tmp_n = 0 ) ;
+
+ struct accessor_not_used { void release(){}};
+ friend const_accessor* accessor_location( accessor_not_used const& ){ return NULL;}
+ friend const_accessor* accessor_location( const_accessor & a ) { return &a;}
+
+ friend bool is_write_access_needed( accessor const& ) { return true;}
+ friend bool is_write_access_needed( const_accessor const& ) { return false;}
+ friend bool is_write_access_needed( accessor_not_used const& ) { return false;}
+
+#if __TBB_CPP11_RVALUE_REF_PRESENT
+ template<typename Accessor>
+ bool generic_move_insert( Accessor && result, value_type && value ) {
+ result.release();
+ return lookup(/*insert*/true, value.first, &value.second, accessor_location(result), is_write_access_needed(result), &allocate_node_move_construct );
+ }
+
+#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+ template<typename Accessor, typename... Args>
+ bool generic_emplace( Accessor && result, Args &&... args ) {
+ result.release();
+ node * node_ptr = allocate_node_emplace_construct(my_allocator, std::forward<Args>(args)...);
+ return lookup(/*insert*/true, node_ptr->item.first, NULL, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr );
+ }
+#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+#endif //__TBB_CPP11_RVALUE_REF_PRESENT
//! delete item by accessor
bool exclude( const_accessor &item_accessor );
};
template<typename Key, typename T, typename HashCompare, typename A>
-bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key&, const T*) ) {
+bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key&, const T*), node *tmp_n ) {
__TBB_ASSERT( !result || !result->my_node, NULL );
bool return_value;
hashcode_t const h = my_hash_compare.hash( key );
hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
segment_index_t grow_segment = 0;
- node *n, *tmp_n = 0;
+ node *n;
restart:
{//lock scope
__TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
//and 2 is the minimal index for which it's true
__TBB_ASSERT(element_index, "there should be no need to call "
"is_first_element_in_segment for 0th element" );
- return is_power_of_two_factor( element_index, 2 );
+ return is_power_of_two_at_least( element_index, 2 );
}
//! An operation on an n-element array starting at begin.
//! the first item
reference front() {
__TBB_ASSERT( size()>0, NULL);
- return (my_segment[0].template load<relaxed>().template pointer<T>())[0];
+ const segment_value_t& segment_value = my_segment[0].template load<relaxed>();
+ return (segment_value.template pointer<T>())[0];
}
//! the first item const
const_reference front() const {
__TBB_ASSERT( size()>0, NULL);
- return static_cast<const T*>(my_segment[0].array)[0];
+ const segment_value_t& segment_value = my_segment[0].template load<relaxed>();
+ return (segment_value.template pointer<const T>())[0];
}
//! the last item
reference back() {
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#ifndef __TBB_enumerable_thread_specific_H
#define __TBB_enumerable_thread_specific_H
+#include "atomic.h"
#include "concurrent_vector.h"
#include "tbb_thread.h"
#include "tbb_allocator.h"
-#include "tbb_profiling.h"
#include "cache_aligned_allocator.h"
#include "aligned_space.h"
+#include "internal/_template_helpers.h"
+#include "internal/_tbb_hash_compare_impl.h"
+#include "tbb_profiling.h"
#include <string.h> // for memcpy
#if _WIN32||_WIN64
#include <pthread.h>
#endif
+#define __TBB_ETS_USE_CPP11 \
+ (__TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT \
+ && __TBB_CPP11_DECLTYPE_PRESENT && __TBB_CPP11_LAMBDAS_PRESENT)
+
namespace tbb {
//! enum for selecting between single key and key-per-instance versions
namespace interface6 {
+ // Forward declaration to use in internal classes
+ template <typename T, typename Allocator, ets_key_usage_type ETS_key_type>
+ class enumerable_thread_specific;
+
//! @cond
namespace internal {
template<ets_key_usage_type ETS_key_type>
class ets_base: tbb::internal::no_copy {
protected:
-#if _WIN32||_WIN64
- typedef DWORD key_type;
-#else
- typedef pthread_t key_type;
-#endif
+ typedef tbb_thread::id key_type;
#if __TBB_PROTECTED_NESTED_CLASS_BROKEN
public:
#endif
struct slot {
key_type key;
void* ptr;
- bool empty() const {return !key;}
- bool match( key_type k ) const {return key==k;}
+ bool empty() const {return key == key_type();}
+ bool match( key_type k ) const {return key == k;}
bool claim( key_type k ) {
- __TBB_ASSERT(sizeof(tbb::atomic<key_type>)==sizeof(key_type), NULL);
- return tbb::internal::punned_cast<tbb::atomic<key_type>*>(&key)->compare_and_swap(k,0)==0;
+ // TODO: maybe claim ptr, because key_type is not guaranteed to fit into word size
+ return atomic_compare_and_swap(key, k, key_type()) == key_type();
}
};
#if __TBB_PROTECTED_NESTED_CLASS_BROKEN
protected:
#endif
- static key_type key_of_current_thread() {
- tbb::tbb_thread::id id = tbb::this_tbb_thread::get_id();
- key_type k;
- memcpy( &k, &id, sizeof(k) );
- return k;
- }
-
//! Root of linked list of arrays of decreasing size.
/** NULL if and only if my_count==0.
Each array in the list is half the size of its predecessor. */
size_t n = 1<<(a->lg_size);
free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) );
}
- static size_t hash( key_type k ) {
- // Multiplicative hashing. Client should use *upper* bits.
- // casts required for Mac gcc4.* compiler
- return uintptr_t(k)*tbb::internal::select_size_t_constant<0x9E3779B9,0x9E3779B97F4A7C15ULL>::value;
- }
ets_base() {my_root=NULL; my_count=0;}
- virtual ~ets_base(); // g++ complains if this is not virtual...
+ virtual ~ets_base(); // g++ complains if this is not virtual
void* table_lookup( bool& exists );
void table_clear();
- // table_find is used in copying ETS, so is not used in concurrent context. So
- // we don't need itt annotations for it.
- slot& table_find( key_type k ) {
- size_t h = hash(k);
- array* r = my_root;
- size_t mask = r->mask();
- for(size_t i = r->start(h);;i=(i+1)&mask) {
- slot& s = r->at(i);
- if( s.empty() || s.match(k) )
- return s;
- }
- }
- void table_reserve_for_copy( const ets_base& other ) {
+ // The following functions are not used in concurrent context,
+ // so we don't need synchronization and ITT annotations there.
+ void table_elementwise_copy( const ets_base& other,
+ void*(*add_element)(ets_base&, void*) ) {
__TBB_ASSERT(!my_root,NULL);
__TBB_ASSERT(!my_count,NULL);
- if( other.my_root ) {
- array* a = allocate(other.my_root->lg_size);
- a->next = NULL;
- my_root = a;
- my_count = other.my_count;
+ if( !other.my_root ) return;
+ array* root = my_root = allocate(other.my_root->lg_size);
+ root->next = NULL;
+ my_count = other.my_count;
+ size_t mask = root->mask();
+ for( array* r=other.my_root; r; r=r->next ) {
+ for( size_t i=0; i<r->size(); ++i ) {
+ slot& s1 = r->at(i);
+ if( !s1.empty() ) {
+ for( size_t j = root->start(tbb::tbb_hash<key_type>()(s1.key)); ; j=(j+1)&mask ) {
+ slot& s2 = root->at(j);
+ if( s2.empty() ) {
+ s2.ptr = add_element(*this, s1.ptr);
+ s2.key = s1.key;
+ break;
+ }
+ else if( s2.match(s1.key) )
+ break;
+ }
+ }
+ }
}
}
+ void table_swap( ets_base& other ) {
+ __TBB_ASSERT(this!=&other, "Don't swap an instance with itself");
+ tbb::internal::swap<relaxed>(my_root, other.my_root);
+ tbb::internal::swap<relaxed>(my_count, other.my_count);
+ }
};
template<ets_key_usage_type ETS_key_type>
template<ets_key_usage_type ETS_key_type>
void* ets_base<ETS_key_type>::table_lookup( bool& exists ) {
- const key_type k = key_of_current_thread();
+ const key_type k = tbb::this_tbb_thread::get_id();
- __TBB_ASSERT(k!=0,NULL);
+ __TBB_ASSERT(k != key_type(),NULL);
void* found;
- size_t h = hash(k);
+ size_t h = tbb::tbb_hash<key_type>()(k);
for( array* r=my_root; r; r=r->next ) {
call_itt_notify(acquired,r);
size_t mask=r->mask();
virtual void* create_local() = 0;
virtual void* create_array(size_t _size) = 0; // _size in bytes
virtual void free_array(void* ptr, size_t _size) = 0; // size in bytes
- public:
+ protected:
ets_base() {create_key();}
~ets_base() {destroy_key();}
void* table_lookup( bool& exists ) {
create_key();
super::table_clear();
}
+ void table_swap( ets_base& other ) {
+ using std::swap;
+ __TBB_ASSERT(this!=&other, "Don't swap an instance with itself");
+ swap(my_key, other.my_key);
+ super::table_swap(other);
+ }
};
//! Random access iterator for traversing the thread local copies.
mutable Value *my_value;
template<typename C, typename T>
- friend enumerable_thread_specific_iterator<C,T> operator+( ptrdiff_t offset,
- const enumerable_thread_specific_iterator<C,T>& v );
+ friend enumerable_thread_specific_iterator<C,T>
+ operator+( ptrdiff_t offset, const enumerable_thread_specific_iterator<C,T>& v );
template<typename C, typename T, typename U>
friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i,
const enumerable_thread_specific_iterator<C,U>& j );
template<typename C, typename T, typename U>
- friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i, const enumerable_thread_specific_iterator<C,U>& j );
+ friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i,
+ const enumerable_thread_specific_iterator<C,U>& j );
template<typename C, typename U>
friend class enumerable_thread_specific_iterator;
Value& operator*() const {
Value* value = my_value;
if( !value ) {
- value = my_value = reinterpret_cast<Value *>(&(*my_container)[my_index].value);
+ value = my_value = (*my_container)[my_index].value();
}
- __TBB_ASSERT( value==reinterpret_cast<Value *>(&(*my_container)[my_index].value), "corrupt cache" );
+ __TBB_ASSERT( value==(*my_container)[my_index].value(), "corrupt cache" );
return *value;
}
};
template<typename Container, typename T>
- enumerable_thread_specific_iterator<Container,T> operator+( ptrdiff_t offset,
- const enumerable_thread_specific_iterator<Container,T>& v ) {
+ enumerable_thread_specific_iterator<Container,T>
+ operator+( ptrdiff_t offset, const enumerable_thread_specific_iterator<Container,T>& v ) {
return enumerable_thread_specific_iterator<Container,T>( v.my_container, v.my_index + offset );
}
return !(i==j);
}
- template<typename T>
- struct destruct_only: tbb::internal::no_copy {
- tbb::aligned_space<T> value;
- ~destruct_only() {value.begin()[0].~T();}
- };
-
template<typename T>
struct construct_by_default: tbb::internal::no_assign {
void construct(void*where) {new(where) T();} // C++ note: the () in T() ensure zero initialization.
const T exemplar;
void construct(void*where) {new(where) T(exemplar);}
construct_by_exemplar( const T& t ) : exemplar(t) {}
+#if __TBB_ETS_USE_CPP11
+ construct_by_exemplar( T&& t ) : exemplar(std::move(t)) {}
+#endif
};
template<typename T, typename Finit>
Finit f;
void construct(void* where) {new(where) T(f());}
construct_by_finit( const Finit& f_ ) : f(f_) {}
+#if __TBB_ETS_USE_CPP11
+ construct_by_finit( Finit&& f_ ) : f(std::move(f_)) {}
+#endif
+ };
+
+#if __TBB_ETS_USE_CPP11
+ template<typename T, typename... P>
+ struct construct_by_args: tbb::internal::no_assign {
+ internal::stored_pack<P...> pack;
+ void construct(void* where) {
+ internal::call( [where](const typename strip<P>::type&... args ){
+ new(where) T(args...);
+ }, pack );
+ }
+ construct_by_args( P&& ... args ) : pack(std::forward<P>(args)...) {}
};
+#endif
// storage for initialization function pointer
+ // TODO: consider removing the template parameter T here and in callback_leaf
template<typename T>
class callback_base {
public:
// Clone *this
- virtual callback_base* clone() = 0;
+ virtual callback_base* clone() const = 0;
// Destruct and free *this
virtual void destroy() = 0;
// Need virtual destructor to satisfy GCC compiler warning
template <typename T, typename Constructor>
class callback_leaf: public callback_base<T>, Constructor {
+#if __TBB_ETS_USE_CPP11
+ template<typename... P> callback_leaf( P&& ... params ) : Constructor(std::forward<P>(params)...) {}
+#else
template<typename X> callback_leaf( const X& x ) : Constructor(x) {}
-
+#endif
+ // TODO: make the construction/destruction consistent (use allocator.construct/destroy)
typedef typename tbb::tbb_allocator<callback_leaf> my_allocator_type;
- /*override*/ callback_base<T>* clone() {
+ /*override*/ callback_base<T>* clone() const {
return make(*this);
}
Constructor::construct(where);
}
public:
+#if __TBB_ETS_USE_CPP11
+ template<typename... P>
+ static callback_base<T>* make( P&& ... params ) {
+ void* where = my_allocator_type().allocate(1);
+ return new(where) callback_leaf( std::forward<P>(params)... );
+ }
+#else
template<typename X>
static callback_base<T>* make( const X& x ) {
void* where = my_allocator_type().allocate(1);
return new(where) callback_leaf(x);
}
+#endif
};
- //! Template for adding padding in order to avoid false sharing
- /** ModularSize should be sizeof(U) modulo the cache line size.
- All maintenance of the space will be done explicitly on push_back,
+ //! Template for recording construction of objects in table
+ /** All maintenance of the space will be done explicitly on push_back,
and all thread local copies must be destroyed before the concurrent
vector is deleted.
+
+ The flag is_built is initialized to false. When the local is
+ successfully-constructed, set the flag to true or call value_committed().
+ If the constructor throws, the flag will be false.
*/
- template<typename U, size_t ModularSize>
+ // TODO: make a constructor for ets_element that takes a callback_base. make is_built private
+ template<typename U>
struct ets_element {
- ets_element() { /* avoid cl warning C4345 about default initialization of POD types */ }
- char value[ModularSize==0 ? sizeof(U) : sizeof(U)+(tbb::internal::NFS_MaxLineSize-ModularSize)];
- void unconstruct() {
- tbb::internal::punned_cast<U*>(&value)->~U();
+ tbb::aligned_space<U> my_space;
+ bool is_built;
+ ets_element() { is_built = false; } // not currently-built
+ U* value() { return my_space.begin(); }
+ U* value_committed() { is_built = true; return my_space.begin(); }
+ ~ets_element() {
+ if(is_built) {
+ my_space.begin()->~U();
+ is_built = false;
+ }
}
};
+ // A predicate that can be used for a compile-time compatibility check of ETS instances
+ // Ideally, it should have been declared inside the ETS class, but unfortunately
+ // in that case VS2013 does not enable the variadic constructor.
+ template<typename T, typename ETS> struct is_compatible_ets { static const bool value = false; };
+ template<typename T, typename U, typename A, ets_key_usage_type C>
+ struct is_compatible_ets< T, enumerable_thread_specific<U,A,C> > { static const bool value = internal::is_same_type<T,U>::value; };
+
+#if __TBB_ETS_USE_CPP11
+ // A predicate that checks whether, for a variable 'foo' of type T, foo() is a valid expression
+ template <typename T>
+ class is_callable_no_args {
+ private:
+ typedef char yes[1];
+ typedef char no [2];
+
+ template<typename U> static yes& decide( decltype(declval<U>()())* );
+ template<typename U> static no& decide(...);
+ public:
+ static const bool value = (sizeof(decide<T>(NULL)) == sizeof(yes));
+ };
+#endif
+
} // namespace internal
//! @endcond
@par combine and combine_each
- Both methods are defined for enumerable_thread_specific.
- - combine() requires the the type T have operator=() defined.
+ - combine() requires the type T have operator=() defined.
- neither method modifies the contents of the object (though there is no guarantee that the applied methods do not modify the object.)
- Both are evaluated in serial context (the methods are assumed to be non-benign.)
template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;
- typedef internal::ets_element<T,sizeof(T)%tbb::internal::NFS_MaxLineSize> padded_element;
+ typedef internal::padded< internal::ets_element<T> > padded_element;
//! A generic range, used to create range objects from the iterators
template<typename I>
internal_collection_type my_locals;
+ // TODO: consider unifying the callback mechanism for all create_local* methods below
+ // (likely non-compatible and requires interface version increase)
/*override*/ void* create_local() {
- void* lref = &*my_locals.grow_by(1);
- my_construct_callback->construct(lref);
- return lref;
+ padded_element& lref = *my_locals.grow_by(1);
+ my_construct_callback->construct(lref.value());
+ return lref.value_committed();
}
- void unconstruct_locals() {
- for(typename internal_collection_type::iterator cvi = my_locals.begin(); cvi != my_locals.end(); ++cvi) {
- cvi->unconstruct();
- }
+ static void* create_local_by_copy( internal::ets_base<ets_no_key>& base, void* p ) {
+ enumerable_thread_specific& ets = static_cast<enumerable_thread_specific&>(base);
+ padded_element& lref = *ets.my_locals.grow_by(1);
+ new(lref.value()) T(*static_cast<T*>(p));
+ return lref.value_committed();
}
+#if __TBB_ETS_USE_CPP11
+ static void* create_local_by_move( internal::ets_base<ets_no_key>& base, void* p ) {
+ enumerable_thread_specific& ets = static_cast<enumerable_thread_specific&>(base);
+ padded_element& lref = *ets.my_locals.grow_by(1);
+ new(lref.value()) T(std::move(*static_cast<T*>(p)));
+ return lref.value_committed();
+ }
+#endif
+
typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type;
// _size is in bytes
){}
//! Constructor with initializer functor. Each local instance of T is constructed by T(finit()).
- template <typename Finit>
+ template <typename Finit
+#if __TBB_ETS_USE_CPP11
+ , typename = typename internal::enable_if<internal::is_callable_no_args<typename internal::strip<Finit>::type>::value>::type
+#endif
+ >
enumerable_thread_specific( Finit finit ) : my_construct_callback(
- internal::callback_leaf<T,internal::construct_by_finit<T,Finit> >::make( finit )
+ internal::callback_leaf<T,internal::construct_by_finit<T,Finit> >::make( tbb::internal::move(finit) )
){}
//! Constructor with exemplar. Each local instance of T is copy-constructed from the exemplar.
internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( exemplar )
){}
+#if __TBB_ETS_USE_CPP11
+ enumerable_thread_specific( T&& exemplar ) : my_construct_callback(
+ internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( std::move(exemplar) )
+ ){}
+
+ //! Variadic constructor with initializer arguments. Each local instance of T is constructed by T(args...)
+ template <typename P1, typename... P,
+ typename = typename internal::enable_if<!internal::is_callable_no_args<typename internal::strip<P1>::type>::value
+ && !internal::is_compatible_ets<T, typename internal::strip<P1>::type>::value
+ && !internal::is_same_type<T, typename internal::strip<P1>::type>::value
+ >::type>
+ enumerable_thread_specific( P1&& arg1, P&& ... args ) : my_construct_callback(
+ internal::callback_leaf<T,internal::construct_by_args<T,P1,P...> >::make( std::forward<P1>(arg1), std::forward<P>(args)... )
+ ){}
+#endif
+
//! Destructor
~enumerable_thread_specific() {
- my_construct_callback->destroy();
- this->clear(); // deallocation before the derived class is finished destructing
- // So free(array *) is still accessible
+ if(my_construct_callback) my_construct_callback->destroy();
+ // Deallocate the hash table before overridden free_array() becomes inaccessible
+ this->internal::ets_base<ets_no_key>::table_clear();
}
//! returns reference to local, discarding exists
//! Destroys local copies
void clear() {
- unconstruct_locals();
my_locals.clear();
this->table_clear();
// callback is not destroyed
- // exemplar is not destroyed
}
private:
template<typename A2, ets_key_usage_type C2>
- void internal_copy( const enumerable_thread_specific<T, A2, C2>& other);
+ void internal_copy(const enumerable_thread_specific<T, A2, C2>& other) {
+#if __TBB_ETS_USE_CPP11 && TBB_USE_ASSERT
+ // this tests is_compatible_ets
+ __TBB_STATIC_ASSERT( (internal::is_compatible_ets<T, typename internal::strip<decltype(other)>::type>::value), "is_compatible_ets fails" );
+#endif
+ // Initialize my_construct_callback first, so that it is valid even if rest of this routine throws an exception.
+ my_construct_callback = other.my_construct_callback->clone();
+ __TBB_ASSERT(my_locals.size()==0,NULL);
+ my_locals.reserve(other.size());
+ this->table_elementwise_copy( other, create_local_by_copy );
+ }
+
+ void internal_swap(enumerable_thread_specific& other) {
+ using std::swap;
+ __TBB_ASSERT( this!=&other, NULL );
+ swap(my_construct_callback, other.my_construct_callback);
+ // concurrent_vector::swap() preserves storage space,
+ // so addresses to the vector kept in ETS hash table remain valid.
+ swap(my_locals, other.my_locals);
+ this->internal::ets_base<ETS_key_type>::table_swap(other);
+ }
+
+#if __TBB_ETS_USE_CPP11
+ template<typename A2, ets_key_usage_type C2>
+ void internal_move(enumerable_thread_specific<T, A2, C2>&& other) {
+#if TBB_USE_ASSERT
+ // this tests is_compatible_ets
+ __TBB_STATIC_ASSERT( (internal::is_compatible_ets<T, typename internal::strip<decltype(other)>::type>::value), "is_compatible_ets fails" );
+#endif
+ my_construct_callback = other.my_construct_callback;
+ other.my_construct_callback = NULL;
+ __TBB_ASSERT(my_locals.size()==0,NULL);
+ my_locals.reserve(other.size());
+ this->table_elementwise_copy( other, create_local_by_move );
+ }
+#endif
public:
- template<typename Alloc, ets_key_usage_type Cachetype>
- enumerable_thread_specific( const enumerable_thread_specific<T, Alloc, Cachetype>& other ) : internal::ets_base<ETS_key_type> ()
+ enumerable_thread_specific( const enumerable_thread_specific& other )
+ : internal::ets_base<ETS_key_type>() /* prevents GCC warnings with -Wextra */
{
internal_copy(other);
}
- enumerable_thread_specific( const enumerable_thread_specific& other ) : internal::ets_base<ETS_key_type> ()
+ template<typename Alloc, ets_key_usage_type Cachetype>
+ enumerable_thread_specific( const enumerable_thread_specific<T, Alloc, Cachetype>& other )
{
internal_copy(other);
}
- private:
+#if __TBB_ETS_USE_CPP11
+ enumerable_thread_specific( enumerable_thread_specific&& other ) : my_construct_callback()
+ {
+ internal_swap(other);
+ }
- template<typename A2, ets_key_usage_type C2>
- enumerable_thread_specific &
- internal_assign(const enumerable_thread_specific<T, A2, C2>& other) {
- if(static_cast<void *>( this ) != static_cast<const void *>( &other )) {
+ template<typename Alloc, ets_key_usage_type Cachetype>
+ enumerable_thread_specific( enumerable_thread_specific<T, Alloc, Cachetype>&& other ) : my_construct_callback()
+ {
+ internal_move(std::move(other));
+ }
+#endif
+
+ enumerable_thread_specific& operator=( const enumerable_thread_specific& other )
+ {
+ if( this != &other ) {
this->clear();
my_construct_callback->destroy();
- my_construct_callback = 0;
internal_copy( other );
}
return *this;
}
- public:
+ template<typename Alloc, ets_key_usage_type Cachetype>
+ enumerable_thread_specific& operator=( const enumerable_thread_specific<T, Alloc, Cachetype>& other )
+ {
+ __TBB_ASSERT( static_cast<void*>(this)!=static_cast<const void*>(&other), NULL ); // Objects of different types
+ this->clear();
+ my_construct_callback->destroy();
+ internal_copy(other);
+ return *this;
+ }
- // assignment
- enumerable_thread_specific& operator=(const enumerable_thread_specific& other) {
- return internal_assign(other);
+#if __TBB_ETS_USE_CPP11
+ enumerable_thread_specific& operator=( enumerable_thread_specific&& other )
+ {
+ if( this != &other )
+ internal_swap(other);
+ return *this;
}
template<typename Alloc, ets_key_usage_type Cachetype>
- enumerable_thread_specific& operator=(const enumerable_thread_specific<T, Alloc, Cachetype>& other)
+ enumerable_thread_specific& operator=( enumerable_thread_specific<T, Alloc, Cachetype>&& other )
{
- return internal_assign(other);
+ __TBB_ASSERT( static_cast<void*>(this)!=static_cast<const void*>(&other), NULL ); // Objects of different types
+ this->clear();
+ my_construct_callback->destroy();
+ internal_move(std::move(other));
+ return *this;
}
+#endif
// combine_func_t has signature T(T,T) or T(const T&, const T&)
template <typename combine_func_t>
T combine(combine_func_t f_combine) {
if(begin() == end()) {
- internal::destruct_only<T> location;
- my_construct_callback->construct(location.value.begin());
- return *location.value.begin();
+ internal::ets_element<T> location;
+ my_construct_callback->construct(location.value());
+ return *location.value_committed();
}
const_iterator ci = begin();
T my_result = *ci;
return my_result;
}
- // combine_func_t has signature void(T) or void(const T&)
+ // combine_func_t takes T by value or by [const] reference, and returns nothing
template <typename combine_func_t>
void combine_each(combine_func_t f_combine) {
- for(const_iterator ci = begin(); ci != end(); ++ci) {
+ for(iterator ci = begin(); ci != end(); ++ci) {
f_combine( *ci );
}
}
}; // enumerable_thread_specific
- template <typename T, typename Allocator, ets_key_usage_type ETS_key_type>
- template<typename A2, ets_key_usage_type C2>
- void enumerable_thread_specific<T,Allocator,ETS_key_type>::internal_copy( const enumerable_thread_specific<T, A2, C2>& other) {
- // Initialize my_construct_callback first, so that it is valid even if rest of this routine throws an exception.
- my_construct_callback = other.my_construct_callback->clone();
-
- typedef internal::ets_base<ets_no_key> base;
- __TBB_ASSERT(my_locals.size()==0,NULL);
- this->table_reserve_for_copy( other );
- for( base::array* r=other.my_root; r; r=r->next ) {
- for( size_t i=0; i<r->size(); ++i ) {
- base::slot& s1 = r->at(i);
- if( !s1.empty() ) {
- base::slot& s2 = this->table_find(s1.key);
- if( s2.empty() ) {
- void* lref = &*my_locals.grow_by(1);
- s2.ptr = new(lref) T(*(T*)s1.ptr);
- s2.key = s1.key;
- } else {
- // Skip the duplicate
- }
- }
- }
- }
- }
-
template< typename Container >
class flattened2d {
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#include "task.h"
#include "cache_aligned_allocator.h"
#include "tbb_exception.h"
+#include "internal/_template_helpers.h"
#include "internal/_aggregator_impl.h"
#include "tbb_profiling.h"
//! An enumeration the provides the two most common concurrency levels: unlimited and serial
enum concurrency { unlimited = 0, serial = 1 };
-namespace interface7 {
+namespace interface8 {
namespace internal {
template<typename T, typename M> class successor_cache;
template<typename T, typename M> class broadcast_cache;
template<typename T, typename M> class round_robin_cache;
+
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ template< typename C> class edge_container;
+#endif
}
+// A generic null type
+struct null_type {};
+
//! An empty class used for messages that mean "I'm done"
class continue_msg {};
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
//! interface to record edges for traversal & deletion
+ typedef typename internal::edge_container<successor_type> built_successors_type;
+ typedef typename built_successors_type::edge_list_type successor_list_type;
+ virtual built_successors_type &built_successors() = 0;
virtual void internal_add_built_successor( successor_type & ) = 0;
virtual void internal_delete_built_successor( successor_type & ) = 0;
- virtual void copy_successors( std::vector<successor_type *> &) = 0;
+ virtual void copy_successors( successor_list_type &) = 0;
virtual size_t successor_count() = 0;
#endif
-};
+}; // class sender<T>
template< typename T > class limiter_node; // needed for resetting decrementer
template< typename R, typename B > class run_and_put_task;
static tbb::task * const SUCCESSFULLY_ENQUEUED = (task *)-1;
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
// flags to modify the behavior of the graph reset(). Can be combined.
enum reset_flags {
rf_reset_protocol = 0,
rf_reset_bodies = 1<<0, // delete the current node body, reset to a copy of the initial node body.
- rf_extract = 1<<1 // delete edges (extract() for single node, reset() for graph.)
+ rf_clear_edges = 1<<1 // delete edges
};
-#define __TBB_PFG_RESET_ARG(exp) exp
-#define __TBB_COMMA ,
-#else
-#define __TBB_PFG_RESET_ARG(exp) /* nothing */
-#define __TBB_COMMA /* nothing */
-#endif
-
// enqueue left task if necessary. Returns the non-enqueued task if there is one.
static inline tbb::task *combine_tasks( tbb::task * left, tbb::task * right) {
// if no RHS task, don't change left.
virtual bool remove_predecessor( predecessor_type & ) { return false; }
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ typedef typename internal::edge_container<predecessor_type> built_predecessors_type;
+ typedef typename built_predecessors_type::edge_list_type predecessor_list_type;
+ virtual built_predecessors_type &built_predecessors() = 0;
virtual void internal_add_built_predecessor( predecessor_type & ) = 0;
virtual void internal_delete_built_predecessor( predecessor_type & ) = 0;
- virtual void copy_predecessors( std::vector<predecessor_type *> & ) = 0;
+ virtual void copy_predecessors( predecessor_list_type & ) = 0;
virtual size_t predecessor_count() = 0;
#endif
protected:
//! put receiver back in initial state
template<typename U> friend class limiter_node;
- virtual void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f = rf_reset_protocol ) ) = 0;
+ virtual void reset_receiver(reset_flags f = rf_reset_protocol) = 0;
template<typename TT, typename M>
friend class internal::successor_cache;
virtual bool is_continue_receiver() { return false; }
-};
+}; // class receiver<T>
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
//* holder of edges both for caches and for those nodes which do not have predecessor caches.
// C == receiver< ... > or sender< ... >, depending.
+namespace internal {
template<typename C>
class edge_container {
public:
- typedef std::vector<C *> edge_vector;
+ typedef std::list<C *, tbb::tbb_allocator<C *> > edge_list_type;
void add_edge( C &s) {
built_edges.push_back( &s );
}
void delete_edge( C &s) {
- for ( typename edge_vector::iterator i = built_edges.begin(); i != built_edges.end(); ++i ) {
+ for ( typename edge_list_type::iterator i = built_edges.begin(); i != built_edges.end(); ++i ) {
if ( *i == &s ) {
(void)built_edges.erase(i);
return; // only remove one predecessor per request
}
}
- void copy_edges( edge_vector &v) {
+ void copy_edges( edge_list_type &v) {
v = built_edges;
}
built_edges.clear();
}
+ // methods remove the statement from all predecessors/successors listed in the edge
+ // container.
template< typename S > void sender_extract( S &s );
template< typename R > void receiver_extract( R &r );
private:
- edge_vector built_edges;
-};
+ edge_list_type built_edges;
+}; // class edge_container
+} // namespace internal
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
//! Base class for receivers of completion messages
}
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- typedef std::vector<predecessor_type *> predecessor_vector_type;
+ typedef internal::edge_container<predecessor_type> built_predecessors_type;
+ typedef built_predecessors_type::edge_list_type predecessor_list_type;
+ /*override*/ built_predecessors_type &built_predecessors() { return my_built_predecessors; }
/*override*/ void internal_add_built_predecessor( predecessor_type &s) {
spin_mutex::scoped_lock l(my_mutex);
my_built_predecessors.delete_edge(s);
}
- /*override*/ void copy_predecessors( predecessor_vector_type &v) {
+ /*override*/ void copy_predecessors( predecessor_list_type &v) {
spin_mutex::scoped_lock l(my_mutex);
my_built_predecessors.copy_edges(v);
}
spin_mutex::scoped_lock l(my_mutex);
return my_built_predecessors.edge_count();
}
+
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
protected:
}
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- edge_container<predecessor_type> my_built_predecessors;
+ // continue_receiver must contain its own built_predecessors because it does
+ // not have a node_cache.
+ built_predecessors_type my_built_predecessors;
#endif
spin_mutex my_mutex;
int my_predecessor_count;
// the friend declaration in the base class did not eliminate the "protected class"
// error in gcc 4.1.2
template<typename U> friend class limiter_node;
- /*override*/void reset_receiver( __TBB_PFG_RESET_ARG(reset_flags f) )
- {
+
+ /*override*/void reset_receiver( reset_flags f ) {
my_current_count = 0;
+ if(f & rf_clear_edges) {
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- if(f & rf_extract) {
- my_built_predecessors.receiver_extract(*this);
+ my_built_predecessors.clear();
+#endif
my_predecessor_count = my_initial_predecessor_count;
}
-#endif
}
//! Does whatever should happen when the threshold is reached
template<typename TT, typename M>
friend class internal::successor_cache;
/*override*/ bool is_continue_receiver() { return true; }
-};
-} // interface7
+
+}; // class continue_receiver
+} // interface8
} // flow
} // tbb
#include "internal/_flow_graph_trace_impl.h"
+#include "internal/_tbb_hash_compare_impl.h"
namespace tbb {
namespace flow {
-namespace interface7 {
+namespace interface8 {
-#include "internal/_flow_graph_types_impl.h"
#include "internal/_flow_graph_impl.h"
+#include "internal/_flow_graph_types_impl.h"
using namespace internal::graph_policy_namespace;
class graph;
//! Private initializing constructor for begin() and end() iterators
graph_iterator(GraphContainerType *g, bool begin);
void internal_forward();
-};
+}; // class graph_iterator
//! The graph class
/** This class serves as a handle to the graph */
Receiver &my_receiver;
Body my_body;
};
+ typedef std::list<task *> task_list_type;
public:
//! Constructs a graph with isolated task_group_context
my_root_task = ( new ( task::allocate_root(*my_context) ) empty_task );
my_root_task->set_ref_count(1);
tbb::internal::fgt_graph( this );
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
my_is_active = true;
-#endif
}
//! Constructs a graph with use_this_context as context
my_root_task = ( new ( task::allocate_root(*my_context) ) empty_task );
my_root_task->set_ref_count(1);
tbb::internal::fgt_graph( this );
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
my_is_active = true;
-#endif
}
//! Destroys the graph.
that need to block a wait_for_all() on the graph. For example a one-off source. */
template< typename Receiver, typename Body >
void run( Receiver &r, Body body ) {
- FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *my_root_task ) )
+ if(is_active()) {
+ FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *root_task() ) )
run_and_put_task< Receiver, Body >( r, body )) );
+ }
}
//! Spawns a task that runs a function object
that need to block a wait_for_all() on the graph. For example a one-off source. */
template< typename Body >
void run( Body body ) {
- FLOW_SPAWN( * new ( task::allocate_additional_child_of( *my_root_task ) ) run_task< Body >( body ) );
+ if(is_active()) {
+ FLOW_SPAWN( * new ( task::allocate_additional_child_of( *root_task() ) ) run_task< Body >( body ) );
+ }
}
//! Wait until graph is idle and decrement_wait_count calls equals increment_wait_count calls.
//! Returns the root task of the graph
task * root_task() {
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- if (!my_is_active)
- return NULL;
- else
-#endif
- return my_root_task;
+ return my_root_task;
}
-
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+
void set_active(bool a = true) {
my_is_active = a;
}
bool is_active() {
return my_is_active;
}
-#endif
+
+ void add_task_to_reset_list(task *tp) {
+ my_reset_task_list.push_back(tp);
+ }
// ITERATORS
template<typename C, typename N>
bool exception_thrown() { return caught_exception; }
// thread-unsafe state reset.
- void reset(__TBB_PFG_RESET_ARG(reset_flags f = rf_reset_protocol));
+ void reset(reset_flags f = rf_reset_protocol);
private:
task *my_root_task;
bool own_context;
bool cancelled;
bool caught_exception;
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
bool my_is_active;
-#endif
+ task_list_type my_reset_task_list;
graph_node *my_nodes, *my_nodes_last;
#endif
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- virtual void extract( reset_flags f=rf_extract ) {
- bool a = my_graph.is_active();
- my_graph.set_active(false);
- reset((reset_flags)(f|rf_extract));
- my_graph.set_active(a);
- }
+ virtual void extract( ) = 0;
#endif
protected:
- virtual void reset(__TBB_PFG_RESET_ARG(reset_flags f=rf_reset_protocol)) = 0;
-};
+ // performs the reset on an individual node.
+ virtual void reset_node(reset_flags f=rf_reset_protocol) = 0;
+}; // class graph_node
inline void graph::register_node(graph_node *n) {
n->next = NULL;
n->prev = n->next = NULL;
}
-inline void graph::reset( __TBB_PFG_RESET_ARG( reset_flags f )) {
+inline void graph::reset( reset_flags f ) {
// reset context
- task *saved_my_root_task = my_root_task;
- my_root_task = NULL;
+ set_active(false);
if(my_context) my_context->reset();
cancelled = false;
caught_exception = false;
// reset all the nodes comprising the graph
for(iterator ii = begin(); ii != end(); ++ii) {
graph_node *my_p = &(*ii);
- my_p->reset(__TBB_PFG_RESET_ARG(f));
+ my_p->reset_node(f);
+ }
+ set_active(true);
+ // now spawn the tasks necessary to start the graph
+ for(task_list_type::iterator rti = my_reset_task_list.begin(); rti != my_reset_task_list.end(); ++rti) {
+ FLOW_SPAWN(*(*rti));
}
- my_root_task = saved_my_root_task;
+ my_reset_task_list.clear();
}
//! The type of successors of this node
typedef receiver< Output > successor_type;
+ //Source node has no input type
+ typedef null_type input_type;
+
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- typedef std::vector<successor_type *> successor_vector_type;
+ typedef typename sender<output_type>::built_successors_type built_successors_type;
+ typedef typename sender<output_type>::successor_list_type successor_list_type;
#endif
//! Constructor for a node with a successor
source_node( graph &g, Body body, bool is_active = true )
: graph_node(g), my_active(is_active), init_my_active(is_active),
my_body( new internal::source_body_leaf< output_type, Body>(body) ),
+ my_init_body( new internal::source_body_leaf< output_type, Body>(body) ),
my_reserved(false), my_has_cached_item(false)
{
my_successors.set_owner(this);
source_node( const source_node& src ) :
graph_node(src.my_graph), sender<Output>(),
my_active(src.init_my_active),
- init_my_active(src.init_my_active), my_body( src.my_body->clone() ),
+ init_my_active(src.init_my_active), my_body( src.my_init_body->clone() ), my_init_body(src.my_init_body->clone() ),
my_reserved(false), my_has_cached_item(false)
{
my_successors.set_owner(this);
}
//! The destructor
- ~source_node() { delete my_body; }
+ ~source_node() { delete my_body; delete my_init_body; }
#if TBB_PREVIEW_FLOW_GRAPH_TRACE
/* override */ void set_name( const char *name ) {
}
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+
+ /*override*/ built_successors_type &built_successors() { return my_successors.built_successors(); }
+
/*override*/void internal_add_built_successor( successor_type &r) {
spin_mutex::scoped_lock lock(my_mutex);
my_successors.internal_add_built_successor(r);
return my_successors.successor_count();
}
- /*override*/void copy_successors(successor_vector_type &v) {
+ /*override*/void copy_successors(successor_list_type &v) {
spin_mutex::scoped_lock l(my_mutex);
my_successors.copy_successors(v);
}
return dynamic_cast< internal::source_body_leaf<output_type, Body> & >(body_ref).get_body();
}
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ /*override*/void extract( ) {
+ my_successors.built_successors().sender_extract(*this); // removes "my_owner" == this from each successor
+ my_active = init_my_active;
+ my_reserved = false;
+ if(my_has_cached_item) my_has_cached_item = false;
+ }
+#endif
+
protected:
//! resets the source_node to its initial state
- void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {
+ /*override*/void reset_node( reset_flags f) {
my_active = init_my_active;
my_reserved =false;
if(my_has_cached_item) {
my_has_cached_item = false;
}
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- my_successors.reset(f);
- if(f & rf_reset_bodies) my_body->reset_body();
-#endif
+ if(f & rf_clear_edges) my_successors.clear();
+ if(f & rf_reset_bodies) {
+ internal::source_body<output_type> *tmp = my_init_body->clone();
+ delete my_body;
+ my_body = tmp;
+ }
+ if(my_active)
+ this->my_graph.add_task_to_reset_list(create_put_task());
}
private:
bool my_active;
bool init_my_active;
internal::source_body<output_type> *my_body;
+ internal::source_body<output_type> *my_init_body;
internal::broadcast_cache< output_type > my_successors;
bool my_reserved;
bool my_has_cached_item;
}
}
+ // when resetting, and if the source_node was created with my_active == true, then
+ // when we reset the node we must store a task to run the node, and spawn it only
+ // after the reset is complete and is_active() is again true. This is why we don't
+ // test for is_active() here.
+ task* create_put_task() {
+ return ( new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) )
+ internal:: source_task_bypass < source_node< output_type > >( *this ) );
+ }
+
//! Spawns a task that applies the body
/* override */ void spawn_put( ) {
- task* tp = this->my_graph.root_task();
- if(tp) {
- FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *tp ) )
- internal:: source_task_bypass < source_node< output_type > >( *this ) ) );
+ if(this->my_graph.is_active()) {
+ FLOW_SPAWN( *create_put_task());
}
}
try_release();
return last_task;
}
-}; // source_node
+}; // class source_node
+
+template<typename T>
+struct allocate_buffer {
+ static const bool value = false;
+};
+
+template<>
+struct allocate_buffer<queueing> {
+ static const bool value = true;
+};
//! Implements a function node that supports Input -> Output
-template < typename Input, typename Output = continue_msg, graph_buffer_policy = queueing, typename Allocator=cache_aligned_allocator<Input> >
+template < typename Input, typename Output = continue_msg, typename Policy = queueing, typename Allocator=cache_aligned_allocator<Input> >
class function_node : public graph_node, public internal::function_input<Input,Output,Allocator>, public internal::function_output<Output> {
-protected:
- using graph_node::my_graph;
public:
typedef Input input_type;
typedef Output output_type;
typedef sender< input_type > predecessor_type;
typedef receiver< output_type > successor_type;
typedef internal::function_input<input_type,output_type,Allocator> fInput_type;
+ typedef internal::function_input_queue<input_type, Allocator> input_queue_type;
typedef internal::function_output<output_type> fOutput_type;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- using typename internal::function_input<Input,Output,Allocator>::predecessor_vector_type;
- using typename internal::function_output<Output>::successor_vector_type;
+ using typename fInput_type::predecessor_list_type;
+ using typename fOutput_type::successor_list_type;
#endif
+ using fInput_type::my_predecessors;
//! Constructor
+ // input_queue_type is allocated here, but destroyed in the function_input_base.
+ // TODO: pass the graph_buffer_policy to the function_input_base so it can all
+ // be done in one place. This would be an interface-breaking change.
template< typename Body >
function_node( graph &g, size_t concurrency, Body body ) :
- graph_node(g), internal::function_input<input_type,output_type,Allocator>(g, concurrency, body) {
- tbb::internal::fgt_node_with_body( tbb::internal::FLOW_FUNCTION_NODE, &this->graph_node::my_graph, static_cast<receiver<input_type> *>(this),
- static_cast<sender<output_type> *>(this), this->my_body );
+ graph_node(g), fInput_type(g, concurrency, body, allocate_buffer<Policy>::value ?
+ new input_queue_type( ) : NULL ) {
+ tbb::internal::fgt_node_with_body( tbb::internal::FLOW_FUNCTION_NODE, &this->graph_node::my_graph,
+ static_cast<receiver<input_type> *>(this), static_cast<sender<output_type> *>(this), this->my_body );
}
//! Copy constructor
function_node( const function_node& src ) :
- graph_node(src.my_graph), internal::function_input<input_type,output_type,Allocator>( src ),
+ graph_node(src.graph_node::my_graph),
+ fInput_type(src, allocate_buffer<Policy>::value ? new input_queue_type : NULL),
fOutput_type() {
- tbb::internal::fgt_node_with_body( tbb::internal::FLOW_FUNCTION_NODE, &this->my_graph, static_cast<receiver<input_type> *>(this),
- static_cast<sender<output_type> *>(this), this->my_body );
+ tbb::internal::fgt_node_with_body( tbb::internal::FLOW_FUNCTION_NODE, &this->graph_node::my_graph,
+ static_cast<receiver<input_type> *>(this), static_cast<sender<output_type> *>(this), this->my_body );
}
#if TBB_PREVIEW_FLOW_GRAPH_TRACE
}
#endif
-protected:
- template< typename R, typename B > friend class run_and_put_task;
- template<typename X, typename Y> friend class internal::broadcast_cache;
- template<typename X, typename Y> friend class internal::round_robin_cache;
- using fInput_type::try_put_task;
-
- // override of graph_node's reset.
- /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) {
- fInput_type::reset_function_input(__TBB_PFG_RESET_ARG(f));
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- successors().reset(f);
- __TBB_ASSERT(!(f & rf_extract) || successors().empty(), "function_node successors not empty");
- __TBB_ASSERT(this->my_predecessors.empty(), "function_node predecessors not empty");
-#endif
- }
-
- /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; }
-};
-
-//! Implements a function node that supports Input -> Output
-template < typename Input, typename Output, typename Allocator >
-class function_node<Input,Output,queueing,Allocator> : public graph_node, public internal::function_input<Input,Output,Allocator>, public internal::function_output<Output> {
-protected:
- using graph_node::my_graph;
-public:
- typedef Input input_type;
- typedef Output output_type;
- typedef sender< input_type > predecessor_type;
- typedef receiver< output_type > successor_type;
- typedef internal::function_input<input_type,output_type,Allocator> fInput_type;
- typedef internal::function_input_queue<input_type, Allocator> queue_type;
- typedef internal::function_output<output_type> fOutput_type;
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- using typename internal::function_input<Input,Output,Allocator>::predecessor_vector_type;
- using typename internal::function_output<Output>::successor_vector_type;
-#endif
-
- //! Constructor
- template< typename Body >
- function_node( graph &g, size_t concurrency, Body body ) :
- graph_node(g), fInput_type( g, concurrency, body, new queue_type() ) {
- tbb::internal::fgt_node_with_body( tbb::internal::FLOW_FUNCTION_NODE, &this->graph_node::my_graph, static_cast<receiver<input_type> *>(this),
- static_cast<sender<output_type> *>(this), this->my_body );
- }
-
- //! Copy constructor
- function_node( const function_node& src ) :
- graph_node(src.graph_node::my_graph), fInput_type( src, new queue_type() ), fOutput_type() {
- tbb::internal::fgt_node_with_body( tbb::internal::FLOW_FUNCTION_NODE, &this->graph_node::my_graph, static_cast<receiver<input_type> *>(this),
- static_cast<sender<output_type> *>(this), this->my_body );
- }
-
-#if TBB_PREVIEW_FLOW_GRAPH_TRACE
- /* override */ void set_name( const char *name ) {
- tbb::internal::fgt_node_desc( this, name );
+ /*override*/void extract( ) {
+ my_predecessors.built_predecessors().receiver_extract(*this);
+ successors().built_successors().sender_extract(*this);
}
#endif
template<typename X, typename Y> friend class internal::round_robin_cache;
using fInput_type::try_put_task;
- /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {
- fInput_type::reset_function_input(__TBB_PFG_RESET_ARG(f));
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- successors().reset(f);
- __TBB_ASSERT(!(f & rf_extract) || successors().empty(), "function_node successors not empty");
- __TBB_ASSERT(!(f & rf_extract) || this->my_predecessors.empty(), "function_node predecessors not empty");
-#endif
+ /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; }
+ // override of graph_node's reset.
+ /*override*/void reset_node(reset_flags f) {
+ fInput_type::reset_function_input(f);
+ // TODO: use clear() instead.
+ if(f & rf_clear_edges) {
+ successors().clear();
+ my_predecessors.clear();
+ }
+ __TBB_ASSERT(!(f & rf_clear_edges) || successors().empty(), "function_node successors not empty");
+ __TBB_ASSERT(this->my_predecessors.empty(), "function_node predecessors not empty");
}
- /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; }
-};
+}; // class function_node
+
//! implements a function node that supports Input -> (set of outputs)
// Output is a tuple of output types.
-template < typename Input, typename Output, graph_buffer_policy = queueing, typename Allocator=cache_aligned_allocator<Input> >
+template < typename Input, typename Output, typename Policy = queueing, typename Allocator=cache_aligned_allocator<Input> >
class multifunction_node :
public graph_node,
public internal::multifunction_input
> {
protected:
using graph_node::my_graph;
-private:
static const int N = tbb::flow::tuple_size<Output>::value;
public:
typedef Input input_type;
+ typedef null_type output_type;
typedef typename internal::wrap_tuple_elements<N,internal::multifunction_output, Output>::type output_ports_type;
+ typedef internal::multifunction_input<input_type, output_ports_type, Allocator> fInput_type;
+ typedef internal::function_input_queue<input_type, Allocator> input_queue_type;
private:
typedef typename internal::multifunction_input<input_type, output_ports_type, Allocator> base_type;
- typedef typename internal::function_input_queue<input_type,Allocator> queue_type;
+ using fInput_type::my_predecessors;
public:
template<typename Body>
multifunction_node( graph &g, size_t concurrency, Body body ) :
- graph_node(g), base_type(g,concurrency, body) {
- tbb::internal::fgt_multioutput_node_with_body<Output,N>( tbb::internal::FLOW_MULTIFUNCTION_NODE,
- &this->graph_node::my_graph, static_cast<receiver<input_type> *>(this),
- this->output_ports(), this->my_body );
+ graph_node(g), base_type(g,concurrency, body, allocate_buffer<Policy>::value ? new input_queue_type : NULL) {
+ tbb::internal::fgt_multioutput_node_with_body<N>( tbb::internal::FLOW_MULTIFUNCTION_NODE,
+ &this->graph_node::my_graph, static_cast<receiver<input_type> *>(this),
+ this->output_ports(), this->my_body );
}
multifunction_node( const multifunction_node &other) :
- graph_node(other.graph_node::my_graph), base_type(other) {
- tbb::internal::fgt_multioutput_node_with_body<Output,N>( tbb::internal::FLOW_MULTIFUNCTION_NODE,
- &this->graph_node::my_graph, static_cast<receiver<input_type> *>(this),
- this->output_ports(), this->my_body );
+ graph_node(other.graph_node::my_graph), base_type(other, allocate_buffer<Policy>::value ? new input_queue_type : NULL) {
+ tbb::internal::fgt_multioutput_node_with_body<N>( tbb::internal::FLOW_MULTIFUNCTION_NODE,
+ &this->graph_node::my_graph, static_cast<receiver<input_type> *>(this),
+ this->output_ports(), this->my_body );
}
#if TBB_PREVIEW_FLOW_GRAPH_TRACE
}
#endif
- // all the guts are in multifunction_input...
-protected:
- /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) { base_type::reset(__TBB_PFG_RESET_ARG(f)); }
-}; // multifunction_node
-
-template < typename Input, typename Output, typename Allocator >
-class multifunction_node<Input,Output,queueing,Allocator> : public graph_node, public internal::multifunction_input<Input,
- typename internal::wrap_tuple_elements<tbb::flow::tuple_size<Output>::value, internal::multifunction_output, Output>::type, Allocator> {
-protected:
- using graph_node::my_graph;
- static const int N = tbb::flow::tuple_size<Output>::value;
-public:
- typedef Input input_type;
- typedef typename internal::wrap_tuple_elements<N, internal::multifunction_output, Output>::type output_ports_type;
-private:
- typedef typename internal::multifunction_input<input_type, output_ports_type, Allocator> base_type;
- typedef typename internal::function_input_queue<input_type,Allocator> queue_type;
-public:
- template<typename Body>
- multifunction_node( graph &g, size_t concurrency, Body body) :
- graph_node(g), base_type(g,concurrency, body, new queue_type()) {
- tbb::internal::fgt_multioutput_node_with_body<Output,N>( tbb::internal::FLOW_MULTIFUNCTION_NODE,
- &this->graph_node::my_graph, static_cast<receiver<input_type> *>(this),
- this->output_ports(), this->my_body );
- }
-
- multifunction_node( const multifunction_node &other) :
- graph_node(other.graph_node::my_graph), base_type(other, new queue_type()) {
- tbb::internal::fgt_multioutput_node_with_body<Output,N>( tbb::internal::FLOW_MULTIFUNCTION_NODE,
- &this->graph_node::my_graph, static_cast<receiver<input_type> *>(this),
- this->output_ports(), this->my_body );
- }
-
-#if TBB_PREVIEW_FLOW_GRAPH_TRACE
- /* override */ void set_name( const char *name ) {
- tbb::internal::fgt_multioutput_node_desc( this, name );
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ void extract( ) {
+ my_predecessors.built_predecessors().receiver_extract(*this);
+ base_type::extract();
}
#endif
-
// all the guts are in multifunction_input...
protected:
- /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) { base_type::reset(__TBB_PFG_RESET_ARG(f)); }
+ /*override*/void reset_node(reset_flags f) { base_type::reset(f); }
}; // multifunction_node
//! split_node: accepts a tuple as input, forwards each element of the tuple to its
typedef multifunction_node<TupleType,TupleType,rejecting,Allocator> base_type;
public:
typedef typename base_type::output_ports_type output_ports_type;
+ typedef typename base_type::output_type output_type;
private:
struct splitting_body {
void operator()(const TupleType& t, output_ports_type &p) {
typedef TupleType input_type;
typedef Allocator allocator_type;
split_node(graph &g) : base_type(g, unlimited, splitting_body()) {
- tbb::internal::fgt_multioutput_node<TupleType,N>( tbb::internal::FLOW_SPLIT_NODE, &this->graph_node::my_graph,
+ tbb::internal::fgt_multioutput_node<N>( tbb::internal::FLOW_SPLIT_NODE, &this->graph_node::my_graph,
static_cast<receiver<input_type> *>(this), this->output_ports() );
}
split_node( const split_node & other) : base_type(other) {
- tbb::internal::fgt_multioutput_node<TupleType,N>( tbb::internal::FLOW_SPLIT_NODE, &this->graph_node::my_graph,
+ tbb::internal::fgt_multioutput_node<N>( tbb::internal::FLOW_SPLIT_NODE, &this->graph_node::my_graph,
static_cast<receiver<input_type> *>(this), this->output_ports() );
}
}
#endif
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ /*override graph_node*/ void extract() {
+ fInput_type::my_built_predecessors.receiver_extract(*this);
+ successors().built_successors().sender_extract(*this);
+ }
+#endif
+
protected:
template< typename R, typename B > friend class run_and_put_task;
template<typename X, typename Y> friend class internal::broadcast_cache;
template<typename X, typename Y> friend class internal::round_robin_cache;
using fInput_type::try_put_task;
+ /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; }
- /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) {
- fInput_type::reset_receiver(__TBB_PFG_RESET_ARG(f));
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- successors().reset(f);
- __TBB_ASSERT(!(f & rf_extract) || successors().empty(), "continue_node not reset");
-#endif
+ /*override*/void reset_node(reset_flags f) {
+ fInput_type::reset_receiver(f);
+ if(f & rf_clear_edges)successors().clear();
+ __TBB_ASSERT(!(f & rf_clear_edges) || successors().empty(), "continue_node not reset");
}
- /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; }
}; // continue_node
template< typename T >
typedef sender< input_type > predecessor_type;
typedef receiver< output_type > successor_type;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- typedef std::vector<predecessor_type *> predecessor_vector_type;
- typedef std::vector<successor_type *> successor_vector_type;
+ typedef typename receiver<input_type>::built_predecessors_type built_predecessors_type;
+ typedef typename sender<output_type>::built_successors_type built_successors_type;
+ typedef typename receiver<input_type>::predecessor_list_type predecessor_list_type;
+ typedef typename sender<output_type>::successor_list_type successor_list_type;
#endif
overwrite_node(graph &g) : graph_node(g), my_buffer_is_valid(false) {
/* override */ bool register_successor( successor_type &s ) {
spin_mutex::scoped_lock l( my_mutex );
- task* tp = this->my_graph.root_task(); // just to test if we are resetting
- if (my_buffer_is_valid && tp) {
+ if (my_buffer_is_valid && this->my_graph.is_active()) {
// We have a valid value that must be forwarded immediately.
if ( s.try_put( my_buffer ) || !s.register_predecessor( *this ) ) {
// We add the successor: it accepted our put or it rejected it but won't let us become a predecessor
}
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ /*override*/built_predecessors_type &built_predecessors() { return my_built_predecessors; }
+ /*override*/built_successors_type &built_successors() { return my_successors.built_successors(); }
+
/*override*/void internal_add_built_successor( successor_type &s) {
spin_mutex::scoped_lock l( my_mutex );
my_successors.internal_add_built_successor(s);
return my_successors.successor_count();
}
- /*override*/ void copy_successors(successor_vector_type &v) {
+ /*override*/ void copy_successors(successor_list_type &v) {
spin_mutex::scoped_lock l( my_mutex );
my_successors.copy_successors(v);
}
return my_built_predecessors.edge_count();
}
- /*override*/void copy_predecessors(predecessor_vector_type &v) {
+ /*override*/void copy_predecessors(predecessor_list_type &v) {
spin_mutex::scoped_lock l( my_mutex );
my_built_predecessors.copy_edges(v);
}
+
+ /*override*/ void extract() {
+ my_buffer_is_valid = false;
+ built_successors().sender_extract(*this);
+ built_predecessors().receiver_extract(*this);
+ }
+
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
/* override */ bool try_get( input_type &v ) {
return rtask;
}
- /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {
- my_buffer_is_valid = false;
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- my_successors.reset(f);
- if (f&rf_extract) {
- my_built_predecessors.receiver_extract(*this);
- }
-#endif
- }
-
spin_mutex my_mutex;
internal::broadcast_cache< input_type, null_rw_mutex > my_successors;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- edge_container<sender<input_type> > my_built_predecessors;
+ internal::edge_container<predecessor_type> my_built_predecessors;
#endif
input_type my_buffer;
bool my_buffer_is_valid;
- /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) {}
+ /*override*/void reset_receiver(reset_flags /*f*/) {}
+
+ /*override*/void reset_node( reset_flags f) {
+ my_buffer_is_valid = false;
+ if (f&rf_clear_edges) {
+ my_successors.clear();
+ }
+ }
}; // overwrite_node
template< typename T >
typedef sender< input_type > predecessor_type;
typedef receiver< output_type > successor_type;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- typedef std::vector<predecessor_type *> predecessor_vector_type;
- typedef std::vector<successor_type *> successor_vector_type;
+ typedef typename receiver<input_type>::predecessor_list_type predecessor_list_type;
+ typedef typename sender<output_type>::successor_list_type successor_list_type;
#endif
private:
internal::broadcast_cache<input_type> my_successors;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- edge_container<predecessor_type> my_built_predecessors;
- spin_mutex pred_mutex;
+ internal::edge_container<predecessor_type> my_built_predecessors;
+ spin_mutex pred_mutex; // serialize accesses on edge_container
#endif
public:
}
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- /*override*/ void internal_add_built_successor(successor_type &r) {
+ typedef typename sender<T>::built_successors_type built_successors_type;
+
+ /*override sender*/ built_successors_type &built_successors() { return my_successors.built_successors(); }
+
+ /*override sender*/ void internal_add_built_successor(successor_type &r) {
my_successors.internal_add_built_successor(r);
}
- /*override*/ void internal_delete_built_successor(successor_type &r) {
+ /*override sender*/ void internal_delete_built_successor(successor_type &r) {
my_successors.internal_delete_built_successor(r);
}
- /*override*/ size_t successor_count() {
+ /*override sender*/ size_t successor_count() {
return my_successors.successor_count();
}
- /*override*/ void copy_successors(successor_vector_type &v) {
+ /*override*/ void copy_successors(successor_list_type &v) {
my_successors.copy_successors(v);
}
+ typedef typename receiver<T>::built_predecessors_type built_predecessors_type;
+
+ /*override receiver*/ built_predecessors_type &built_predecessors() { return my_built_predecessors; }
+
/*override*/ void internal_add_built_predecessor( predecessor_type &p) {
+ spin_mutex::scoped_lock l(pred_mutex);
my_built_predecessors.add_edge(p);
}
/*override*/ void internal_delete_built_predecessor( predecessor_type &p) {
+ spin_mutex::scoped_lock l(pred_mutex);
my_built_predecessors.delete_edge(p);
}
/*override*/ size_t predecessor_count() {
+ spin_mutex::scoped_lock l(pred_mutex);
return my_built_predecessors.edge_count();
}
- /*override*/ void copy_predecessors(predecessor_vector_type &v) {
+ /*override*/ void copy_predecessors(predecessor_list_type &v) {
+ spin_mutex::scoped_lock l(pred_mutex);
my_built_predecessors.copy_edges(v);
}
+
+ /*override graph_node*/ void extract() {
+ my_built_predecessors.receiver_extract(*this);
+ my_successors.built_successors().sender_extract(*this);
+ }
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
protected:
return new_task;
}
- /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) {
+ /*override*/void reset_receiver(reset_flags /*f*/) {}
+
+ /*override*/void reset_node(reset_flags f) {
+ if (f&rf_clear_edges) {
+ my_successors.clear();
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- my_successors.reset(f);
- if (f&rf_extract) {
- my_built_predecessors.receiver_extract(*this);
- }
- __TBB_ASSERT(!(f & rf_extract) || my_successors.empty(), "Error resetting broadcast_node");
+ my_built_predecessors.clear();
#endif
+ }
+ __TBB_ASSERT(!(f & rf_clear_edges) || my_successors.empty(), "Error resetting broadcast_node");
}
- /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) {}
}; // broadcast_node
//! Forwards messages in arbitrary order
typedef T output_type;
typedef sender< input_type > predecessor_type;
typedef receiver< output_type > successor_type;
- typedef buffer_node<T, A> my_class;
+ typedef buffer_node<T, A> class_type;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- typedef std::vector<predecessor_type *> predecessor_vector_type;
- typedef std::vector<successor_type *> successor_vector_type;
+ typedef typename receiver<input_type>::predecessor_list_type predecessor_list_type;
+ typedef typename sender<output_type>::successor_list_type successor_list_type;
#endif
protected:
typedef size_t size_type;
internal::round_robin_cache< T, null_rw_mutex > my_successors;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- edge_container<predecessor_type> my_built_predecessors;
+ internal::edge_container<predecessor_type> my_built_predecessors;
#endif
friend class internal::forward_task_bypass< buffer_node< T, A > >;
successor_type *r;
predecessor_type *p;
size_t cnt_val;
- successor_vector_type *svec;
- predecessor_vector_type *pvec;
+ successor_list_type *svec;
+ predecessor_list_type *pvec;
};
#else
T *elem;
};
bool forwarder_busy;
- typedef internal::aggregating_functor<my_class, buffer_operation> my_handler;
- friend class internal::aggregating_functor<my_class, buffer_operation>;
- internal::aggregator< my_handler, buffer_operation> my_aggregator;
+ typedef internal::aggregating_functor<class_type, buffer_operation> handler_type;
+ friend class internal::aggregating_functor<class_type, buffer_operation>;
+ internal::aggregator< handler_type, buffer_operation> my_aggregator;
virtual void handle_operations(buffer_operation *op_list) {
buffer_operation *tmp = NULL;
}
}
if (try_forwarding && !forwarder_busy) {
- task* tp = this->my_graph.root_task();
- if(tp) {
+ if(this->my_graph.is_active()) {
forwarder_busy = true;
- task *new_task = new(task::allocate_additional_child_of(*tp)) internal::
+ task *new_task = new(task::allocate_additional_child_of(*(this->my_graph.root_task()))) internal::
forward_task_bypass
< buffer_node<input_type, A> >(*this);
// tmp should point to the last item handled by the aggregator. This is the operation
tmp->ltask = combine_tasks(z, new_task); // in case the op generated a task
}
}
- }
+ } // handle_operations
inline task *grab_forwarding_task( buffer_operation &op_data) {
return op_data.ltask;
}
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ typedef typename sender<T>::built_successors_type built_successors_type;
+
+ /*override sender*/ built_successors_type &built_successors() { return my_successors.built_successors(); }
+
virtual void internal_add_built_succ(buffer_operation *op) {
my_successors.internal_add_built_successor(*(op->r));
__TBB_store_with_release(op->status, SUCCEEDED);
__TBB_store_with_release(op->status, SUCCEEDED);
}
+ typedef typename receiver<T>::built_predecessors_type built_predecessors_type;
+
+ /*override receiver*/ built_predecessors_type &built_predecessors() { return my_built_predecessors; }
+
virtual void internal_add_built_pred(buffer_operation *op) {
my_built_predecessors.add_edge(*(op->p));
__TBB_store_with_release(op->status, SUCCEEDED);
my_built_predecessors.copy_edges(*(op->pvec));
__TBB_store_with_release(op->status, SUCCEEDED);
}
+
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
//! Tries to forward valid items to successors
buffer_node( graph &g ) : graph_node(g), internal::reservable_item_buffer<T>(),
forwarder_busy(false) {
my_successors.set_owner(this);
- my_aggregator.initialize_handler(my_handler(this));
+ my_aggregator.initialize_handler(handler_type(this));
tbb::internal::fgt_node( tbb::internal::FLOW_BUFFER_NODE, &this->my_graph,
static_cast<receiver<input_type> *>(this), static_cast<sender<output_type> *>(this) );
}
internal::reservable_item_buffer<T>(), receiver<T>(), sender<T>() {
forwarder_busy = false;
my_successors.set_owner(this);
- my_aggregator.initialize_handler(my_handler(this));
+ my_aggregator.initialize_handler(handler_type(this));
tbb::internal::fgt_node( tbb::internal::FLOW_BUFFER_NODE, &this->my_graph,
static_cast<receiver<input_type> *>(this), static_cast<sender<output_type> *>(this) );
}
return op_data.cnt_val;
}
- /*override*/ void copy_predecessors( predecessor_vector_type &v ) {
+ /*override*/ void copy_predecessors( predecessor_list_type &v ) {
buffer_operation op_data(blt_pred_cpy);
op_data.pvec = &v;
my_aggregator.execute(&op_data);
}
- /*override*/ void copy_successors( successor_vector_type &v ) {
+ /*override*/ void copy_successors( successor_list_type &v ) {
buffer_operation op_data(blt_succ_cpy);
op_data.svec = &v;
my_aggregator.execute(&op_data);
}
+
#endif
//! Removes a successor.
return ft;
}
- /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {
+ /*override*/void reset_receiver(reset_flags /*f*/) { }
+
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+public:
+ /* override*/ void extract() {
+ my_built_predecessors.receiver_extract(*this);
+ my_successors.built_successors().sender_extract(*this);
+ }
+#endif
+
+protected:
+ /*override*/void reset_node( reset_flags f) {
internal::reservable_item_buffer<T, A>::reset();
+ // TODO: just clear structures
+ if (f&rf_clear_edges) {
+ my_successors.clear();
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- my_successors.reset(f);
- if (f&rf_extract) {
- my_built_predecessors.receiver_extract(*this);
- }
+ my_built_predecessors.clear();
#endif
+ }
forwarder_busy = false;
}
- /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) { }
}; // buffer_node
}
#endif
- /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {
- base_type::reset(__TBB_PFG_RESET_ARG(f));
+protected:
+ /*override*/void reset_node( reset_flags f) {
+ base_type::reset_node(f);
}
}; // queue_node
protected:
- /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {
+ /*override*/void reset_node( reset_flags f) {
mark = 0;
- base_type::reset(__TBB_PFG_RESET_ARG(f));
+ base_type::reset_node(f);
}
typedef typename buffer_node<T, A>::size_type size_type;
if (mark<this->my_tail) heapify();
__TBB_ASSERT(mark == this->my_tail, "mark unequal after heapify");
if (try_forwarding && !this->forwarder_busy) { // could we also test for this->my_tail (queue non-empty)?
- task* tp = this->my_graph.root_task();
- if(tp) {
+ if(this->my_graph.is_active()) {
this->forwarder_busy = true;
- task *new_task = new(task::allocate_additional_child_of(*tp)) internal::
+ task *new_task = new(task::allocate_additional_child_of(*(this->my_graph.root_task()))) internal::
forward_task_bypass
< buffer_node<input_type, A> >(*this);
// tmp should point to the last item handled by the aggregator. This is the operation
typedef sender< input_type > predecessor_type;
typedef receiver< output_type > successor_type;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- typedef std::vector<successor_type *> successor_vector_type;
- typedef std::vector<predecessor_type *> predecessor_vector_type;
+ typedef typename receiver<input_type>::built_predecessors_type built_predecessors_type;
+ typedef typename sender<output_type>::built_successors_type built_successors_type;
+ typedef typename receiver<input_type>::predecessor_list_type predecessor_list_type;
+ typedef typename sender<output_type>::successor_list_type successor_list_type;
#endif
private:
--my_tries;
my_predecessors.try_consume();
if ( check_conditions() ) {
- task* tp = this->my_graph.root_task();
- if ( tp ) {
- task *rtask = new ( task::allocate_additional_child_of( *tp ) )
+ if ( this->my_graph.is_active() ) {
+ task *rtask = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) )
internal::forward_task_bypass< limiter_node<T> >( *this );
FLOW_SPAWN (*rtask);
}
--my_tries;
if (reserved) my_predecessors.try_release();
if ( check_conditions() ) {
- task* tp = this->my_graph.root_task();
- if ( tp ) {
- task *rtask = new ( task::allocate_additional_child_of( *tp ) )
+ if ( this->my_graph.is_active() ) {
+ task *rtask = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) )
internal::forward_task_bypass< limiter_node<T> >( *this );
__TBB_ASSERT(!rval, "Have two tasks to handle");
return rtask;
my_successors.register_successor(r);
//spawn a forward task if this is the only successor
if ( was_empty && !my_predecessors.empty() && my_count + my_tries < my_threshold ) {
- task* tp = this->my_graph.root_task();
- if ( tp ) {
- FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *tp ) )
+ if ( this->my_graph.is_active() ) {
+ FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) )
internal::forward_task_bypass < limiter_node<T> >( *this ) ) );
}
}
}
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ /*override*/ built_successors_type &built_successors() { return my_successors.built_successors(); }
+ /*override*/ built_predecessors_type &built_predecessors() { return my_predecessors.built_predecessors(); }
+
/*override*/void internal_add_built_successor(receiver<output_type> &src) {
my_successors.internal_add_built_successor(src);
}
/*override*/size_t successor_count() { return my_successors.successor_count(); }
- /*override*/ void copy_successors(successor_vector_type &v) {
+ /*override*/ void copy_successors(successor_list_type &v) {
my_successors.copy_successors(v);
}
/*override*/size_t predecessor_count() { return my_predecessors.predecessor_count(); }
- /*override*/ void copy_predecessors(predecessor_vector_type &v) {
+ /*override*/ void copy_predecessors(predecessor_list_type &v) {
my_predecessors.copy_predecessors(v);
}
+
+ /*override*/void extract() {
+ my_count = 0;
+ my_successors.built_successors().sender_extract(*this);
+ my_predecessors.built_predecessors().receiver_extract(*this);
+ decrement.built_predecessors().receiver_extract(decrement);
+ }
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
//! Adds src to the list of cached predecessors.
/* override */ bool register_predecessor( predecessor_type &src ) {
spin_mutex::scoped_lock lock(my_mutex);
my_predecessors.add( src );
- task* tp = this->my_graph.root_task();
- if ( my_count + my_tries < my_threshold && !my_successors.empty() && tp ) {
- FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *tp ) )
+ if ( my_count + my_tries < my_threshold && !my_successors.empty() && this->my_graph.is_active() ) {
+ FLOW_SPAWN( (* new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) )
internal::forward_task_bypass < limiter_node<T> >( *this ) ) );
}
return true;
if ( !rtask ) { // try_put_task failed.
spin_mutex::scoped_lock lock(my_mutex);
--my_tries;
- task* tp = this->my_graph.root_task();
- if ( check_conditions() && tp ) {
- rtask = new ( task::allocate_additional_child_of( *tp ) )
+ if ( check_conditions() && this->my_graph.is_active() ) {
+ rtask = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) )
internal::forward_task_bypass< limiter_node<T> >( *this );
}
}
return rtask;
}
- /*override*/void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {
- my_count = 0;
- my_predecessors.reset(__TBB_PFG_RESET_ARG(f));
- decrement.reset_receiver(__TBB_PFG_RESET_ARG(f));
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- my_successors.reset(f);
-#endif
+ /*override*/void reset_receiver(reset_flags /*f*/) {
+ __TBB_ASSERT(false,NULL); // should never be called
}
- /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f)) { my_predecessors.reset(__TBB_PFG_RESET_ARG(f)); }
+ /*override*/void reset_node( reset_flags f) {
+ my_count = 0;
+ if(f & rf_clear_edges) {
+ my_predecessors.clear();
+ my_successors.clear();
+ }
+ else
+ {
+ my_predecessors.reset( );
+ }
+ decrement.reset_receiver(f);
+ }
}; // limiter_node
#include "internal/_flow_graph_join_impl.h"
using internal::reserving_port;
using internal::queueing_port;
-using internal::tag_matching_port;
+using internal::key_matching_port;
using internal::input_port;
using internal::tag_value;
-using internal::NO_TAG;
-template<typename OutputTuple, graph_buffer_policy JP=queueing> class join_node;
+template<typename OutputTuple, typename JP=queueing> class join_node;
template<typename OutputTuple>
class join_node<OutputTuple,reserving>: public internal::unfolded_join_node<tbb::flow::tuple_size<OutputTuple>::value, reserving_port, OutputTuple, reserving> {
typedef OutputTuple output_type;
typedef typename unfolded_type::input_ports_type input_ports_type;
join_node(graph &g) : unfolded_type(g) {
- tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_RESERVING, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_JOIN_NODE_RESERVING, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
join_node(const join_node &other) : unfolded_type(other) {
- tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_RESERVING, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_JOIN_NODE_RESERVING, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
typedef OutputTuple output_type;
typedef typename unfolded_type::input_ports_type input_ports_type;
join_node(graph &g) : unfolded_type(g) {
- tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_QUEUEING, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_JOIN_NODE_QUEUEING, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
join_node(const join_node &other) : unfolded_type(other) {
- tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_QUEUEING, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_JOIN_NODE_QUEUEING, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
};
-// template for tag_matching join_node
-template<typename OutputTuple>
-class join_node<OutputTuple, tag_matching> : public internal::unfolded_join_node<tbb::flow::tuple_size<OutputTuple>::value,
- tag_matching_port, OutputTuple, tag_matching> {
+// template for key_matching join_node
+// tag_matching join_node is a specialization of key_matching, and is source-compatible.
+template<typename OutputTuple, typename K, typename KHash>
+class join_node<OutputTuple, key_matching<K, KHash> > : public internal::unfolded_join_node<tbb::flow::tuple_size<OutputTuple>::value,
+ key_matching_port, OutputTuple, key_matching<K,KHash> > {
private:
static const int N = tbb::flow::tuple_size<OutputTuple>::value;
- typedef typename internal::unfolded_join_node<N, tag_matching_port, OutputTuple, tag_matching> unfolded_type;
+ typedef typename internal::unfolded_join_node<N, key_matching_port, OutputTuple, key_matching<K,KHash> > unfolded_type;
public:
typedef OutputTuple output_type;
typedef typename unfolded_type::input_ports_type input_ports_type;
template<typename __TBB_B0, typename __TBB_B1>
join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1) : unfolded_type(g, b0, b1) {
- tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
template<typename __TBB_B0, typename __TBB_B1, typename __TBB_B2>
join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2) : unfolded_type(g, b0, b1, b2) {
- tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
template<typename __TBB_B0, typename __TBB_B1, typename __TBB_B2, typename __TBB_B3>
join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3) : unfolded_type(g, b0, b1, b2, b3) {
- tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
template<typename __TBB_B0, typename __TBB_B1, typename __TBB_B2, typename __TBB_B3, typename __TBB_B4>
join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4) :
unfolded_type(g, b0, b1, b2, b3, b4) {
- tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
#if __TBB_VARIADIC_MAX >= 6
typename __TBB_B5>
join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5) :
unfolded_type(g, b0, b1, b2, b3, b4, b5) {
- tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
#endif
typename __TBB_B5, typename __TBB_B6>
join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6) :
unfolded_type(g, b0, b1, b2, b3, b4, b5, b6) {
- tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
#endif
typename __TBB_B5, typename __TBB_B6, typename __TBB_B7>
join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6,
__TBB_B7 b7) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7) {
- tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
#endif
typename __TBB_B5, typename __TBB_B6, typename __TBB_B7, typename __TBB_B8>
join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6,
__TBB_B7 b7, __TBB_B8 b8) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8) {
- tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
#endif
typename __TBB_B5, typename __TBB_B6, typename __TBB_B7, typename __TBB_B8, typename __TBB_B9>
join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6,
__TBB_B7 b7, __TBB_B8 b8, __TBB_B9 b9) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9) {
- tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
#endif
join_node(const join_node &other) : unfolded_type(other) {
- tbb::internal::fgt_multiinput_node<OutputTuple,N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
// indexer node
#include "internal/_flow_graph_indexer_impl.h"
-struct indexer_null_type {};
-
-template<typename T0, typename T1=indexer_null_type, typename T2=indexer_null_type, typename T3=indexer_null_type,
- typename T4=indexer_null_type, typename T5=indexer_null_type, typename T6=indexer_null_type,
- typename T7=indexer_null_type, typename T8=indexer_null_type, typename T9=indexer_null_type> class indexer_node;
+template<typename T0, typename T1=null_type, typename T2=null_type, typename T3=null_type,
+ typename T4=null_type, typename T5=null_type, typename T6=null_type,
+ typename T7=null_type, typename T8=null_type, typename T9=null_type> class indexer_node;
//indexer node specializations
template<typename T0>
typedef typename internal::tagged_msg<size_t, T0> output_type;
typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;
indexer_node(graph& g) : unfolded_type(g) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
// Copy constructor
indexer_node( const indexer_node& other ) : unfolded_type(other) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
typedef typename internal::tagged_msg<size_t, T0, T1> output_type;
typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;
indexer_node(graph& g) : unfolded_type(g) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
// Copy constructor
indexer_node( const indexer_node& other ) : unfolded_type(other) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
typedef typename internal::tagged_msg<size_t, T0, T1, T2> output_type;
typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;
indexer_node(graph& g) : unfolded_type(g) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
// Copy constructor
indexer_node( const indexer_node& other ) : unfolded_type(other) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3> output_type;
typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;
indexer_node(graph& g) : unfolded_type(g) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
// Copy constructor
indexer_node( const indexer_node& other ) : unfolded_type(other) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4> output_type;
typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;
indexer_node(graph& g) : unfolded_type(g) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
// Copy constructor
indexer_node( const indexer_node& other ) : unfolded_type(other) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4, T5> output_type;
typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;
indexer_node(graph& g) : unfolded_type(g) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
// Copy constructor
indexer_node( const indexer_node& other ) : unfolded_type(other) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4, T5, T6> output_type;
typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;
indexer_node(graph& g) : unfolded_type(g) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
// Copy constructor
indexer_node( const indexer_node& other ) : unfolded_type(other) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4, T5, T6, T7> output_type;
typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;
indexer_node(graph& g) : unfolded_type(g) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
// Copy constructor
indexer_node( const indexer_node& other ) : unfolded_type(other) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4, T5, T6, T7, T8> output_type;
typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;
indexer_node(graph& g) : unfolded_type(g) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
// Copy constructor
indexer_node( const indexer_node& other ) : unfolded_type(other) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
typedef typename internal::tagged_msg<size_t, T0, T1, T2, T3, T4, T5, T6, T7, T8, T9> output_type;
typedef typename internal::unfolded_indexer_node<InputTuple> unfolded_type;
indexer_node(graph& g) : unfolded_type(g) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
// Copy constructor
indexer_node( const indexer_node& other ) : unfolded_type(other) {
- tbb::internal::fgt_multiinput_node<InputTuple,N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
+ tbb::internal::fgt_multiinput_node<N>( tbb::internal::FLOW_INDEXER_NODE, &this->my_graph,
this->input_ports(), static_cast< sender< output_type > *>(this) );
}
tbb::internal::fgt_make_edge( &p, &s );
}
-//! Makes an edge between a single predecessor and a single successor
+#if __TBB_FLOW_GRAPH_CPP11_FEATURES
+//Makes an edge from port 0 of a multi-output predecessor to port 0 of a multi-input successor.
+template< typename T, typename V,
+ typename = typename T::output_ports_type, typename = typename V::input_ports_type >
+inline void make_edge( T& output, V& input) {
+ make_edge(get<0>(output.output_ports()), get<0>(input.input_ports()));
+}
+
+//Makes an edge from port 0 of a multi-output predecessor to a receiver.
+template< typename T, typename R,
+ typename = typename T::output_ports_type >
+inline void make_edge( T& output, receiver<R>& input) {
+ make_edge(get<0>(output.output_ports()), input);
+}
+
+//Makes an edge from a sender to port 0 of a multi-input successor.
+template< typename S, typename V,
+ typename = typename V::input_ports_type >
+inline void make_edge( sender<S>& output, V& input) {
+ make_edge(output, get<0>(input.input_ports()));
+}
+#endif
+
+//! Removes an edge between a single predecessor and a single successor
template< typename T >
inline void remove_edge( sender<T> &p, receiver<T> &s ) {
p.remove_successor( s );
tbb::internal::fgt_remove_edge( &p, &s );
}
+#if __TBB_FLOW_GRAPH_CPP11_FEATURES
+//Removes an edge between port 0 of a multi-output predecessor and port 0 of a multi-input successor.
+template< typename T, typename V,
+ typename = typename T::output_ports_type, typename = typename V::input_ports_type >
+inline void remove_edge( T& output, V& input) {
+ remove_edge(get<0>(output.output_ports()), get<0>(input.input_ports()));
+}
+
+//Removes an edge between port 0 of a multi-output predecessor and a receiver.
+template< typename T, typename R,
+ typename = typename T::output_ports_type >
+inline void remove_edge( T& output, receiver<R>& input) {
+ remove_edge(get<0>(output.output_ports()), input);
+}
+//Removes an edge between a sender and port 0 of a multi-input successor.
+template< typename S, typename V,
+ typename = typename V::input_ports_type >
+inline void remove_edge( sender<S>& output, V& input) {
+ remove_edge(output, get<0>(input.input_ports()));
+}
+#endif
+
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
template<typename C >
template< typename S >
-void edge_container<C>::sender_extract( S &s ) {
- edge_vector e = built_edges;
- for ( typename edge_vector::iterator i = e.begin(); i != e.end(); ++i ) {
+void internal::edge_container<C>::sender_extract( S &s ) {
+ edge_list_type e = built_edges;
+ for ( typename edge_list_type::iterator i = e.begin(); i != e.end(); ++i ) {
remove_edge(s, **i);
}
}
template<typename C >
template< typename R >
-void edge_container<C>::receiver_extract( R &r ) {
- edge_vector e = built_edges;
- for ( typename edge_vector::iterator i = e.begin(); i != e.end(); ++i ) {
+void internal::edge_container<C>::receiver_extract( R &r ) {
+ edge_list_type e = built_edges;
+ for ( typename edge_list_type::iterator i = e.begin(); i != e.end(); ++i ) {
remove_edge(**i, r);
}
}
-#endif
+#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
//! Returns a copy of the body from a function or continue node
template< typename Body, typename Node >
return n.template copy_function_object<Body>();
}
-} // interface7
+#if __TBB_FLOW_GRAPH_CPP11_FEATURES
+
+//composite_node
+template< typename InputTuple, typename OutputTuple > class composite_node;
+
+template< typename... InputTypes, typename... OutputTypes>
+class composite_node <tbb::flow::tuple<InputTypes...>, tbb::flow::tuple<OutputTypes...> > : public graph_node, tbb::internal::no_copy {
+
+public:
+ typedef tbb::flow::tuple< receiver<InputTypes>&... > input_ports_type;
+ typedef tbb::flow::tuple< sender<OutputTypes>&... > output_ports_type;
+
+private:
+#if TBB_PREVIEW_FLOW_GRAPH_TRACE
+ const char *my_type_name;
+#endif
+ input_ports_type *my_input_ports;
+ output_ports_type *my_output_ports;
+
+ static const size_t NUM_INPUTS = sizeof...(InputTypes);
+ static const size_t NUM_OUTPUTS = sizeof...(OutputTypes);
+
+protected:
+ /*override*/void reset_node(reset_flags) {}
+
+public:
+#if TBB_PREVIEW_FLOW_GRAPH_TRACE
+ composite_node( graph &g, const char *type_name = "composite_node") : graph_node(g), my_type_name(type_name), my_input_ports(NULL), my_output_ports(NULL) {
+ tbb::internal::itt_make_task_group( tbb::internal::ITT_DOMAIN_FLOW, this, tbb::internal::FLOW_NODE, &g, tbb::internal::FLOW_GRAPH, tbb::internal::FLOW_COMPOSITE_NODE );
+ tbb::internal::fgt_multiinput_multioutput_node_desc( this, my_type_name );
+ }
+#else
+ composite_node( graph &g) : graph_node(g), my_input_ports(NULL), my_output_ports(NULL) {}
+#endif
+
+ template<typename T1, typename T2>
+ void set_external_ports(T1&& input_ports_tuple, T2&& output_ports_tuple) {
+ __TBB_STATIC_ASSERT(NUM_INPUTS == tbb::flow::tuple_size<input_ports_type>::value, "number of arguments does not match number of input ports");
+ __TBB_STATIC_ASSERT(NUM_OUTPUTS == tbb::flow::tuple_size<output_ports_type>::value, "number of arguments does not match number of output ports");
+ my_input_ports = new input_ports_type(std::forward<T1>(input_ports_tuple));
+ my_output_ports = new output_ports_type(std::forward<T2>(output_ports_tuple));
+
+#if TBB_PREVIEW_FLOW_GRAPH_TRACE
+ tbb::internal::fgt_internal_input_helper<T1, NUM_INPUTS>::register_port( this, input_ports_tuple);
+ tbb::internal::fgt_internal_output_helper<T2, NUM_OUTPUTS>::register_port( this, output_ports_tuple);
+#endif
+ }
+
+#if TBB_PREVIEW_FLOW_GRAPH_TRACE
+ template< typename... NodeTypes >
+ void add_visible_nodes(const NodeTypes&... n) { internal::add_nodes_impl(this, true, n...); }
+
+ template< typename... NodeTypes >
+ void add_nodes(const NodeTypes&... n) { internal::add_nodes_impl(this, false, n...); }
+#else
+ template<typename... Nodes> void add_nodes(Nodes&...) { }
+ template<typename... Nodes> void add_visible_nodes(Nodes&...) { }
+#endif
+
+#if TBB_PREVIEW_FLOW_GRAPH_TRACE
+ /* override */ void set_name( const char *name ) {
+ tbb::internal::fgt_multiinput_multioutput_node_desc( this, name );
+ }
+#endif
+
+ input_ports_type input_ports() {
+ __TBB_ASSERT(my_input_ports, "input ports not set, call set_external_ports to set input ports");
+ return *my_input_ports;
+ }
+
+ output_ports_type output_ports() {
+ __TBB_ASSERT(my_output_ports, "output ports not set, call set_external_ports to set output ports");
+ return *my_output_ports;
+ }
+
+ virtual ~composite_node() {
+ if(my_input_ports) delete my_input_ports;
+ if(my_output_ports) delete my_output_ports;
+ }
+
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ /*override*/void extract() {
+ __TBB_ASSERT(false, "Current composite_node implementation does not support extract");
+ }
+#endif
+}; // class composite_node
+
+//composite_node with only input ports
+template< typename... InputTypes>
+class composite_node <tbb::flow::tuple<InputTypes...>, tbb::flow::tuple<> > : public graph_node, tbb::internal::no_copy {
+public:
+ typedef tbb::flow::tuple< receiver<InputTypes>&... > input_ports_type;
+
+private:
+#if TBB_PREVIEW_FLOW_GRAPH_TRACE
+ const char *my_type_name;
+#endif
+ input_ports_type *my_input_ports;
+ static const size_t NUM_INPUTS = sizeof...(InputTypes);
+
+protected:
+ /*override*/void reset_node(reset_flags) {}
+
+public:
+#if TBB_PREVIEW_FLOW_GRAPH_TRACE
+ composite_node( graph &g, const char *type_name = "composite_node") : graph_node(g), my_type_name(type_name), my_input_ports(NULL) {
+ tbb::internal::itt_make_task_group( tbb::internal::ITT_DOMAIN_FLOW, this, tbb::internal::FLOW_NODE, &g, tbb::internal::FLOW_GRAPH, tbb::internal::FLOW_COMPOSITE_NODE );
+ tbb::internal::fgt_multiinput_multioutput_node_desc( this, my_type_name );
+ }
+#else
+ composite_node( graph &g) : graph_node(g), my_input_ports(NULL) {}
+#endif
+
+ template<typename T>
+ void set_external_ports(T&& input_ports_tuple) {
+ __TBB_STATIC_ASSERT(NUM_INPUTS == tbb::flow::tuple_size<input_ports_type>::value, "number of arguments does not match number of input ports");
+
+ my_input_ports = new input_ports_type(std::forward<T>(input_ports_tuple));
+
+#if TBB_PREVIEW_FLOW_GRAPH_TRACE
+ tbb::internal::fgt_internal_input_helper<T, NUM_INPUTS>::register_port( this, std::forward<T>(input_ports_tuple));
+#endif
+ }
+
+#if TBB_PREVIEW_FLOW_GRAPH_TRACE
+ template< typename... NodeTypes >
+ void add_visible_nodes(const NodeTypes&... n) { internal::add_nodes_impl(this, true, n...); }
+
+ template< typename... NodeTypes >
+ void add_nodes( const NodeTypes&... n) { internal::add_nodes_impl(this, false, n...); }
+#else
+ template<typename... Nodes> void add_nodes(Nodes&...) {}
+ template<typename... Nodes> void add_visible_nodes(Nodes&...) {}
+#endif
+
+#if TBB_PREVIEW_FLOW_GRAPH_TRACE
+ /* override */ void set_name( const char *name ) {
+ tbb::internal::fgt_multiinput_multioutput_node_desc( this, name );
+ }
+#endif
+
+ input_ports_type input_ports() {
+ __TBB_ASSERT(my_input_ports, "input ports not set, call set_external_ports to set input ports");
+ return *my_input_ports;
+ }
+
+ virtual ~composite_node() {
+ if(my_input_ports) delete my_input_ports;
+ }
+
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ /*override*/void extract() {
+ __TBB_ASSERT(false, "Current composite_node implementation does not support extract");
+ }
+#endif
+
+}; // class composite_node
+
+//composite_node with only output ports
+template<typename... OutputTypes>
+class composite_node <tbb::flow::tuple<>, tbb::flow::tuple<OutputTypes...> > : public graph_node, tbb::internal::no_copy {
+public:
+ typedef tbb::flow::tuple< sender<OutputTypes>&... > output_ports_type;
+
+private:
+#if TBB_PREVIEW_FLOW_GRAPH_TRACE
+ const char *my_type_name;
+#endif
+ output_ports_type *my_output_ports;
+ static const size_t NUM_OUTPUTS = sizeof...(OutputTypes);
+
+protected:
+ /*override*/void reset_node(reset_flags) {}
+
+public:
+#if TBB_PREVIEW_FLOW_GRAPH_TRACE
+ composite_node( graph &g, const char *type_name = "composite_node") : graph_node(g), my_type_name(type_name), my_output_ports(NULL) {
+ tbb::internal::itt_make_task_group( tbb::internal::ITT_DOMAIN_FLOW, this, tbb::internal::FLOW_NODE, &g, tbb::internal::FLOW_GRAPH, tbb::internal::FLOW_COMPOSITE_NODE );
+ tbb::internal::fgt_multiinput_multioutput_node_desc( this, my_type_name );
+ }
+#else
+ composite_node( graph &g) : graph_node(g), my_output_ports(NULL) {}
+#endif
+
+ template<typename T>
+ void set_external_ports(T&& output_ports_tuple) {
+ __TBB_STATIC_ASSERT(NUM_OUTPUTS == tbb::flow::tuple_size<output_ports_type>::value, "number of arguments does not match number of output ports");
+
+ my_output_ports = new output_ports_type(std::forward<T>(output_ports_tuple));
+
+#if TBB_PREVIEW_FLOW_GRAPH_TRACE
+ tbb::internal::fgt_internal_output_helper<T, NUM_OUTPUTS>::register_port( this, std::forward<T>(output_ports_tuple));
+#endif
+ }
+
+#if TBB_PREVIEW_FLOW_GRAPH_TRACE
+ template<typename... NodeTypes >
+ void add_visible_nodes(const NodeTypes&... n) { internal::add_nodes_impl(this, true, n...); }
+
+ template<typename... NodeTypes >
+ void add_nodes(const NodeTypes&... n) { internal::add_nodes_impl(this, false, n...); }
+#else
+ template<typename... Nodes> void add_nodes(Nodes&...) {}
+ template<typename... Nodes> void add_visible_nodes(Nodes&...) {}
+#endif
+
+#if TBB_PREVIEW_FLOW_GRAPH_TRACE
+ /* override */ void set_name( const char *name ) {
+ tbb::internal::fgt_multiinput_multioutput_node_desc( this, name );
+ }
+#endif
+
+ output_ports_type output_ports() {
+ __TBB_ASSERT(my_output_ports, "output ports not set, call set_external_ports to set output ports");
+ return *my_output_ports;
+ }
+
+ virtual ~composite_node() {
+ if(my_output_ports) delete my_output_ports;
+ }
+
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ /*override*/void extract() {
+ __TBB_ASSERT(false, "Current composite_node implementation does not support extract");
+ }
+#endif
+
+}; // class composite_node
+
+#endif // __TBB_FLOW_GRAPH_CPP11_FEATURES
+
+#if __TBB_PREVIEW_ASYNC_NODE
+namespace internal {
+//! Pure virtual template class that defines the interface for async communication
+template < typename Output >
+class async_gateway {
+public:
+ typedef Output output_type;
+
+ //! Submit signal from Async Activity to FG
+ virtual bool async_try_put(const output_type &i ) = 0;
+
+ virtual void async_reserve() = 0;
+
+ virtual void async_commit() = 0;
+
+ virtual ~async_gateway() {}
+};
+}
+
+//! Implements an async node
+template < typename Input, typename Output, typename Allocator=cache_aligned_allocator<Input> >
+class async_node : public graph_node, public internal::async_input<Input, Allocator, internal::async_gateway<Output> >, public internal::function_output<Output>, public internal::async_gateway<Output> {
+protected:
+ using graph_node::my_graph;
+public:
+ typedef Input input_type;
+ typedef Output output_type;
+ typedef async_node< input_type, output_type, Allocator > my_class;
+ typedef sender< input_type > predecessor_type;
+ typedef receiver< output_type > successor_type;
+ typedef internal::async_gateway< output_type > async_gateway_type;
+ typedef internal::async_input<input_type, Allocator, async_gateway_type > async_input_type;
+ typedef internal::function_output<output_type> async_output_type;
+
+ //! Constructor
+ template< typename Body >
+ async_node( graph &g, Body body ) :
+ graph_node( g ), async_input_type( g, body ) {
+ tbb::internal::fgt_node_with_body( tbb::internal::FLOW_ASYNC_NODE, &this->graph_node::my_graph,
+ static_cast<receiver<input_type> *>(this),
+ static_cast<sender<output_type> *>(this), this->my_body );
+ }
+
+ //! Copy constructor
+ async_node( const async_node& src ) :
+ graph_node(src.graph_node::my_graph), async_input_type( src ), async_output_type(){
+ tbb::internal::fgt_node_with_body( tbb::internal::FLOW_ASYNC_NODE, &this->graph_node::my_graph,
+ static_cast<receiver<input_type> *>(this),
+ static_cast<sender<output_type> *>(this), this->my_body );
+ }
+
+ /* override */ async_gateway_type& async_gateway() {
+ return static_cast< async_gateway_type& >(*this);
+ }
+
+#if TBB_PREVIEW_FLOW_GRAPH_TRACE
+ /* override */ void set_name( const char *name ) {
+ tbb::internal::fgt_node_desc( this, name );
+ }
+#endif
+
+protected:
+ template< typename R, typename B > friend class run_and_put_task;
+ template<typename X, typename Y> friend class internal::broadcast_cache;
+ template<typename X, typename Y> friend class internal::round_robin_cache;
+ using async_input_type::try_put_task;
+
+ /*override*/void reset_node( reset_flags f) {
+ async_input_type::reset_async_input(f);
+ if(f & rf_clear_edges) successors().clear();
+ __TBB_ASSERT(!(f & rf_clear_edges) || successors().empty(), "function_node successors not empty");
+ __TBB_ASSERT(!(f & rf_clear_edges) || this->my_predecessors.empty(), "function_node predecessors not empty");
+ }
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- using interface7::reset_flags;
- using interface7::rf_reset_protocol;
- using interface7::rf_reset_bodies;
- using interface7::rf_extract;
-#endif
-
- using interface7::graph;
- using interface7::graph_node;
- using interface7::continue_msg;
- using interface7::sender;
- using interface7::receiver;
- using interface7::continue_receiver;
-
- using interface7::source_node;
- using interface7::function_node;
- using interface7::multifunction_node;
- using interface7::split_node;
- using interface7::internal::output_port;
- using interface7::indexer_node;
- using interface7::internal::tagged_msg;
- using interface7::internal::cast_to;
- using interface7::internal::is_a;
- using interface7::continue_node;
- using interface7::overwrite_node;
- using interface7::write_once_node;
- using interface7::broadcast_node;
- using interface7::buffer_node;
- using interface7::queue_node;
- using interface7::sequencer_node;
- using interface7::priority_queue_node;
- using interface7::limiter_node;
- using namespace interface7::internal::graph_policy_namespace;
- using interface7::join_node;
- using interface7::input_port;
- using interface7::copy_body;
- using interface7::make_edge;
- using interface7::remove_edge;
- using interface7::internal::NO_TAG;
- using interface7::internal::tag_value;
+ /*override*/void extract() {
+ this->my_predecessors.built_predecessors().receiver_extract(*this);
+ successors().built_successors().sender_extract(*this);
+ }
+#endif
+ internal::broadcast_cache<output_type> &successors () { return async_output_type::my_successors; }
+
+ //! Submit signal from Async Activity to FG
+ /*override*/ bool async_try_put(const output_type &i ) {
+ // TODO: enqueue a task to a FG arena
+ task *res = successors().try_put_task(i);
+ if(!res) return false;
+ if (res != SUCCESSFULLY_ENQUEUED) FLOW_SPAWN(*res);
+ return true;
+ }
+
+ /*override*/ void async_reserve() {
+ my_graph.increment_wait_count();
+ }
+
+ /*override*/ void async_commit() {
+ my_graph.decrement_wait_count();
+ }
+};
+
+#endif // __TBB_PREVIEW_ASYNC_NODE
+
+} // interface8
+
+ using interface8::reset_flags;
+ using interface8::rf_reset_protocol;
+ using interface8::rf_reset_bodies;
+ using interface8::rf_clear_edges;
+
+ using interface8::graph;
+ using interface8::graph_node;
+ using interface8::continue_msg;
+ using interface8::sender;
+ using interface8::receiver;
+ using interface8::continue_receiver;
+
+ using interface8::source_node;
+ using interface8::function_node;
+ using interface8::multifunction_node;
+ using interface8::split_node;
+ using interface8::internal::output_port;
+ using interface8::indexer_node;
+ using interface8::internal::tagged_msg;
+ using interface8::internal::cast_to;
+ using interface8::internal::is_a;
+ using interface8::continue_node;
+ using interface8::overwrite_node;
+ using interface8::write_once_node;
+ using interface8::broadcast_node;
+ using interface8::buffer_node;
+ using interface8::queue_node;
+ using interface8::sequencer_node;
+ using interface8::priority_queue_node;
+ using interface8::limiter_node;
+ using namespace interface8::internal::graph_policy_namespace;
+ using interface8::join_node;
+ using interface8::input_port;
+ using interface8::copy_body;
+ using interface8::make_edge;
+ using interface8::remove_edge;
+ using interface8::internal::tag_value;
+#if __TBB_FLOW_GRAPH_CPP11_FEATURES
+ using interface8::composite_node;
+#endif
+#if __TBB_PREVIEW_ASYNC_NODE
+ using interface8::async_node;
+#endif
} // flow
} // tbb
--- /dev/null
+/*
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
+
+ This file is part of Threading Building Blocks. Threading Building Blocks is free software;
+ you can redistribute it and/or modify it under the terms of the GNU General Public License
+ version 2 as published by the Free Software Foundation. Threading Building Blocks is
+ distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details. You should have received a copy of
+ the GNU General Public License along with Threading Building Blocks; if not, write to the
+ Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+ As a special exception, you may use this file as part of a free software library without
+ restriction. Specifically, if other files instantiate templates or use macros or inline
+ functions from this file, or you compile this file and link it with other files to produce
+ an executable, this file does not by itself cause the resulting executable to be covered
+ by the GNU General Public License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General Public License.
+*/
+
+#ifndef __TBB_global_control_H
+#define __TBB_global_control_H
+
+#if !TBB_PREVIEW_GLOBAL_CONTROL && !__TBB_BUILD
+#error Set TBB_PREVIEW_GLOBAL_CONTROL before including global_control.h
+#endif
+
+#include "tbb_stddef.h"
+
+namespace tbb {
+namespace interface9 {
+
+class global_control {
+public:
+ enum parameter {
+ max_allowed_parallelism,
+ thread_stack_size,
+ parameter_max // insert new parameters above this point
+ };
+
+ global_control(parameter p, size_t value) :
+ my_value(value), my_next(NULL), my_param(p) {
+ __TBB_ASSERT(my_param < parameter_max, "Invalid parameter");
+#if __TBB_WIN8UI_SUPPORT
+ // For Windows Store* apps it's impossible to set stack size
+ if (p==thread_stack_size)
+ return;
+#elif __TBB_x86_64 && (_WIN32 || _WIN64)
+ if (p==thread_stack_size)
+ __TBB_ASSERT_RELEASE((unsigned)value == value, "Stack size is limited to unsigned int range");
+#endif
+ if (my_param==max_allowed_parallelism)
+ // TODO: support for serialization via max_allowed_parallelism==1
+ __TBB_ASSERT_RELEASE(my_value>1, "Values of 1 and 0 are not supported for max_allowed_parallelism.");
+ internal_create();
+ }
+
+ ~global_control() {
+ __TBB_ASSERT(my_param < parameter_max, "Invalid parameter. Probably the object was corrupted.");
+#if __TBB_WIN8UI_SUPPORT
+ // For Windows Store* apps it's impossible to set stack size
+ if (my_param==thread_stack_size)
+ return;
+#endif
+ internal_destroy();
+ }
+
+ static size_t active_value(parameter p) {
+ __TBB_ASSERT(p < parameter_max, "Invalid parameter");
+ return active_value((int)p);
+ }
+private:
+ size_t my_value;
+ global_control *my_next;
+ parameter my_param;
+
+ void __TBB_EXPORTED_METHOD internal_create();
+ void __TBB_EXPORTED_METHOD internal_destroy();
+ static size_t __TBB_EXPORTED_FUNC active_value(int param);
+};
+} // namespace interface9
+
+using interface9::global_control;
+
+} // tbb
+
+#endif // __TBB_global_control_H
<HR>
<A HREF="../index.html">Up to parent directory</A>
<p></p>
-Copyright © 2005-2014 Intel Corporation. All Rights Reserved.
+Copyright © 2005-2015 Intel Corporation. All Rights Reserved.
<P></P>
Intel is a registered trademark or trademark of Intel Corporation
or its subsidiaries in the United States and other countries.
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
class concurrent_queue_iterator: public concurrent_queue_iterator_base,
public std::iterator<std::forward_iterator_tag,Value> {
-#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
+#if !__TBB_TEMPLATE_FRIENDS_BROKEN
template<typename T, class A>
friend class ::tbb::concurrent_bounded_queue;
#else
-public: // workaround for MSVC
+public:
#endif
//! Construct iterator pointing to head of queue.
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#include <initializer_list>
#endif
+#include "_tbb_hash_compare_impl.h"
+
namespace tbb {
namespace interface5 {
//! @cond INTERNAL
sokey_t my_order_key; // Order key for this element
};
+ // Allocate a new node with the given order key; used to allocate dummy nodes
+ // NOTE: no element is constructed here (no placement-new on my_element),
+ // unlike the value-taking create_node overloads.
+ nodeptr_t create_node(sokey_t order_key) {
+ nodeptr_t pnode = my_node_allocator.allocate(1);
+ pnode->init(order_key);
+ return (pnode);
+ }
+
// Allocate a new node with the given order key and value
- nodeptr_t create_node(sokey_t order_key, const T &value) {
+ template<typename Arg>
+ nodeptr_t create_node(sokey_t order_key, __TBB_FORWARDING_REF(Arg) t){
nodeptr_t pnode = my_node_allocator.allocate(1);
+ //TODO: use RAII scoped guard instead of explicit catch
__TBB_TRY {
- new(static_cast<void*>(&pnode->my_element)) T(value);
+ new(static_cast<void*>(&pnode->my_element)) T(tbb::internal::forward<Arg>(t));
pnode->init(order_key);
} __TBB_CATCH(...) {
my_node_allocator.deallocate(pnode, 1);
return (pnode);
}
-#if __TBB_CPP11_RVALUE_REF_PRESENT
- //TODO: try to combine both implementations using poor man forward
- //TODO: use RAII scoped guard instead of explicit catch
- // Allocate a new node with the given order key and value
- nodeptr_t create_node(sokey_t order_key, T &&value) {
+ // Allocate a new node with the given parameters for constructing value
+ template<typename __TBB_PARAMETER_PACK Args>
+ nodeptr_t create_node_v( __TBB_FORWARDING_REF(Args) __TBB_PARAMETER_PACK args){
nodeptr_t pnode = my_node_allocator.allocate(1);
+ //TODO: use RAII scoped guard instead of explicit catch
__TBB_TRY {
- new(static_cast<void*>(&pnode->my_element)) T(std::move(value));
- pnode->init(order_key);
+ new(static_cast<void*>(&pnode->my_element)) T(__TBB_PACK_EXPANSION(tbb::internal::forward<Args>(args)));
} __TBB_CATCH(...) {
my_node_allocator.deallocate(pnode, 1);
__TBB_RETHROW();
return (pnode);
}
-#endif //__TBB_CPP11_RVALUE_REF_PRESENT
-
- // Allocate a new node with the given order key; used to allocate dummy nodes
- nodeptr_t create_node(sokey_t order_key) {
- nodeptr_t pnode = my_node_allocator.allocate(1);
- pnode->init(order_key);
- return (pnode);
- }
split_ordered_list(allocator_type a = allocator_type())
: my_node_allocator(a), my_element_count(0)
{
// Immediately allocate a dummy node with order key of 0. This node
// will always be the head of the list.
- my_head = create_node(0);
+ my_head = create_node(sokey_t(0));
}
~split_ordered_list()
my_node_allocator.deallocate(pnode, 1);
}
- // Try to insert a new element in the list. If insert fails, return the node that
- // was inserted instead.
- nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
+ // Try to insert a new element in the list.
+ // If insert fails, return the node that was inserted instead.
+ static nodeptr_t try_insert_atomic(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
new_node->my_next = current_node;
return previous->atomic_set_next(new_node, current_node);
}
// Insert a new element between passed in iterators
- std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, const value_type &value, sokey_t order_key, size_type *new_count)
+ std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, nodeptr_t pnode, size_type *new_count)
{
- nodeptr_t pnode = create_node(order_key, value);
- nodeptr_t inserted_node = try_insert(it.get_node_ptr(), pnode, next.get_node_ptr());
+ nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), pnode, next.get_node_ptr());
if (inserted_node == pnode)
{
// If the insert succeeded, check that the order is correct and increment the element count
check_range(it, next);
- *new_count = __TBB_FetchAndAddW((uintptr_t*)&my_element_count, uintptr_t(1));
+ *new_count = tbb::internal::as_atomic(my_element_count).fetch_and_increment();
return std::pair<iterator, bool>(iterator(pnode, this), true);
}
else
{
- // If the insert failed (element already there), then delete the new one
- destroy_node(pnode);
return std::pair<iterator, bool>(end(), false);
}
}
__TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");
// Try to insert it in the right place
- nodeptr_t inserted_node = try_insert(it.get_node_ptr(), dummy_node, where.get_node_ptr());
+ nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), dummy_node, where.get_node_ptr());
if (inserted_node == dummy_node)
{
nodeptr_t pnode = it.get_node_ptr();
nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
- previous_node = try_insert(previous_node, dummy_node, NULL);
+ previous_node = try_insert_atomic(previous_node, dummy_node, NULL);
__TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
raw_const_iterator where = it++;
source.erase_node(get_iterator(begin_iterator), where);
nodeptr_t my_head; // pointer to head node
};
-// Template class for hash compare
-template<typename Key, typename Hasher, typename Key_equality>
-class hash_compare
-{
-public:
- typedef Hasher hasher;
- typedef Key_equality key_equal;
-
- hash_compare() {}
-
- hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {}
-
- hash_compare(Hasher a_hasher, Key_equality a_keyeq) : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {}
-
- size_t operator()(const Key& key) const {
- return ((size_t)my_hash_object(key));
- }
-
- bool operator()(const Key& key1, const Key& key2) const {
- return (!my_key_compare_object(key1, key2));
- }
-
- Hasher my_hash_object; // The hash object
- Key_equality my_key_compare_object; // The equality comparator object
-};
-
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning(push)
#pragma warning(disable: 4127) // warning C4127: conditional expression is constant
node = my_solist.create_node(pnode->get_order_key(), std::move(pnode->my_element));
}
- previous_node = my_solist.try_insert(previous_node, node, NULL);
+ previous_node = my_solist.try_insert_atomic(previous_node, node, NULL);
__TBB_ASSERT(previous_node != NULL, "Insertion of node failed. Concurrent inserts in constructor ?");
}
my_solist.check_range();
clear_buckets_on_exception.dismiss();
}
-#endif //__TBB_CPP11_RVALUE_REF_PRESENT
+#endif // __TBB_CPP11_RVALUE_REF_PRESENT
concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
if (this != &right)
return *this;
}
-#endif //__TBB_CPP11_RVALUE_REF_PRESENT
+#endif // __TBB_CPP11_RVALUE_REF_PRESENT
#if __TBB_INITIALIZER_LISTS_PRESENT
//! assignment operator from initializer_list
this->insert(il.begin(),il.end());
return (*this);
}
-#endif //# __TBB_INITIALIZER_LISTS_PRESENT
+#endif // __TBB_INITIALIZER_LISTS_PRESENT
~concurrent_unordered_base() {
return insert(value).first;
}
+#if __TBB_CPP11_RVALUE_REF_PRESENT
+ // Move-inserting overload; forwards the value to internal_insert.
+ std::pair<iterator, bool> insert(value_type&& value) {
+ return internal_insert(std::move(value));
+ }
+
+ // Hinted move-insert; the hint iterator is accepted for interface
+ // compatibility only.
+ iterator insert(const_iterator, value_type&& value) {
+ // Ignore hint
+ return insert(std::move(value)).first;
+ }
+
+#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+ // Construct the element in place from args, then insert the node.
+ // The node is created first because the key (and hence the order key)
+ // can only be computed from the constructed element.
+ template<typename... Args>
+ std::pair<iterator, bool> emplace(Args&&... args) {
+ nodeptr_t pnode = my_solist.create_node_v(tbb::internal::forward<Args>(args)...);
+ const sokey_t hashed_element_key = (sokey_t) my_hash_compare(get_key(pnode->my_element));
+ const sokey_t order_key = split_order_key_regular(hashed_element_key);
+ pnode->init(order_key);
+
+ // internal_insert takes ownership of pnode (inserts it or destroys it).
+ return internal_insert(pnode->my_element, pnode);
+ }
+
+ // Hinted emplace; the hint iterator is accepted for interface
+ // compatibility only.
+ template<typename... Args>
+ iterator emplace_hint(const_iterator, Args&&... args) {
+ // Ignore hint
+ return emplace(tbb::internal::forward<Args>(args)...).first;
+ }
+
+#endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+#endif // __TBB_CPP11_RVALUE_REF_PRESENT
+
template<class Iterator>
void insert(Iterator first, Iterator last) {
for (Iterator it = first; it != last; ++it)
//TODO: why not use std::distance?
// Hash APIs
- size_type internal_distance(const_iterator first, const_iterator last) const
+ static size_type internal_distance(const_iterator first, const_iterator last)
{
size_type num = 0;
}
// Insert an element in the hash given its value
- std::pair<iterator, bool> internal_insert(const value_type& value)
+ template< typename ValueType>
+ std::pair<iterator, bool> internal_insert( __TBB_FORWARDING_REF(ValueType) value, nodeptr_t pnode = NULL)
{
sokey_t order_key = (sokey_t) my_hash_compare(get_key(value));
size_type bucket = order_key % my_number_of_buckets;
+ //TODO: refactor the get_bucket-related logic into a separate function, e.g. acquire_bucket(key_type)
// If bucket is empty, initialize it first
if (!is_initialized(bucket))
init_bucket(bucket);
for (;;)
{
- if (where == last || solist_t::get_order_key(where) > order_key)
+ if (where == last || solist_t::get_order_key(where) > order_key ||
+ // if multimapped, stop at the first item equal to us.
+ (allow_multimapping && solist_t::get_order_key(where) == order_key &&
+ !my_hash_compare(get_key(*where), get_key(value))))
{
- // Try to insert it in the right place
- std::pair<iterator, bool> result = my_solist.try_insert(it, where, value, order_key, &new_count);
+ if (!pnode)
+ pnode = my_solist.create_node(order_key, tbb::internal::forward<ValueType>(value));
+
+ // Try to insert 'pnode' between 'it' and 'where'
+ std::pair<iterator, bool> result = my_solist.try_insert(it, where, pnode, &new_count);
if (result.second)
{
continue;
}
}
- else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && my_hash_compare(get_key(*where), get_key(value)) == 0)
- {
- // Element already in the list, return it
+ else if (!allow_multimapping && solist_t::get_order_key(where) == order_key &&
+ my_hash_compare(get_key(*where), get_key(value)) == 0)
+ { // Element already in the list, return it
+ if (pnode)
+ my_solist.destroy_node(pnode);
return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
}
-
// Move the iterator forward
it = where;
++where;
// Erase an element from the list. This is not a concurrency safe function.
iterator internal_erase(const_iterator it)
{
- key_type key = get_key(*it);
+ // A const reference extends the lifetime of a possible temporary returned by get_key.
+ const key_type& key = get_key(*it);
sokey_t order_key = (sokey_t) my_hash_compare(key);
size_type bucket = order_key % my_number_of_buckets;
#pragma warning(pop) // warning 4127 is back
#endif
-//! Hash multiplier
-static const size_t hash_multiplier = tbb::internal::select_size_t_constant<2654435769U, 11400714819323198485ULL>::value;
} // namespace internal
//! @endcond
-//! Hasher functions
-template<typename T>
-inline size_t tbb_hasher( const T& t ) {
- return static_cast<size_t>( t ) * internal::hash_multiplier;
-}
-template<typename P>
-inline size_t tbb_hasher( P* ptr ) {
- size_t const h = reinterpret_cast<size_t>( ptr );
- return (h >> 3) ^ h;
-}
-template<typename E, typename S, typename A>
-inline size_t tbb_hasher( const std::basic_string<E,S,A>& s ) {
- size_t h = 0;
- for( const E* c = s.c_str(); *c; ++c )
- h = static_cast<size_t>(*c) ^ (h * internal::hash_multiplier);
- return h;
-}
-template<typename F, typename S>
-inline size_t tbb_hasher( const std::pair<F,S>& p ) {
- return tbb_hasher(p.first) ^ tbb_hasher(p.second);
-}
} // namespace interface5
-using interface5::tbb_hasher;
-
-
-// Template class for hash compare
-template<typename Key>
-class tbb_hash
-{
-public:
- tbb_hash() {}
-
- size_t operator()(const Key& key) const
- {
- return tbb_hasher(key);
- }
-};
-
} // namespace tbb
-#endif// __TBB__concurrent_unordered_impl_H
+#endif // __TBB__concurrent_unordered_impl_H
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#error Do not #include this internal file directly; use public TBB headers instead.
#endif
+// included in namespace tbb::flow::interface8 (in flow_graph.h)
+
namespace internal {
+ typedef tbb::internal::uint64_t tag_value;
+
+ using tbb::internal::strip;
+
namespace graph_policy_namespace {
- enum graph_buffer_policy { rejecting, reserving, queueing, tag_matching };
+
+ // Policy tag types replacing the former graph_buffer_policy enum values.
+ struct rejecting { };
+ struct reserving { };
+ struct queueing { };
+
+ // K == type of field used for key-matching. Each tag-matching port will be provided
+ // a functor that, given an object accepted by the port, will return the
+ // field of type K being used for matching.
+ template<typename K, typename KHash=tbb_hash_compare<typename strip<K>::type > >
+ struct key_matching {
+ typedef K key_type;
+ typedef typename strip<K>::type base_key_type;
+ typedef KHash hash_compare_type;
+ };
+
+ // old tag_matching join's new specifier: tag matching is key matching on tag_value
+ typedef key_matching<tag_value> tag_matching;
}
// -------------- function_body containers ----------------------
virtual ~source_body() {}
virtual bool operator()(Output &output) = 0;
virtual source_body* clone() = 0;
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- virtual void reset_body() = 0;
-#endif
};
//! The leaf for source_body
template< typename Output, typename Body>
class source_body_leaf : public source_body<Output> {
public:
- source_body_leaf( const Body &_body ) : body(_body), init_body(_body) { }
+ source_body_leaf( const Body &_body ) : body(_body) { }
/*override*/ bool operator()(Output &output) { return body( output ); }
/*override*/ source_body_leaf* clone() {
- return new source_body_leaf< Output, Body >(init_body);
- }
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- /*override*/ void reset_body() {
- body = init_body;
+ return new source_body_leaf< Output, Body >(body);
}
-#endif
Body get_body() { return body; }
private:
Body body;
- Body init_body;
};
//! A functor that takes an Input and generates an Output
virtual ~function_body() {}
virtual Output operator()(const Input &input) = 0;
virtual function_body* clone() = 0;
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- virtual void reset_body() = 0;
-#endif
};
//! the leaf for function_body
template <typename Input, typename Output, typename B>
class function_body_leaf : public function_body< Input, Output > {
public:
- function_body_leaf( const B &_body ) : body(_body), init_body(_body) { }
+ function_body_leaf( const B &_body ) : body(_body) { }
Output operator()(const Input &i) { return body(i); }
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- /*override*/ void reset_body() {
- body = init_body;
- }
-#endif
B get_body() { return body; }
/*override*/ function_body_leaf* clone() {
- return new function_body_leaf< Input, Output, B >(init_body);
+ return new function_body_leaf< Input, Output, B >(body);
}
private:
B body;
- B init_body;
};
//! the leaf for function_body specialized for Input and output of continue_msg
template <typename B>
class function_body_leaf< continue_msg, continue_msg, B> : public function_body< continue_msg, continue_msg > {
public:
- function_body_leaf( const B &_body ) : body(_body), init_body(_body) { }
+ function_body_leaf( const B &_body ) : body(_body) { }
continue_msg operator()( const continue_msg &i ) {
body(i);
return i;
}
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- /*override*/ void reset_body() {
- body = init_body;
- }
-#endif
B get_body() { return body; }
/*override*/ function_body_leaf* clone() {
- return new function_body_leaf< continue_msg, continue_msg, B >(init_body);
+ return new function_body_leaf< continue_msg, continue_msg, B >(body);
}
private:
B body;
- B init_body;
};
//! the leaf for function_body specialized for Output of continue_msg
template <typename Input, typename B>
class function_body_leaf< Input, continue_msg, B> : public function_body< Input, continue_msg > {
public:
- function_body_leaf( const B &_body ) : body(_body), init_body(_body) { }
+ function_body_leaf( const B &_body ) : body(_body) { }
continue_msg operator()(const Input &i) {
body(i);
return continue_msg();
}
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- /*override*/ void reset_body() {
- body = init_body;
- }
-#endif
B get_body() { return body; }
/*override*/ function_body_leaf* clone() {
- return new function_body_leaf< Input, continue_msg, B >(init_body);
+ return new function_body_leaf< Input, continue_msg, B >(body);
}
private:
B body;
- B init_body;
};
//! the leaf for function_body specialized for Input of continue_msg
template <typename Output, typename B>
class function_body_leaf< continue_msg, Output, B > : public function_body< continue_msg, Output > {
public:
- function_body_leaf( const B &_body ) : body(_body), init_body(_body) { }
+ function_body_leaf( const B &_body ) : body(_body) { }
Output operator()(const continue_msg &i) {
return body(i);
}
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- /*override*/ void reset_body() {
- body = init_body;
- }
-#endif
B get_body() { return body; }
/*override*/ function_body_leaf* clone() {
- return new function_body_leaf< continue_msg, Output, B >(init_body);
+ return new function_body_leaf< continue_msg, Output, B >(body);
}
private:
B body;
- B init_body;
};
//! function_body that takes an Input and a set of output ports
virtual ~multifunction_body () {}
virtual void operator()(const Input &/* input*/, OutputSet &/*oset*/) = 0;
virtual multifunction_body* clone() = 0;
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- virtual void reset_body() = 0;
-#endif
};
//! leaf for multifunction. OutputSet can be a std::tuple or a vector.
template<typename Input, typename OutputSet, typename B>
class multifunction_body_leaf : public multifunction_body<Input, OutputSet> {
public:
- multifunction_body_leaf(const B &_body) : body(_body), init_body(_body) { }
+ multifunction_body_leaf(const B &_body) : body(_body) { }
void operator()(const Input &input, OutputSet &oset) {
body(input, oset); // body may explicitly put() to one or more of oset.
}
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- /*override*/ void reset_body() {
- body = init_body;
- }
-#endif
B get_body() { return body; }
/*override*/ multifunction_body_leaf* clone() {
- return new multifunction_body_leaf<Input, OutputSet,B>(init_body);
+ return new multifunction_body_leaf<Input, OutputSet,B>(body);
}
private:
B body;
- B init_body;
};
+// ------ function bodies for hash_buffers and key-matching joins.
+
+// Type-erased interface: extracts from an Input item the value (Output)
+// used as its matching key.
+template<typename Input, typename Output>
+class type_to_key_function_body : tbb::internal::no_assign {
+    public:
+        virtual ~type_to_key_function_body() {}
+        virtual Output operator()(const Input &input) = 0; // returns an Output
+        virtual type_to_key_function_body* clone() = 0;
+};
+
+// specialization for ref output: the key is returned by const reference
+// rather than by value.
+template<typename Input, typename Output>
+class type_to_key_function_body<Input,Output&> : tbb::internal::no_assign {
+    public:
+        virtual ~type_to_key_function_body() {}
+        virtual const Output & operator()(const Input &input) = 0; // returns a const Output&
+        virtual type_to_key_function_body* clone() = 0;
+};
+
+// Concrete leaf wrapping a user-supplied key-extraction functor B.
+template <typename Input, typename Output, typename B>
+class type_to_key_function_body_leaf : public type_to_key_function_body<Input, Output> {
+public:
+    type_to_key_function_body_leaf( const B &_body ) : body(_body) { }
+    /*override*/Output operator()(const Input &i) { return body(i); }
+    B get_body() { return body; }
+    /*override*/ type_to_key_function_body_leaf* clone() {
+        return new type_to_key_function_body_leaf< Input, Output, B>(body);
+    }
+private:
+    B body;
+};
+
+// Leaf specialization for reference output: the functor's result is exposed
+// as a const reference.
+template <typename Input, typename Output, typename B>
+class type_to_key_function_body_leaf<Input,Output&,B> : public type_to_key_function_body< Input, Output&> {
+public:
+    type_to_key_function_body_leaf( const B &_body ) : body(_body) { }
+
+    /*override*/const Output& operator()(const Input &i) {
+        // const_cast here only adds const qualification to body(i)'s result.
+        return const_cast<const Output&>(body(i));
+    }
+
+    B get_body() { return body; }
+
+    /*override*/ type_to_key_function_body_leaf* clone() {
+        return new type_to_key_function_body_leaf< Input, Output&, B>(body);
+    }
+
+private:
+    B body;
+};
+
+#if __TBB_PREVIEW_ASYNC_NODE
+
+    //! A functor that takes an Input and submits it to an asynchronous activity
+    template< typename Input, typename AsyncGateway >
+    class async_body : tbb::internal::no_assign {
+    public:
+        virtual ~async_body() {}
+        // NOTE(review): the first parameter is the input item despite being named 'output'.
+        virtual void operator()(const Input &output, AsyncGateway& gateway) = 0;
+        virtual async_body* clone() = 0;
+    };
+
+    //! The leaf for async_body: wraps a user-supplied Body functor.
+    template< typename Input, typename Body, typename AsyncGateway >
+    class async_body_leaf : public async_body< Input, AsyncGateway > {
+    public:
+        async_body_leaf( const Body &_body ) : body(_body) { }
+        /*override*/ void operator()(const Input &input, AsyncGateway& gateway) { body( input, gateway ); }
+        /*override*/ async_body_leaf* clone() {
+            return new async_body_leaf< Input, Body, AsyncGateway >(body);
+        }
+        Body get_body() { return body; }
+    private:
+        Body body;
+    };
+#endif
+
// --------------------------- end of function_body containers ------------------------
// --------------------------- node task bodies ---------------------------------------
typedef size_t size_type;
bool empty() {
- typename my_mutex_type::scoped_lock lock( my_mutex );
+ typename mutex_type::scoped_lock lock( my_mutex );
return internal_empty();
}
void add( T &n ) {
- typename my_mutex_type::scoped_lock lock( my_mutex );
+ typename mutex_type::scoped_lock lock( my_mutex );
internal_push(n);
}
void remove( T &n ) {
- typename my_mutex_type::scoped_lock lock( my_mutex );
+ typename mutex_type::scoped_lock lock( my_mutex );
for ( size_t i = internal_size(); i != 0; --i ) {
T &s = internal_pop();
if ( &s == &n ) return; // only remove one predecessor per request
}
}
+ // Discard every cached item and, when the preview feature is enabled,
+ // also forget the recorded built-predecessor edges.
+ void clear() {
+ while( !my_q.empty()) (void)my_q.pop();
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ my_built_predecessors.clear();
+#endif
+ }
+
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- typedef std::vector<T *> predecessor_vector_type;
+ typedef edge_container<T> built_predecessors_type;
+ built_predecessors_type &built_predecessors() { return my_built_predecessors; }
+
+ typedef typename edge_container<T>::edge_list_type predecessor_list_type;
void internal_add_built_predecessor( T &n ) {
- typename my_mutex_type::scoped_lock lock( my_mutex );
+ typename mutex_type::scoped_lock lock( my_mutex );
my_built_predecessors.add_edge(n);
}
void internal_delete_built_predecessor( T &n ) {
- typename my_mutex_type::scoped_lock lock( my_mutex );
+ typename mutex_type::scoped_lock lock( my_mutex );
my_built_predecessors.delete_edge(n);
}
- void copy_predecessors( predecessor_vector_type &v) {
- typename my_mutex_type::scoped_lock lock( my_mutex );
+ void copy_predecessors( predecessor_list_type &v) {
+ typename mutex_type::scoped_lock lock( my_mutex );
my_built_predecessors.copy_edges(v);
}
size_t predecessor_count() {
- typename my_mutex_type::scoped_lock lock(my_mutex);
+ typename mutex_type::scoped_lock lock(my_mutex);
return (size_t)(my_built_predecessors.edge_count());
}
-#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
+#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
protected:
- typedef M my_mutex_type;
- my_mutex_type my_mutex;
+ typedef M mutex_type;
+ mutex_type my_mutex;
std::queue< T * > my_q;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- edge_container<T> my_built_predecessors;
+ built_predecessors_type my_built_predecessors;
#endif
// Assumes lock is held
template< typename T, typename M=spin_mutex >
class predecessor_cache : public node_cache< sender<T>, M > {
public:
- typedef M my_mutex_type;
+ typedef M mutex_type;
typedef T output_type;
typedef sender<output_type> predecessor_type;
typedef receiver<output_type> successor_type;
do {
predecessor_type *src;
{
- typename my_mutex_type::scoped_lock lock(this->my_mutex);
+ typename mutex_type::scoped_lock lock(this->my_mutex);
if ( this->internal_empty() ) {
break;
}
return msg;
}
- void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {
+ // If we are removing arcs (rf_clear_edges), call clear() rather than reset().
+ void reset() {
if(my_owner) {
for(;;) {
predecessor_type *src;
if(this->internal_empty()) break;
src = &this->internal_pop();
}
- src->register_successor( *my_owner);
+ src->register_successor( *my_owner);
}
}
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- if (f&rf_extract && my_owner)
- my_built_predecessors.receiver_extract(*my_owner);
- __TBB_ASSERT(!(f&rf_extract) || this->internal_empty(), "predecessor cache not empty");
-#endif
}
protected:
template< typename T, typename M=spin_mutex >
class reservable_predecessor_cache : public predecessor_cache< T, M > {
public:
- typedef M my_mutex_type;
+ typedef M mutex_type;
typedef T output_type;
typedef sender<T> predecessor_type;
typedef receiver<T> successor_type;
do {
{
- typename my_mutex_type::scoped_lock lock(this->my_mutex);
+ typename mutex_type::scoped_lock lock(this->my_mutex);
if ( reserved_src || this->internal_empty() )
return false;
msg = reserved_src->try_reserve( v );
if (msg == false) {
- typename my_mutex_type::scoped_lock lock(this->my_mutex);
+ typename mutex_type::scoped_lock lock(this->my_mutex);
// Relinquish ownership of the edge
reserved_src->register_successor( *this->my_owner );
reserved_src = NULL;
return true;
}
- void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {
+ void reset( ) {
+ reserved_src = NULL;
+ predecessor_cache<T,M>::reset( );
+ }
+
+ void clear() {
reserved_src = NULL;
- predecessor_cache<T,M>::reset(__TBB_PFG_RESET_ARG(f));
+ predecessor_cache<T,M>::clear();
}
private:
class successor_cache : tbb::internal::no_copy {
protected:
- typedef M my_mutex_type;
- my_mutex_type my_mutex;
+ typedef M mutex_type;
+ mutex_type my_mutex;
+ typedef receiver<T> successor_type;
typedef receiver<T> *pointer_type;
- typedef std::list< pointer_type > my_successors_type;
+ typedef std::list< pointer_type > successors_type;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- edge_container<receiver<T> > my_built_successors;
+ edge_container<successor_type> my_built_successors;
#endif
- my_successors_type my_successors;
+ successors_type my_successors;
sender<T> *my_owner;
public:
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- typedef std::vector<pointer_type> successor_vector_type;
- void internal_add_built_successor( receiver<T> &r) {
- typename my_mutex_type::scoped_lock l(my_mutex, true);
+ typedef typename edge_container<successor_type>::edge_list_type successor_list_type;
+
+ edge_container<successor_type> &built_successors() { return my_built_successors; }
+
+ void internal_add_built_successor( successor_type &r) {
+ typename mutex_type::scoped_lock l(my_mutex, true);
my_built_successors.add_edge( r );
}
- void internal_delete_built_successor( receiver<T> &r) {
- typename my_mutex_type::scoped_lock l(my_mutex, true);
+ void internal_delete_built_successor( successor_type &r) {
+ typename mutex_type::scoped_lock l(my_mutex, true);
my_built_successors.delete_edge(r);
}
- void copy_successors( successor_vector_type &v) {
- typename my_mutex_type::scoped_lock l(my_mutex, false);
+ void copy_successors( successor_list_type &v) {
+ typename mutex_type::scoped_lock l(my_mutex, false);
my_built_successors.copy_edges(v);
}
size_t successor_count() {
- typename my_mutex_type::scoped_lock l(my_mutex,false);
+ typename mutex_type::scoped_lock l(my_mutex,false);
return my_built_successors.edge_count();
}
- void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {
- if (f&rf_extract && my_owner)
- my_built_successors.sender_extract(*my_owner);
- }
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
successor_cache( ) : my_owner(NULL) {}
virtual ~successor_cache() {}
- void register_successor( receiver<T> &r ) {
- typename my_mutex_type::scoped_lock l(my_mutex, true);
+ void register_successor( successor_type &r ) {
+ typename mutex_type::scoped_lock l(my_mutex, true);
my_successors.push_back( &r );
}
- void remove_successor( receiver<T> &r ) {
- typename my_mutex_type::scoped_lock l(my_mutex, true);
- for ( typename my_successors_type::iterator i = my_successors.begin();
+ void remove_successor( successor_type &r ) {
+ typename mutex_type::scoped_lock l(my_mutex, true);
+ for ( typename successors_type::iterator i = my_successors.begin();
i != my_successors.end(); ++i ) {
if ( *i == & r ) {
my_successors.erase(i);
}
bool empty() {
- typename my_mutex_type::scoped_lock l(my_mutex, false);
+ typename mutex_type::scoped_lock l(my_mutex, false);
return my_successors.empty();
}
}
virtual task * try_put_task( const T &t ) = 0;
- };
+ }; // successor_cache<T>
//! An abstract cache of successors, specialized to continue_msg
template<>
class successor_cache< continue_msg > : tbb::internal::no_copy {
protected:
- typedef spin_rw_mutex my_mutex_type;
- my_mutex_type my_mutex;
+ typedef spin_rw_mutex mutex_type;
+ mutex_type my_mutex;
+ typedef receiver<continue_msg> successor_type;
typedef receiver<continue_msg> *pointer_type;
- typedef std::list< pointer_type > my_successors_type;
- my_successors_type my_successors;
+ typedef std::list< pointer_type > successors_type;
+ successors_type my_successors;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- edge_container<receiver<continue_msg> > my_built_successors;
+ edge_container<successor_type> my_built_successors;
+ typedef edge_container<successor_type>::edge_list_type successor_list_type;
#endif
sender<continue_msg> *my_owner;
public:
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- typedef std::vector<pointer_type> successor_vector_type;
- void internal_add_built_successor( receiver<continue_msg> &r) {
- my_mutex_type::scoped_lock l(my_mutex, true);
+
+ edge_container<successor_type> &built_successors() { return my_built_successors; }
+
+ void internal_add_built_successor( successor_type &r) {
+ mutex_type::scoped_lock l(my_mutex, true);
my_built_successors.add_edge( r );
}
- void internal_delete_built_successor( receiver<continue_msg> &r) {
- my_mutex_type::scoped_lock l(my_mutex, true);
+ void internal_delete_built_successor( successor_type &r) {
+ mutex_type::scoped_lock l(my_mutex, true);
my_built_successors.delete_edge(r);
}
- void copy_successors( successor_vector_type &v) {
- my_mutex_type::scoped_lock l(my_mutex, false);
+ void copy_successors( successor_list_type &v) {
+ mutex_type::scoped_lock l(my_mutex, false);
my_built_successors.copy_edges(v);
}
size_t successor_count() {
- my_mutex_type::scoped_lock l(my_mutex,false);
+ mutex_type::scoped_lock l(my_mutex,false);
return my_built_successors.edge_count();
}
- void reset( __TBB_PFG_RESET_ARG(reset_flags f)) {
- if (f&rf_extract && my_owner)
- my_built_successors.sender_extract(*my_owner);
- }
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
successor_cache( ) : my_owner(NULL) {}
virtual ~successor_cache() {}
- void register_successor( receiver<continue_msg> &r ) {
- my_mutex_type::scoped_lock l(my_mutex, true);
+ void register_successor( successor_type &r ) {
+ mutex_type::scoped_lock l(my_mutex, true);
my_successors.push_back( &r );
if ( my_owner && r.is_continue_receiver() ) {
r.register_predecessor( *my_owner );
}
}
- void remove_successor( receiver<continue_msg> &r ) {
- my_mutex_type::scoped_lock l(my_mutex, true);
- for ( my_successors_type::iterator i = my_successors.begin();
+ void remove_successor( successor_type &r ) {
+ mutex_type::scoped_lock l(my_mutex, true);
+ for ( successors_type::iterator i = my_successors.begin();
i != my_successors.end(); ++i ) {
if ( *i == & r ) {
// TODO: Check if we need to test for continue_receiver before
}
bool empty() {
- my_mutex_type::scoped_lock l(my_mutex, false);
+ mutex_type::scoped_lock l(my_mutex, false);
return my_successors.empty();
}
virtual task * try_put_task( const continue_msg &t ) = 0;
- };
+ }; // successor_cache< continue_msg >
//! A cache of successors that are broadcast to
template<typename T, typename M=spin_rw_mutex>
class broadcast_cache : public successor_cache<T, M> {
- typedef M my_mutex_type;
- typedef std::list< receiver<T> * > my_successors_type;
+ typedef M mutex_type;
+ typedef typename successor_cache<T,M>::successors_type successors_type;
public:
/*override*/ task * try_put_task( const T &t ) {
task * last_task = NULL;
bool upgraded = true;
- typename my_mutex_type::scoped_lock l(this->my_mutex, upgraded);
- typename my_successors_type::iterator i = this->my_successors.begin();
+ typename mutex_type::scoped_lock l(this->my_mutex, upgraded);
+ typename successors_type::iterator i = this->my_successors.begin();
while ( i != this->my_successors.end() ) {
task *new_task = (*i)->try_put_task(t);
last_task = combine_tasks(last_task, new_task); // enqueue if necessary
template<typename T, typename M=spin_rw_mutex >
class round_robin_cache : public successor_cache<T, M> {
typedef size_t size_type;
- typedef M my_mutex_type;
- typedef std::list< receiver<T> * > my_successors_type;
+ typedef M mutex_type;
+ typedef typename successor_cache<T,M>::successors_type successors_type;
public:
round_robin_cache( ) {}
size_type size() {
- typename my_mutex_type::scoped_lock l(this->my_mutex, false);
+ typename mutex_type::scoped_lock l(this->my_mutex, false);
return this->my_successors.size();
}
/*override*/task *try_put_task( const T &t ) {
bool upgraded = true;
- typename my_mutex_type::scoped_lock l(this->my_mutex, upgraded);
- typename my_successors_type::iterator i = this->my_successors.begin();
+ typename mutex_type::scoped_lock l(this->my_mutex, upgraded);
+ typename successors_type::iterator i = this->my_successors.begin();
while ( i != this->my_successors.end() ) {
task *new_task = (*i)->try_put_task(t);
if ( new_task ) {
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
tbb::flow::get<N-1>(my_input).set_up(p, indexer_node_put_task);
indexer_helper<TupleTypes,N-1>::template set_indexer_node_pointer<IndexerNodeBaseType,PortTuple>(my_input, p);
}
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
template<typename InputTuple>
static inline void reset_inputs(InputTuple &my_input, reset_flags f) {
- join_helper<N-1>::reset_inputs(my_input, f);
+ indexer_helper<TupleTypes,N-1>::reset_inputs(my_input, f);
tbb::flow::get<N-1>(my_input).reset_receiver(f);
}
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ template<typename InputTuple>
+ static inline void extract(InputTuple &my_input) {
+ indexer_helper<TupleTypes,N-1>::extract(my_input);
+ tbb::flow::get<N-1>(my_input).extract_receiver();
+ }
#endif
};
task *(*indexer_node_put_task)(const T&, void *) = do_try_put<IndexerNodeBaseType, T, 0>;
tbb::flow::get<0>(my_input).set_up(p, indexer_node_put_task);
}
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
template<typename InputTuple>
static inline void reset_inputs(InputTuple &my_input, reset_flags f) {
tbb::flow::get<0>(my_input).reset_receiver(f);
}
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ template<typename InputTuple>
+ static inline void extract(InputTuple &my_input) {
+ tbb::flow::get<0>(my_input).extract_receiver();
+ }
#endif
};
forward_function_ptr my_try_put_task;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
spin_mutex my_pred_mutex;
- edge_container<sender<T> > my_built_predecessors;
+ typedef typename receiver<T>::built_predecessors_type built_predecessors_type;
+ built_predecessors_type my_built_predecessors;
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
public:
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
my_try_put_task = f;
}
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- typedef std::vector<sender<T> *> predecessor_vector_type;
+ typedef typename receiver<T>::predecessor_list_type predecessor_list_type;
+
+ /*override*/ built_predecessors_type &built_predecessors() { return my_built_predecessors; }
+
/*override*/size_t predecessor_count() {
spin_mutex::scoped_lock l(my_pred_mutex);
return my_built_predecessors.edge_count();
spin_mutex::scoped_lock l(my_pred_mutex);
my_built_predecessors.delete_edge(p);
}
- /*override*/void copy_predecessors( predecessor_vector_type &v) {
+ /*override*/void copy_predecessors( predecessor_list_type &v) {
spin_mutex::scoped_lock l(my_pred_mutex);
return my_built_predecessors.copy_edges(v);
}
+ /*override*/void clear_predecessors() {
+ spin_mutex::scoped_lock l(my_pred_mutex);
+ my_built_predecessors.clear();
+ }
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
protected:
template< typename R, typename B > friend class run_and_put_task;
return my_try_put_task(v, my_indexer_ptr);
}
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
public:
- /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f)) {
- if(f&rf_extract) my_built_predecessors.receiver_extract(*this);
- }
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ /*override*/void reset_receiver(reset_flags f) { if(f&rf_clear_edges) my_built_predecessors.clear(); }
#else
- /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) { }
+ /*override*/void reset_receiver(reset_flags /*f*/) { }
#endif
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ void extract_receiver() { my_built_predecessors.receiver_extract(*this); }
+#endif
};
template<typename InputTuple, typename OutputType, typename StructTypes>
typedef receiver<output_type> successor_type;
typedef indexer_node_FE<InputTuple, output_type,StructTypes> input_ports_type;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- typedef std::vector<successor_type *> successor_vector_type;
+ typedef typename sender<output_type>::built_successors_type built_successors_type;
+ typedef typename sender<output_type>::successor_list_type successor_list_type;
#endif
private:
#endif
};
enum op_stat {WAIT=0, SUCCEEDED, FAILED};
- typedef indexer_node_base<InputTuple,output_type,StructTypes> my_class;
+ typedef indexer_node_base<InputTuple,output_type,StructTypes> class_type;
class indexer_node_base_operation : public aggregated_operation<indexer_node_base_operation> {
public:
task *bypass_t;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
size_t cnt_val;
- successor_vector_type *succv;
+ successor_list_type *succv;
#endif
};
indexer_node_base_operation(const output_type* e, op_type t) :
indexer_node_base_operation(op_type t) : type(char(t)) {}
};
- typedef internal::aggregating_functor<my_class, indexer_node_base_operation> my_handler;
- friend class internal::aggregating_functor<my_class, indexer_node_base_operation>;
- aggregator<my_handler, indexer_node_base_operation> my_aggregator;
+ typedef internal::aggregating_functor<class_type, indexer_node_base_operation> handler_type;
+ friend class internal::aggregating_functor<class_type, indexer_node_base_operation>;
+ aggregator<handler_type, indexer_node_base_operation> my_aggregator;
void handle_operations(indexer_node_base_operation* op_list) {
indexer_node_base_operation *current;
indexer_node_base(graph& g) : graph_node(g), input_ports_type() {
indexer_helper<StructTypes,N>::set_indexer_node_pointer(this->my_inputs, this);
my_successors.set_owner(this);
- my_aggregator.initialize_handler(my_handler(this));
+ my_aggregator.initialize_handler(handler_type(this));
}
indexer_node_base(const indexer_node_base& other) : graph_node(other.my_graph), input_ports_type(), sender<output_type>() {
indexer_helper<StructTypes,N>::set_indexer_node_pointer(this->my_inputs, this);
my_successors.set_owner(this);
- my_aggregator.initialize_handler(my_handler(this));
+ my_aggregator.initialize_handler(handler_type(this));
}
bool register_successor(successor_type &r) {
}
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+
+ built_successors_type &built_successors() { return my_successors.built_successors(); }
+
void internal_add_built_successor( successor_type &r) {
indexer_node_base_operation op_data(r, add_blt_succ);
my_aggregator.execute(&op_data);
return op_data.cnt_val;
}
- void copy_successors( successor_vector_type &v) {
+ void copy_successors( successor_list_type &v) {
indexer_node_base_operation op_data(blt_succ_cpy);
op_data.succv = &v;
my_aggregator.execute(&op_data);
}
+ void extract() {
+ my_successors.built_successors().sender_extract(*this);
+ indexer_helper<StructTypes,N>::extract(this->my_inputs);
+ }
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
protected:
- /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) {
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- my_successors.reset(f);
- indexer_helper<StructTypes,N>::reset_inputs(this->my_inputs, f);
-#endif
+ /*override*/void reset_node(reset_flags f) {
+ if(f & rf_clear_edges) {
+ my_successors.clear();
+ indexer_helper<StructTypes,N>::reset_inputs(this->my_inputs,f);
+ }
}
private:
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#include "tbb/internal/_flow_graph_types_impl.h" // for aligned_pair
-// in namespace tbb::flow::interface7 (included in _flow_graph_node_impl.h)
+// in namespace tbb::flow::interface8 (included in _flow_graph_node_impl.h)
//! Expandable buffer of items. The possible operations are push, pop,
//* tests for empty and so forth. No mutual exclusion is built in.
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#error Do not #include this internal file directly; use public TBB headers instead.
#endif
-#include "_flow_graph_types_impl.h"
-
namespace internal {
- typedef size_t tag_value;
- static const tag_value NO_TAG = tag_value(-1);
-
struct forwarding_base {
- forwarding_base(graph &g) : my_graph_ptr(&g), current_tag(NO_TAG) {}
+ forwarding_base(graph &g) : graph_pointer(&g) {}
virtual ~forwarding_base() {}
// decrement_port_count may create a forwarding task. If we cannot handle the task
// ourselves, ask decrement_port_count to deal with it.
virtual task * decrement_port_count(bool handle_task) = 0;
virtual void increment_port_count() = 0;
- virtual task * increment_tag_count(tag_value /*t*/, bool /*handle_task*/) {return NULL;}
// moved here so input ports can queue tasks
- graph* my_graph_ptr;
- tag_value current_tag; // so ports can refer to FE's desired items
+ graph* graph_pointer;
+ };
+
+ // specialization that lets us keep a copy of the current_key for building results.
+ // KeyType can be a reference type.
+ template<typename KeyType>
+ struct matching_forwarding_base :public forwarding_base {
+ typedef typename tbb::internal::strip<KeyType>::type current_key_type;
+ matching_forwarding_base(graph &g) : forwarding_base(g) { }
+ virtual task * increment_key_count(current_key_type const & /*t*/, bool /*handle_task*/) = 0; // {return NULL;}
+ current_key_type current_key; // so ports can refer to FE's desired items
};
template< int N >
reset_my_port(my_input);
}
- template<typename InputTuple, typename TagFuncTuple>
- static inline void set_tag_func(InputTuple &my_input, TagFuncTuple &my_tag_funcs) {
- tbb::flow::get<N-1>(my_input).set_my_original_tag_func(tbb::flow::get<N-1>(my_tag_funcs));
- tbb::flow::get<N-1>(my_input).set_my_tag_func(tbb::flow::get<N-1>(my_input).my_original_func()->clone());
- tbb::flow::get<N-1>(my_tag_funcs) = NULL;
- join_helper<N-1>::set_tag_func(my_input, my_tag_funcs);
+ template<typename InputTuple, typename KeyFuncTuple>
+ static inline void set_key_functors(InputTuple &my_input, KeyFuncTuple &my_key_funcs) {
+ tbb::flow::get<N-1>(my_input).set_my_key_func(tbb::flow::get<N-1>(my_key_funcs));
+ tbb::flow::get<N-1>(my_key_funcs) = NULL;
+ join_helper<N-1>::set_key_functors(my_input, my_key_funcs);
}
- template< typename TagFuncTuple1, typename TagFuncTuple2>
- static inline void copy_tag_functors(TagFuncTuple1 &my_inputs, TagFuncTuple2 &other_inputs) {
- if(tbb::flow::get<N-1>(other_inputs).my_original_func()) {
- tbb::flow::get<N-1>(my_inputs).set_my_tag_func(tbb::flow::get<N-1>(other_inputs).my_original_func()->clone());
- tbb::flow::get<N-1>(my_inputs).set_my_original_tag_func(tbb::flow::get<N-1>(other_inputs).my_original_func()->clone());
+ template< typename KeyFuncTuple>
+ static inline void copy_key_functors(KeyFuncTuple &my_inputs, KeyFuncTuple &other_inputs) {
+ if(tbb::flow::get<N-1>(other_inputs).get_my_key_func()) {
+ tbb::flow::get<N-1>(my_inputs).set_my_key_func(tbb::flow::get<N-1>(other_inputs).get_my_key_func()->clone());
}
- join_helper<N-1>::copy_tag_functors(my_inputs, other_inputs);
+ join_helper<N-1>::copy_key_functors(my_inputs, other_inputs);
}
template<typename InputTuple>
- static inline void reset_inputs(InputTuple &my_input __TBB_PFG_RESET_ARG(__TBB_COMMA reset_flags f)) {
- join_helper<N-1>::reset_inputs(my_input __TBB_PFG_RESET_ARG(__TBB_COMMA f));
- tbb::flow::get<N-1>(my_input).reset_receiver(__TBB_PFG_RESET_ARG(f));
+ static inline void reset_inputs(InputTuple &my_input, reset_flags f) {
+ join_helper<N-1>::reset_inputs(my_input, f);
+ tbb::flow::get<N-1>(my_input).reset_receiver(f);
}
- };
+
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ template<typename InputTuple>
+ static inline void extract_inputs(InputTuple &my_input) {
+ join_helper<N-1>::extract_inputs(my_input);
+ tbb::flow::get<N-1>(my_input).extract_receiver();
+ }
+#endif
+ }; // join_helper<N>
template< >
struct join_helper<1> {
reset_my_port(my_input);
}
- template<typename InputTuple, typename TagFuncTuple>
- static inline void set_tag_func(InputTuple &my_input, TagFuncTuple &my_tag_funcs) {
- tbb::flow::get<0>(my_input).set_my_original_tag_func(tbb::flow::get<0>(my_tag_funcs));
- tbb::flow::get<0>(my_input).set_my_tag_func(tbb::flow::get<0>(my_input).my_original_func()->clone());
- tbb::flow::get<0>(my_tag_funcs) = NULL;
+ template<typename InputTuple, typename KeyFuncTuple>
+ static inline void set_key_functors(InputTuple &my_input, KeyFuncTuple &my_key_funcs) {
+ tbb::flow::get<0>(my_input).set_my_key_func(tbb::flow::get<0>(my_key_funcs));
+ tbb::flow::get<0>(my_key_funcs) = NULL;
}
- template< typename TagFuncTuple1, typename TagFuncTuple2>
- static inline void copy_tag_functors(TagFuncTuple1 &my_inputs, TagFuncTuple2 &other_inputs) {
- if(tbb::flow::get<0>(other_inputs).my_original_func()) {
- tbb::flow::get<0>(my_inputs).set_my_tag_func(tbb::flow::get<0>(other_inputs).my_original_func()->clone());
- tbb::flow::get<0>(my_inputs).set_my_original_tag_func(tbb::flow::get<0>(other_inputs).my_original_func()->clone());
+ template< typename KeyFuncTuple>
+ static inline void copy_key_functors(KeyFuncTuple &my_inputs, KeyFuncTuple &other_inputs) {
+ if(tbb::flow::get<0>(other_inputs).get_my_key_func()) {
+ tbb::flow::get<0>(my_inputs).set_my_key_func(tbb::flow::get<0>(other_inputs).get_my_key_func()->clone());
}
}
template<typename InputTuple>
- static inline void reset_inputs(InputTuple &my_input __TBB_PFG_RESET_ARG(__TBB_COMMA reset_flags f)) {
- tbb::flow::get<0>(my_input).reset_receiver(__TBB_PFG_RESET_ARG(f));
+ static inline void reset_inputs(InputTuple &my_input, reset_flags f) {
+ tbb::flow::get<0>(my_input).reset_receiver(f);
}
- };
+
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ template<typename InputTuple>
+ static inline void extract_inputs(InputTuple &my_input) {
+ tbb::flow::get<0>(my_input).extract_receiver();
+ }
+#endif
+ }; // join_helper<1>
//! The two-phase join port
template< typename T >
typedef T input_type;
typedef sender<T> predecessor_type;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- typedef std::vector<predecessor_type *> predecessor_vector_type;
+ typedef typename receiver<input_type>::predecessor_list_type predecessor_list_type;
+ typedef typename receiver<input_type>::built_predecessors_type built_predecessors_type;
#endif
private:
// ----------- Aggregator ------------
#endif
};
enum op_stat {WAIT=0, SUCCEEDED, FAILED};
- typedef reserving_port<T> my_class;
+ typedef reserving_port<T> class_type;
class reserving_port_operation : public aggregated_operation<reserving_port_operation> {
public:
predecessor_type *my_pred;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
size_t cnt_val;
- predecessor_vector_type *pvec;
+ predecessor_list_type *plist;
#endif
};
reserving_port_operation(const T& e, op_type t) :
reserving_port_operation(op_type t) : type(char(t)) {}
};
- typedef internal::aggregating_functor<my_class, reserving_port_operation> my_handler;
- friend class internal::aggregating_functor<my_class, reserving_port_operation>;
- aggregator<my_handler, reserving_port_operation> my_aggregator;
+ typedef internal::aggregating_functor<class_type, reserving_port_operation> handler_type;
+ friend class internal::aggregating_functor<class_type, reserving_port_operation>;
+ aggregator<handler_type, reserving_port_operation> my_aggregator;
void handle_operations(reserving_port_operation* op_list) {
reserving_port_operation *current;
__TBB_store_with_release(current->status, SUCCEEDED);
break;
case blt_pred_cpy:
- my_predecessors.copy_predecessors(*(current->pvec));
+ my_predecessors.copy_predecessors(*(current->plist));
__TBB_store_with_release(current->status, SUCCEEDED);
break;
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
reserving_port() : reserved(false) {
my_join = NULL;
my_predecessors.set_owner( this );
- my_aggregator.initialize_handler(my_handler(this));
+ my_aggregator.initialize_handler(handler_type(this));
}
// copy constructor
reserved = false;
my_join = NULL;
my_predecessors.set_owner( this );
- my_aggregator.initialize_handler(my_handler(this));
+ my_aggregator.initialize_handler(handler_type(this));
}
void set_join_node_pointer(forwarding_base *join) {
}
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ /*override*/ built_predecessors_type &built_predecessors() { return my_predecessors.built_predecessors(); }
/*override*/void internal_add_built_predecessor(predecessor_type &src) {
reserving_port_operation op_data(src, add_blt_pred);
my_aggregator.execute(&op_data);
return op_data.cnt_val;
}
- /*override*/void copy_predecessors(predecessor_vector_type &v) {
+ /*override*/void copy_predecessors(predecessor_list_type &l) {
reserving_port_operation op_data(blt_pred_cpy);
- op_data.pvec = &v;
+ op_data.plist = &l;
my_aggregator.execute(&op_data);
}
+
+ void extract_receiver() {
+ my_predecessors.built_predecessors().receiver_extract(*this);
+ }
+
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
- /*override*/void reset_receiver( __TBB_PFG_RESET_ARG(reset_flags f)) {
- my_predecessors.reset(__TBB_PFG_RESET_ARG(f));
+ /*override*/void reset_receiver( reset_flags f) {
+ if(f & rf_clear_edges) my_predecessors.clear();
+ else
+ my_predecessors.reset();
reserved = false;
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- __TBB_ASSERT(!(f&rf_extract) || my_predecessors.empty(), "port edges not removed");
-#endif
+ __TBB_ASSERT(!(f&rf_clear_edges) || my_predecessors.empty(), "port edges not removed");
}
private:
forwarding_base *my_join;
reservable_predecessor_cache< T, null_mutex > my_predecessors;
bool reserved;
- };
+ }; // reserving_port
//! queueing join_port
template<typename T>
public:
typedef T input_type;
typedef sender<T> predecessor_type;
- typedef queueing_port<T> my_node_type;
+ typedef queueing_port<T> class_type;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- typedef std::vector<predecessor_type *> predecessor_vector_type;
+ typedef typename receiver<input_type>::built_predecessors_type built_predecessors_type;
+ typedef typename receiver<input_type>::predecessor_list_type predecessor_list_type;
#endif
// ----------- Aggregator ------------
private:
enum op_type { get__item, res_port, try__put_task
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- , add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy
+ , add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy
#endif
};
enum op_stat {WAIT=0, SUCCEEDED, FAILED};
- typedef queueing_port<T> my_class;
class queueing_port_operation : public aggregated_operation<queueing_port_operation> {
public:
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
sender<T> *pred;
size_t cnt_val;
- predecessor_vector_type *pvec;
+ predecessor_list_type *plist;
#endif
task * bypass_t;
// constructor for value parameter
{}
};
- typedef internal::aggregating_functor<my_class, queueing_port_operation> my_handler;
- friend class internal::aggregating_functor<my_class, queueing_port_operation>;
- aggregator<my_handler, queueing_port_operation> my_aggregator;
+ typedef internal::aggregating_functor<class_type, queueing_port_operation> handler_type;
+ friend class internal::aggregating_functor<class_type, queueing_port_operation>;
+ aggregator<handler_type, queueing_port_operation> my_aggregator;
void handle_operations(queueing_port_operation* op_list) {
queueing_port_operation *current;
__TBB_store_with_release(current->status, SUCCEEDED);
break;
case blt_pred_cpy:
- my_built_predecessors.copy_edges(*(current->pvec));
+ my_built_predecessors.copy_edges(*(current->plist));
__TBB_store_with_release(current->status, SUCCEEDED);
break;
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
//! Constructor
queueing_port() : item_buffer<T>() {
my_join = NULL;
- my_aggregator.initialize_handler(my_handler(this));
+ my_aggregator.initialize_handler(handler_type(this));
}
//! copy constructor
queueing_port(const queueing_port& /* other */) : receiver<T>(), item_buffer<T>() {
my_join = NULL;
- my_aggregator.initialize_handler(my_handler(this));
+ my_aggregator.initialize_handler(handler_type(this));
}
//! record parent for tallying available items
}
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ /*override*/ built_predecessors_type &built_predecessors() { return my_built_predecessors; }
+
/*override*/void internal_add_built_predecessor(sender<T> &p) {
queueing_port_operation op_data(add_blt_pred);
op_data.pred = &p;
return op_data.cnt_val;
}
- /*override*/void copy_predecessors(predecessor_vector_type &v) {
+ /*override*/void copy_predecessors(predecessor_list_type &l) {
queueing_port_operation op_data(blt_pred_cpy);
- op_data.pvec = &v;
+ op_data.plist = &l;
my_aggregator.execute(&op_data);
}
- /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f)) {
+ void extract_receiver() {
item_buffer<T>::reset();
- if (f & rf_extract)
- my_built_predecessors.receiver_extract(*this);
+ my_built_predecessors.receiver_extract(*this);
}
-#else
- /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) { item_buffer<T>::reset(); }
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
+ /*override*/void reset_receiver(reset_flags f) {
+ tbb::internal::suppress_unused_warning(f);
+ item_buffer<T>::reset();
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ if (f & rf_clear_edges)
+ my_built_predecessors.clear();
+#endif
+ }
+
private:
forwarding_base *my_join;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
edge_container<sender<T> > my_built_predecessors;
#endif
- };
+ }; // queueing_port
#include "_flow_graph_tagged_buffer_impl.h"
- template< typename T >
- class tag_matching_port : public receiver<T>, public tagged_buffer< tag_value, T, NO_TAG > {
+ template<typename K>
+ struct count_element {
+ K my_key;
+ size_t my_value;
+ };
+
+ // method to access the key in the counting table
+ // the ref has already been removed from K
+ template< typename K >
+ struct key_to_count_functor {
+ typedef count_element<K> table_item_type;
+ const K& operator()(const table_item_type& v) { return v.my_key; }
+ };
+
+ // the ports can have only one template parameter. We wrap the types needed in
+ // a traits type
+ template< class TraitsType >
+ class key_matching_port :
+ public receiver<typename TraitsType::T>,
+ public hash_buffer< typename TraitsType::K, typename TraitsType::T, typename TraitsType::TtoK,
+ typename TraitsType::KHash > {
public:
- typedef T input_type;
- typedef sender<T> predecessor_type;
- typedef tag_matching_port<T> my_node_type; // for forwarding, if needed
- typedef function_body<input_type, tag_value> my_tag_func_type;
- typedef tagged_buffer<tag_value,T,NO_TAG> my_buffer_type;
+ typedef TraitsType traits;
+ typedef key_matching_port<traits> class_type;
+ typedef typename TraitsType::T input_type;
+ typedef typename TraitsType::K key_type;
+ typedef typename tbb::internal::strip<key_type>::type noref_key_type;
+ typedef sender<input_type> predecessor_type;
+ typedef typename TraitsType::TtoK type_to_key_func_type;
+ typedef typename TraitsType::KHash hash_compare_type;
+ typedef hash_buffer< key_type, input_type, type_to_key_func_type, hash_compare_type > buffer_type;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- typedef std::vector<predecessor_type *> predecessor_vector_type;
+ typedef typename receiver<input_type>::built_predecessors_type built_predecessors_type;
+ typedef typename receiver<input_type>::predecessor_list_type predecessor_list_type;
#endif
private:
// ----------- Aggregator ------------
private:
- enum op_type { try__put, get__item, res_port,
- add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy
+ enum op_type { try__put, get__item, res_port
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ , add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy
+#endif
};
enum op_stat {WAIT=0, SUCCEEDED, FAILED};
- typedef tag_matching_port<T> my_class;
- class tag_matching_port_operation : public aggregated_operation<tag_matching_port_operation> {
+ class key_matching_port_operation : public aggregated_operation<key_matching_port_operation> {
public:
char type;
- T my_val;
- T *my_arg;
+ input_type my_val;
+ input_type *my_arg;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
predecessor_type *pred;
size_t cnt_val;
- predecessor_vector_type *pvec;
+ predecessor_list_type *plist;
#endif
- tag_value my_tag_value;
// constructor for value parameter
- tag_matching_port_operation(const T& e, op_type t) :
+ key_matching_port_operation(const input_type& e, op_type t) :
type(char(t)), my_val(e) {}
// constructor for pointer parameter
- tag_matching_port_operation(const T* p, op_type t) :
- type(char(t)), my_arg(const_cast<T*>(p)) {}
+ key_matching_port_operation(const input_type* p, op_type t) :
+ type(char(t)), my_arg(const_cast<input_type*>(p)) {}
// constructor with no parameter
- tag_matching_port_operation(op_type t) : type(char(t)) {}
+ key_matching_port_operation(op_type t) : type(char(t)) {}
};
- typedef internal::aggregating_functor<my_class, tag_matching_port_operation> my_handler;
- friend class internal::aggregating_functor<my_class, tag_matching_port_operation>;
- aggregator<my_handler, tag_matching_port_operation> my_aggregator;
+ typedef internal::aggregating_functor<class_type, key_matching_port_operation> handler_type;
+ friend class internal::aggregating_functor<class_type, key_matching_port_operation>;
+ aggregator<handler_type, key_matching_port_operation> my_aggregator;
- void handle_operations(tag_matching_port_operation* op_list) {
- tag_matching_port_operation *current;
+ void handle_operations(key_matching_port_operation* op_list) {
+ key_matching_port_operation *current;
while(op_list) {
current = op_list;
op_list = op_list->next;
switch(current->type) {
case try__put: {
- bool was_inserted = this->tagged_insert(current->my_tag_value, current->my_val);
+ bool was_inserted = this->insert_with_key(current->my_val);
// return failure if a duplicate insertion occurs
__TBB_store_with_release(current->status, was_inserted ? SUCCEEDED : FAILED);
}
break;
case get__item:
- // use current_tag from FE for item
- if(!this->tagged_find(my_join->current_tag, *(current->my_arg))) {
- __TBB_ASSERT(false, "Failed to find item corresponding to current_tag.");
+ // use current_key from FE for item
+ if(!this->find_with_key(my_join->current_key, *(current->my_arg))) {
+ __TBB_ASSERT(false, "Failed to find item corresponding to current_key.");
}
__TBB_store_with_release(current->status, SUCCEEDED);
break;
case res_port:
- // use current_tag from FE for item
- this->tagged_delete(my_join->current_tag);
+ // use current_key from FE for item
+ this->delete_with_key(my_join->current_key);
__TBB_store_with_release(current->status, SUCCEEDED);
break;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
__TBB_store_with_release(current->status, SUCCEEDED);
break;
case blt_pred_cpy:
- my_built_predecessors.copy_edges(*(current->pvec));
+ my_built_predecessors.copy_edges(*(current->plist));
__TBB_store_with_release(current->status, SUCCEEDED);
break;
#endif
template< typename R, typename B > friend class run_and_put_task;
template<typename X, typename Y> friend class internal::broadcast_cache;
template<typename X, typename Y> friend class internal::round_robin_cache;
- /*override*/task *try_put_task(const T& v) {
- tag_matching_port_operation op_data(v, try__put);
- op_data.my_tag_value = (*my_tag_func)(v);
+ /*override*/task *try_put_task(const input_type& v) {
+ key_matching_port_operation op_data(v, try__put);
task *rtask = NULL;
my_aggregator.execute(&op_data);
if(op_data.status == SUCCEEDED) {
- rtask = my_join->increment_tag_count(op_data.my_tag_value, false); // may spawn
+ rtask = my_join->increment_key_count((*(this->get_key_func()))(v), false); // may spawn
// rtask has to reflect the return status of the try_put
if(!rtask) rtask = SUCCESSFULLY_ENQUEUED;
}
public:
- tag_matching_port() : receiver<T>(), tagged_buffer<tag_value, T, NO_TAG>() {
+ key_matching_port() : receiver<input_type>(), buffer_type() {
my_join = NULL;
- my_tag_func = NULL;
- my_original_tag_func = NULL;
- my_aggregator.initialize_handler(my_handler(this));
+ my_aggregator.initialize_handler(handler_type(this));
}
// copy constructor
- tag_matching_port(const tag_matching_port& /*other*/) : receiver<T>(), tagged_buffer<tag_value,T, NO_TAG>() {
+ key_matching_port(const key_matching_port& /*other*/) : receiver<input_type>(), buffer_type() {
my_join = NULL;
- // setting the tag methods is done in the copy-constructor for the front-end.
- my_tag_func = NULL;
- my_original_tag_func = NULL;
- my_aggregator.initialize_handler(my_handler(this));
+ my_aggregator.initialize_handler(handler_type(this));
}
- ~tag_matching_port() {
- if (my_tag_func) delete my_tag_func;
- if (my_original_tag_func) delete my_original_tag_func;
- }
+ ~key_matching_port() { }
void set_join_node_pointer(forwarding_base *join) {
- my_join = join;
+ my_join = dynamic_cast<matching_forwarding_base<key_type>*>(join);
}
- void set_my_original_tag_func(my_tag_func_type *f) {
- my_original_tag_func = f;
- }
+ void set_my_key_func(type_to_key_func_type *f) { this->set_key_func(f); }
- void set_my_tag_func(my_tag_func_type *f) {
- my_tag_func = f;
- }
+ type_to_key_func_type* get_my_key_func() { return this->get_key_func(); }
- bool get_item( T &v ) {
- tag_matching_port_operation op_data(&v, get__item);
+ bool get_item( input_type &v ) {
+ // aggregator uses current_key from FE for Key
+ key_matching_port_operation op_data(&v, get__item);
my_aggregator.execute(&op_data);
return op_data.status == SUCCEEDED;
}
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- /*override*/void internal_add_built_predecessor(sender<T> &p) {
- tag_matching_port_operation op_data(add_blt_pred);
+ /*override*/built_predecessors_type &built_predecessors() { return my_built_predecessors; }
+
+ /*override*/void internal_add_built_predecessor(sender<input_type> &p) {
+ key_matching_port_operation op_data(add_blt_pred);
op_data.pred = &p;
my_aggregator.execute(&op_data);
}
- /*override*/void internal_delete_built_predecessor(sender<T> &p) {
- tag_matching_port_operation op_data(del_blt_pred);
+ /*override*/void internal_delete_built_predecessor(sender<input_type> &p) {
+ key_matching_port_operation op_data(del_blt_pred);
op_data.pred = &p;
my_aggregator.execute(&op_data);
}
/*override*/size_t predecessor_count() {
- tag_matching_port_operation op_data(blt_pred_cnt);
+ key_matching_port_operation op_data(blt_pred_cnt);
my_aggregator.execute(&op_data);
return op_data.cnt_val;
}
- /*override*/void copy_predecessors(predecessor_vector_type &v) {
- tag_matching_port_operation op_data(blt_pred_cpy);
- op_data.pvec = &v;
+ /*override*/void copy_predecessors(predecessor_list_type &l) {
+ key_matching_port_operation op_data(blt_pred_cpy);
+ op_data.plist = &l;
my_aggregator.execute(&op_data);
}
#endif
// reset_port is called when item is accepted by successor, but
// is initiated by join_node.
void reset_port() {
- tag_matching_port_operation op_data(res_port);
+ key_matching_port_operation op_data(res_port);
my_aggregator.execute(&op_data);
return;
}
- my_tag_func_type *my_func() { return my_tag_func; }
- my_tag_func_type *my_original_func() { return my_original_tag_func; }
-
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags f)) {
- my_buffer_type::reset();
- if (f & rf_extract)
- my_built_predecessors.receiver_extract(*this);
+ void extract_receiver() {
+ buffer_type::reset();
+ my_built_predecessors.receiver_extract(*this);
}
-#else
- /*override*/void reset_receiver(__TBB_PFG_RESET_ARG(reset_flags /*f*/)) { my_buffer_type::reset(); }
#endif
+ /*override*/void reset_receiver(reset_flags f ) {
+ tbb::internal::suppress_unused_warning(f);
+ buffer_type::reset();
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ if (f & rf_clear_edges)
+ my_built_predecessors.clear();
+#endif
+ }
private:
- // need map of tags to values
- forwarding_base *my_join;
+ // my_join forwarding base used to count number of inputs that
+ // received key.
+ matching_forwarding_base<key_type> *my_join;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
edge_container<predecessor_type> my_built_predecessors;
#endif
- my_tag_func_type *my_tag_func;
- my_tag_func_type *my_original_tag_func;
- }; // tag_matching_port
+ }; // key_matching_port
using namespace graph_policy_namespace;
- template<graph_buffer_policy JP, typename InputTuple, typename OutputTuple>
+ template<typename JP, typename InputTuple, typename OutputTuple>
class join_node_base;
//! join_node_FE : implements input port policy
- template<graph_buffer_policy JP, typename InputTuple, typename OutputTuple>
+ template<typename JP, typename InputTuple, typename OutputTuple>
class join_node_FE;
template<typename InputTuple, typename OutputTuple>
static const int N = tbb::flow::tuple_size<OutputTuple>::value;
typedef OutputTuple output_type;
typedef InputTuple input_type;
- typedef join_node_base<reserving, InputTuple, OutputTuple> my_node_type; // for forwarding
+ typedef join_node_base<reserving, InputTuple, OutputTuple> base_node_type; // for forwarding
join_node_FE(graph &g) : forwarding_base(g), my_node(NULL) {
ports_with_no_inputs = N;
join_helper<N>::set_join_node_pointer(my_inputs, this);
}
- join_node_FE(const join_node_FE& other) : forwarding_base(*(other.forwarding_base::my_graph_ptr)), my_node(NULL) {
+ join_node_FE(const join_node_FE& other) : forwarding_base(*(other.forwarding_base::graph_pointer)), my_node(NULL) {
ports_with_no_inputs = N;
join_helper<N>::set_join_node_pointer(my_inputs, this);
}
- void set_my_node(my_node_type *new_my_node) { my_node = new_my_node; }
+ void set_my_node(base_node_type *new_my_node) { my_node = new_my_node; }
void increment_port_count() {
++ports_with_no_inputs;
// if all input_ports have predecessors, spawn forward to try and consume tuples
task * decrement_port_count(bool handle_task) {
if(ports_with_no_inputs.fetch_and_decrement() == 1) {
- task* tp = this->my_graph_ptr->root_task();
- if(tp) {
- task *rtask = new ( task::allocate_additional_child_of( *tp ) )
- forward_task_bypass<my_node_type>(*my_node);
+ if(this->graph_pointer->is_active()) {
+ task *rtask = new ( task::allocate_additional_child_of( *(this->graph_pointer->root_task()) ) )
+ forward_task_bypass<base_node_type>(*my_node);
if(!handle_task) return rtask;
FLOW_SPAWN(*rtask);
}
protected:
- void reset( __TBB_PFG_RESET_ARG( reset_flags f)) {
+ void reset( reset_flags f) {
// called outside of parallel contexts
ports_with_no_inputs = N;
- join_helper<N>::reset_inputs(my_inputs __TBB_PFG_RESET_ARG( __TBB_COMMA f));
+ join_helper<N>::reset_inputs(my_inputs, f);
}
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ void extract( ) {
+ // called outside of parallel contexts
+ ports_with_no_inputs = N;
+ join_helper<N>::extract_inputs(my_inputs);
+ }
+#endif
+
// all methods on input ports should be called under mutual exclusion from join_node_base.
bool tuple_build_may_succeed() {
}
input_type my_inputs;
- my_node_type *my_node;
+ base_node_type *my_node;
atomic<size_t> ports_with_no_inputs;
- };
+ }; // join_node_FE<reserving, ... >
template<typename InputTuple, typename OutputTuple>
class join_node_FE<queueing, InputTuple, OutputTuple> : public forwarding_base {
static const int N = tbb::flow::tuple_size<OutputTuple>::value;
typedef OutputTuple output_type;
typedef InputTuple input_type;
- typedef join_node_base<queueing, InputTuple, OutputTuple> my_node_type; // for forwarding
+ typedef join_node_base<queueing, InputTuple, OutputTuple> base_node_type; // for forwarding
join_node_FE(graph &g) : forwarding_base(g), my_node(NULL) {
ports_with_no_items = N;
join_helper<N>::set_join_node_pointer(my_inputs, this);
}
- join_node_FE(const join_node_FE& other) : forwarding_base(*(other.forwarding_base::my_graph_ptr)), my_node(NULL) {
+ join_node_FE(const join_node_FE& other) : forwarding_base(*(other.forwarding_base::graph_pointer)), my_node(NULL) {
ports_with_no_items = N;
join_helper<N>::set_join_node_pointer(my_inputs, this);
}
// needed for forwarding
- void set_my_node(my_node_type *new_my_node) { my_node = new_my_node; }
+ void set_my_node(base_node_type *new_my_node) { my_node = new_my_node; }
void reset_port_count() {
ports_with_no_items = N;
task * decrement_port_count(bool handle_task)
{
if(ports_with_no_items.fetch_and_decrement() == 1) {
- task* tp = this->my_graph_ptr->root_task();
- if(tp) {
- task *rtask = new ( task::allocate_additional_child_of( *tp ) )
- forward_task_bypass <my_node_type>(*my_node);
+ if(this->graph_pointer->is_active()) {
+ task *rtask = new ( task::allocate_additional_child_of( *(this->graph_pointer->root_task()) ) )
+ forward_task_bypass <base_node_type>(*my_node);
if(!handle_task) return rtask;
FLOW_SPAWN( *rtask);
}
protected:
- void reset( __TBB_PFG_RESET_ARG( reset_flags f)) {
+ void reset( reset_flags f) {
reset_port_count();
- join_helper<N>::reset_inputs(my_inputs __TBB_PFG_RESET_ARG( __TBB_COMMA f) );
+ join_helper<N>::reset_inputs(my_inputs, f );
}
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ void extract() {
+ reset_port_count();
+ join_helper<N>::extract_inputs(my_inputs);
+ }
+#endif
// all methods on input ports should be called under mutual exclusion from join_node_base.
bool tuple_build_may_succeed() {
}
input_type my_inputs;
- my_node_type *my_node;
+ base_node_type *my_node;
atomic<size_t> ports_with_no_items;
- };
-
- // tag_matching join input port.
- template<typename InputTuple, typename OutputTuple>
- class join_node_FE<tag_matching, InputTuple, OutputTuple> : public forwarding_base,
- // buffer of tag value counts buffer of output items
- public tagged_buffer<tag_value, size_t, NO_TAG>, public item_buffer<OutputTuple> {
+ }; // join_node_FE<queueing, ...>
+
+ // key_matching join front-end.
+ template<typename InputTuple, typename OutputTuple, typename K, typename KHash>
+ class join_node_FE<key_matching<K,KHash>, InputTuple, OutputTuple> : public matching_forwarding_base<K>,
+ // buffer of key value counts
+ public hash_buffer< // typedefed below to key_to_count_buffer_type
+ typename tbb::internal::strip<K>::type&, // force ref type on K
+ count_element<typename tbb::internal::strip<K>::type>,
+ internal::type_to_key_function_body<
+ count_element<typename tbb::internal::strip<K>::type>,
+ typename tbb::internal::strip<K>::type& >,
+ KHash >,
+ // buffer of output items
+ public item_buffer<OutputTuple> {
public:
static const int N = tbb::flow::tuple_size<OutputTuple>::value;
typedef OutputTuple output_type;
typedef InputTuple input_type;
- typedef tagged_buffer<tag_value, size_t, NO_TAG> my_tag_buffer;
+ typedef K key_type;
+ typedef typename tbb::internal::strip<key_type>::type unref_key_type;
+ typedef KHash key_hash_compare;
+ // must use K without ref.
+ typedef count_element<unref_key_type> count_element_type;
+ // method that lets us refer to the key of this type.
+ typedef key_to_count_functor<unref_key_type> key_to_count_func;
+ typedef internal::type_to_key_function_body< count_element_type, unref_key_type&> TtoK_function_body_type;
+ typedef internal::type_to_key_function_body_leaf<count_element_type, unref_key_type&, key_to_count_func> TtoK_function_body_leaf_type;
+ // this is the type of the special table that keeps track of the number of discrete
+ // elements corresponding to each key that we've seen.
+ typedef hash_buffer< unref_key_type&, count_element_type, TtoK_function_body_type, key_hash_compare >
+ key_to_count_buffer_type;
typedef item_buffer<output_type> output_buffer_type;
- typedef join_node_base<tag_matching, InputTuple, OutputTuple> my_node_type; // for forwarding
+ typedef join_node_base<key_matching<key_type,key_hash_compare>, InputTuple, OutputTuple> base_node_type; // for forwarding
+ typedef matching_forwarding_base<key_type> forwarding_base_type;
// ----------- Aggregator ------------
// the aggregator is only needed to serialize the access to the hash table.
private:
enum op_type { res_count, inc_count, may_succeed, try_make };
enum op_stat {WAIT=0, SUCCEEDED, FAILED};
- typedef join_node_FE<tag_matching, InputTuple, OutputTuple> my_class;
+ typedef join_node_FE<key_matching<key_type,key_hash_compare>, InputTuple, OutputTuple> class_type;
- class tag_matching_FE_operation : public aggregated_operation<tag_matching_FE_operation> {
+ class key_matching_FE_operation : public aggregated_operation<key_matching_FE_operation> {
public:
char type;
- union {
- tag_value my_val;
- output_type* my_output;
- };
+ unref_key_type my_val;
+ output_type* my_output;
task *bypass_t;
bool enqueue_task;
// constructor for value parameter
- tag_matching_FE_operation(const tag_value& e , bool q_task , op_type t) : type(char(t)), my_val(e),
- bypass_t(NULL), enqueue_task(q_task) {}
- tag_matching_FE_operation(output_type *p, op_type t) : type(char(t)), my_output(p), bypass_t(NULL),
+ key_matching_FE_operation(const unref_key_type& e , bool q_task , op_type t) : type(char(t)), my_val(e),
+ my_output(NULL), bypass_t(NULL), enqueue_task(q_task) {}
+ key_matching_FE_operation(output_type *p, op_type t) : type(char(t)), my_output(p), bypass_t(NULL),
enqueue_task(true) {}
// constructor with no parameter
- tag_matching_FE_operation(op_type t) : type(char(t)), bypass_t(NULL), enqueue_task(true) {}
+ key_matching_FE_operation(op_type t) : type(char(t)), my_output(NULL), bypass_t(NULL), enqueue_task(true) {}
};
- typedef internal::aggregating_functor<my_class, tag_matching_FE_operation> my_handler;
- friend class internal::aggregating_functor<my_class, tag_matching_FE_operation>;
- aggregator<my_handler, tag_matching_FE_operation> my_aggregator;
+ typedef internal::aggregating_functor<class_type, key_matching_FE_operation> handler_type;
+ friend class internal::aggregating_functor<class_type, key_matching_FE_operation>;
+ aggregator<handler_type, key_matching_FE_operation> my_aggregator;
// called from aggregator, so serialized
- // construct as many output objects as possible.
// returns a task pointer if a task would have been enqueued but we asked that
// it be returned. Otherwise returns NULL.
- task * fill_output_buffer(tag_value t, bool should_enqueue, bool handle_task) {
+ task * fill_output_buffer(unref_key_type &t, bool should_enqueue, bool handle_task) {
output_type l_out;
task *rtask = NULL;
- task* tp = this->my_graph_ptr->root_task();
- bool do_fwd = should_enqueue && this->buffer_empty() && tp;
- this->current_tag = t;
- this->tagged_delete(this->current_tag); // remove the tag
+ bool do_fwd = should_enqueue && this->buffer_empty() && this->graph_pointer->is_active();
+ this->current_key = t;
+ this->delete_with_key(this->current_key); // remove the key
if(join_helper<N>::get_items(my_inputs, l_out)) { // <== call back
this->push_back(l_out);
if(do_fwd) { // we enqueue if receiving an item from predecessor, not if successor asks for item
- rtask = new ( task::allocate_additional_child_of( *tp ) )
- forward_task_bypass<my_node_type>(*my_node);
+ rtask = new ( task::allocate_additional_child_of( *(this->graph_pointer->root_task()) ) )
+ forward_task_bypass<base_node_type>(*my_node);
if(handle_task) {
FLOW_SPAWN(*rtask);
rtask = NULL;
}
// retire the input values
join_helper<N>::reset_ports(my_inputs); // <== call back
- this->current_tag = NO_TAG;
}
else {
__TBB_ASSERT(false, "should have had something to push");
return rtask;
}
- void handle_operations(tag_matching_FE_operation* op_list) {
- tag_matching_FE_operation *current;
+ void handle_operations(key_matching_FE_operation* op_list) {
+ key_matching_FE_operation *current;
while(op_list) {
current = op_list;
op_list = op_list->next;
}
break;
case inc_count: { // called from input ports
- size_t *p = 0;
- tag_value t = current->my_val;
+ count_element_type *p = 0;
+ unref_key_type &t = current->my_val;
bool do_enqueue = current->enqueue_task;
- if(!(this->tagged_find_ref(t,p))) {
- this->tagged_insert(t, 0);
- if(!(this->tagged_find_ref(t,p))) {
- __TBB_ASSERT(false, "should find tag after inserting it");
+ if(!(this->find_ref_with_key(t,p))) {
+ count_element_type ev;
+ ev.my_key = t;
+ ev.my_value = 0;
+ this->insert_with_key(ev);
+ if(!(this->find_ref_with_key(t,p))) {
+ __TBB_ASSERT(false, "should find key after inserting it");
}
}
- if(++(*p) == size_t(N)) {
+ if(++(p->my_value) == size_t(N)) {
task *rtask = fill_output_buffer(t, true, do_enqueue);
__TBB_ASSERT(!rtask || !do_enqueue, "task should not be returned");
current->bypass_t = rtask;
public:
template<typename FunctionTuple>
- join_node_FE(graph &g, FunctionTuple tag_funcs) : forwarding_base(g), my_node(NULL) {
+ join_node_FE(graph &g, FunctionTuple &TtoK_funcs) : forwarding_base_type(g), my_node(NULL) {
join_helper<N>::set_join_node_pointer(my_inputs, this);
- join_helper<N>::set_tag_func(my_inputs, tag_funcs);
- my_aggregator.initialize_handler(my_handler(this));
+ join_helper<N>::set_key_functors(my_inputs, TtoK_funcs);
+ my_aggregator.initialize_handler(handler_type(this));
+ TtoK_function_body_type *cfb = new TtoK_function_body_leaf_type(key_to_count_func());
+ this->set_key_func(cfb);
}
- join_node_FE(const join_node_FE& other) : forwarding_base(*(other.forwarding_base::my_graph_ptr)), my_tag_buffer(),
+ join_node_FE(const join_node_FE& other) : forwarding_base_type(*(other.forwarding_base_type::graph_pointer)), key_to_count_buffer_type(),
output_buffer_type() {
my_node = NULL;
join_helper<N>::set_join_node_pointer(my_inputs, this);
- join_helper<N>::copy_tag_functors(my_inputs, const_cast<input_type &>(other.my_inputs));
- my_aggregator.initialize_handler(my_handler(this));
+ join_helper<N>::copy_key_functors(my_inputs, const_cast<input_type &>(other.my_inputs));
+ my_aggregator.initialize_handler(handler_type(this));
+ TtoK_function_body_type *cfb = new TtoK_function_body_leaf_type(key_to_count_func());
+ this->set_key_func(cfb);
}
// needed for forwarding
- void set_my_node(my_node_type *new_my_node) { my_node = new_my_node; }
+ void set_my_node(base_node_type *new_my_node) { my_node = new_my_node; }
void reset_port_count() { // called from BE
- tag_matching_FE_operation op_data(res_count);
+ key_matching_FE_operation op_data(res_count);
my_aggregator.execute(&op_data);
return;
}
// if all input_ports have items, spawn forward to try and consume tuples
// return a task if we are asked and did create one.
- task *increment_tag_count(tag_value t, bool handle_task) { // called from input_ports
- tag_matching_FE_operation op_data(t, handle_task, inc_count);
+ /*override*/ task *increment_key_count(unref_key_type const & t, bool handle_task) { // called from input_ports
+ key_matching_FE_operation op_data(t, handle_task, inc_count);
my_aggregator.execute(&op_data);
return op_data.bypass_t;
}
protected:
- void reset( __TBB_PFG_RESET_ARG( reset_flags f )) {
+ void reset( reset_flags f ) {
// called outside of parallel contexts
- join_helper<N>::reset_inputs(my_inputs __TBB_PFG_RESET_ARG( __TBB_COMMA f));
+ join_helper<N>::reset_inputs(my_inputs, f);
- my_tag_buffer::reset(); // have to reset the tag counts
- output_buffer_type::reset(); // also the queue of outputs
- my_node->current_tag = NO_TAG;
+ key_to_count_buffer_type::reset();
+ output_buffer_type::reset();
}
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ void extract() {
+ // called outside of parallel contexts
+ join_helper<N>::extract_inputs(my_inputs);
+ key_to_count_buffer_type::reset(); // have to reset the key counts
+ output_buffer_type::reset(); // also the queue of outputs
+ // my_node->current_tag = NO_TAG;
+ }
+#endif
// all methods on input ports should be called under mutual exclusion from join_node_base.
bool tuple_build_may_succeed() { // called from back-end
- tag_matching_FE_operation op_data(may_succeed);
+ key_matching_FE_operation op_data(may_succeed);
my_aggregator.execute(&op_data);
return op_data.status == SUCCEEDED;
}
- // cannot lock while calling back to input_ports. current_tag will only be set
+ // cannot lock while calling back to input_ports. current_key will only be set
// and reset under the aggregator, so it will remain consistent.
bool try_to_make_tuple(output_type &out) {
- tag_matching_FE_operation op_data(&out,try_make);
+ key_matching_FE_operation op_data(&out,try_make);
my_aggregator.execute(&op_data);
return op_data.status == SUCCEEDED;
}
void tuple_accepted() {
- reset_port_count(); // reset current_tag after ports reset.
+ reset_port_count(); // reset current_key after ports reset.
}
void tuple_rejected() {
}
input_type my_inputs; // input ports
- my_node_type *my_node;
- }; // join_node_FE<tag_matching, InputTuple, OutputTuple>
+ base_node_type *my_node;
+ }; // join_node_FE<key_matching<K,KHash>, InputTuple, OutputTuple>
//! join_node_base
- template<graph_buffer_policy JP, typename InputTuple, typename OutputTuple>
+ template<typename JP, typename InputTuple, typename OutputTuple>
class join_node_base : public graph_node, public join_node_FE<JP, InputTuple, OutputTuple>,
public sender<OutputTuple> {
protected:
using input_ports_type::tuple_accepted;
using input_ports_type::tuple_rejected;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- typedef std::vector<successor_type *> successor_vector_type;
+ typedef typename sender<output_type>::built_successors_type built_successors_type;
+ typedef typename sender<output_type>::successor_list_type successor_list_type;
#endif
private:
#endif
};
enum op_stat {WAIT=0, SUCCEEDED, FAILED};
- typedef join_node_base<JP,InputTuple,OutputTuple> my_class;
+ typedef join_node_base<JP,InputTuple,OutputTuple> class_type;
class join_node_base_operation : public aggregated_operation<join_node_base_operation> {
public:
successor_type *my_succ;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
size_t cnt_val;
- successor_vector_type *svec;
+ successor_list_type *slist;
#endif
};
task *bypass_t;
join_node_base_operation(op_type t) : type(char(t)), bypass_t(NULL) {}
};
- typedef internal::aggregating_functor<my_class, join_node_base_operation> my_handler;
- friend class internal::aggregating_functor<my_class, join_node_base_operation>;
+ typedef internal::aggregating_functor<class_type, join_node_base_operation> handler_type;
+ friend class internal::aggregating_functor<class_type, join_node_base_operation>;
bool forwarder_busy;
- aggregator<my_handler, join_node_base_operation> my_aggregator;
+ aggregator<handler_type, join_node_base_operation> my_aggregator;
void handle_operations(join_node_base_operation* op_list) {
join_node_base_operation *current;
switch(current->type) {
case reg_succ: {
my_successors.register_successor(*(current->my_succ));
- task* tp = this->graph_node::my_graph.root_task();
- if(tuple_build_may_succeed() && !forwarder_busy && tp) {
- task *rtask = new ( task::allocate_additional_child_of(*tp) )
+ if(tuple_build_may_succeed() && !forwarder_busy && this->graph_node::my_graph.is_active()) {
+ task *rtask = new ( task::allocate_additional_child_of(*(this->graph_node::my_graph.root_task())) )
forward_task_bypass
<join_node_base<JP,InputTuple,OutputTuple> >(*this);
FLOW_SPAWN(*rtask);
bool build_succeeded;
task *last_task = NULL;
output_type out;
- if(tuple_build_may_succeed()) {
+ if(tuple_build_may_succeed()) { // checks output queue of FE
do {
- build_succeeded = try_to_make_tuple(out);
+ build_succeeded = try_to_make_tuple(out); // fetch front_end of queue
if(build_succeeded) {
task *new_task = my_successors.try_put_task(out);
last_task = combine_tasks(last_task, new_task);
__TBB_store_with_release(current->status, SUCCEEDED);
break;
case blt_succ_cpy:
- my_successors.copy_successors(*(current->svec));
+ my_successors.copy_successors(*(current->slist));
__TBB_store_with_release(current->status, SUCCEEDED);
break;
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
join_node_base(graph &g) : graph_node(g), input_ports_type(g), forwarder_busy(false) {
my_successors.set_owner(this);
input_ports_type::set_my_node(this);
- my_aggregator.initialize_handler(my_handler(this));
+ my_aggregator.initialize_handler(handler_type(this));
}
join_node_base(const join_node_base& other) :
sender<OutputTuple>(), forwarder_busy(false), my_successors() {
my_successors.set_owner(this);
input_ports_type::set_my_node(this);
- my_aggregator.initialize_handler(my_handler(this));
+ my_aggregator.initialize_handler(handler_type(this));
}
template<typename FunctionTuple>
join_node_base(graph &g, FunctionTuple f) : graph_node(g), input_ports_type(g, f), forwarder_busy(false) {
my_successors.set_owner(this);
input_ports_type::set_my_node(this);
- my_aggregator.initialize_handler(my_handler(this));
+ my_aggregator.initialize_handler(handler_type(this));
}
bool register_successor(successor_type &r) {
}
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ /*override*/built_successors_type &built_successors() { return my_successors.built_successors(); }
+
/*override*/void internal_add_built_successor( successor_type &r) {
join_node_base_operation op_data(r, add_blt_succ);
my_aggregator.execute(&op_data);
return op_data.cnt_val;
}
- /*override*/ void copy_successors(successor_vector_type &v) {
+ /*override*/ void copy_successors(successor_list_type &l) {
join_node_base_operation op_data(blt_succ_cpy);
- op_data.svec = &v;
+ op_data.slist = &l;
my_aggregator.execute(&op_data);
}
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
- protected:
-
- /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) {
- input_ports_type::reset(__TBB_PFG_RESET_ARG(f));
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- my_successors.reset(f);
+ /*override*/void extract() {
+ input_ports_type::extract();
+ my_successors.built_successors().sender_extract(*this);
+ }
#endif
+
+ protected:
+
+ /*override*/void reset_node(reset_flags f) {
+ input_ports_type::reset(f);
+ if(f & rf_clear_edges) my_successors.clear();
}
private:
return op_data.bypass_t;
}
- };
+ }; // join_node_base
// join base class type generator
- template<int N, template<class> class PT, typename OutputTuple, graph_buffer_policy JP>
+ template<int N, template<class> class PT, typename OutputTuple, typename JP>
struct join_base {
typedef typename internal::join_node_base<JP, typename wrap_tuple_elements<N,PT,OutputTuple>::type, OutputTuple> type;
};
+ template<int N, typename OutputTuple, typename K, typename KHash>
+ struct join_base<N, key_matching_port, OutputTuple, key_matching<K,KHash> > {
+ typedef key_matching<K, KHash> key_traits_type;
+ typedef K key_type;
+ typedef KHash key_hash_compare;
+ typedef typename internal::join_node_base< key_traits_type,
+ // ports type
+ typename wrap_key_tuple_elements<N,key_matching_port,key_traits_type,OutputTuple>::type,
+ OutputTuple > type;
+ };
+
//! unfolded_join_node : passes input_ports_type to join_node_base. We build the input port type
- // using tuple_element. The class PT is the port type (reserving_port, queueing_port, tag_matching_port)
- // and should match the graph_buffer_policy.
+ // using tuple_element. The class PT is the port type (reserving_port, queueing_port, key_matching_port)
+ // and should match the policy type JP.
- template<int N, template<class> class PT, typename OutputTuple, graph_buffer_policy JP>
+ template<int N, template<class> class PT, typename OutputTuple, typename JP>
class unfolded_join_node : public join_base<N,PT,OutputTuple,JP>::type {
public:
typedef typename wrap_tuple_elements<N, PT, OutputTuple>::type input_ports_type;
unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}
};
- // tag_matching unfolded_join_node. This must be a separate specialization because the constructors
+ // key_matching unfolded_join_node. This must be a separate specialization because the constructors
// differ.
- template<typename OutputTuple>
- class unfolded_join_node<2,tag_matching_port,OutputTuple,tag_matching> : public
- join_base<2,tag_matching_port,OutputTuple,tag_matching>::type {
+ template<typename OutputTuple, typename K, typename KHash>
+ class unfolded_join_node<2,key_matching_port,OutputTuple,key_matching<K,KHash> > : public
+ join_base<2,key_matching_port,OutputTuple,key_matching<K,KHash> >::type {
typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;
typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1;
public:
- typedef typename wrap_tuple_elements<2,tag_matching_port,OutputTuple>::type input_ports_type;
+ typedef typename wrap_key_tuple_elements<2,key_matching_port,key_matching<K,KHash>,OutputTuple>::type input_ports_type;
typedef OutputTuple output_type;
private:
- typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;
- typedef typename internal::function_body<T0, tag_value> *f0_p;
- typedef typename internal::function_body<T1, tag_value> *f1_p;
+ typedef join_node_base<key_matching<K,KHash>, input_ports_type, output_type > base_type;
+ typedef typename internal::type_to_key_function_body<T0, K> *f0_p;
+ typedef typename internal::type_to_key_function_body<T1, K> *f1_p;
typedef typename tbb::flow::tuple< f0_p, f1_p > func_initializer_type;
public:
template<typename Body0, typename Body1>
unfolded_join_node(graph &g, Body0 body0, Body1 body1) : base_type(g,
func_initializer_type(
- new internal::function_body_leaf<T0, tag_value, Body0>(body0),
- new internal::function_body_leaf<T1, tag_value, Body1>(body1)
- ) ) {}
+ new internal::type_to_key_function_body_leaf<T0, K, Body0>(body0),
+ new internal::type_to_key_function_body_leaf<T1, K, Body1>(body1)
+ ) ) {
+ __TBB_STATIC_ASSERT(tbb::flow::tuple_size<OutputTuple>::value == 2, "wrong number of body initializers");
+ }
unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}
};
- template<typename OutputTuple>
- class unfolded_join_node<3,tag_matching_port,OutputTuple,tag_matching> : public
- join_base<3,tag_matching_port,OutputTuple,tag_matching>::type {
+ template<typename OutputTuple, typename K, typename KHash>
+ class unfolded_join_node<3,key_matching_port,OutputTuple,key_matching<K,KHash> > : public
+ join_base<3,key_matching_port,OutputTuple,key_matching<K,KHash> >::type {
typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;
typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1;
typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2;
public:
- typedef typename wrap_tuple_elements<3, tag_matching_port, OutputTuple>::type input_ports_type;
+ typedef typename wrap_key_tuple_elements<3,key_matching_port,key_matching<K,KHash>,OutputTuple>::type input_ports_type;
typedef OutputTuple output_type;
private:
- typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;
- typedef typename internal::function_body<T0, tag_value> *f0_p;
- typedef typename internal::function_body<T1, tag_value> *f1_p;
- typedef typename internal::function_body<T2, tag_value> *f2_p;
+ typedef join_node_base<key_matching<K,KHash>, input_ports_type, output_type > base_type;
+ typedef typename internal::type_to_key_function_body<T0, K> *f0_p;
+ typedef typename internal::type_to_key_function_body<T1, K> *f1_p;
+ typedef typename internal::type_to_key_function_body<T2, K> *f2_p;
typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p > func_initializer_type;
public:
template<typename Body0, typename Body1, typename Body2>
unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2) : base_type(g,
func_initializer_type(
- new internal::function_body_leaf<T0, tag_value, Body0>(body0),
- new internal::function_body_leaf<T1, tag_value, Body1>(body1),
- new internal::function_body_leaf<T2, tag_value, Body2>(body2)
- ) ) {}
+ new internal::type_to_key_function_body_leaf<T0, K, Body0>(body0),
+ new internal::type_to_key_function_body_leaf<T1, K, Body1>(body1),
+ new internal::type_to_key_function_body_leaf<T2, K, Body2>(body2)
+ ) ) {
+ __TBB_STATIC_ASSERT(tbb::flow::tuple_size<OutputTuple>::value == 3, "wrong number of body initializers");
+ }
unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}
};
- template<typename OutputTuple>
- class unfolded_join_node<4,tag_matching_port,OutputTuple,tag_matching> : public
- join_base<4,tag_matching_port,OutputTuple,tag_matching>::type {
+ template<typename OutputTuple, typename K, typename KHash>
+ class unfolded_join_node<4,key_matching_port,OutputTuple,key_matching<K,KHash> > : public
+ join_base<4,key_matching_port,OutputTuple,key_matching<K,KHash> >::type {
typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;
typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1;
typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2;
typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3;
public:
- typedef typename wrap_tuple_elements<4, tag_matching_port, OutputTuple>::type input_ports_type;
+ typedef typename wrap_key_tuple_elements<4,key_matching_port,key_matching<K,KHash>,OutputTuple>::type input_ports_type;
typedef OutputTuple output_type;
private:
- typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;
- typedef typename internal::function_body<T0, tag_value> *f0_p;
- typedef typename internal::function_body<T1, tag_value> *f1_p;
- typedef typename internal::function_body<T2, tag_value> *f2_p;
- typedef typename internal::function_body<T3, tag_value> *f3_p;
+ typedef join_node_base<key_matching<K,KHash>, input_ports_type, output_type > base_type;
+ typedef typename internal::type_to_key_function_body<T0, K> *f0_p;
+ typedef typename internal::type_to_key_function_body<T1, K> *f1_p;
+ typedef typename internal::type_to_key_function_body<T2, K> *f2_p;
+ typedef typename internal::type_to_key_function_body<T3, K> *f3_p;
typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p > func_initializer_type;
public:
template<typename Body0, typename Body1, typename Body2, typename Body3>
unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3) : base_type(g,
func_initializer_type(
- new internal::function_body_leaf<T0, tag_value, Body0>(body0),
- new internal::function_body_leaf<T1, tag_value, Body1>(body1),
- new internal::function_body_leaf<T2, tag_value, Body2>(body2),
- new internal::function_body_leaf<T3, tag_value, Body3>(body3)
- ) ) {}
+ new internal::type_to_key_function_body_leaf<T0, K, Body0>(body0),
+ new internal::type_to_key_function_body_leaf<T1, K, Body1>(body1),
+ new internal::type_to_key_function_body_leaf<T2, K, Body2>(body2),
+ new internal::type_to_key_function_body_leaf<T3, K, Body3>(body3)
+ ) ) {
+ __TBB_STATIC_ASSERT(tbb::flow::tuple_size<OutputTuple>::value == 4, "wrong number of body initializers");
+ }
unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}
};
- template<typename OutputTuple>
- class unfolded_join_node<5,tag_matching_port,OutputTuple,tag_matching> : public
- join_base<5,tag_matching_port,OutputTuple,tag_matching>::type {
+ template<typename OutputTuple, typename K, typename KHash>
+ class unfolded_join_node<5,key_matching_port,OutputTuple,key_matching<K,KHash> > : public
+ join_base<5,key_matching_port,OutputTuple,key_matching<K,KHash> >::type {
typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;
typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1;
typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2;
typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3;
typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4;
public:
- typedef typename wrap_tuple_elements<5, tag_matching_port, OutputTuple>::type input_ports_type;
+ typedef typename wrap_key_tuple_elements<5,key_matching_port,key_matching<K,KHash>,OutputTuple>::type input_ports_type;
typedef OutputTuple output_type;
private:
- typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;
- typedef typename internal::function_body<T0, tag_value> *f0_p;
- typedef typename internal::function_body<T1, tag_value> *f1_p;
- typedef typename internal::function_body<T2, tag_value> *f2_p;
- typedef typename internal::function_body<T3, tag_value> *f3_p;
- typedef typename internal::function_body<T4, tag_value> *f4_p;
+ typedef join_node_base<key_matching<K,KHash> , input_ports_type, output_type > base_type;
+ typedef typename internal::type_to_key_function_body<T0, K> *f0_p;
+ typedef typename internal::type_to_key_function_body<T1, K> *f1_p;
+ typedef typename internal::type_to_key_function_body<T2, K> *f2_p;
+ typedef typename internal::type_to_key_function_body<T3, K> *f3_p;
+ typedef typename internal::type_to_key_function_body<T4, K> *f4_p;
typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p > func_initializer_type;
public:
template<typename Body0, typename Body1, typename Body2, typename Body3, typename Body4>
unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4) : base_type(g,
func_initializer_type(
- new internal::function_body_leaf<T0, tag_value, Body0>(body0),
- new internal::function_body_leaf<T1, tag_value, Body1>(body1),
- new internal::function_body_leaf<T2, tag_value, Body2>(body2),
- new internal::function_body_leaf<T3, tag_value, Body3>(body3),
- new internal::function_body_leaf<T4, tag_value, Body4>(body4)
- ) ) {}
+ new internal::type_to_key_function_body_leaf<T0, K, Body0>(body0),
+ new internal::type_to_key_function_body_leaf<T1, K, Body1>(body1),
+ new internal::type_to_key_function_body_leaf<T2, K, Body2>(body2),
+ new internal::type_to_key_function_body_leaf<T3, K, Body3>(body3),
+ new internal::type_to_key_function_body_leaf<T4, K, Body4>(body4)
+ ) ) {
+ __TBB_STATIC_ASSERT(tbb::flow::tuple_size<OutputTuple>::value == 5, "wrong number of body initializers");
+ }
unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}
};
#if __TBB_VARIADIC_MAX >= 6
- template<typename OutputTuple>
- class unfolded_join_node<6,tag_matching_port,OutputTuple,tag_matching> : public
- join_base<6,tag_matching_port,OutputTuple,tag_matching>::type {
+ template<typename OutputTuple, typename K, typename KHash>
+ class unfolded_join_node<6,key_matching_port,OutputTuple,key_matching<K,KHash> > : public
+ join_base<6,key_matching_port,OutputTuple,key_matching<K,KHash> >::type {
typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;
typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1;
typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2;
typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4;
typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5;
public:
- typedef typename wrap_tuple_elements<6, tag_matching_port, OutputTuple>::type input_ports_type;
+ typedef typename wrap_key_tuple_elements<6,key_matching_port,key_matching<K,KHash>,OutputTuple>::type input_ports_type;
typedef OutputTuple output_type;
private:
- typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;
- typedef typename internal::function_body<T0, tag_value> *f0_p;
- typedef typename internal::function_body<T1, tag_value> *f1_p;
- typedef typename internal::function_body<T2, tag_value> *f2_p;
- typedef typename internal::function_body<T3, tag_value> *f3_p;
- typedef typename internal::function_body<T4, tag_value> *f4_p;
- typedef typename internal::function_body<T5, tag_value> *f5_p;
+ typedef join_node_base<key_matching<K,KHash> , input_ports_type, output_type > base_type;
+ typedef typename internal::type_to_key_function_body<T0, K> *f0_p;
+ typedef typename internal::type_to_key_function_body<T1, K> *f1_p;
+ typedef typename internal::type_to_key_function_body<T2, K> *f2_p;
+ typedef typename internal::type_to_key_function_body<T3, K> *f3_p;
+ typedef typename internal::type_to_key_function_body<T4, K> *f4_p;
+ typedef typename internal::type_to_key_function_body<T5, K> *f5_p;
typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p > func_initializer_type;
public:
template<typename Body0, typename Body1, typename Body2, typename Body3, typename Body4, typename Body5>
unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4, Body5 body5)
: base_type(g, func_initializer_type(
- new internal::function_body_leaf<T0, tag_value, Body0>(body0),
- new internal::function_body_leaf<T1, tag_value, Body1>(body1),
- new internal::function_body_leaf<T2, tag_value, Body2>(body2),
- new internal::function_body_leaf<T3, tag_value, Body3>(body3),
- new internal::function_body_leaf<T4, tag_value, Body4>(body4),
- new internal::function_body_leaf<T5, tag_value, Body5>(body5)
- ) ) {}
+ new internal::type_to_key_function_body_leaf<T0, K, Body0>(body0),
+ new internal::type_to_key_function_body_leaf<T1, K, Body1>(body1),
+ new internal::type_to_key_function_body_leaf<T2, K, Body2>(body2),
+ new internal::type_to_key_function_body_leaf<T3, K, Body3>(body3),
+ new internal::type_to_key_function_body_leaf<T4, K, Body4>(body4),
+ new internal::type_to_key_function_body_leaf<T5, K, Body5>(body5)
+ ) ) {
+ __TBB_STATIC_ASSERT(tbb::flow::tuple_size<OutputTuple>::value == 6, "wrong number of body initializers");
+ }
unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}
};
#endif
#if __TBB_VARIADIC_MAX >= 7
- template<typename OutputTuple>
- class unfolded_join_node<7,tag_matching_port,OutputTuple,tag_matching> : public
- join_base<7,tag_matching_port,OutputTuple,tag_matching>::type {
+ template<typename OutputTuple, typename K, typename KHash>
+ class unfolded_join_node<7,key_matching_port,OutputTuple,key_matching<K,KHash> > : public
+ join_base<7,key_matching_port,OutputTuple,key_matching<K,KHash> >::type {
typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;
typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1;
typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2;
typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5;
typedef typename tbb::flow::tuple_element<6, OutputTuple>::type T6;
public:
- typedef typename wrap_tuple_elements<7, tag_matching_port, OutputTuple>::type input_ports_type;
+ typedef typename wrap_key_tuple_elements<7,key_matching_port,key_matching<K,KHash>,OutputTuple>::type input_ports_type;
typedef OutputTuple output_type;
private:
- typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;
- typedef typename internal::function_body<T0, tag_value> *f0_p;
- typedef typename internal::function_body<T1, tag_value> *f1_p;
- typedef typename internal::function_body<T2, tag_value> *f2_p;
- typedef typename internal::function_body<T3, tag_value> *f3_p;
- typedef typename internal::function_body<T4, tag_value> *f4_p;
- typedef typename internal::function_body<T5, tag_value> *f5_p;
- typedef typename internal::function_body<T6, tag_value> *f6_p;
+ typedef join_node_base<key_matching<K,KHash> , input_ports_type, output_type > base_type;
+ typedef typename internal::type_to_key_function_body<T0, K> *f0_p;
+ typedef typename internal::type_to_key_function_body<T1, K> *f1_p;
+ typedef typename internal::type_to_key_function_body<T2, K> *f2_p;
+ typedef typename internal::type_to_key_function_body<T3, K> *f3_p;
+ typedef typename internal::type_to_key_function_body<T4, K> *f4_p;
+ typedef typename internal::type_to_key_function_body<T5, K> *f5_p;
+ typedef typename internal::type_to_key_function_body<T6, K> *f6_p;
typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p > func_initializer_type;
public:
template<typename Body0, typename Body1, typename Body2, typename Body3, typename Body4,
typename Body5, typename Body6>
unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4,
Body5 body5, Body6 body6) : base_type(g, func_initializer_type(
- new internal::function_body_leaf<T0, tag_value, Body0>(body0),
- new internal::function_body_leaf<T1, tag_value, Body1>(body1),
- new internal::function_body_leaf<T2, tag_value, Body2>(body2),
- new internal::function_body_leaf<T3, tag_value, Body3>(body3),
- new internal::function_body_leaf<T4, tag_value, Body4>(body4),
- new internal::function_body_leaf<T5, tag_value, Body5>(body5),
- new internal::function_body_leaf<T6, tag_value, Body6>(body6)
- ) ) {}
+ new internal::type_to_key_function_body_leaf<T0, K, Body0>(body0),
+ new internal::type_to_key_function_body_leaf<T1, K, Body1>(body1),
+ new internal::type_to_key_function_body_leaf<T2, K, Body2>(body2),
+ new internal::type_to_key_function_body_leaf<T3, K, Body3>(body3),
+ new internal::type_to_key_function_body_leaf<T4, K, Body4>(body4),
+ new internal::type_to_key_function_body_leaf<T5, K, Body5>(body5),
+ new internal::type_to_key_function_body_leaf<T6, K, Body6>(body6)
+ ) ) {
+ __TBB_STATIC_ASSERT(tbb::flow::tuple_size<OutputTuple>::value == 7, "wrong number of body initializers");
+ }
unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}
};
#endif
#if __TBB_VARIADIC_MAX >= 8
- template<typename OutputTuple>
- class unfolded_join_node<8,tag_matching_port,OutputTuple,tag_matching> : public
- join_base<8,tag_matching_port,OutputTuple,tag_matching>::type {
+ template<typename OutputTuple, typename K, typename KHash>
+ class unfolded_join_node<8,key_matching_port,OutputTuple,key_matching<K,KHash> > : public
+ join_base<8,key_matching_port,OutputTuple,key_matching<K,KHash> >::type {
typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;
typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1;
typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2;
typedef typename tbb::flow::tuple_element<6, OutputTuple>::type T6;
typedef typename tbb::flow::tuple_element<7, OutputTuple>::type T7;
public:
- typedef typename wrap_tuple_elements<8, tag_matching_port, OutputTuple>::type input_ports_type;
+ typedef typename wrap_key_tuple_elements<8,key_matching_port,key_matching<K,KHash>,OutputTuple>::type input_ports_type;
typedef OutputTuple output_type;
private:
- typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;
- typedef typename internal::function_body<T0, tag_value> *f0_p;
- typedef typename internal::function_body<T1, tag_value> *f1_p;
- typedef typename internal::function_body<T2, tag_value> *f2_p;
- typedef typename internal::function_body<T3, tag_value> *f3_p;
- typedef typename internal::function_body<T4, tag_value> *f4_p;
- typedef typename internal::function_body<T5, tag_value> *f5_p;
- typedef typename internal::function_body<T6, tag_value> *f6_p;
- typedef typename internal::function_body<T7, tag_value> *f7_p;
+ typedef join_node_base<key_matching<K,KHash> , input_ports_type, output_type > base_type;
+ typedef typename internal::type_to_key_function_body<T0, K> *f0_p;
+ typedef typename internal::type_to_key_function_body<T1, K> *f1_p;
+ typedef typename internal::type_to_key_function_body<T2, K> *f2_p;
+ typedef typename internal::type_to_key_function_body<T3, K> *f3_p;
+ typedef typename internal::type_to_key_function_body<T4, K> *f4_p;
+ typedef typename internal::type_to_key_function_body<T5, K> *f5_p;
+ typedef typename internal::type_to_key_function_body<T6, K> *f6_p;
+ typedef typename internal::type_to_key_function_body<T7, K> *f7_p;
typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p > func_initializer_type;
public:
template<typename Body0, typename Body1, typename Body2, typename Body3, typename Body4,
typename Body5, typename Body6, typename Body7>
unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4,
Body5 body5, Body6 body6, Body7 body7) : base_type(g, func_initializer_type(
- new internal::function_body_leaf<T0, tag_value, Body0>(body0),
- new internal::function_body_leaf<T1, tag_value, Body1>(body1),
- new internal::function_body_leaf<T2, tag_value, Body2>(body2),
- new internal::function_body_leaf<T3, tag_value, Body3>(body3),
- new internal::function_body_leaf<T4, tag_value, Body4>(body4),
- new internal::function_body_leaf<T5, tag_value, Body5>(body5),
- new internal::function_body_leaf<T6, tag_value, Body6>(body6),
- new internal::function_body_leaf<T7, tag_value, Body7>(body7)
- ) ) {}
+ new internal::type_to_key_function_body_leaf<T0, K, Body0>(body0),
+ new internal::type_to_key_function_body_leaf<T1, K, Body1>(body1),
+ new internal::type_to_key_function_body_leaf<T2, K, Body2>(body2),
+ new internal::type_to_key_function_body_leaf<T3, K, Body3>(body3),
+ new internal::type_to_key_function_body_leaf<T4, K, Body4>(body4),
+ new internal::type_to_key_function_body_leaf<T5, K, Body5>(body5),
+ new internal::type_to_key_function_body_leaf<T6, K, Body6>(body6),
+ new internal::type_to_key_function_body_leaf<T7, K, Body7>(body7)
+ ) ) {
+ __TBB_STATIC_ASSERT(tbb::flow::tuple_size<OutputTuple>::value == 8, "wrong number of body initializers");
+ }
unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}
};
#endif
#if __TBB_VARIADIC_MAX >= 9
- template<typename OutputTuple>
- class unfolded_join_node<9,tag_matching_port,OutputTuple,tag_matching> : public
- join_base<9,tag_matching_port,OutputTuple,tag_matching>::type {
+ template<typename OutputTuple, typename K, typename KHash>
+ class unfolded_join_node<9,key_matching_port,OutputTuple,key_matching<K,KHash> > : public
+ join_base<9,key_matching_port,OutputTuple,key_matching<K,KHash> >::type {
typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;
typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1;
typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2;
typedef typename tbb::flow::tuple_element<7, OutputTuple>::type T7;
typedef typename tbb::flow::tuple_element<8, OutputTuple>::type T8;
public:
- typedef typename wrap_tuple_elements<9, tag_matching_port, OutputTuple>::type input_ports_type;
+ typedef typename wrap_key_tuple_elements<9,key_matching_port,key_matching<K,KHash>,OutputTuple>::type input_ports_type;
typedef OutputTuple output_type;
private:
- typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;
- typedef typename internal::function_body<T0, tag_value> *f0_p;
- typedef typename internal::function_body<T1, tag_value> *f1_p;
- typedef typename internal::function_body<T2, tag_value> *f2_p;
- typedef typename internal::function_body<T3, tag_value> *f3_p;
- typedef typename internal::function_body<T4, tag_value> *f4_p;
- typedef typename internal::function_body<T5, tag_value> *f5_p;
- typedef typename internal::function_body<T6, tag_value> *f6_p;
- typedef typename internal::function_body<T7, tag_value> *f7_p;
- typedef typename internal::function_body<T8, tag_value> *f8_p;
+ typedef join_node_base<key_matching<K,KHash> , input_ports_type, output_type > base_type;
+ typedef typename internal::type_to_key_function_body<T0, K> *f0_p;
+ typedef typename internal::type_to_key_function_body<T1, K> *f1_p;
+ typedef typename internal::type_to_key_function_body<T2, K> *f2_p;
+ typedef typename internal::type_to_key_function_body<T3, K> *f3_p;
+ typedef typename internal::type_to_key_function_body<T4, K> *f4_p;
+ typedef typename internal::type_to_key_function_body<T5, K> *f5_p;
+ typedef typename internal::type_to_key_function_body<T6, K> *f6_p;
+ typedef typename internal::type_to_key_function_body<T7, K> *f7_p;
+ typedef typename internal::type_to_key_function_body<T8, K> *f8_p;
typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p, f8_p > func_initializer_type;
public:
template<typename Body0, typename Body1, typename Body2, typename Body3, typename Body4,
typename Body5, typename Body6, typename Body7, typename Body8>
unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4,
Body5 body5, Body6 body6, Body7 body7, Body8 body8) : base_type(g, func_initializer_type(
- new internal::function_body_leaf<T0, tag_value, Body0>(body0),
- new internal::function_body_leaf<T1, tag_value, Body1>(body1),
- new internal::function_body_leaf<T2, tag_value, Body2>(body2),
- new internal::function_body_leaf<T3, tag_value, Body3>(body3),
- new internal::function_body_leaf<T4, tag_value, Body4>(body4),
- new internal::function_body_leaf<T5, tag_value, Body5>(body5),
- new internal::function_body_leaf<T6, tag_value, Body6>(body6),
- new internal::function_body_leaf<T7, tag_value, Body7>(body7),
- new internal::function_body_leaf<T8, tag_value, Body8>(body8)
- ) ) {}
+ new internal::type_to_key_function_body_leaf<T0, K, Body0>(body0),
+ new internal::type_to_key_function_body_leaf<T1, K, Body1>(body1),
+ new internal::type_to_key_function_body_leaf<T2, K, Body2>(body2),
+ new internal::type_to_key_function_body_leaf<T3, K, Body3>(body3),
+ new internal::type_to_key_function_body_leaf<T4, K, Body4>(body4),
+ new internal::type_to_key_function_body_leaf<T5, K, Body5>(body5),
+ new internal::type_to_key_function_body_leaf<T6, K, Body6>(body6),
+ new internal::type_to_key_function_body_leaf<T7, K, Body7>(body7),
+ new internal::type_to_key_function_body_leaf<T8, K, Body8>(body8)
+ ) ) {
+ __TBB_STATIC_ASSERT(tbb::flow::tuple_size<OutputTuple>::value == 9, "wrong number of body initializers");
+ }
unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}
};
#endif
#if __TBB_VARIADIC_MAX >= 10
- template<typename OutputTuple>
- class unfolded_join_node<10,tag_matching_port,OutputTuple,tag_matching> : public
- join_base<10,tag_matching_port,OutputTuple,tag_matching>::type {
+ template<typename OutputTuple, typename K, typename KHash>
+ class unfolded_join_node<10,key_matching_port,OutputTuple,key_matching<K,KHash> > : public
+ join_base<10,key_matching_port,OutputTuple,key_matching<K,KHash> >::type {
typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0;
typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1;
typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2;
typedef typename tbb::flow::tuple_element<8, OutputTuple>::type T8;
typedef typename tbb::flow::tuple_element<9, OutputTuple>::type T9;
public:
- typedef typename wrap_tuple_elements<10, tag_matching_port, OutputTuple>::type input_ports_type;
+ typedef typename wrap_key_tuple_elements<10,key_matching_port,key_matching<K,KHash>,OutputTuple>::type input_ports_type;
typedef OutputTuple output_type;
private:
- typedef join_node_base<tag_matching, input_ports_type, output_type > base_type;
- typedef typename internal::function_body<T0, tag_value> *f0_p;
- typedef typename internal::function_body<T1, tag_value> *f1_p;
- typedef typename internal::function_body<T2, tag_value> *f2_p;
- typedef typename internal::function_body<T3, tag_value> *f3_p;
- typedef typename internal::function_body<T4, tag_value> *f4_p;
- typedef typename internal::function_body<T5, tag_value> *f5_p;
- typedef typename internal::function_body<T6, tag_value> *f6_p;
- typedef typename internal::function_body<T7, tag_value> *f7_p;
- typedef typename internal::function_body<T8, tag_value> *f8_p;
- typedef typename internal::function_body<T9, tag_value> *f9_p;
+ typedef join_node_base<key_matching<K,KHash> , input_ports_type, output_type > base_type;
+ typedef typename internal::type_to_key_function_body<T0, K> *f0_p;
+ typedef typename internal::type_to_key_function_body<T1, K> *f1_p;
+ typedef typename internal::type_to_key_function_body<T2, K> *f2_p;
+ typedef typename internal::type_to_key_function_body<T3, K> *f3_p;
+ typedef typename internal::type_to_key_function_body<T4, K> *f4_p;
+ typedef typename internal::type_to_key_function_body<T5, K> *f5_p;
+ typedef typename internal::type_to_key_function_body<T6, K> *f6_p;
+ typedef typename internal::type_to_key_function_body<T7, K> *f7_p;
+ typedef typename internal::type_to_key_function_body<T8, K> *f8_p;
+ typedef typename internal::type_to_key_function_body<T9, K> *f9_p;
typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p, f8_p, f9_p > func_initializer_type;
public:
template<typename Body0, typename Body1, typename Body2, typename Body3, typename Body4,
typename Body5, typename Body6, typename Body7, typename Body8, typename Body9>
unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4,
Body5 body5, Body6 body6, Body7 body7, Body8 body8, Body9 body9) : base_type(g, func_initializer_type(
- new internal::function_body_leaf<T0, tag_value, Body0>(body0),
- new internal::function_body_leaf<T1, tag_value, Body1>(body1),
- new internal::function_body_leaf<T2, tag_value, Body2>(body2),
- new internal::function_body_leaf<T3, tag_value, Body3>(body3),
- new internal::function_body_leaf<T4, tag_value, Body4>(body4),
- new internal::function_body_leaf<T5, tag_value, Body5>(body5),
- new internal::function_body_leaf<T6, tag_value, Body6>(body6),
- new internal::function_body_leaf<T7, tag_value, Body7>(body7),
- new internal::function_body_leaf<T8, tag_value, Body8>(body8),
- new internal::function_body_leaf<T9, tag_value, Body9>(body9)
- ) ) {}
+ new internal::type_to_key_function_body_leaf<T0, K, Body0>(body0),
+ new internal::type_to_key_function_body_leaf<T1, K, Body1>(body1),
+ new internal::type_to_key_function_body_leaf<T2, K, Body2>(body2),
+ new internal::type_to_key_function_body_leaf<T3, K, Body3>(body3),
+ new internal::type_to_key_function_body_leaf<T4, K, Body4>(body4),
+ new internal::type_to_key_function_body_leaf<T5, K, Body5>(body5),
+ new internal::type_to_key_function_body_leaf<T6, K, Body6>(body6),
+ new internal::type_to_key_function_body_leaf<T7, K, Body7>(body7),
+ new internal::type_to_key_function_body_leaf<T8, K, Body8>(body8),
+ new internal::type_to_key_function_body_leaf<T9, K, Body9>(body9)
+ ) ) {
+ __TBB_STATIC_ASSERT(tbb::flow::tuple_size<OutputTuple>::value == 10, "wrong number of body initializers");
+ }
unfolded_join_node(const unfolded_join_node &other) : base_type(other) {}
};
#endif
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
blt_pred_cnt, blt_pred_cpy // create vector copies of preds and succs
#endif
};
- typedef function_input_base<Input, A, ImplType> my_class;
+ typedef function_input_base<Input, A, ImplType> class_type;
public:
//! The input type of this receiver
typedef Input input_type;
typedef sender<Input> predecessor_type;
+ typedef predecessor_cache<input_type, null_mutex > predecessor_cache_type;
+ typedef function_input_queue<input_type, A> input_queue_type;
+ typedef typename A::template rebind< input_queue_type >::other queue_allocator_type;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- typedef std::vector<predecessor_type *> predecessor_vector_type;
+ typedef typename predecessor_cache_type::built_predecessors_type built_predecessors_type;
+ typedef typename receiver<input_type>::predecessor_list_type predecessor_list_type;
#endif
//! Constructor for function_input_base
- function_input_base( graph &g, size_t max_concurrency, function_input_queue<input_type,A> *q = NULL )
+ function_input_base( graph &g, size_t max_concurrency, input_queue_type *q = NULL)
: my_graph(g), my_max_concurrency(max_concurrency), my_concurrency(0),
my_queue(q), forwarder_busy(false) {
my_predecessors.set_owner(this);
- my_aggregator.initialize_handler(my_handler(this));
+ my_aggregator.initialize_handler(handler_type(this));
}
//! Copy constructor
- function_input_base( const function_input_base& src, function_input_queue<input_type,A> *q = NULL ) :
+ function_input_base( const function_input_base& src, input_queue_type *q = NULL) :
receiver<Input>(), tbb::internal::no_assign(),
my_graph(src.my_graph), my_max_concurrency(src.my_max_concurrency),
my_concurrency(0), my_queue(q), forwarder_busy(false)
{
my_predecessors.set_owner(this);
- my_aggregator.initialize_handler(my_handler(this));
+ my_aggregator.initialize_handler(handler_type(this));
}
//! Destructor
+ // The queue is allocated by the constructor for {multi}function_node.
+ // TODO: pass the graph_buffer_policy to the base so it can allocate the queue instead.
+ // This would be an interface-breaking change.
virtual ~function_input_base() {
if ( my_queue ) delete my_queue;
}
if ( my_max_concurrency == 0 ) {
return create_body_task( t );
} else {
- my_operation op_data(t, tryput_bypass);
+ operation_type op_data(t, tryput_bypass);
my_aggregator.execute(&op_data);
if(op_data.status == SUCCEEDED ) {
return op_data.bypass_t;
//! Adds src to the list of cached predecessors.
/* override */ bool register_predecessor( predecessor_type &src ) {
- my_operation op_data(reg_pred);
+ operation_type op_data(reg_pred);
op_data.r = &src;
my_aggregator.execute(&op_data);
return true;
//! Removes src from the list of cached predecessors.
/* override */ bool remove_predecessor( predecessor_type &src ) {
- my_operation op_data(rem_pred);
+ operation_type op_data(rem_pred);
op_data.r = &src;
my_aggregator.execute(&op_data);
return true;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
//! Adds to list of predecessors added by make_edge
/*override*/ void internal_add_built_predecessor( predecessor_type &src) {
- my_operation op_data(add_blt_pred);
+ operation_type op_data(add_blt_pred);
op_data.r = &src;
my_aggregator.execute(&op_data);
}
//! removes from to list of predecessors (used by remove_edge)
/*override*/ void internal_delete_built_predecessor( predecessor_type &src) {
- my_operation op_data(del_blt_pred);
+ operation_type op_data(del_blt_pred);
op_data.r = &src;
my_aggregator.execute(&op_data);
}
/*override*/ size_t predecessor_count() {
- my_operation op_data(blt_pred_cnt);
+ operation_type op_data(blt_pred_cnt);
my_aggregator.execute(&op_data);
return op_data.cnt_val;
}
- /*override*/ void copy_predecessors(predecessor_vector_type &v) {
- my_operation op_data(blt_pred_cpy);
+ /*override*/ void copy_predecessors(predecessor_list_type &v) {
+ operation_type op_data(blt_pred_cpy);
op_data.predv = &v;
my_aggregator.execute(&op_data);
}
+
+ /*override*/built_predecessors_type &built_predecessors() {
+ return my_predecessors.built_predecessors();
+ }
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
protected:
- void reset_function_input_base( __TBB_PFG_RESET_ARG(reset_flags f)) {
+ void reset_function_input_base( reset_flags f) {
my_concurrency = 0;
if(my_queue) {
my_queue->reset();
}
- reset_receiver(__TBB_PFG_RESET_ARG(f));
+ reset_receiver(f);
forwarder_busy = false;
}
graph& my_graph;
const size_t my_max_concurrency;
size_t my_concurrency;
- function_input_queue<input_type, A> *my_queue;
+ input_queue_type *my_queue;
predecessor_cache<input_type, null_mutex > my_predecessors;
- /*override*/void reset_receiver( __TBB_PFG_RESET_ARG(reset_flags f)) {
- my_predecessors.reset(__TBB_PFG_RESET_ARG(f));
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- __TBB_ASSERT(!(f & rf_extract) || my_predecessors.empty(), "function_input_base reset failed");
-#endif
+ /*override*/void reset_receiver( reset_flags f) {
+ if( f & rf_clear_edges) my_predecessors.clear();
+ else
+ my_predecessors.reset();
+ __TBB_ASSERT(!(f & rf_clear_edges) || my_predecessors.empty(), "function_input_base reset failed");
}
private:
- friend class apply_body_task_bypass< my_class, input_type >;
- friend class forward_task_bypass< my_class >;
+ friend class apply_body_task_bypass< class_type, input_type >;
+ friend class forward_task_bypass< class_type >;
- class my_operation : public aggregated_operation< my_operation > {
+ class operation_type : public aggregated_operation< operation_type > {
public:
char type;
union {
predecessor_type *r;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
size_t cnt_val;
- predecessor_vector_type *predv;
+ predecessor_list_type *predv;
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
};
tbb::task *bypass_t;
- my_operation(const input_type& e, op_type t) :
+ operation_type(const input_type& e, op_type t) :
type(char(t)), elem(const_cast<input_type*>(&e)) {}
- my_operation(op_type t) : type(char(t)), r(NULL) {}
+ operation_type(op_type t) : type(char(t)), r(NULL) {}
};
bool forwarder_busy;
- typedef internal::aggregating_functor<my_class, my_operation> my_handler;
- friend class internal::aggregating_functor<my_class, my_operation>;
- aggregator< my_handler, my_operation > my_aggregator;
+ typedef internal::aggregating_functor<class_type, operation_type> handler_type;
+ friend class internal::aggregating_functor<class_type, operation_type>;
+ aggregator< handler_type, operation_type > my_aggregator;
- void handle_operations(my_operation *op_list) {
- my_operation *tmp;
+ void handle_operations(operation_type *op_list) {
+ operation_type *tmp;
while (op_list) {
tmp = op_list;
op_list = op_list->next;
}
//! Put to the node, but return the task instead of enqueueing it
- void internal_try_put_task(my_operation *op) {
+ void internal_try_put_task(operation_type *op) {
__TBB_ASSERT(my_max_concurrency != 0, NULL);
if (my_concurrency < my_max_concurrency) {
++my_concurrency;
}
//! Tries to spawn bodies if available and if concurrency allows
- void internal_forward(my_operation *op) {
+ void internal_forward(operation_type *op) {
op->bypass_t = NULL;
if (my_concurrency<my_max_concurrency || !my_max_concurrency) {
input_type i;
task * apply_body_bypass( input_type &i ) {
task * new_task = static_cast<ImplType *>(this)->apply_body_impl_bypass(i);
if ( my_max_concurrency != 0 ) {
- my_operation op_data(app_body_bypass); // tries to pop an item or get_item, enqueues another apply_body
+ operation_type op_data(app_body_bypass); // tries to pop an item or get_item, enqueues another apply_body
my_aggregator.execute(&op_data);
tbb::task *ttask = op_data.bypass_t;
new_task = combine_tasks(new_task, ttask);
//! allocates a task to call apply_body( input )
inline task * create_body_task( const input_type &input ) {
- task* tp = my_graph.root_task();
- return (tp) ?
- new(task::allocate_additional_child_of(*tp))
- apply_body_task_bypass < my_class, input_type >(*this, input) :
+ return (my_graph.is_active()) ?
+ new(task::allocate_additional_child_of(*(my_graph.root_task())))
+ apply_body_task_bypass < class_type, input_type >(*this, input) :
NULL;
}
//! This is executed by an enqueued task, the "forwarder"
task *forward_task() {
- my_operation op_data(try_fwd);
+ operation_type op_data(try_fwd);
task *rval = NULL;
do {
op_data.status = WAIT;
}
inline task *create_forward_task() {
- task* tp = my_graph.root_task();
- return (tp) ?
- new(task::allocate_additional_child_of(*tp)) forward_task_bypass< my_class >(*this) :
+ return (my_graph.is_active()) ?
+ new(task::allocate_additional_child_of(*(my_graph.root_task()))) forward_task_bypass< class_type >(*this) :
NULL;
}
public:
typedef Input input_type;
typedef Output output_type;
+ typedef function_body<input_type, output_type> function_body_type;
typedef function_input<Input,Output,A> my_class;
typedef function_input_base<Input, A, my_class> base_type;
typedef function_input_queue<input_type, A> input_queue_type;
-
// constructor
template<typename Body>
- function_input( graph &g, size_t max_concurrency, Body& body, function_input_queue<input_type,A> *q = NULL ) :
+ function_input( graph &g, size_t max_concurrency, Body& body, input_queue_type *q = NULL ) :
base_type(g, max_concurrency, q),
- my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) {
+ my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ),
+ my_init_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) {
}
//! Copy constructor
function_input( const function_input& src, input_queue_type *q = NULL ) :
base_type(src, q),
- my_body( src.my_body->clone() ) {
+ my_body( src.my_init_body->clone() ),
+ my_init_body(src.my_init_body->clone() ) {
}
~function_input() {
delete my_body;
+ delete my_init_body;
}
template< typename Body >
Body copy_function_object() {
- internal::function_body<input_type, output_type> &body_ref = *this->my_body;
+ function_body_type &body_ref = *this->my_body;
return dynamic_cast< internal::function_body_leaf<input_type, output_type, Body> & >(body_ref).get_body();
}
protected:
- void reset_function_input(__TBB_PFG_RESET_ARG(reset_flags f)) {
- base_type::reset_function_input_base(__TBB_PFG_RESET_ARG(f));
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- if(f & rf_reset_bodies) my_body->reset_body();
-#endif
+ void reset_function_input(reset_flags f) {
+ base_type::reset_function_input_base(f);
+ if(f & rf_reset_bodies) {
+ function_body_type *tmp = my_init_body->clone();
+ delete my_body;
+ my_body = tmp;
+ }
}
- function_body<input_type, output_type> *my_body;
+ function_body_type *my_body;
+ function_body_type *my_init_body;
virtual broadcast_cache<output_type > &successors() = 0;
}; // function_input
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- // helper templates to reset the successor edges of the output ports of an multifunction_node
- template<int N>
- struct reset_element {
- template<typename P>
- static void reset_this(P &p, reset_flags f) {
- (void)tbb::flow::get<N-1>(p).successors().reset(f);
- reset_element<N-1>::reset_this(p, f);
- }
- template<typename P>
- static bool this_empty(P &p) {
+ // helper templates to clear the successor edges of the output ports of a multifunction_node
+ template<int N> struct clear_element {
+ template<typename P> static void clear_this(P &p) {
+ (void)tbb::flow::get<N-1>(p).successors().clear();
+ clear_element<N-1>::clear_this(p);
+ }
+ template<typename P> static bool this_empty(P &p) {
if(tbb::flow::get<N-1>(p).successors().empty())
- return reset_element<N-1>::this_empty(p);
+ return clear_element<N-1>::this_empty(p);
return false;
}
};
- template<>
- struct reset_element<1> {
- template<typename P>
- static void reset_this(P &p, reset_flags f) {
- (void)tbb::flow::get<0>(p).successors().reset(f);
+ template<> struct clear_element<1> {
+ template<typename P> static void clear_this(P &p) {
+ (void)tbb::flow::get<0>(p).successors().clear();
}
- template<typename P>
- static bool this_empty(P &p) {
+ template<typename P> static bool this_empty(P &p) {
return tbb::flow::get<0>(p).successors().empty();
}
};
+
+#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ // helper templates to extract the output ports of a multifunction_node from the graph
+ template<int N> struct extract_element {
+ template<typename P> static void extract_this(P &p) {
+ (void)tbb::flow::get<N-1>(p).successors().built_successors().sender_extract(tbb::flow::get<N-1>(p));
+ extract_element<N-1>::extract_this(p);
+ }
+ };
+
+ template<> struct extract_element<1> {
+ template<typename P> static void extract_this(P &p) {
+ (void)tbb::flow::get<0>(p).successors().built_successors().sender_extract(tbb::flow::get<0>(p));
+ }
+ };
#endif
//! Implements methods for a function node that takes a type Input as input
static const int N = tbb::flow::tuple_size<OutputPortSet>::value;
typedef Input input_type;
typedef OutputPortSet output_ports_type;
+ typedef multifunction_body<input_type, output_ports_type> multifunction_body_type;
typedef multifunction_input<Input,OutputPortSet,A> my_class;
typedef function_input_base<Input, A, my_class> base_type;
typedef function_input_queue<input_type, A> input_queue_type;
-
// constructor
template<typename Body>
- multifunction_input(
- graph &g,
- size_t max_concurrency,
+ multifunction_input(
+ graph &g,
+ size_t max_concurrency,
Body& body,
- function_input_queue<input_type,A> *q = NULL ) :
+ input_queue_type *q = NULL ) :
base_type(g, max_concurrency, q),
- my_body( new internal::multifunction_body_leaf<input_type, output_ports_type, Body>(body) ) {
+ my_body( new internal::multifunction_body_leaf<input_type, output_ports_type, Body>(body) ),
+ my_init_body( new internal::multifunction_body_leaf<input_type, output_ports_type, Body>(body) ) {
}
//! Copy constructor
multifunction_input( const multifunction_input& src, input_queue_type *q = NULL ) :
base_type(src, q),
- my_body( src.my_body->clone() ) {
+ my_body( src.my_init_body->clone() ),
+ my_init_body(src.my_init_body->clone() ) {
}
~multifunction_input() {
delete my_body;
+ delete my_init_body;
}
template< typename Body >
Body copy_function_object() {
- internal::multifunction_body<input_type, output_ports_type> &body_ref = *this->my_body;
+ multifunction_body_type &body_ref = *this->my_body;
return dynamic_cast< internal::multifunction_body_leaf<input_type, output_ports_type, Body> & >(body_ref).get_body();
}
output_ports_type &output_ports(){ return my_output_ports; }
protected:
-
- /*override*/void reset(__TBB_PFG_RESET_ARG(reset_flags f)) {
- base_type::reset_function_input_base(__TBB_PFG_RESET_ARG(f));
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- reset_element<N>::reset_this(my_output_ports, f);
- if(f & rf_reset_bodies) my_body->reset_body();
- __TBB_ASSERT(!(f & rf_extract) || reset_element<N>::this_empty(my_output_ports), "multifunction_node reset failed");
+ /*override*/void extract() {
+ extract_element<N>::extract_this(my_output_ports);
+ }
#endif
+
+ /*override*/void reset(reset_flags f) {
+ base_type::reset_function_input_base(f);
+ if(f & rf_clear_edges)clear_element<N>::clear_this(my_output_ports);
+ if(f & rf_reset_bodies) {
+ multifunction_body_type *tmp = my_init_body->clone();
+ delete my_body;
+ my_body = tmp;
+ }
+ __TBB_ASSERT(!(f & rf_clear_edges) || clear_element<N>::this_empty(my_output_ports), "multifunction_node reset failed");
}
- multifunction_body<input_type, output_ports_type> *my_body;
+ multifunction_body_type *my_body;
+ multifunction_body_type *my_init_body;
output_ports_type my_output_ports;
}; // multifunction_input
//! The output type of this receiver
typedef Output output_type;
+ typedef function_body<input_type, output_type> function_body_type;
template< typename Body >
continue_input( graph &g, Body& body )
: my_graph_ptr(&g),
- my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) { }
+ my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ),
+ my_init_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) { }
template< typename Body >
continue_input( graph &g, int number_of_predecessors, Body& body )
: continue_receiver( number_of_predecessors ), my_graph_ptr(&g),
- my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) { }
+ my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ),
+ my_init_body( new internal::function_body_leaf< input_type, output_type, Body>(body) )
+ { }
continue_input( const continue_input& src ) : continue_receiver(src),
- my_graph_ptr(src.my_graph_ptr), my_body( src.my_body->clone() ) {}
+ my_graph_ptr(src.my_graph_ptr),
+ my_body( src.my_init_body->clone() ),
+ my_init_body( src.my_init_body->clone() ) {}
~continue_input() {
delete my_body;
+ delete my_init_body;
}
template< typename Body >
Body copy_function_object() {
- internal::function_body<input_type, output_type> &body_ref = *my_body;
+ function_body_type &body_ref = *my_body;
return dynamic_cast< internal::function_body_leaf<input_type, output_type, Body> & >(body_ref).get_body();
}
- /*override*/void reset_receiver( __TBB_PFG_RESET_ARG(reset_flags f)) {
- continue_receiver::reset_receiver(__TBB_PFG_RESET_ARG(f));
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- if(f & rf_reset_bodies) my_body->reset_body();
-#endif
+ /*override*/void reset_receiver( reset_flags f) {
+ continue_receiver::reset_receiver(f);
+ if(f & rf_reset_bodies) {
+ function_body_type *tmp = my_init_body->clone();
+ delete my_body;
+ my_body = tmp;
+ }
}
protected:
graph* my_graph_ptr;
- function_body<input_type, output_type> *my_body;
+ function_body_type *my_body;
+ function_body_type *my_init_body;
virtual broadcast_cache<output_type > &successors() = 0;
//! Spawns a task that applies the body
/* override */ task *execute( ) {
- task* tp = my_graph_ptr->root_task();
- return (tp) ?
- new ( task::allocate_additional_child_of( *tp ) )
+ return (my_graph_ptr->is_active()) ?
+ new ( task::allocate_additional_child_of( *(my_graph_ptr->root_task()) ) )
apply_body_task_bypass< continue_input< Output >, continue_msg >( *this, continue_msg() ) :
NULL;
}
}; // continue_input
+
+#if __TBB_PREVIEW_ASYNC_NODE
+
+ //! Implements methods for an async node that takes a type Input as input and
+ // submits it to an asynchronous activity
+ template < typename Input, typename A, typename AsyncGatewayType >
+ class async_input : public function_input_base<Input, A, async_input<Input, A, AsyncGatewayType> > {
+ public:
+ typedef Input input_type;
+ typedef AsyncGatewayType async_gateway_type;
+ typedef async_body< input_type, async_gateway_type > async_body_type;
+ typedef async_input< Input, A, async_gateway_type > my_class;
+ typedef function_input_base<Input, A, my_class> base_type;
+
+ // constructor
+ template<typename Body>
+ async_input( graph &g, Body& body ) :
+ base_type( g, unlimited ),
+ my_body( new internal::async_body_leaf< input_type, Body, async_gateway_type >(body) ),
+ my_init_body( new internal::async_body_leaf< input_type, Body, async_gateway_type >(body) )
+ {
+ }
+
+ //! Copy constructor
+ async_input( const async_input& src ) :
+ base_type( src ),
+ my_body( src.my_init_body->clone() ),
+ my_init_body(src.my_init_body->clone() ) {
+ }
+
+ ~async_input() {
+ delete my_body;
+ delete my_init_body;
+ }
+
+ template< typename Body >
+ Body copy_function_object() {
+ async_body_type &body_ref = *this->my_body;
+ return dynamic_cast< internal::async_body_leaf<input_type, Body, async_gateway_type> & >(body_ref).get_body();
+ }
+
+ task * apply_body_impl_bypass( const input_type &i) {
+ // TODO: This FGT instrumentation only captures the submission of the work
+ // but not the async thread activity.
+ // We will have to think about the best way to capture that.
+ tbb::internal::fgt_begin_body( my_body );
+ (*my_body)( i, async_gateway() );
+ tbb::internal::fgt_end_body( my_body );
+ return NULL;
+ }
+
+ virtual async_gateway_type& async_gateway() = 0;
+
+ protected:
+ void reset_async_input(reset_flags f) {
+ base_type::reset_function_input_base(f);
+ if(f & rf_reset_bodies) {
+ async_body_type *tmp = my_init_body->clone();
+ delete my_body;
+ my_body = tmp;
+ }
+ }
+
+ async_body_type *my_body;
+ async_body_type *my_init_body;
+ };
+#endif // __TBB_PREVIEW_ASYNC_NODE
//! Implements methods for both executable and function nodes that puts Output to its successors
template< typename Output >
class function_output : public sender<Output> {
public:
-#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- template<int N> friend struct reset_element;
-#endif
+ template<int N> friend struct clear_element;
typedef Output output_type;
typedef receiver<output_type> successor_type;
typedef broadcast_cache<output_type> broadcast_cache_type;
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
- typedef std::vector<successor_type *> successor_vector_type;
+ typedef typename sender<output_type>::built_successors_type built_successors_type;
+ typedef typename sender<output_type>::successor_list_type successor_list_type;
#endif
function_output() { my_successors.set_owner(this); }
}
#if TBB_PREVIEW_FLOW_GRAPH_FEATURES
+ built_successors_type &built_successors() { return successors().built_successors(); }
+
+
/*override*/ void internal_add_built_successor( receiver<output_type> &r) {
successors().internal_add_built_successor( r );
}
return successors().successor_count();
}
- /*override*/ void copy_successors( successor_vector_type &v) {
+ /*override*/ void copy_successors( successor_list_type &v) {
successors().copy_successors(v);
}
#endif /* TBB_PREVIEW_FLOW_GRAPH_FEATURES */
//
// get<I>(output_ports).try_put(output_value);
//
+ // if a task pointer is returned, it will always be spawned and true returned; else
// return value will be bool returned from successors.try_put.
task *try_put_task(const output_type &i) { return my_successors.try_put_task(i); }
+ broadcast_cache_type &successors() { return my_successors; }
protected:
broadcast_cache_type my_successors;
- broadcast_cache_type &successors() { return my_successors; }
}; // function_output
}
}; // multifunction_output
+//composite_node
+#if TBB_PREVIEW_FLOW_GRAPH_TRACE
+ template<typename CompositeType>
+ void add_nodes_impl(CompositeType*, bool) {}
+
+ template< typename CompositeType, typename NodeType1, typename... NodeTypes >
+ void add_nodes_impl(CompositeType *c_node, bool visible, const NodeType1& n1, const NodeTypes&... n) {
+ void *addr = const_cast<NodeType1 *>(&n1);
+
+ if(visible)
+ tbb::internal::itt_relation_add( tbb::internal::ITT_DOMAIN_FLOW, c_node, tbb::internal::FLOW_NODE, tbb::internal::__itt_relation_is_parent_of, addr, tbb::internal::FLOW_NODE );
+ else
+ tbb::internal::itt_relation_add( tbb::internal::ITT_DOMAIN_FLOW, addr, tbb::internal::FLOW_NODE, tbb::internal::__itt_relation_is_child_of, c_node, tbb::internal::FLOW_NODE );
+ add_nodes_impl(c_node, visible, n...);
+ }
+#endif
+
} // internal
#endif // __TBB__flow_graph_node_impl_H
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
reasons why the executable file might be covered by the GNU General Public License.
*/
-// tagged buffer that can expand, and can support as many deletions as additions
-// list-based, with elements of list held in array (for destruction management),
-// multiplicative hashing (like ets). No synchronization built-in.
+// a hash table buffer that can expand, and can support as many deletions as
+// additions, list-based, with elements of list held in array (for destruction
+// management), multiplicative hashing (like ets). No synchronization built-in.
//
-#ifndef __TBB__flow_graph_tagged_buffer_impl_H
-#define __TBB__flow_graph_tagged_buffer_impl_H
+#ifndef __TBB__flow_graph_hash_buffer_impl_H
+#define __TBB__flow_graph_hash_buffer_impl_H
#ifndef __TBB_flow_graph_H
#error Do not #include this internal file directly; use public TBB headers instead.
#endif
-// included in namespace tbb::flow::interface7::internal
+// included in namespace tbb::flow::interface8::internal
-template<typename T, typename U, size_t NoTagMark>
-struct otherData {
- T t;
- U next;
- otherData() : t(NoTagMark), next(NULL) {}
-};
-
-template<typename TagType, typename ValueType, size_t NoTagMark>
+// elements in the table are a simple list; we need pointer to next element to
+// traverse the chain
+template<typename ValueType>
struct buffer_element_type {
// the second parameter below is void * because we can't forward-declare the type
// itself, so we just reinterpret_cast below.
- typedef typename aligned_pair<ValueType, otherData<TagType, void *, NoTagMark> >::type type;
+ typedef typename aligned_pair<ValueType, void *>::type type;
};
template
<
- typename TagType,
- typename ValueType,
- size_t NoTagMark = 0,
- typename Allocator=tbb::cache_aligned_allocator< typename buffer_element_type<TagType, ValueType, NoTagMark>::type >
+ typename Key, // type of key within ValueType
+ typename ValueType,
+ typename ValueToKey, // abstract method that returns "const Key" or "const Key&" given ValueType
+ typename HashCompare, // has hash and equal
+ typename Allocator=tbb::cache_aligned_allocator< typename aligned_pair<ValueType, void *>::type >
>
-class tagged_buffer {
+class hash_buffer : public HashCompare {
public:
static const size_t INITIAL_SIZE = 8; // initial size of the hash pointer table
- static const TagType NO_TAG = TagType(NoTagMark);
typedef ValueType value_type;
- typedef typename buffer_element_type<TagType, ValueType, NO_TAG>::type element_type;
+ typedef typename buffer_element_type< value_type >::type element_type;
typedef value_type *pointer_type;
typedef element_type *list_array_type; // array we manage manually
typedef list_array_type *pointer_array_type;
typedef typename Allocator::template rebind<list_array_type>::other pointer_array_allocator_type;
typedef typename Allocator::template rebind<element_type>::other elements_array_allocator;
+ typedef typename tbb::internal::strip<Key>::type Knoref;
+
private:
+ ValueToKey *my_key;
size_t my_size;
size_t nelements;
pointer_array_type pointer_array; // pointer_array[my_size]
size_t mask() { return my_size - 1; }
- static size_t hash(TagType t) {
- return uintptr_t(t)*tbb::internal::select_size_t_constant<0x9E3779B9,0x9E3779B97F4A7C15ULL>::value;
- }
-
void set_up_free_list( element_type **p_free_list, list_array_type la, size_t sz) {
for(size_t i=0; i < sz - 1; ++i ) { // construct free list
- la[i].second.next = &(la[i+1]);
- la[i].second.t = NO_TAG;
+ la[i].second = &(la[i+1]);
}
- la[sz-1].second.next = NULL;
- *p_free_list = &(la[0]);
+ la[sz-1].second = NULL;
+ *p_free_list = (element_type *)&(la[0]);
}
// cleanup for exceptions
set_up_free_list(&new_free_list, new_elements_array, my_size );
for(size_t i=0; i < my_size; ++i) {
- for( element_type* op = pointer_array[i]; op; op = (element_type *)(op->second.next)) {
+ for( element_type* op = pointer_array[i]; op; op = (element_type *)(op->second)) {
value_type *ov = reinterpret_cast<value_type *>(&(op->first));
// could have std::move semantics
- internal_tagged_insert(new_pointer_array, new_size, new_free_list, op->second.t, *ov);
+ internal_insert_with_key(new_pointer_array, new_size, new_free_list, *ov);
}
}
my_cleanup.my_pa = NULL;
// v should have perfect forwarding if std::move implemented.
// we use this method to move elements in grow_array, so can't use class fields
- void internal_tagged_insert( element_type **p_pointer_array, size_t p_sz, list_array_type &p_free_list,
- const TagType t, const value_type &v) {
+ void internal_insert_with_key( element_type **p_pointer_array, size_t p_sz, list_array_type &p_free_list,
+ const value_type &v) {
size_t l_mask = p_sz-1;
- size_t h = hash(t) & l_mask;
+ __TBB_ASSERT(my_key, "Error: value-to-key functor not provided");
+ size_t h = this->hash((*my_key)(v)) & l_mask;
__TBB_ASSERT(p_free_list, "Error: free list not set up.");
- element_type* my_elem = p_free_list; p_free_list = (element_type *)(p_free_list->second.next);
- my_elem->second.t = t;
+ element_type* my_elem = p_free_list; p_free_list = (element_type *)(p_free_list->second);
(void) new(&(my_elem->first)) value_type(v);
- my_elem->second.next = p_pointer_array[h];
+ my_elem->second = p_pointer_array[h];
p_pointer_array[h] = my_elem;
}
for(size_t i = 0; i < sz; ++i ) {
element_type *p_next;
for( element_type *p = pa[i]; p; p = p_next) {
- p_next = (element_type *)p->second.next;
- value_type *vp = reinterpret_cast<value_type *>(&(p->first));
- vp->~value_type();
+ p_next = (element_type *)p->second;
+ internal::punned_cast<value_type *>(&(p->first))->~value_type();
}
}
- pointer_array_allocator_type().deallocate(pa, sz);
+ pointer_array_allocator_type().deallocate(pa, sz);
pa = NULL;
}
// Separate test (if allocation of pa throws, el may be allocated.
}
public:
- tagged_buffer() : my_size(INITIAL_SIZE), nelements(0) {
+ hash_buffer() : my_key(NULL), my_size(INITIAL_SIZE), nelements(0) {
internal_initialize_buffer();
}
- ~tagged_buffer() {
+ ~hash_buffer() {
internal_free_buffer(pointer_array, elements_array, my_size, nelements);
+ if(my_key) delete my_key;
}
void reset() {
internal_initialize_buffer();
}
- bool tagged_insert(const TagType t, const value_type &v) {
- pointer_type p;
- if(tagged_find_ref(t, p)) {
+ // Take ownership of func object allocated with new.
+ // This method is only used internally, so can't be misused by user.
+ void set_key_func(ValueToKey *vtk) { my_key = vtk; }
+ // pointer is used to clone()
+ ValueToKey* get_key_func() { return my_key; }
+
+ bool insert_with_key(const value_type &v) {
+ pointer_type p = NULL;
+ __TBB_ASSERT(my_key, "Error: value-to-key functor not provided");
+ if(find_ref_with_key((*my_key)(v), p)) {
p->~value_type();
(void) new(p) value_type(v); // copy-construct into the space
return false;
}
++nelements;
if(nelements*2 > my_size) grow_array();
- internal_tagged_insert(pointer_array, my_size, free_list, t, v);
+ internal_insert_with_key(pointer_array, my_size, free_list, v);
return true;
}
- // returns reference to array element.v
- bool tagged_find_ref(const TagType t, pointer_type &v) {
- size_t i = hash(t) & mask();
- for(element_type* p = pointer_array[i]; p; p = (element_type *)(p->second.next)) {
- if(p->second.t == t) {
- v = reinterpret_cast<pointer_type>(&(p->first));
+ // returns true and sets v to array element if found, else returns false.
+ bool find_ref_with_key(const Knoref& k, pointer_type &v) {
+ size_t i = this->hash(k) & mask();
+ for(element_type* p = pointer_array[i]; p; p = (element_type *)(p->second)) {
+ pointer_type pv = reinterpret_cast<pointer_type>(&(p->first));
+ __TBB_ASSERT(my_key, "Error: value-to-key functor not provided");
+ if(this->equal((*my_key)(*pv), k)) {
+ v = pv;
return true;
}
}
return false;
}
- bool tagged_find( const TagType t, value_type &v) {
+ bool find_with_key( const Knoref& k, value_type &v) {
value_type *p;
- if(tagged_find_ref(t, p)) {
+ if(find_ref_with_key(k, p)) {
v = *p;
return true;
}
return false;
}
- void tagged_delete(const TagType t) {
- size_t h = hash(t) & mask();
+ void delete_with_key(const Knoref& k) {
+ size_t h = this->hash(k) & mask();
element_type* prev = NULL;
- for(element_type* p = pointer_array[h]; p; prev = p, p = (element_type *)(p->second.next)) {
- if(p->second.t == t) {
- value_type *vp = reinterpret_cast<value_type *>(&(p->first));
+ for(element_type* p = pointer_array[h]; p; prev = p, p = (element_type *)(p->second)) {
+ value_type *vp = reinterpret_cast<value_type *>(&(p->first));
+ __TBB_ASSERT(my_key, "Error: value-to-key functor not provided");
+ if(this->equal((*my_key)(*vp), k)) {
vp->~value_type();
- p->second.t = NO_TAG;
- if(prev) prev->second.next = p->second.next;
- else pointer_array[h] = (element_type *)(p->second.next);
- p->second.next = free_list;
+ if(prev) prev->second = p->second;
+ else pointer_array[h] = (element_type *)(p->second);
+ p->second = free_list;
free_list = p;
--nelements;
return;
}
}
- __TBB_ASSERT(false, "tag not found for delete");
+ __TBB_ASSERT(false, "key not found for delete");
}
};
-#endif // __TBB__flow_graph_tagged_buffer_impl_H
+#endif // __TBB__flow_graph_hash_buffer_impl_H
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
itt_make_task_group( ITT_DOMAIN_FLOW, p, FLOW_OUTPUT_PORT, node, FLOW_NODE, name_index );
}
-template < typename TypesTuple, typename PortsTuple, int N >
+template<typename InputType>
+void register_input_port(void *node, tbb::flow::interface8::receiver<InputType>* port, string_index name_index) {
+ //TODO: Make fgt_internal_create_input_port a function template?
+ fgt_internal_create_input_port( node, port, name_index);
+}
+
+template < typename PortsTuple, int N >
struct fgt_internal_input_helper {
static void register_port( void *node, PortsTuple &ports ) {
- fgt_internal_create_input_port( node, (void*)static_cast< tbb::flow::interface7::receiver< typename tbb::flow::tuple_element<N-1,TypesTuple>::type > * >(&(tbb::flow::get<N-1>(ports))),
- static_cast<tbb::internal::string_index>(FLOW_INPUT_PORT_0 + N - 1) );
- fgt_internal_input_helper<TypesTuple, PortsTuple, N-1>::register_port( node, ports );
+ register_input_port( node, &(tbb::flow::get<N-1>(ports)), static_cast<tbb::internal::string_index>(FLOW_INPUT_PORT_0 + N - 1) );
+ fgt_internal_input_helper<PortsTuple, N-1>::register_port( node, ports );
}
};
-template < typename TypesTuple, typename PortsTuple >
-struct fgt_internal_input_helper<TypesTuple,PortsTuple,1> {
+template < typename PortsTuple >
+struct fgt_internal_input_helper<PortsTuple, 1> {
static void register_port( void *node, PortsTuple &ports ) {
- fgt_internal_create_input_port( node, (void*)static_cast< tbb::flow::interface7::receiver< typename tbb::flow::tuple_element<0,TypesTuple>::type > * >(&(tbb::flow::get<0>(ports))),
- FLOW_INPUT_PORT_0 );
+ register_input_port( node, &(tbb::flow::get<0>(ports)), FLOW_INPUT_PORT_0 );
}
};
-template < typename TypesTuple, typename PortsTuple, int N >
+template<typename OutputType>
+void register_output_port(void *node, tbb::flow::interface8::sender<OutputType>* port, string_index name_index) {
+ //TODO: Make fgt_internal_create_output_port a function template?
+ fgt_internal_create_output_port( node, port, name_index);
+}
+
+template < typename PortsTuple, int N >
struct fgt_internal_output_helper {
static void register_port( void *node, PortsTuple &ports ) {
- fgt_internal_create_output_port( node, (void*)static_cast< tbb::flow::interface7::sender< typename tbb::flow::tuple_element<N-1,TypesTuple>::type > * >(&(tbb::flow::get<N-1>(ports))),
- static_cast<tbb::internal::string_index>(FLOW_OUTPUT_PORT_0 + N - 1) );
- fgt_internal_output_helper<TypesTuple, PortsTuple, N-1>::register_port( node, ports );
+ register_output_port( node, &(tbb::flow::get<N-1>(ports)), static_cast<tbb::internal::string_index>(FLOW_OUTPUT_PORT_0 + N - 1) );
+ fgt_internal_output_helper<PortsTuple, N-1>::register_port( node, ports );
}
};
-template < typename TypesTuple, typename PortsTuple >
-struct fgt_internal_output_helper<TypesTuple,PortsTuple,1> {
+template < typename PortsTuple >
+struct fgt_internal_output_helper<PortsTuple,1> {
static void register_port( void *node, PortsTuple &ports ) {
- fgt_internal_create_output_port( node, (void*)static_cast< tbb::flow::interface7::sender< typename tbb::flow::tuple_element<0,TypesTuple>::type > * >(&(tbb::flow::get<0>(ports))),
- FLOW_OUTPUT_PORT_0 );
+ register_output_port( node, &(tbb::flow::get<0>(ports)), FLOW_OUTPUT_PORT_0 );
}
};
template< typename NodeType >
void fgt_multioutput_node_desc( const NodeType *node, const char *desc ) {
- void *addr = (void *)( static_cast< tbb::flow::interface7::receiver< typename NodeType::input_type > * >(const_cast< NodeType *>(node)) );
+ void *addr = (void *)( static_cast< tbb::flow::interface8::receiver< typename NodeType::input_type > * >(const_cast< NodeType *>(node)) );
+ itt_metadata_str_add( ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc );
+}
+
+template< typename NodeType >
+void fgt_multiinput_multioutput_node_desc( const NodeType *node, const char *desc ) {
+ void *addr = const_cast<NodeType *>(node);
itt_metadata_str_add( ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc );
}
template< typename NodeType >
static inline void fgt_node_desc( const NodeType *node, const char *desc ) {
- void *addr = (void *)( static_cast< tbb::flow::interface7::sender< typename NodeType::output_type > * >(const_cast< NodeType *>(node)) );
+ void *addr = (void *)( static_cast< tbb::flow::interface8::sender< typename NodeType::output_type > * >(const_cast< NodeType *>(node)) );
itt_metadata_str_add( ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc );
}
itt_relation_add( ITT_DOMAIN_FLOW, body, FLOW_BODY, __itt_relation_is_child_of, node, FLOW_NODE );
}
-template< typename OutputTuple, int N, typename PortsTuple >
+template< int N, typename PortsTuple >
static inline void fgt_multioutput_node( string_index t, void *g, void *input_port, PortsTuple &ports ) {
itt_make_task_group( ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t );
fgt_internal_create_input_port( input_port, input_port, FLOW_INPUT_PORT_0 );
- fgt_internal_output_helper<OutputTuple, PortsTuple, N>::register_port( input_port, ports );
+ fgt_internal_output_helper<PortsTuple, N>::register_port( input_port, ports );
}
-template< typename OutputTuple, int N, typename PortsTuple >
+template< int N, typename PortsTuple >
static inline void fgt_multioutput_node_with_body( string_index t, void *g, void *input_port, PortsTuple &ports, void *body ) {
itt_make_task_group( ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t );
fgt_internal_create_input_port( input_port, input_port, FLOW_INPUT_PORT_0 );
- fgt_internal_output_helper<OutputTuple, PortsTuple, N>::register_port( input_port, ports );
+ fgt_internal_output_helper<PortsTuple, N>::register_port( input_port, ports );
fgt_body( input_port, body );
}
-template< typename InputTuple, int N, typename PortsTuple >
+template< int N, typename PortsTuple >
static inline void fgt_multiinput_node( string_index t, void *g, PortsTuple &ports, void *output_port) {
itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t );
fgt_internal_create_output_port( output_port, output_port, FLOW_OUTPUT_PORT_0 );
- fgt_internal_input_helper<InputTuple, PortsTuple, N>::register_port( output_port, ports );
+ fgt_internal_input_helper<PortsTuple, N>::register_port( output_port, ports );
}
static inline void fgt_node( string_index t, void *g, void *output_port ) {
static inline void fgt_body( void * /*node*/, void * /*body*/ ) { }
-template< typename OutputTuple, int N, typename PortsTuple >
+template< int N, typename PortsTuple >
static inline void fgt_multioutput_node( string_index /*t*/, void * /*g*/, void * /*input_port*/, PortsTuple & /*ports*/ ) { }
-template< typename OutputTuple, int N, typename PortsTuple >
+template< int N, typename PortsTuple >
static inline void fgt_multioutput_node_with_body( string_index /*t*/, void * /*g*/, void * /*input_port*/, PortsTuple & /*ports*/, void * /*body*/ ) { }
-template< typename InputTuple, int N, typename PortsTuple >
+template< int N, typename PortsTuple >
static inline void fgt_multiinput_node( string_index /*t*/, void * /*g*/, PortsTuple & /*ports*/, void * /*output_port*/ ) { }
static inline void fgt_node( string_index /*t*/, void * /*g*/, void * /*output_port*/ ) { }
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#error Do not #include this internal file directly; use public TBB headers instead.
#endif
-// included in namespace tbb::flow::interface7
+// included in namespace tbb::flow::interface8
namespace internal {
-// wrap each element of a tuple in a template, and make a tuple of the result.
+ // the change to key_matching (adding a K and KHash template parameter, making it a class)
+ // means we have to pass this data to the key_matching_port. All the ports have only one
+ // template parameter, so we have to wrap the following types in a trait:
+ //
+ // . K == key_type
+ // . KHash == hash and compare for Key
+ // . TtoK == function_body that given an object of T, returns its K
+ // . T == type accepted by port, and stored in the hash table
+ //
+ // The port will have an additional parameter on node construction, which is a function_body
+ // that accepts a const T& and returns a K, which is the key field of T.
+ template<typename Kp, typename KHashp, typename Tp>
+ struct KeyTrait {
+ typedef Kp K;
+ typedef Tp T;
+ typedef internal::type_to_key_function_body<T,K> TtoK;
+ typedef KHashp KHash;
+ };
+
+// wrap each element of a tuple in a template, and make a tuple of the result.
template<int N, template<class> class PT, typename TypeTuple>
struct wrap_tuple_elements;
+ // A wrapper that generates the traits needed for each port of a key-matching join,
+ // and the type of the tuple of input ports.
+ template<int N, template<class> class PT, typename KeyTraits, typename TypeTuple>
+ struct wrap_key_tuple_elements;
+
template<template<class> class PT, typename TypeTuple>
struct wrap_tuple_elements<1, PT, TypeTuple> {
typedef typename tbb::flow::tuple<
type;
};
+ template<template<class> class PT, typename KeyTraits, typename TypeTuple>
+ struct wrap_key_tuple_elements<1, PT, KeyTraits, TypeTuple > {
+ typedef typename KeyTraits::key_type K;
+ typedef typename KeyTraits::hash_compare_type KHash;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<0,TypeTuple>::type> KeyTrait0;
+ typedef typename tbb::flow::tuple< PT<KeyTrait0> > type;
+ };
+
template<template<class> class PT, typename TypeTuple>
struct wrap_tuple_elements<2, PT, TypeTuple> {
typedef typename tbb::flow::tuple<
type;
};
+ template<template<class> class PT, typename KeyTraits, typename TypeTuple>
+ struct wrap_key_tuple_elements<2, PT, KeyTraits, TypeTuple> {
+ typedef typename KeyTraits::key_type K;
+ typedef typename KeyTraits::hash_compare_type KHash;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<0,TypeTuple>::type> KeyTrait0;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<1,TypeTuple>::type> KeyTrait1;
+ typedef typename tbb::flow::tuple< PT<KeyTrait0>, PT<KeyTrait1> > type;
+ };
+
template<template<class> class PT, typename TypeTuple>
struct wrap_tuple_elements<3, PT, TypeTuple> {
typedef typename tbb::flow::tuple<
type;
};
+ template<template<class> class PT, typename KeyTraits, typename TypeTuple>
+ struct wrap_key_tuple_elements<3, PT, KeyTraits, TypeTuple> {
+ typedef typename KeyTraits::key_type K;
+ typedef typename KeyTraits::hash_compare_type KHash;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<0,TypeTuple>::type> KeyTrait0;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<1,TypeTuple>::type> KeyTrait1;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<2,TypeTuple>::type> KeyTrait2;
+ typedef typename tbb::flow::tuple< PT<KeyTrait0>, PT<KeyTrait1>, PT<KeyTrait2> > type;
+ };
+
template<template<class> class PT, typename TypeTuple>
struct wrap_tuple_elements<4, PT, TypeTuple> {
typedef typename tbb::flow::tuple<
type;
};
+ template<template<class> class PT, typename KeyTraits, typename TypeTuple>
+ struct wrap_key_tuple_elements<4, PT, KeyTraits, TypeTuple> {
+ typedef typename KeyTraits::key_type K;
+ typedef typename KeyTraits::hash_compare_type KHash;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<0,TypeTuple>::type> KeyTrait0;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<1,TypeTuple>::type> KeyTrait1;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<2,TypeTuple>::type> KeyTrait2;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<3,TypeTuple>::type> KeyTrait3;
+ typedef typename tbb::flow::tuple< PT<KeyTrait0>, PT<KeyTrait1>, PT<KeyTrait2>,
+ PT<KeyTrait3> > type;
+ };
+
template<template<class> class PT, typename TypeTuple>
struct wrap_tuple_elements<5, PT, TypeTuple> {
typedef typename tbb::flow::tuple<
type;
};
+ template<template<class> class PT, typename KeyTraits, typename TypeTuple>
+ struct wrap_key_tuple_elements<5, PT, KeyTraits, TypeTuple> {
+ typedef typename KeyTraits::key_type K;
+ typedef typename KeyTraits::hash_compare_type KHash;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<0,TypeTuple>::type> KeyTrait0;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<1,TypeTuple>::type> KeyTrait1;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<2,TypeTuple>::type> KeyTrait2;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<3,TypeTuple>::type> KeyTrait3;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<4,TypeTuple>::type> KeyTrait4;
+ typedef typename tbb::flow::tuple< PT<KeyTrait0>, PT<KeyTrait1>, PT<KeyTrait2>,
+ PT<KeyTrait3>, PT<KeyTrait4> > type;
+ };
+
#if __TBB_VARIADIC_MAX >= 6
template<template<class> class PT, typename TypeTuple>
struct wrap_tuple_elements<6, PT, TypeTuple> {
PT<typename tbb::flow::tuple_element<5,TypeTuple>::type> >
type;
};
+
+ template<template<class> class PT, typename KeyTraits, typename TypeTuple>
+ struct wrap_key_tuple_elements<6, PT, KeyTraits, TypeTuple> {
+ typedef typename KeyTraits::key_type K;
+ typedef typename KeyTraits::hash_compare_type KHash;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<0,TypeTuple>::type> KeyTrait0;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<1,TypeTuple>::type> KeyTrait1;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<2,TypeTuple>::type> KeyTrait2;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<3,TypeTuple>::type> KeyTrait3;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<4,TypeTuple>::type> KeyTrait4;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<5,TypeTuple>::type> KeyTrait5;
+ typedef typename tbb::flow::tuple< PT<KeyTrait0>, PT<KeyTrait1>, PT<KeyTrait2>, PT<KeyTrait3>,
+ PT<KeyTrait4>, PT<KeyTrait5> > type;
+ };
#endif
#if __TBB_VARIADIC_MAX >= 7
PT<typename tbb::flow::tuple_element<6,TypeTuple>::type> >
type;
};
+
+ template<template<class> class PT, typename KeyTraits, typename TypeTuple>
+ struct wrap_key_tuple_elements<7, PT, KeyTraits, TypeTuple> {
+ typedef typename KeyTraits::key_type K;
+ typedef typename KeyTraits::hash_compare_type KHash;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<0,TypeTuple>::type> KeyTrait0;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<1,TypeTuple>::type> KeyTrait1;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<2,TypeTuple>::type> KeyTrait2;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<3,TypeTuple>::type> KeyTrait3;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<4,TypeTuple>::type> KeyTrait4;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<5,TypeTuple>::type> KeyTrait5;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<6,TypeTuple>::type> KeyTrait6;
+ typedef typename tbb::flow::tuple< PT<KeyTrait0>, PT<KeyTrait1>, PT<KeyTrait2>, PT<KeyTrait3>,
+ PT<KeyTrait4>, PT<KeyTrait5>, PT<KeyTrait6> > type;
+ };
#endif
#if __TBB_VARIADIC_MAX >= 8
PT<typename tbb::flow::tuple_element<7,TypeTuple>::type> >
type;
};
+
+ template<template<class> class PT, typename KeyTraits, typename TypeTuple>
+ struct wrap_key_tuple_elements<8, PT, KeyTraits, TypeTuple> {
+ typedef typename KeyTraits::key_type K;
+ typedef typename KeyTraits::hash_compare_type KHash;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<0,TypeTuple>::type> KeyTrait0;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<1,TypeTuple>::type> KeyTrait1;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<2,TypeTuple>::type> KeyTrait2;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<3,TypeTuple>::type> KeyTrait3;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<4,TypeTuple>::type> KeyTrait4;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<5,TypeTuple>::type> KeyTrait5;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<6,TypeTuple>::type> KeyTrait6;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<7,TypeTuple>::type> KeyTrait7;
+ typedef typename tbb::flow::tuple< PT<KeyTrait0>, PT<KeyTrait1>, PT<KeyTrait2>, PT<KeyTrait3>,
+ PT<KeyTrait4>, PT<KeyTrait5>, PT<KeyTrait6>, PT<KeyTrait7> > type;
+ };
#endif
#if __TBB_VARIADIC_MAX >= 9
PT<typename tbb::flow::tuple_element<8,TypeTuple>::type> >
type;
};
+
+ template<template<class> class PT, typename KeyTraits, typename TypeTuple>
+ struct wrap_key_tuple_elements<9, PT, KeyTraits, TypeTuple> {
+ typedef typename KeyTraits::key_type K;
+ typedef typename KeyTraits::hash_compare_type KHash;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<0,TypeTuple>::type> KeyTrait0;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<1,TypeTuple>::type> KeyTrait1;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<2,TypeTuple>::type> KeyTrait2;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<3,TypeTuple>::type> KeyTrait3;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<4,TypeTuple>::type> KeyTrait4;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<5,TypeTuple>::type> KeyTrait5;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<6,TypeTuple>::type> KeyTrait6;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<7,TypeTuple>::type> KeyTrait7;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<8,TypeTuple>::type> KeyTrait8;
+ typedef typename tbb::flow::tuple< PT<KeyTrait0>, PT<KeyTrait1>, PT<KeyTrait2>, PT<KeyTrait3>,
+ PT<KeyTrait4>, PT<KeyTrait5>, PT<KeyTrait6>, PT<KeyTrait7>, PT<KeyTrait8> > type;
+ };
#endif
#if __TBB_VARIADIC_MAX >= 10
PT<typename tbb::flow::tuple_element<9,TypeTuple>::type> >
type;
};
+
+ template<template<class> class PT, typename KeyTraits, typename TypeTuple>
+ struct wrap_key_tuple_elements<10, PT, KeyTraits, TypeTuple> {
+ typedef typename KeyTraits::key_type K;
+ typedef typename KeyTraits::hash_compare_type KHash;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<0,TypeTuple>::type> KeyTrait0;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<1,TypeTuple>::type> KeyTrait1;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<2,TypeTuple>::type> KeyTrait2;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<3,TypeTuple>::type> KeyTrait3;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<4,TypeTuple>::type> KeyTrait4;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<5,TypeTuple>::type> KeyTrait5;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<6,TypeTuple>::type> KeyTrait6;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<7,TypeTuple>::type> KeyTrait7;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<8,TypeTuple>::type> KeyTrait8;
+ typedef KeyTrait<K, KHash, typename tbb::flow::tuple_element<9,TypeTuple>::type> KeyTrait9;
+ typedef typename tbb::flow::tuple< PT<KeyTrait0>, PT<KeyTrait1>, PT<KeyTrait2>, PT<KeyTrait3>,
+ PT<KeyTrait4>, PT<KeyTrait5>, PT<KeyTrait6>, PT<KeyTrait7>, PT<KeyTrait8>,
+ PT<KeyTrait9> > type;
+ };
#endif
//! type mimicking std::pair but with trailing fill to ensure each element of an array
};
// is the specified type included in a tuple?
-
-template<class U, class V> struct is_same_type { static const bool value = false; };
-template<class W> struct is_same_type<W,W> { static const bool value = true; };
-
template<class Q, size_t N, class Tuple>
struct is_element_of {
typedef typename tbb::flow::tuple_element<N-1, Tuple>::type T_i;
- static const bool value = is_same_type<Q,T_i>::value || is_element_of<Q,N-1,Tuple>::value;
+ static const bool value = tbb::internal::is_same_type<Q,T_i>::value || is_element_of<Q,N-1,Tuple>::value;
};
template<class Q, class Tuple>
struct is_element_of<Q,0,Tuple> {
typedef typename tbb::flow::tuple_element<0, Tuple>::type T_i;
- static const bool value = is_same_type<Q,T_i>::value;
+ static const bool value = tbb::internal::is_same_type<Q,T_i>::value;
};
// allow the construction of types that are listed in the tuple. If a disallowed type
using tbb::internal::punned_cast;
struct tagged_null_type {};
-template<typename TagType, typename T0, typename T1=tagged_null_type, typename T2=tagged_null_type, typename T3=tagged_null_type,
+template<typename TagType, typename T0, typename T1=tagged_null_type, typename T2=tagged_null_type, typename T3=tagged_null_type,
typename T4=tagged_null_type, typename T5=tagged_null_type, typename T6=tagged_null_type,
typename T7=tagged_null_type, typename T8=tagged_null_type, typename T9=tagged_null_type>
class tagged_msg {
typedef tbb::flow::tuple<T0, T1, T2, T3, T4
+ //TODO: Should we reject lists longer than a tuple can hold?
#if __TBB_VARIADIC_MAX >= 6
, T5
#endif
#if __TBB_VARIADIC_MAX >= 10
, T9
#endif
- > Tuple;
+ > Tuple;
private:
class variant {
variant my_msg;
public:
- tagged_msg(): my_tag(TagType(~0)), my_msg(){}
+ tagged_msg(): my_tag(TagType(~0)), my_msg(){}
template<typename T, typename R>
tagged_msg(T const &index, R const &value) : my_tag(index), my_msg(value) {}
-
+
#if __TBB_CONST_REF_TO_ARRAY_TEMPLATE_PARAM_BROKEN
template<typename T, typename R, size_t N>
tagged_msg(T const &index, R (&value)[N]) : my_tag(index), my_msg(value) {}
}; //class tagged_msg
// template to simplify cast and test for tagged_msg in template contexts
-template<typename T, typename V>
-const T& cast_to(V const &v) { return v.template cast_to<T>(); }
+template<typename V, typename T>
+const V& cast_to(T const &t) { return t.template cast_to<V>(); }
-template<typename T, typename V>
-bool is_a(V const &v) { return v.template is_a<T>(); }
+template<typename V, typename T>
+bool is_a(T const &t) { return t.template is_a<V>(); }
} // namespace internal
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
--- /dev/null
+/*
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
+
+ This file is part of Threading Building Blocks. Threading Building Blocks is free software;
+ you can redistribute it and/or modify it under the terms of the GNU General Public License
+ version 2 as published by the Free Software Foundation. Threading Building Blocks is
+ distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details. You should have received a copy of
+ the GNU General Public License along with Threading Building Blocks; if not, write to the
+ Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+ As a special exception, you may use this file as part of a free software library without
+ restriction. Specifically, if other files instantiate templates or use macros or inline
+ functions from this file, or you compile this file and link it with other files to produce
+ an executable, this file does not by itself cause the resulting executable to be covered
+ by the GNU General Public License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General Public License.
+*/
+
+// must be included outside namespaces.
+#ifndef __TBB_tbb_hash_compare_impl_H
+#define __TBB_tbb_hash_compare_impl_H
+
+#include <string>
+
+namespace tbb {
+namespace interface5 {
+namespace internal {
+
+// Template class for hash compare
+template<typename Key, typename Hasher, typename Key_equality>
+class hash_compare
+{
+public:
+ typedef Hasher hasher;
+ typedef Key_equality key_equal;
+
+ hash_compare() {}
+
+ hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {}
+
+ hash_compare(Hasher a_hasher, Key_equality a_keyeq) : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {}
+
+ size_t operator()(const Key& key) const {
+ return ((size_t)my_hash_object(key));
+ }
+
+ bool operator()(const Key& key1, const Key& key2) const {
+ return (!my_key_compare_object(key1, key2));
+ }
+
+ Hasher my_hash_object; // The hash object
+ Key_equality my_key_compare_object; // The equality comparator object
+};
+
+//! Hash multiplier
+static const size_t hash_multiplier = tbb::internal::select_size_t_constant<2654435769U, 11400714819323198485ULL>::value;
+
+} // namespace internal
+
+//! Hasher functions
+template<typename T>
+inline size_t tbb_hasher( const T& t ) {
+ return static_cast<size_t>( t ) * internal::hash_multiplier;
+}
+template<typename P>
+inline size_t tbb_hasher( P* ptr ) {
+ size_t const h = reinterpret_cast<size_t>( ptr );
+ return (h >> 3) ^ h;
+}
+template<typename E, typename S, typename A>
+inline size_t tbb_hasher( const std::basic_string<E,S,A>& s ) {
+ size_t h = 0;
+ for( const E* c = s.c_str(); *c; ++c )
+ h = static_cast<size_t>(*c) ^ (h * internal::hash_multiplier);
+ return h;
+}
+template<typename F, typename S>
+inline size_t tbb_hasher( const std::pair<F,S>& p ) {
+ return tbb_hasher(p.first) ^ tbb_hasher(p.second);
+}
+
+} // namespace interface5
+using interface5::tbb_hasher;
+
+// Template class for hash compare
+template<typename Key>
+class tbb_hash
+{
+public:
+ tbb_hash() {}
+
+ size_t operator()(const Key& key) const
+ {
+ return tbb_hasher(key);
+ }
+};
+
+//! hash_compare that is default argument for concurrent_hash_map
+template<typename Key>
+struct tbb_hash_compare {
+ static size_t hash( const Key& a ) { return tbb_hasher(a); }
+ static bool equal( const Key& a, const Key& b ) { return a == b; }
+};
+
+} // namespace tbb
+#endif /* __TBB_tbb_hash_compare_impl_H */
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
TBB_STRING_RESOURCE(FLOW_JOIN_NODE_TAG_MATCHING, "join_node (tag_matching)")
TBB_STRING_RESOURCE(FLOW_LIMITER_NODE, "limiter_node")
TBB_STRING_RESOURCE(FLOW_MULTIFUNCTION_NODE, "multifunction_node")
-TBB_STRING_RESOURCE(FLOW_OR_NODE, "or_node") //no longer in use, kept for backward compatibilty
+TBB_STRING_RESOURCE(FLOW_OR_NODE, "or_node") //no longer in use, kept for backward compatibility
TBB_STRING_RESOURCE(FLOW_OVERWRITE_NODE, "overwrite_node")
TBB_STRING_RESOURCE(FLOW_PRIORITY_QUEUE_NODE, "priority_queue_node")
TBB_STRING_RESOURCE(FLOW_QUEUE_NODE, "queue_node")
TBB_STRING_RESOURCE(FLOW_OBJECT_NAME, "object_name")
TBB_STRING_RESOURCE(FLOW_NULL, "null")
TBB_STRING_RESOURCE(FLOW_INDEXER_NODE, "indexer_node")
+TBB_STRING_RESOURCE(FLOW_COMPOSITE_NODE, "composite_node")
+TBB_STRING_RESOURCE(FLOW_ASYNC_NODE, "async_node")
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
--- /dev/null
+/*
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
+
+ This file is part of Threading Building Blocks. Threading Building Blocks is free software;
+ you can redistribute it and/or modify it under the terms of the GNU General Public License
+ version 2 as published by the Free Software Foundation. Threading Building Blocks is
+ distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
+ implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details. You should have received a copy of
+ the GNU General Public License along with Threading Building Blocks; if not, write to the
+ Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+ As a special exception, you may use this file as part of a free software library without
+ restriction. Specifically, if other files instantiate templates or use macros or inline
+ functions from this file, or you compile this file and link it with other files to produce
+ an executable, this file does not by itself cause the resulting executable to be covered
+ by the GNU General Public License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General Public License.
+*/
+
+#ifndef __TBB_template_helpers_H
+#define __TBB_template_helpers_H
+
+#include <utility>
+
+namespace tbb { namespace internal {
+
+//! Enables one or the other code branches
+template<bool Condition, typename T = void> struct enable_if {};
+template<typename T> struct enable_if<true, T> { typedef T type; };
+
+//! Strips its template type argument from cv- and ref-qualifiers
+template<typename T> struct strip { typedef T type; };
+template<typename T> struct strip<const T> { typedef T type; };
+template<typename T> struct strip<volatile T> { typedef T type; };
+template<typename T> struct strip<const volatile T> { typedef T type; };
+template<typename T> struct strip<T&> { typedef T type; };
+template<typename T> struct strip<const T&> { typedef T type; };
+template<typename T> struct strip<volatile T&> { typedef T type; };
+template<typename T> struct strip<const volatile T&> { typedef T type; };
+#if __TBB_CPP11_RVALUE_REF_PRESENT
+template<typename T> struct strip<T&&> { typedef T type; };
+template<typename T> struct strip<const T&&> { typedef T type; };
+template<typename T> struct strip<volatile T&&> { typedef T type; };
+template<typename T> struct strip<const volatile T&&> { typedef T type; };
+#endif
+//! Specialization for arrays converts to a corresponding pointer
+template<typename T, size_t N> struct strip<T(&)[N]> { typedef T* type; };
+template<typename T, size_t N> struct strip<const T(&)[N]> { typedef const T* type; };
+template<typename T, size_t N> struct strip<volatile T(&)[N]> { typedef volatile T* type; };
+template<typename T, size_t N> struct strip<const volatile T(&)[N]> { typedef const volatile T* type; };
+
+//! Detects whether two given types are the same
+template<class U, class V> struct is_same_type { static const bool value = false; };
+template<class W> struct is_same_type<W,W> { static const bool value = true; };
+
+template<typename T> struct is_ref { static const bool value = false; };
+template<typename U> struct is_ref<U&> { static const bool value = true; };
+
+#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+
+//! Allows to store a function parameter pack as a variable and later pass it to another function
+template< typename... Types >
+struct stored_pack;
+
+template<>
+struct stored_pack<>
+{
+ typedef stored_pack<> pack_type;
+ stored_pack() {}
+
+ // Friend front-end functions
+ template< typename F, typename Pack > friend void call( F&& f, Pack&& p );
+ template< typename Ret, typename F, typename Pack > friend Ret call_and_return( F&& f, Pack&& p );
+
+protected:
+ // Ideally, ref-qualified non-static methods would be used,
+ // but that would greatly reduce the set of compilers where it works.
+ template< typename Ret, typename F, typename... Preceding >
+ static Ret call( F&& f, const pack_type& /*pack*/, Preceding&&... params ) {
+ return std::forward<F>(f)( std::forward<Preceding>(params)... );
+ }
+ template< typename Ret, typename F, typename... Preceding >
+ static Ret call( F&& f, pack_type&& /*pack*/, Preceding&&... params ) {
+ return std::forward<F>(f)( std::forward<Preceding>(params)... );
+ }
+};
+
+template< typename T, typename... Types >
+struct stored_pack<T, Types...> : stored_pack<Types...>
+{
+ typedef stored_pack<T, Types...> pack_type;
+ typedef stored_pack<Types...> pack_remainder;
+ // Since the lifetime of the original values is outside our control, copies should be made.
+ // Thus references are stripped away from the deduced type.
+ typename strip<T>::type leftmost_value;
+
+ // Here rvalue references act in the same way as forwarding references,
+ // as long as class template parameters were deduced via forwarding references.
+ stored_pack( T&& t, Types&&... types )
+ : pack_remainder(std::forward<Types>(types)...), leftmost_value(std::forward<T>(t)) {}
+
+ // Friend front-end functions
+ template< typename F, typename Pack > friend void call( F&& f, Pack&& p );
+ template< typename Ret, typename F, typename Pack > friend Ret call_and_return( F&& f, Pack&& p );
+
+protected:
+ template< typename Ret, typename F, typename... Preceding >
+ static Ret call( F&& f, const pack_type& pack, Preceding&&... params ) {
+ return pack_remainder::template call<Ret>(
+ std::forward<F>(f), static_cast<const pack_remainder&>(pack),
+ std::forward<Preceding>(params)... , pack.leftmost_value
+ );
+ }
+ template< typename Ret, typename F, typename... Preceding >
+ static Ret call( F&& f, pack_type&& pack, Preceding&&... params ) {
+ return pack_remainder::template call<Ret>(
+ std::forward<F>(f), static_cast<pack_remainder&&>(pack),
+ std::forward<Preceding>(params)... , std::move(pack.leftmost_value)
+ );
+ }
+};
+
+//! Calls the given function with arguments taken from a stored_pack
+template< typename F, typename Pack >
+void call( F&& f, Pack&& p ) {
+ strip<Pack>::type::template call<void>( std::forward<F>(f), std::forward<Pack>(p) );
+}
+
+template< typename Ret, typename F, typename Pack >
+Ret call_and_return( F&& f, Pack&& p ) {
+ return strip<Pack>::type::template call<Ret>( std::forward<F>(f), std::forward<Pack>(p) );
+}
+
+template< typename... Types >
+stored_pack<Types...> save_pack( Types&&... types ) {
+ return stored_pack<Types...>( std::forward<Types>(types)... );
+}
+
+#endif /* __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */
+} } // namespace internal, namespace tbb
+
+#endif /* __TBB_template_helpers_H */
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#define __TBB_Pause(V) __TBB_machine_pause(V)
#endif /* !__TBB_Pause */
+namespace tbb { namespace internal { typedef uint64_t machine_tsc_t; } }
+static inline tbb::internal::machine_tsc_t __TBB_machine_time_stamp() {
+#if __INTEL_COMPILER
+ return _rdtsc();
+#else
+ tbb::internal::uint32_t hi, lo;
+ __asm__ __volatile__("rdtsc" : "=d"(hi), "=a"(lo));
+ return (tbb::internal::machine_tsc_t( hi ) << 32) | lo;
+#endif
+}
+#define __TBB_time_stamp() __TBB_machine_time_stamp()
+
// API to retrieve/update FPU control setting
#ifndef __TBB_CPU_CTL_ENV_PRESENT
#define __TBB_CPU_CTL_ENV_PRESENT 1
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE
//__TBB_compiler_fence() defined just in case, as it seems not to be used on its own anywhere else
+#ifndef __TBB_compiler_fence
#if _MSC_VER
//TODO: any way to use same intrinsics on windows and linux?
#pragma intrinsic(_ReadWriteBarrier)
#else
#define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory")
#endif
+#endif
#ifndef __TBB_full_memory_fence
#if _MSC_VER
#endif
#endif
+#ifndef __TBB_control_consistency_helper
#define __TBB_control_consistency_helper() __TBB_compiler_fence()
+#endif
namespace tbb { namespace internal {
//TODO: is there any way to reuse definition of memory_order enum from ICC instead of copy paste.
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#include <sched.h>
#define __TBB_Yield() sched_yield()
-// low-level timing intrinsic and its type
-#define __TBB_machine_time_stamp() _rdtsc()
-typedef uint64_t machine_tsc_t;
-
/** Specifics **/
#define __TBB_STEALING_ABORT_ON_CONTENTION 1
#define __TBB_YIELD2P 1
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
reasons why the executable file might be covered by the GNU General Public License.
*/
-#ifndef __TBB_machine_msvc_ia32_common_H
+#if !defined(__TBB_machine_H) || defined(__TBB_machine_msvc_ia32_common_H)
+#error Do not #include this internal file directly; use public TBB headers instead.
+#endif
+
#define __TBB_machine_msvc_ia32_common_H
#include <intrin.h>
-//TODO: consider moving this macro to tbb_config.h and used there MSVC asm is used
+//TODO: consider moving this macro to tbb_config.h and using where MSVC asm is used
#if !_M_X64 || __INTEL_COMPILER
#define __TBB_X86_MSVC_INLINE_ASM_AVAILABLE 1
-
- #if _M_X64
- #define __TBB_r(reg_name) r##reg_name
- #else
- #define __TBB_r(reg_name) e##reg_name
- #endif
#else
//MSVC in x64 mode does not accept inline assembler
#define __TBB_X86_MSVC_INLINE_ASM_AVAILABLE 0
+ #define __TBB_NO_X86_MSVC_INLINE_ASM_MSG "The compiler being used is not supported (outdated?)"
+#endif
+
+#if _M_X64
+ #define __TBB_r(reg_name) r##reg_name
+ #define __TBB_W(name) name##64
+ namespace tbb { namespace internal { namespace msvc_intrinsics {
+ typedef __int64 word;
+ }}}
+#else
+ #define __TBB_r(reg_name) e##reg_name
+ #define __TBB_W(name) name
+ namespace tbb { namespace internal { namespace msvc_intrinsics {
+ typedef long word;
+ }}}
+#endif
+
+#if _MSC_VER>=1600 && (!__INTEL_COMPILER || __INTEL_COMPILER>=1310)
+ // S is the operand size in bytes, B is the suffix for intrinsics for that size
+ #define __TBB_MACHINE_DEFINE_ATOMICS(S,B,T,U) \
+ __pragma(intrinsic( _InterlockedCompareExchange##B )) \
+ static inline T __TBB_machine_cmpswp##S ( volatile void * ptr, U value, U comparand ) { \
+ return _InterlockedCompareExchange##B ( (T*)ptr, value, comparand ); \
+ } \
+ __pragma(intrinsic( _InterlockedExchangeAdd##B )) \
+ static inline T __TBB_machine_fetchadd##S ( volatile void * ptr, U addend ) { \
+ return _InterlockedExchangeAdd##B ( (T*)ptr, addend ); \
+ } \
+ __pragma(intrinsic( _InterlockedExchange##B )) \
+ static inline T __TBB_machine_fetchstore##S ( volatile void * ptr, U value ) { \
+ return _InterlockedExchange##B ( (T*)ptr, value ); \
+ }
+
+ // Atomic intrinsics for 1, 2, and 4 bytes are available for x86 & x64
+ __TBB_MACHINE_DEFINE_ATOMICS(1,8,char,__int8)
+ __TBB_MACHINE_DEFINE_ATOMICS(2,16,short,__int16)
+ __TBB_MACHINE_DEFINE_ATOMICS(4,,long,__int32)
+
+ #if __TBB_WORDSIZE==8
+ __TBB_MACHINE_DEFINE_ATOMICS(8,64,__int64,__int64)
+ #endif
+
+ #undef __TBB_MACHINE_DEFINE_ATOMICS
+ #define __TBB_ATOMIC_PRIMITIVES_DEFINED
+#endif /*_MSC_VER>=1600*/
+
+#if _MSC_VER>=1300 || __INTEL_COMPILER>=1100
+ #pragma intrinsic(_ReadWriteBarrier)
+ #pragma intrinsic(_mm_mfence)
+ #define __TBB_compiler_fence() _ReadWriteBarrier()
+ #define __TBB_full_memory_fence() _mm_mfence()
+#elif __TBB_X86_MSVC_INLINE_ASM_AVAILABLE
+ #define __TBB_compiler_fence() __asm { __asm nop }
+ #define __TBB_full_memory_fence() __asm { __asm mfence }
+#else
+ #error Unsupported compiler; define __TBB_{control,acquire,release}_consistency_helper to support it
#endif
-#define __TBB_NO_X86_MSVC_INLINE_ASM_MSG "The compiler being used is not supported (outdated?)"
+#define __TBB_control_consistency_helper() __TBB_compiler_fence()
+#define __TBB_acquire_consistency_helper() __TBB_compiler_fence()
+#define __TBB_release_consistency_helper() __TBB_compiler_fence()
-#if (_MSC_VER >= 1300) || (__INTEL_COMPILER) //Use compiler intrinsic when available
- #define __TBB_PAUSE_USE_INTRINSIC 1
+#if (_MSC_VER>=1300) || (__INTEL_COMPILER)
#pragma intrinsic(_mm_pause)
- namespace tbb { namespace internal { namespace intrinsics { namespace msvc {
- static inline void __TBB_machine_pause (uintptr_t delay ) {
+ namespace tbb { namespace internal { namespace msvc_intrinsics {
+ static inline void pause (uintptr_t delay ) {
for (;delay>0; --delay )
_mm_pause();
}
- }}}}
+ }}}
+ #define __TBB_Pause(V) tbb::internal::msvc_intrinsics::pause(V)
+ #define __TBB_SINGLE_PAUSE _mm_pause()
#else
#if !__TBB_X86_MSVC_INLINE_ASM_AVAILABLE
#error __TBB_NO_X86_MSVC_INLINE_ASM_MSG
#endif
-
- namespace tbb { namespace internal { namespace inline_asm { namespace msvc {
- static inline void __TBB_machine_pause (uintptr_t delay ) {
+ namespace tbb { namespace internal { namespace msvc_inline_asm {
+ static inline void pause (uintptr_t delay ) {
_asm
{
mov __TBB_r(ax), delay
}
return;
}
- }}}}
-#endif
-
-static inline void __TBB_machine_pause (uintptr_t delay ){
- #if __TBB_PAUSE_USE_INTRINSIC
- tbb::internal::intrinsics::msvc::__TBB_machine_pause(delay);
- #else
- tbb::internal::inline_asm::msvc::__TBB_machine_pause(delay);
- #endif
-}
-
-//TODO: move this function to windows_api.h or to place where it is used
-#if (_MSC_VER<1400) && (!_WIN64) && (__TBB_X86_MSVC_INLINE_ASM_AVAILABLE)
- static inline void* __TBB_machine_get_current_teb () {
- void* pteb;
- __asm mov eax, fs:[0x18]
- __asm mov pteb, eax
- return pteb;
- }
+ }}}
+ #define __TBB_Pause(V) tbb::internal::msvc_inline_asm::pause(V)
+ #define __TBB_SINGLE_PAUSE __asm pause
#endif
-#if ( _MSC_VER>=1400 && !defined(__INTEL_COMPILER) ) || (__INTEL_COMPILER>=1200)
+#if (_MSC_VER>=1400 && !__INTEL_COMPILER) || (__INTEL_COMPILER>=1200)
// MSVC did not have this intrinsic prior to VC8.
// ICL 11.1 fails to compile a TBB example if __TBB_Log2 uses the intrinsic.
- #define __TBB_LOG2_USE_BSR_INTRINSIC 1
- #if _M_X64
- #define __TBB_BSR_INTRINSIC _BitScanReverse64
- #else
- #define __TBB_BSR_INTRINSIC _BitScanReverse
- #endif
- #pragma intrinsic(__TBB_BSR_INTRINSIC)
-
- namespace tbb { namespace internal { namespace intrinsics { namespace msvc {
- inline uintptr_t __TBB_machine_lg( uintptr_t i ){
+ #pragma intrinsic(__TBB_W(_BitScanReverse))
+ namespace tbb { namespace internal { namespace msvc_intrinsics {
+ static inline uintptr_t lg_bsr( uintptr_t i ){
unsigned long j;
- __TBB_BSR_INTRINSIC( &j, i );
+ __TBB_W(_BitScanReverse)( &j, i );
return j;
}
- }}}}
+ }}}
+ #define __TBB_Log2(V) tbb::internal::msvc_intrinsics::lg_bsr(V)
#else
#if !__TBB_X86_MSVC_INLINE_ASM_AVAILABLE
#error __TBB_NO_X86_MSVC_INLINE_ASM_MSG
#endif
-
- namespace tbb { namespace internal { namespace inline_asm { namespace msvc {
- inline uintptr_t __TBB_machine_lg( uintptr_t i ){
+ namespace tbb { namespace internal { namespace msvc_inline_asm {
+ static inline uintptr_t lg_bsr( uintptr_t i ){
uintptr_t j;
__asm
{
}
return j;
}
- }}}}
+ }}}
+ #define __TBB_Log2(V) tbb::internal::msvc_inline_asm::lg_bsr(V)
#endif
-static inline intptr_t __TBB_machine_lg( uintptr_t i ) {
-#if __TBB_LOG2_USE_BSR_INTRINSIC
- return tbb::internal::intrinsics::msvc::__TBB_machine_lg(i);
+#if _MSC_VER>=1400
+ #pragma intrinsic(__TBB_W(_InterlockedOr))
+ #pragma intrinsic(__TBB_W(_InterlockedAnd))
+ namespace tbb { namespace internal { namespace msvc_intrinsics {
+ static inline void lock_or( volatile void *operand, intptr_t addend ){
+ __TBB_W(_InterlockedOr)((volatile word*)operand, addend);
+ }
+ static inline void lock_and( volatile void *operand, intptr_t addend ){
+ __TBB_W(_InterlockedAnd)((volatile word*)operand, addend);
+ }
+ }}}
+ #define __TBB_AtomicOR(P,V) tbb::internal::msvc_intrinsics::lock_or(P,V)
+ #define __TBB_AtomicAND(P,V) tbb::internal::msvc_intrinsics::lock_and(P,V)
#else
- return tbb::internal::inline_asm::msvc::__TBB_machine_lg(i);
+ #if !__TBB_X86_MSVC_INLINE_ASM_AVAILABLE
+ #error __TBB_NO_X86_MSVC_INLINE_ASM_MSG
+ #endif
+ namespace tbb { namespace internal { namespace msvc_inline_asm {
+ static inline void lock_or( volatile void *operand, __int32 addend ) {
+ __asm
+ {
+ mov eax, addend
+ mov edx, [operand]
+ lock or [edx], eax
+ }
+ }
+ static inline void lock_and( volatile void *operand, __int32 addend ) {
+ __asm
+ {
+ mov eax, addend
+ mov edx, [operand]
+ lock and [edx], eax
+ }
+ }
+ }}}
+ #define __TBB_AtomicOR(P,V) tbb::internal::msvc_inline_asm::lock_or(P,V)
+ #define __TBB_AtomicAND(P,V) tbb::internal::msvc_inline_asm::lock_and(P,V)
#endif
+
+#pragma intrinsic(__rdtsc)
+namespace tbb { namespace internal { typedef uint64_t machine_tsc_t; } }
+static inline tbb::internal::machine_tsc_t __TBB_machine_time_stamp() {
+ return __rdtsc();
}
+#define __TBB_time_stamp() __TBB_machine_time_stamp()
// API to retrieve/update FPU control setting
#define __TBB_CPU_CTL_ENV_PRESENT 1
#define __TBB_Yield() std::this_thread::yield()
#endif
-#define __TBB_Pause(V) __TBB_machine_pause(V)
-#define __TBB_Log2(V) __TBB_machine_lg(V)
-
#undef __TBB_r
+#undef __TBB_W
+#undef __TBB_word
extern "C" {
__int8 __TBB_EXPORTED_FUNC __TBB_machine_try_lock_elided (volatile void* ptr);
void __TBB_EXPORTED_FUNC __TBB_machine_unlock_elided (volatile void* ptr);
// 'pause' instruction aborts HLE/RTM transactions
-#if __TBB_PAUSE_USE_INTRINSIC
- inline static void __TBB_machine_try_lock_elided_cancel() { _mm_pause(); }
-#else
- inline static void __TBB_machine_try_lock_elided_cancel() { _asm pause; }
-#endif
+ inline static void __TBB_machine_try_lock_elided_cancel() { __TBB_SINGLE_PAUSE; }
#if __TBB_TSX_INTRINSICS_PRESENT
#define __TBB_machine_is_in_transaction _xtest
void __TBB_EXPORTED_FUNC __TBB_machine_transaction_conflict_abort();
#endif /* __TBB_TSX_INTRINSICS_PRESENT */
}
-
-#endif /* __TBB_machine_msvc_ia32_common_H */
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#define __TBB_machine_windows_ia32_H
-#include "msvc_ia32_common.h"
-
-#define __TBB_WORDSIZE 4
-#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE
-
-#if __INTEL_COMPILER && (__INTEL_COMPILER < 1100)
- #define __TBB_compiler_fence() __asm { __asm nop }
- #define __TBB_full_memory_fence() __asm { __asm mfence }
-#elif _MSC_VER >= 1300 || __INTEL_COMPILER
- #pragma intrinsic(_ReadWriteBarrier)
- #pragma intrinsic(_mm_mfence)
- #define __TBB_compiler_fence() _ReadWriteBarrier()
- #define __TBB_full_memory_fence() _mm_mfence()
-#else
- #error Unsupported compiler - need to define __TBB_{control,acquire,release}_consistency_helper to support it
-#endif
-
-#define __TBB_control_consistency_helper() __TBB_compiler_fence()
-#define __TBB_acquire_consistency_helper() __TBB_compiler_fence()
-#define __TBB_release_consistency_helper() __TBB_compiler_fence()
-
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
// Workaround for overzealous compiler warnings in /Wp64 mode
#pragma warning (push)
#pragma warning (disable: 4244 4267)
#endif
+#include "msvc_ia32_common.h"
+
+#define __TBB_WORDSIZE 4
+#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE
+
extern "C" {
__int64 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp8 (volatile void *ptr, __int64 value, __int64 comparand );
__int64 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd8 (volatile void *ptr, __int64 addend );
__int64 __TBB_EXPORTED_FUNC __TBB_machine_load8 (const volatile void *ptr);
}
-//TODO: use _InterlockedXXX intrinsics as they available since VC 2005
+#ifndef __TBB_ATOMIC_PRIMITIVES_DEFINED
+
#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,U,A,C) \
static inline T __TBB_machine_cmpswp##S ( volatile void * ptr, U value, U comparand ) { \
T result; \
#undef __TBB_MACHINE_DEFINE_ATOMICS
-static inline void __TBB_machine_OR( volatile void *operand, __int32 addend ) {
- __asm
- {
- mov eax, addend
- mov edx, [operand]
- lock or [edx], eax
- }
-}
-
-static inline void __TBB_machine_AND( volatile void *operand, __int32 addend ) {
- __asm
- {
- mov eax, addend
- mov edx, [operand]
- lock and [edx], eax
- }
-}
-
-#define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V)
-#define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V)
+#endif /*__TBB_ATOMIC_PRIMITIVES_DEFINED*/
//TODO: Check if it possible and profitable for IA-32 architecture on (Linux and Windows)
//to use of 64-bit load/store via floating point registers together with full fence
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning (pop)
#endif // warnings 4244, 4267 are back
-
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#define __TBB_WORDSIZE 8
#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE
-#include <intrin.h>
#include "msvc_ia32_common.h"
-//TODO: Use _InterlockedXXX16 intrinsics for 2 byte operations
-#if !__INTEL_COMPILER
- #pragma intrinsic(_InterlockedOr64)
- #pragma intrinsic(_InterlockedAnd64)
- #pragma intrinsic(_InterlockedCompareExchange)
- #pragma intrinsic(_InterlockedCompareExchange64)
- #pragma intrinsic(_InterlockedExchangeAdd)
- #pragma intrinsic(_InterlockedExchangeAdd64)
- #pragma intrinsic(_InterlockedExchange)
- #pragma intrinsic(_InterlockedExchange64)
-#endif /* !(__INTEL_COMPILER) */
-
-#if __INTEL_COMPILER && (__INTEL_COMPILER < 1100)
- #define __TBB_compiler_fence() __asm { __asm nop }
- #define __TBB_full_memory_fence() __asm { __asm mfence }
-#elif _MSC_VER >= 1300 || __INTEL_COMPILER
- #pragma intrinsic(_ReadWriteBarrier)
- #pragma intrinsic(_mm_mfence)
- #define __TBB_compiler_fence() _ReadWriteBarrier()
- #define __TBB_full_memory_fence() _mm_mfence()
-#endif
+#ifndef __TBB_ATOMIC_PRIMITIVES_DEFINED
-#define __TBB_control_consistency_helper() __TBB_compiler_fence()
-#define __TBB_acquire_consistency_helper() __TBB_compiler_fence()
-#define __TBB_release_consistency_helper() __TBB_compiler_fence()
+#include <intrin.h>
+#pragma intrinsic(_InterlockedCompareExchange,_InterlockedExchangeAdd,_InterlockedExchange)
+#pragma intrinsic(_InterlockedCompareExchange64,_InterlockedExchangeAdd64,_InterlockedExchange64)
// ATTENTION: if you ever change argument types in machine-specific primitives,
// please take care of atomic_word<> specializations in tbb/atomic.h
return _InterlockedExchange64( (__int64*)ptr, value );
}
+#endif /*__TBB_ATOMIC_PRIMITIVES_DEFINED*/
+
#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1
#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1
#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1
#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1
-
-inline void __TBB_machine_OR( volatile void *operand, intptr_t addend ) {
- _InterlockedOr64((__int64*)operand, addend);
-}
-
-inline void __TBB_machine_AND( volatile void *operand, intptr_t addend ) {
- _InterlockedAnd64((__int64*)operand, addend);
-}
-
-#define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V)
-#define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V)
-
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 2) & 1) +
((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 3) & 1) +
((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 4) & 1) +
- ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 5) & 1) + 1; // +1 accomodates for the master thread
+ ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 5) & 1) + 1; // +1 accommodates for the master thread
}
static inline int __TBB_XBOX360_GetHardwareThreadIndex(int workerThreadIndex)
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#include "scalable_allocator.h"
#include <new> // std::bad_alloc
+#include <stdexcept> // std::runtime_error, std::invalid_argument
+// required in C++03 to construct std::runtime_error and std::invalid_argument
+#include <string>
#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC
#include <utility> // std::forward
#endif
//! Allocate space for n objects.
pointer allocate( size_type n, const void* /*hint*/ = 0) {
- return static_cast<pointer>( my_pool->malloc( n*sizeof(value_type) ) );
+ pointer p = static_cast<pointer>( my_pool->malloc( n*sizeof(value_type) ) );
+ if (!p)
+ tbb::internal::throw_exception(std::bad_alloc());
+ return p;
}
//! Free previously allocated block of memory.
void deallocate( pointer p, size_type ) {
rml::MemPoolPolicy args(allocate_request, deallocate_request,
sizeof(typename Alloc::value_type));
rml::MemPoolError res = rml::pool_create_v1(intptr_t(this), &args, &my_pool);
- if( res!=rml::POOL_OK ) __TBB_THROW(std::bad_alloc());
+ if (res!=rml::POOL_OK)
+ tbb::internal::throw_exception(std::runtime_error("Can't create pool"));
}
template <typename Alloc>
void *memory_pool<Alloc>::allocate_request(intptr_t pool_id, size_t & bytes) {
#pragma warning (pop)
#endif
inline fixed_pool::fixed_pool(void *buf, size_t size) : my_buffer(buf), my_size(size) {
- if( !buf || !size ) __TBB_THROW(std::bad_alloc());
+ if (!buf || !size)
+ // TODO: improve support for mode with exceptions disabled
+ tbb::internal::throw_exception(std::invalid_argument("Zero in parameter is invalid"));
rml::MemPoolPolicy args(allocate_request, 0, size, /*fixedPool=*/true);
rml::MemPoolError res = rml::pool_create_v1(intptr_t(this), &args, &my_pool);
- if( res!=rml::POOL_OK ) __TBB_THROW(std::bad_alloc());
+ if (res!=rml::POOL_OK)
+ tbb::internal::throw_exception(std::runtime_error("Can't create pool"));
}
inline void *fixed_pool::allocate_request(intptr_t pool_id, size_t & bytes) {
fixed_pool &self = *reinterpret_cast<fixed_pool*>(pool_id);
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#define __TBB_parallel_do_H
#include "internal/_range_iterator.h"
+#include "internal/_template_helpers.h"
#include "task.h"
#include "aligned_space.h"
#include <iterator>
namespace internal {
template<typename Body, typename Item> class parallel_do_feeder_impl;
template<typename Body> class do_group_task;
-
- //! Strips its template type argument from 'cv' and '&' qualifiers
- template<typename T>
- struct strip { typedef T type; };
- template<typename T>
- struct strip<T&> { typedef T type; };
- template<typename T>
- struct strip<const T&> { typedef T type; };
- template<typename T>
- struct strip<volatile T&> { typedef T type; };
- template<typename T>
- struct strip<const volatile T&> { typedef T type; };
- // Most of the compilers remove cv-qualifiers from non-reference function argument types.
- // But unfortunately there are those that don't.
- template<typename T>
- struct strip<const T> { typedef T type; };
- template<typename T>
- struct strip<volatile T> { typedef T type; };
- template<typename T>
- struct strip<const volatile T> { typedef T type; };
} // namespace internal
//! @endcond
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#endif
#if __TBB_DEFINE_MIC
#define __TBB_NONUNIFORM_TASK_CREATION 1
-#ifdef __TBB_machine_time_stamp
+#ifdef __TBB_time_stamp
#define __TBB_USE_MACHINE_TIME_STAMPS 1
#define __TBB_task_duration() __TBB_STATIC_THRESHOLD
#endif // __TBB_time_stamp
#include "task.h"
#include "aligned_space.h"
#include "atomic.h"
+#include "internal/_template_helpers.h"
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
// Workaround for overzealous compiler warnings
depth_t max_depth() { return my_max_depth; }
};
-//! Helper that enables one or the other code branches (see example in is_splittable_in_proportion)
-template<bool C, typename T = void> struct enable_if { typedef T type; };
-template<typename T> struct enable_if<false, T> { };
-
//! Class determines whether template parameter has static boolean constant
//! 'is_splittable_in_proportion' initialized with value of 'true' or not.
/** If template parameter has such field that has been initialized with non-zero
pass
} my_delay;
#ifdef __TBB_USE_MACHINE_TIME_STAMPS
- machine_tsc_t my_dst_tsc;
+ tbb::internal::machine_tsc_t my_dst_tsc;
#endif
size_t my_begin;
tbb::internal::affinity_id* my_array;
#ifndef __TBB_USE_MACHINE_TIME_STAMPS
my_delay = pass;
#else
- my_dst_tsc = __TBB_machine_time_stamp() + __TBB_task_duration();
+ my_dst_tsc = __TBB_time_stamp() + __TBB_task_duration();
my_delay = run;
} else if( run == my_delay ) {
- if( __TBB_machine_time_stamp() < my_dst_tsc ) {
+ if( __TBB_time_stamp() < my_dst_tsc ) {
__TBB_ASSERT(my_max_depth > 0, NULL);
return false;
}
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
template<typename T_, typename V_, typename U_>
friend filter_t<T_,U_> operator& (const filter_t<T_,V_>& , const filter_t<V_,U_>& );
public:
+ // TODO: add move-constructors, move-assignment, etc. where C++11 is available.
filter_t() : root(NULL) {}
filter_t( const filter_t<T,U>& rhs ) : root(rhs.root) {
if( root ) root->add_ref();
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
class MemoryPool;
typedef void *(*rawAllocType)(intptr_t pool_id, size_t &bytes);
+// returns non-zero in case of error
typedef int (*rawFreeType)(intptr_t pool_id, void* raw_ptr, size_t raw_bytes);
/*
void *pool_aligned_realloc(MemoryPool* mPool, void *ptr, size_t size, size_t alignment);
bool pool_reset(MemoryPool* memPool);
bool pool_free(MemoryPool *memPool, void *object);
+MemoryPool *pool_identify(void *object);
}
#include <new> /* To use new with the placement argument */
#pragma warning (disable: 4100)
#endif
+//! @cond INTERNAL
+namespace internal {
+
+#if TBB_USE_EXCEPTIONS
+// forward declaration is for inlining prevention
+template<typename E> __TBB_NOINLINE( void throw_exception(const E &e) );
+#endif
+
+// keep throw in a separate function to prevent code bloat
+template<typename E>
+void throw_exception(const E &e) {
+ __TBB_THROW(e);
+}
+
+} // namespace internal
+//! @endcond
+
//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5
/** The members are ordered the same way they are in section 20.4.1
of the ISO C++ standard.
//! Allocate space for n objects.
pointer allocate( size_type n, const void* /*hint*/ =0 ) {
- return static_cast<pointer>( scalable_malloc( n * sizeof(value_type) ) );
+ pointer p = static_cast<pointer>( scalable_malloc( n * sizeof(value_type) ) );
+ if (!p)
+ internal::throw_exception(std::bad_alloc());
+ return p;
}
//! Free previously allocated block of memory
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#include "tbb_stddef.h"
#include "tbb_machine.h"
+#include "tbb_profiling.h"
#include <climits>
typedef struct ___itt_caller *__itt_caller;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
}
- //! Atomically increment reference count and returns its old value.
+ //! Atomically increment reference count.
/** Has acquire semantics */
void increment_ref_count() {
__TBB_FetchAndIncrementWacquire( &prefix().ref_count );
}
+ //! Atomically adds to reference count and returns its new value.
+ /** Has release-acquire semantics */
+ int add_ref_count( int count ) {
+ internal::call_itt_notify( internal::releasing, &prefix().ref_count );
+ internal::reference_count k = count+__TBB_FetchAndAddW( &prefix().ref_count, count );
+ __TBB_ASSERT( k>=0, "task's reference count underflowed" );
+ if( k==0 )
+ internal::call_itt_notify( internal::acquired, &prefix().ref_count );
+ return int(k);
+ }
+
//! Atomically decrement reference count and returns its new value.
/** Has release semantics. */
int decrement_ref_count() {
//! sets parent task pointer to specified value
void set_parent(task* p) {
#if __TBB_TASK_GROUP_CONTEXT
- __TBB_ASSERT(prefix().context == p->prefix().context, "The tasks must be in the same context");
+ __TBB_ASSERT(!p || prefix().context == p->prefix().context, "The tasks must be in the same context");
#endif
prefix().parent = p;
}
*next_ptr = &task;
next_ptr = &task.prefix().next;
}
-
+#if __TBB_TODO
+ // TODO: add this method and implement&document the local execution ordering. See more in generic_scheduler::local_spawn
+ //! Push task onto front of list (FIFO local execution, like individual spawning in the same order).
+ void push_front( task& task ) {
+ if( empty() ) {
+ push_back(task);
+ } else {
+ task.prefix().next = first;
+ first = &task;
+ }
+ }
+#endif
//! Pop the front task from the list.
task& pop_front() {
__TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#include "critical_section.h"
#include "enumerable_thread_specific.h"
#include "flow_graph.h"
+#if TBB_PREVIEW_GLOBAL_CONTROL
+#include "global_control.h"
+#endif
#include "mutex.h"
#include "null_mutex.h"
#include "null_rw_mutex.h"
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
- known compiler/platform issues
**/
+/* This macro marks incomplete code or comments describing ideas which are considered for the future.
+ * See also for plain comment with TODO and FIXME marks for small improvement opportunities.
+ */
+#define __TBB_TODO 0
+
/*Check which standard library we use on OS X.*/
/*__TBB_SYMBOL is defined only while processing exported symbols list where C++ is not allowed.*/
#if !defined(__TBB_SYMBOL) && __APPLE__
#define __TBB_CPP11_STD_BEGIN_END_PRESENT (_MSC_VER >= 1700 || __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1310 && (__TBB_GCC_VERSION >= 40600 || _LIBCPP_VERSION))
#define __TBB_CPP11_AUTO_PRESENT (_MSC_VER >= 1600 || __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1210)
#define __TBB_CPP11_DECLTYPE_PRESENT (_MSC_VER >= 1600 || __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1210)
+ #define __TBB_CPP11_LAMBDAS_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1200)
#elif __clang__
//TODO: these options need to be rechecked
/** on OS X* the only way to get C++11 is to use clang. For library features (e.g. exception_ptr) libc++ is also
* required. So there is no need to check GCC version for clang**/
- #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (__has_feature(__cxx_variadic_templates__))
- #define __TBB_CPP11_RVALUE_REF_PRESENT (__has_feature(__cxx_rvalue_references__) && (__TBB_GCC_VERSION >= 40300 || _LIBCPP_VERSION))
+ #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (__has_feature(__cxx_variadic_templates__))
+ #define __TBB_CPP11_RVALUE_REF_PRESENT (__has_feature(__cxx_rvalue_references__) && (__TBB_GCC_VERSION >= 40300 || _LIBCPP_VERSION))
/** TODO: extend exception_ptr related conditions to cover libstdc++ **/
#define __TBB_EXCEPTION_PTR_PRESENT (__cplusplus >= 201103L && _LIBCPP_VERSION)
#define __TBB_STATIC_ASSERT_PRESENT __has_feature(__cxx_static_assert__)
#define __TBB_CPP11_STD_BEGIN_END_PRESENT (__has_feature(__cxx_range_for__) && _LIBCPP_VERSION)
#define __TBB_CPP11_AUTO_PRESENT __has_feature(__cxx_auto_type__)
#define __TBB_CPP11_DECLTYPE_PRESENT __has_feature(__cxx_decltype__)
+ #define __TBB_CPP11_LAMBDAS_PRESENT __has_feature(cxx_lambdas)
#elif __GNUC__
#define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __GXX_EXPERIMENTAL_CXX0X__
#define __TBB_CPP11_RVALUE_REF_PRESENT __GXX_EXPERIMENTAL_CXX0X__
#define __TBB_CPP11_STD_BEGIN_END_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600)
#define __TBB_CPP11_AUTO_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400)
#define __TBB_CPP11_DECLTYPE_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400)
+ #define __TBB_CPP11_LAMBDAS_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40500)
#elif _MSC_VER
#define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (_MSC_VER >= 1800)
#define __TBB_CPP11_RVALUE_REF_PRESENT (_MSC_VER >= 1600)
#define __TBB_INITIALIZER_LISTS_PRESENT (_MSC_VER >= 1800)
#define __TBB_CONSTEXPR_PRESENT 0
#define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (_MSC_VER >= 1800)
- #define __TBB_NOEXCEPT_PRESENT 0 /*for _MSC_VER == 1800*/
+ #define __TBB_NOEXCEPT_PRESENT (_MSC_VER >= 1900)
#define __TBB_CPP11_STD_BEGIN_END_PRESENT (_MSC_VER >= 1700)
#define __TBB_CPP11_AUTO_PRESENT (_MSC_VER >= 1600)
#define __TBB_CPP11_DECLTYPE_PRESENT (_MSC_VER >= 1600)
+ #define __TBB_CPP11_LAMBDAS_PRESENT (_MSC_VER >= 1600)
#else
#define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT 0
#define __TBB_CPP11_RVALUE_REF_PRESENT 0
#define __TBB_CPP11_STD_BEGIN_END_PRESENT 0
#define __TBB_CPP11_AUTO_PRESENT 0
#define __TBB_CPP11_DECLTYPE_PRESENT 0
+ #define __TBB_CPP11_LAMBDAS_PRESENT 0
#endif
// C++11 standard library features
+#define __TBB_CPP11_VARIADIC_TUPLE_PRESENT (!_MSC_VER || _MSC_VER >=1800)
#define __TBB_CPP11_TYPE_PROPERTIES_PRESENT (_LIBCPP_VERSION || _MSC_VER >= 1700)
#define __TBB_TR1_TYPE_PROPERTIES_IN_STD_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300 || _MSC_VER >= 1600)
// GCC has a partial support of type properties
#define __TBB_CPP11_IS_COPY_CONSTRUCTIBLE_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40700 || __TBB_CPP11_TYPE_PROPERTIES_PRESENT)
// In GCC and MSVC, implementation of std::move_if_noexcept is not aligned with noexcept
-#define __TBB_MOVE_IF_NOEXCEPT_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40700 || _MSC_VER >= 1800 || __clang__ && _LIBCPP_VERSION && __TBB_NOEXCEPT_PRESENT)
+#define __TBB_MOVE_IF_NOEXCEPT_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40700 || _MSC_VER >= 1900 || __clang__ && _LIBCPP_VERSION && __TBB_NOEXCEPT_PRESENT)
//TODO: Probably more accurate way is to analyze version of stdlibc++ via__GLIBCXX__ instead of __TBB_GCC_VERSION
#define __TBB_ALLOCATOR_TRAITS_PRESENT (__cplusplus >= 201103L && _LIBCPP_VERSION || _MSC_VER >= 1700 || \
__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40700 && !(__TBB_GCC_VERSION == 40700 && __TBB_DEFINE_MIC) \
/* Actually ICC supports gcc __sync_* intrinsics starting 11.1,
* but 64 bit support for 32 bit target comes in later ones*/
/* TODO: change the version back to 4.1.2 once macro __TBB_WORD_SIZE become optional */
-#if __TBB_GCC_VERSION >= 40306 || __INTEL_COMPILER >= 1200
+/* Assumed that all clang versions have these gcc compatible intrinsics. */
+#if __TBB_GCC_VERSION >= 40306 || __INTEL_COMPILER >= 1200 || __clang__
/** built-in atomics available in GCC since 4.1.2 **/
#define __TBB_GCC_BUILTIN_ATOMICS_PRESENT 1
#endif
#endif
#ifdef _VARIADIC_MAX
-#define __TBB_VARIADIC_MAX _VARIADIC_MAX
-#else
-#if _MSC_VER >= 1700
-#define __TBB_VARIADIC_MAX 5 /* current VS11 setting, may change. */
+ #define __TBB_VARIADIC_MAX _VARIADIC_MAX
#else
-#define __TBB_VARIADIC_MAX 10
-#endif
+ #if _MSC_VER == 1700
+ #define __TBB_VARIADIC_MAX 5 // VS11 setting, issue resolved in VS12
+ #elif _MSC_VER == 1600
+ #define __TBB_VARIADIC_MAX 10 // VS10 setting
+ #else
+ #define __TBB_VARIADIC_MAX 15
+ #endif
#endif
/** __TBB_WIN8UI_SUPPORT enables support of New Windows*8 Store Apps and limit a possibility to load
#define __TBB_SSE_STACK_ALIGNMENT_BROKEN 0
#endif
-#if __GNUC__==4 && __GNUC_MINOR__==3 && __GNUC_PATCHLEVEL__==0
+#if __TBB_GCC_VERSION==40300 && !__INTEL_COMPILER && !__clang__
/* GCC of this version may rashly ignore control dependencies */
#define __TBB_GCC_OPTIMIZER_ORDERING_BROKEN 1
#endif
#define __TBB_GCC_CAS8_BUILTIN_INLINING_BROKEN 1
#endif
-#if __TBB_x86_32 && (__linux__ || __APPLE__ || _WIN32 || __sun || __ANDROID__) && (__INTEL_COMPILER || (__GNUC__==3 && __GNUC_MINOR__==3 ) || __SUNPRO_CC)
+#if __TBB_x86_32 && (__linux__ || __APPLE__ || _WIN32 || __sun || __ANDROID__) && (__INTEL_COMPILER || (__GNUC__==3 && __GNUC_MINOR__==3 )||(__MINGW32__ ) && (__GNUC__==4 && __GNUC_MINOR__==5 ) || __SUNPRO_CC)
// Some compilers for IA-32 fail to provide 8-byte alignment of objects on the stack,
// even if the object specifies 8-byte alignment. On such platforms, the IA-32 implementation
// of 64 bit atomics (e.g. atomic<long long>) use different tactics depending upon
// MSVC 2013 and ICC 15 seems do not generate implicit move constructor for empty derived class while should
#define __TBB_CPP11_IMPLICIT_MOVE_MEMBERS_GENERATION_FOR_DERIVED_BROKEN (__TBB_CPP11_RVALUE_REF_PRESENT && \
- ( !__INTEL_COMPILER && _MSC_VER && _MSC_VER <=1800 || __INTEL_COMPILER && __INTEL_COMPILER <= 1500 ))
+ ( !__INTEL_COMPILER && _MSC_VER && _MSC_VER <= 1800 || __INTEL_COMPILER && __INTEL_COMPILER <= 1500 ))
+
+#define __TBB_CPP11_DECLVAL_BROKEN (_MSC_VER == 1600 || (__GNUC__ && __TBB_GCC_VERSION < 40500) )
+
+// Intel C++ compiler has difficulties with copying std::pair with VC11 std::reference_wrapper being a const member
+#define __TBB_COPY_FROM_NON_CONST_REF_BROKEN (_MSC_VER == 1700 && __INTEL_COMPILER && __INTEL_COMPILER < 1600)
+//The implicit upcasting of the tuple of a reference of a derived class to a base class fails on icc 13.X
+//if the system's gcc environment is 4.8
+#if (__INTEL_COMPILER >=1300 && __INTEL_COMPILER <=1310) && __TBB_GCC_VERSION>=40700 && __GXX_EXPERIMENTAL_CXX0X__
+ #define __TBB_UPCAST_OF_TUPLE_OF_REF_BROKEN 1
+#endif
/** End of __TBB_XXX_BROKEN macro section **/
#define __TBB_ALLOCATOR_CONSTRUCT_VARIADIC (__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT)
#define __TBB_VARIADIC_PARALLEL_INVOKE (TBB_PREVIEW_VARIADIC_PARALLEL_INVOKE && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT)
+#define __TBB_FLOW_GRAPH_CPP11_FEATURES (__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT \
+ && __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_AUTO_PRESENT) \
+ && __TBB_CPP11_VARIADIC_TUPLE_PRESENT && !__TBB_UPCAST_OF_TUPLE_OF_REF_BROKEN
+#define __TBB_PREVIEW_ASYNC_NODE TBB_PREVIEW_FLOW_GRAPH_NODES
+#define __TBB_PREVIEW_OPENCL_NODE __TBB_FLOW_GRAPH_CPP11_FEATURES && TBB_PREVIEW_FLOW_GRAPH_NODES
#endif /* __TBB_tbb_config_H */
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
__TBB_USE_GENERIC_DWORD_FETCH_ADD
__TBB_USE_GENERIC_DWORD_FETCH_STORE
__TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE
- __TBB_USE_GENERIC_FULL_FENCED_LOAD_STORE
+ __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE
__TBB_USE_GENERIC_RELAXED_LOAD_STORE
__TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE
class atomic_backoff : no_copy {
//! Time delay, in units of "pause" instructions.
/** Should be equal to approximately the number of "pause" instructions
- that take the same time as an context switch. */
+ that take the same time as an context switch. Must be a power of two.*/
static const int32_t LOOPS_BEFORE_YIELD = 16;
int32_t count;
public:
}
}
- // pause for a few times and then return false immediately.
+ //! Pause for a few times and return false if saturated.
bool bounded_pause() {
- if( count<=LOOPS_BEFORE_YIELD ) {
- __TBB_Pause(count);
+ __TBB_Pause(count);
+ if( count<LOOPS_BEFORE_YIELD ) {
// Pause twice as long the next time.
count*=2;
return true;
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
// Marketing-driven product version
#define TBB_VERSION_MAJOR 4
-#define TBB_VERSION_MINOR 3
+#define TBB_VERSION_MINOR 4
// Engineering-focused interface version
-#define TBB_INTERFACE_VERSION 8002
+#define TBB_INTERFACE_VERSION 9000
#define TBB_INTERFACE_VERSION_MAJOR TBB_INTERFACE_VERSION/1000
// The oldest major interface version still supported
//! Type for an assertion handler
typedef void(*assertion_handler_type)( const char* filename, int line, const char* expression, const char * comment );
-#if TBB_USE_ASSERT
-
- #define __TBB_ASSERT_NS(predicate,message,ns) ((predicate)?((void)0) : ns::assertion_failure(__FILE__,__LINE__,#predicate,message))
- //! Assert that x is true.
- /** If x is false, print assertion failure message.
- If the comment argument is not NULL, it is printed as part of the failure message.
- The comment argument has no other effect. */
#if __TBBMALLOC_BUILD
namespace rml { namespace internal {
- #define __TBB_ASSERT(predicate,message) __TBB_ASSERT_NS(predicate,message,rml::internal)
+ #define __TBB_ASSERT_RELEASE(predicate,message) ((predicate)?((void)0) : rml::internal::assertion_failure(__FILE__,__LINE__,#predicate,message))
#else
namespace tbb {
- #define __TBB_ASSERT(predicate,message) __TBB_ASSERT_NS(predicate,message,tbb)
+ #define __TBB_ASSERT_RELEASE(predicate,message) ((predicate)?((void)0) : tbb::assertion_failure(__FILE__,__LINE__,#predicate,message))
#endif
- #define __TBB_ASSERT_EX __TBB_ASSERT
-
//! Set assertion handler and return previous value of it.
assertion_handler_type __TBB_EXPORTED_FUNC set_assertion_handler( assertion_handler_type new_handler );
#else
} // namespace tbb
#endif
+
+#if TBB_USE_ASSERT
+
+ //! Assert that x is true.
+ /** If x is false, print assertion failure message.
+ If the comment argument is not NULL, it is printed as part of the failure message.
+ The comment argument has no other effect. */
+ #define __TBB_ASSERT(predicate,message) __TBB_ASSERT_RELEASE(predicate,message)
+
+ #define __TBB_ASSERT_EX __TBB_ASSERT
+
#else /* !TBB_USE_ASSERT */
//! No-op version of __TBB_ASSERT.
//! The namespace tbb contains all components of the library.
namespace tbb {
-#if _MSC_VER && _MSC_VER<1600
namespace internal {
+#if _MSC_VER && _MSC_VER<1600
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
- } // namespace internal
#else /* Posix */
- namespace internal {
using ::int8_t;
using ::int16_t;
using ::int32_t;
using ::uint16_t;
using ::uint32_t;
using ::uint64_t;
- } // namespace internal
#endif /* Posix */
+ } // namespace internal
using std::size_t;
using std::ptrdiff_t;
//! A function to compute arg modulo divisor where divisor is a power of 2.
template<typename argument_integer_type, typename divisor_integer_type>
inline argument_integer_type modulo_power_of_two(argument_integer_type arg, divisor_integer_type divisor) {
- // Divisor is assumed to be a power of two (which is valid for current uses).
__TBB_ASSERT( is_power_of_two(divisor), "Divisor should be a power of two" );
return (arg & (divisor - 1));
}
-//! A function to determine if "arg is a multiplication of a number and a power of 2".
-// i.e. for strictly positive i and j, with j a power of 2,
+//! A function to determine if arg is a power of 2 at least as big as another power of 2.
+// i.e. for strictly positive i and j, with j being a power of 2,
// determines whether i==j<<k for some nonnegative k (so i==j yields true).
-template<typename argument_integer_type, typename divisor_integer_type>
-inline bool is_power_of_two_factor(argument_integer_type arg, divisor_integer_type divisor) {
- // Divisor is assumed to be a power of two (which is valid for current uses).
- __TBB_ASSERT( is_power_of_two(divisor), "Divisor should be a power of two" );
- return 0 == (arg & (arg - divisor));
+template<typename argument_integer_type, typename power2_integer_type>
+inline bool is_power_of_two_at_least(argument_integer_type arg, power2_integer_type power2) {
+ __TBB_ASSERT( is_power_of_two(power2), "Divisor should be a power of two" );
+ return 0 == (arg & (arg - power2));
}
//! Utility template function to prevent "unused" warnings by various compilers.
#if __TBB_CPP11_RVALUE_REF_PRESENT
using std::move;
+using std::forward;
#elif defined(_LIBCPP_NAMESPACE)
-// libc++ defines "pre-C++11 move" similarly to our; use it to avoid name conflicts in some cases.
+// libc++ defines "pre-C++11 move and forward" similarly to ours; use it to avoid name conflicts in some cases.
using std::_LIBCPP_NAMESPACE::move;
+using std::_LIBCPP_NAMESPACE::forward;
#else
+// It is assumed that cv qualifiers, if any, are part of the deduced type.
template <typename T>
T& move( T& x ) { return x; }
+template <typename T>
+T& forward( T& x ) { return x; }
+#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */
+
+// Helper macros to simplify writing templates working with both C++03 and C++11.
+#if __TBB_CPP11_RVALUE_REF_PRESENT
+#define __TBB_FORWARDING_REF(A) A&&
+#else
+// It is assumed that cv qualifiers, if any, are part of a deduced type.
+// Thus this macro should not be used in public interfaces.
+#define __TBB_FORWARDING_REF(A) A&
+#endif
+#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
+#define __TBB_PARAMETER_PACK ...
+#define __TBB_PACK_EXPANSION(A) A...
+#else
+#define __TBB_PARAMETER_PACK
+#define __TBB_PACK_EXPANSION(A) A
+#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */
+
+#if __TBB_CPP11_DECLTYPE_PRESENT
+#if __TBB_CPP11_DECLVAL_BROKEN
+// Ad-hoc implementation of std::declval
+template <class T> __TBB_FORWARDING_REF(T) declval() /*noexcept*/;
+#else
+using std::declval;
+#endif
#endif
template <bool condition>
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
#define __TBB_tbb_thread_H
#include "tbb_stddef.h"
+
#if _WIN32||_WIN64
#include "machine/windows_api.h"
#define __TBB_NATIVE_THREAD_ROUTINE unsigned WINAPI
#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) unsigned (WINAPI* r)( void* )
+namespace tbb { namespace internal {
#if __TBB_WIN8UI_SUPPORT
-typedef size_t thread_id_type;
+ typedef size_t thread_id_type;
#else // __TBB_WIN8UI_SUPPORT
-typedef DWORD thread_id_type;
+ typedef DWORD thread_id_type;
#endif // __TBB_WIN8UI_SUPPORT
+}} //namespace tbb::internal
#else
#define __TBB_NATIVE_THREAD_ROUTINE void*
#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) void* (*r)( void* )
#include <pthread.h>
+namespace tbb { namespace internal {
+ typedef pthread_t thread_id_type;
+}} //namespace tbb::internal
#endif // _WIN32||_WIN64
+#include "atomic.h"
+#include "internal/_tbb_hash_compare_impl.h"
#include "tick_count.h"
#if !TBB_USE_EXCEPTIONS && _MSC_VER
#pragma warning (disable: 4530)
#endif
+#include <utility> //for swap
#include <iosfwd>
#if !TBB_USE_EXCEPTIONS && _MSC_VER
};
class tbb_thread_v3::id {
-#if _WIN32||_WIN64
thread_id_type my_id;
id( thread_id_type id_ ) : my_id(id_) {}
-#else
- pthread_t my_id;
- id( pthread_t id_ ) : my_id(id_) {}
-#endif // _WIN32||_WIN64
+
friend class tbb_thread_v3;
public:
id() __TBB_NOEXCEPT(true) : my_id(0) {}
return out;
}
friend tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3();
+
+ friend inline size_t tbb_hasher( const tbb_thread_v3::id& id ) {
+ __TBB_STATIC_ASSERT(sizeof(id.my_id) <= sizeof(size_t), "Implementaion assumes that thread_id_type fits into machine word");
+ return tbb::tbb_hasher(id.my_id);
+ }
+
+ // A workaround for lack of tbb::atomic<id> (which would require id to be POD in C++03).
+ friend id atomic_compare_and_swap(id& location, const id& value, const id& comparand){
+ return as_atomic(location.my_id).compare_and_swap(value.my_id, comparand.my_id);
+ }
}; // tbb_thread_v3::id
tbb_thread_v3::id tbb_thread_v3::get_id() const __TBB_NOEXCEPT(true) {
return id(my_handle);
#endif // _WIN32||_WIN64
}
+
void __TBB_EXPORTED_FUNC move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 );
tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3();
void __TBB_EXPORTED_FUNC thread_yield_v3();
}
inline void swap( internal::tbb_thread_v3& t1, internal::tbb_thread_v3& t2 ) __TBB_NOEXCEPT(true) {
- tbb::tbb_thread::native_handle_type h = t1.my_handle;
- t1.my_handle = t2.my_handle;
- t2.my_handle = h;
+ std::swap(t1.my_handle, t2.my_handle);
#if _WIN32||_WIN64
- thread_id_type i = t1.my_thread_id;
- t1.my_thread_id = t2.my_thread_id;
- t2.my_thread_id = i;
+ std::swap(t1.my_thread_id, t2.my_thread_id);
#endif /* _WIN32||_WIN64 */
}
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
/*
- Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+ Copyright 2005-2015 Intel Corporation. All Rights Reserved.
This file is part of Threading Building Blocks. Threading Building Blocks is free software;
you can redistribute it and/or modify it under the terms of the GNU General Public License
-; Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+
+; Copyright 2005-2015 Intel Corporation. All Rights Reserved.
+;
+; This file is part of Threading Building Blocks. Threading Building Blocks is free software;
+; you can redistribute it and/or modify it under the terms of the GNU General Public License
+; version 2 as published by the Free Software Foundation. Threading Building Blocks is
+; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
+; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+; See the GNU General Public License for more details. You should have received a copy of
+; the GNU General Public License along with Threading Building Blocks; if not, write to the
+; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+;
+; As a special exception, you may use this file as part of a free software library without
+; restriction. Specifically, if other files instantiate templates or use macros or inline
+; functions from this file, or you compile this file and link it with other files to produce
+; an executable, this file does not by itself cause the resulting executable to be covered
+; by the GNU General Public License. This exception does not however invalidate any other
+; reasons why the executable file might be covered by the GNU General Public License.
+
+; This file is organized with a section for each .cpp file.
+; Each of these sections is in alphabetical order.
+
+EXPORTS
+
+
+
+; Copyright 2005-2015 Intel Corporation. All Rights Reserved.
;
-; The source code contained or described herein and all documents related
-; to the source code ("Material") are owned by Intel Corporation or its
-; suppliers or licensors. Title to the Material remains with Intel
-; Corporation or its suppliers and licensors. The Material is protected
-; by worldwide copyright laws and treaty provisions. No part of the
-; Material may be used, copied, reproduced, modified, published, uploaded,
-; posted, transmitted, distributed, or disclosed in any way without
-; Intel's prior express written permission.
+; This file is part of Threading Building Blocks. Threading Building Blocks is free software;
+; you can redistribute it and/or modify it under the terms of the GNU General Public License
+; version 2 as published by the Free Software Foundation. Threading Building Blocks is
+; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
+; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+; See the GNU General Public License for more details. You should have received a copy of
+; the GNU General Public License along with Threading Building Blocks; if not, write to the
+; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
;
-; No license under any patent, copyright, trade secret or other
-; intellectual property right is granted to or conferred upon you by
-; disclosure or delivery of the Materials, either expressly, by
-; implication, inducement, estoppel or otherwise. Any license under such
-; intellectual property rights must be express and approved by Intel in
-; writing.
+; As a special exception, you may use this file as part of a free software library without
+; restriction. Specifically, if other files instantiate templates or use macros or inline
+; functions from this file, or you compile this file and link it with other files to produce
+; an executable, this file does not by itself cause the resulting executable to be covered
+; by the GNU General Public License. This exception does not however invalidate any other
+; reasons why the executable file might be covered by the GNU General Public License.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-; This file is organized with a section for each .cpp file.
-; Each of these sections is in alphabetical order.
-EXPORTS
-; Copyright 2005-2014 Intel Corporation. All Rights Reserved.
-;
-; The source code contained or described herein and all documents related
-; to the source code ("Material") are owned by Intel Corporation or its
-; suppliers or licensors. Title to the Material remains with Intel
-; Corporation or its suppliers and licensors. The Material is protected
-; by worldwide copyright laws and treaty provisions. No part of the
-; Material may be used, copied, reproduced, modified, published, uploaded,
-; posted, transmitted, distributed, or disclosed in any way without
-; Intel's prior express written permission.
-;
-; No license under any patent, copyright, trade secret or other
-; intellectual property right is granted to or conferred upon you by
-; disclosure or delivery of the Materials, either expressly, by
-; implication, inducement, estoppel or otherwise. Any license under such
-; intellectual property rights must be express and approved by Intel in
-; writing.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
+
-
-
-
-
-
+
+
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
+
+
+
+
+
+
-
-
+
+
-
-
-
-
-
-
-
-
+
+
-
-
-
+
+
-
-
?initialize@task_scheduler_init@tbb@@QEAAXH_K@Z
?initialize@task_scheduler_init@tbb@@QEAAXH@Z
?terminate@task_scheduler_init@tbb@@QEAAXXZ
+
?observe@task_scheduler_observer_v3@internal@tbb@@QEAAX_N@Z
-
+
+
+
?internal_current_slot@task_arena_base@internal@interface7@tbb@@KAHXZ
?internal_initialize@task_arena_base@internal@interface7@tbb@@IEAAXXZ
?internal_terminate@task_arena_base@internal@interface7@tbb@@IEAAXXZ
?internal_wait@task_arena_base@internal@interface7@tbb@@IEBAXXZ
+
+
?destroy@task@tbb@@QEAAXAEAV12@@Z
+
+
?allocate@allocate_root_with_context_proxy@internal@tbb@@QEBAAEAVtask@3@_K@Z
?free@allocate_root_with_context_proxy@internal@tbb@@QEBAXAEAVtask@3@@Z
?change_group@task@tbb@@QEAAXAEAVtask_group_context@2@@Z
?init@task_group_context@tbb@@IEAAXXZ
?register_pending_exception@task_group_context@tbb@@QEAAXXZ
??1task_group_context@tbb@@QEAA@XZ
+
?set_priority@task_group_context@tbb@@QEAAXW4priority_t@2@@Z
?priority@task_group_context@tbb@@QEBA?AW4priority_t@2@XZ
+
?name@captured_exception@tbb@@UEBAPEBDXZ
?what@captured_exception@tbb@@UEBAPEBDXZ
??1captured_exception@tbb@@UEAA@XZ
?clear@captured_exception@tbb@@QEAAXXZ
+
?throw_bad_last_alloc_exception_v4@internal@tbb@@YAXXZ
?throw_exception_v4@internal@tbb@@YAXW4exception_id@12@@Z
?what@bad_last_alloc@tbb@@UEBAPEBDXZ
?itt_set_sync_name_v3@internal@tbb@@YAXPEAXPEB_W@Z
+
+
+
+
+
+
+
??_7pipeline@tbb@@6B@
??0pipeline@tbb@@QEAA@XZ
??1filter@tbb@@UEAA@XZ
?clear@pipeline@tbb@@QEAAXXZ
?inject_token@pipeline@tbb@@AEAAXAEAVtask@2@@Z
?run@pipeline@tbb@@QEAAX_K@Z
+
?run@pipeline@tbb@@QEAAX_KAEAVtask_group_context@2@@Z
+
?process_item@thread_bound_filter@tbb@@QEAA?AW4result_type@12@XZ
?try_process_item@thread_bound_filter@tbb@@QEAA?AW4result_type@12@XZ
?set_end_of_input@filter@tbb@@IEAAXXZ
?internal_destroy@scoped_lock_read@reader_writer_lock@interface5@tbb@@AEAAXXZ
+
?internal_itt_releasing@spin_rw_mutex@tbb@@CAXPEAV12@@Z
?internal_acquire_writer@spin_rw_mutex@tbb@@CA_NPEAV12@@Z
?internal_acquire_reader@spin_rw_mutex@tbb@@CAXPEAV12@@Z
?internal_try_acquire_reader@spin_rw_mutex@tbb@@CA_NPEAV12@@Z
+
?internal_construct@spin_rw_mutex_v3@tbb@@AEAAXXZ
?internal_upgrade@spin_rw_mutex_v3@tbb@@AEAA_NXZ
?internal_downgrade@spin_rw_mutex_v3@tbb@@AEAAXXZ
?internal_construct@critical_section_v4@internal@tbb@@QEAAXXZ
+
?internal_grow_predicate@hash_map_segment_base@internal@tbb@@QEBA_NXZ
?internal_size@concurrent_queue_base@internal@tbb@@IEBA_JXZ
+
??0concurrent_queue_iterator_base_v3@internal@tbb@@IEAA@AEBVconcurrent_queue_base_v3@12@@Z
??0concurrent_queue_iterator_base_v3@internal@tbb@@IEAA@AEBVconcurrent_queue_base_v3@12@_K@Z
??1concurrent_queue_iterator_base_v3@internal@tbb@@IEAA@XZ
?move_content@concurrent_queue_base_v8@internal@tbb@@IEAAXAEAV123@@Z
+
?internal_assign@concurrent_vector_base@internal@tbb@@IEAAXAEBV123@_KP6AXPEAX1@ZP6AX2PEBX1@Z5@Z
?internal_capacity@concurrent_vector_base@internal@tbb@@IEBA_KXZ
?internal_clear@concurrent_vector_base@internal@tbb@@IEAAXP6AXPEAX_K@Z_N@Z
?internal_reserve@concurrent_vector_base@internal@tbb@@IEAAX_K00@Z
+
??1concurrent_vector_base_v3@internal@tbb@@IEAA@XZ
?internal_assign@concurrent_vector_base_v3@internal@tbb@@IEAAXAEBV123@_KP6AXPEAX1@ZP6AX2PEBX1@Z5@Z
?internal_capacity@concurrent_vector_base_v3@internal@tbb@@IEBA_KXZ
?internal_destroy_condition_variable@internal@interface5@tbb@@YAXAEATcondvar_impl_t@123@@Z
+?active_value@global_control@interface9@tbb@@CA_KH@Z
+?internal_create@global_control@interface9@tbb@@AEAAXXZ
+?internal_destroy@global_control@interface9@tbb@@AEAAXXZ
+
+
+
+
-; Copyright 2005-2014 Intel Corporation. All Rights Reserved.
+
+; Copyright 2005-2015 Intel Corporation. All Rights Reserved.
;
-; The source code contained or described herein and all documents related
-; to the source code ("Material") are owned by Intel Corporation or its
-; suppliers or licensors. Title to the Material remains with Intel
-; Corporation or its suppliers and licensors. The Material is protected
-; by worldwide copyright laws and treaty provisions. No part of the
-; Material may be used, copied, reproduced, modified, published, uploaded,
-; posted, transmitted, distributed, or disclosed in any way without
-; Intel's prior express written permission.
+; This file is part of Threading Building Blocks. Threading Building Blocks is free software;
+; you can redistribute it and/or modify it under the terms of the GNU General Public License
+; version 2 as published by the Free Software Foundation. Threading Building Blocks is
+; distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
+; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+; See the GNU General Public License for more details. You should have received a copy of
+; the GNU General Public License along with Threading Building Blocks; if not, write to the
+; Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
;
-; No license under any patent, copyright, trade secret or other
-; intellectual property right is granted to or conferred upon you by
-; disclosure or delivery of the Materials, either expressly, by
-; implication, inducement, estoppel or otherwise. Any license under such
-; intellectual property rights must be express and approved by Intel in
-; writing.
+; As a special exception, you may use this file as part of a free software library without
+; restriction. Specifically, if other files instantiate templates or use macros or inline
+; functions from this file, or you compile this file and link it with other files to produce
+; an executable, this file does not by itself cause the resulting executable to be covered
+; by the GNU General Public License. This exception does not however invalidate any other
+; reasons why the executable file might be covered by the GNU General Public License.
EXPORTS
?pool_realloc@rml@@YAPEAXPEAVMemoryPool@1@PEAX_K@Z
?pool_aligned_realloc@rml@@YAPEAXPEAVMemoryPool@1@PEAX_K2@Z
?pool_aligned_malloc@rml@@YAPEAXPEAVMemoryPool@1@_K1@Z
+?pool_identify@rml@@YAPEAVMemoryPool@1@PEAX@Z