/*
    Copyright 2005-2011 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    As a special exception, you may use this file as part of a free software
    library without restriction.  Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License.  This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/
#ifndef __TBB_enumerable_thread_specific_H
#define __TBB_enumerable_thread_specific_H

#include "concurrent_vector.h"
#include "tbb_thread.h"
#include "cache_aligned_allocator.h"
#include "aligned_space.h"
#include <string.h>   // for memcpy

#if _WIN32||_WIN64
#include "machine/windows_api.h"
#else
#include <pthread.h>
#endif

namespace tbb {
//! enum for selecting between single key and key-per-instance versions
enum ets_key_usage_type { ets_key_per_instance, ets_no_key };
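// Note: ets_key_per_instance binds each container to its own native TLS key
// (a limited OS resource, but the fastest lookup path), while ets_no_key
// shares a single open-addressed table hashed by thread id; see ets_base below.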

namespace interface6 {

namespace internal {

template<ets_key_usage_type ETS_key_type>
class ets_base: tbb::internal::no_copy {
protected:
#if _WIN32||_WIN64
    typedef DWORD key_type;
#else
    typedef pthread_t key_type;
#endif
#if __TBB_GCC_3_3_PROTECTED_BROKEN
public:
#endif
    struct slot;

    struct array {
        array* next;
        size_t lg_size;
        slot& at( size_t k ) {
            return ((slot*)(void*)(this+1))[k];
        }
        size_t size() const {return (size_t)1<<lg_size;}
        size_t mask() const {return size()-1;}
        size_t start( size_t h ) const {
            return h>>(8*sizeof(size_t)-lg_size);
        }
    };
    struct slot {
        key_type key;
        void* ptr;
        bool empty() const {return !key;}
        bool match( key_type k ) const {return key==k;}
        bool claim( key_type k ) {
            __TBB_ASSERT(sizeof(tbb::atomic<key_type>)==sizeof(key_type), NULL);
            return tbb::internal::punned_cast<tbb::atomic<key_type>*>(&key)->compare_and_swap(k,0)==0;
        }
    };
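
    // claim() publishes a key with one atomic compare-and-swap: a slot is
    // free iff key==0, so whichever thread swaps 0 -> k first owns the slot,
    // and a loser simply probes onward. punned_cast reinterprets the plain
    // key_type as tbb::atomic<key_type>; the assert checks that the two
    // layouts really coincide.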

#if __TBB_GCC_3_3_PROTECTED_BROKEN
protected:
#endif

    static key_type key_of_current_thread() {
        tbb::tbb_thread::id id = tbb::this_tbb_thread::get_id();
        key_type k;
        memcpy( &k, &id, sizeof(k) );
        return k;
    }

    //! Root of linked list of arrays of decreasing size.
    /** NULL if and only if my_count==0.
        Each array in the list is half the size of its predecessor. */
    atomic<array*> my_root;
    atomic<size_t> my_count;
    virtual void* create_local() = 0;
    virtual void* create_array(size_t _size) = 0;   // _size in bytes
    virtual void free_array(void* ptr, size_t _size) = 0; // _size in bytes
    array* allocate( size_t lg_size ) {
        size_t n = 1<<lg_size;
        array* a = static_cast<array*>(create_array( sizeof(array)+n*sizeof(slot) ));
        a->lg_size = lg_size;
        std::memset( a+1, 0, n*sizeof(slot) );
        return a;
    }
    void free(array* a) {
        size_t n = 1<<(a->lg_size);
        free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) );
    }
    static size_t hash( key_type k ) {
        // Multiplicative hashing. Client should use *upper* bits.
        // casts required for Mac gcc4.* compiler
#if __TBB_WORDSIZE == 4
        return uintptr_t(k)*0x9E3779B9;
#else
        return uintptr_t(k)*0x9E3779B97F4A7C15;
#endif
    }
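
    // The multipliers are 2^32/phi and 2^64/phi (phi = the golden ratio),
    // i.e. Fibonacci hashing: the multiply scrambles the key into the
    // high-order bits, which is why array::start() derives an index from
    // h >> (8*sizeof(size_t)-lg_size) rather than masking off low bits.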

    ets_base() {my_root=NULL; my_count=0;}
    virtual ~ets_base();  // g++ complains if this is not virtual...
    void* table_lookup( bool& exists );
    void table_clear();
    slot& table_find( key_type k ) {
        size_t h = hash(k);
        array* r = my_root;
        size_t mask = r->mask();
        for(size_t i = r->start(h);;i=(i+1)&mask) {
            slot& s = r->at(i);
            if( s.empty() || s.match(k) )
                return s;
        }
    }
    void table_reserve_for_copy( const ets_base& other ) {
        __TBB_ASSERT(!my_root,NULL);
        __TBB_ASSERT(!my_count,NULL);
        if( other.my_root ) {
            array* a = allocate(other.my_root->lg_size);
            a->next = NULL;
            my_root = a;
            my_count = other.my_count;
        }
    }
};

template<ets_key_usage_type ETS_key_type>
ets_base<ETS_key_type>::~ets_base() {
    __TBB_ASSERT(!my_root, NULL);
}

template<ets_key_usage_type ETS_key_type>
void ets_base<ETS_key_type>::table_clear() {
    while( array* r = my_root ) {
        my_root = r->next;
        free(r);
    }
    my_count = 0;
}

template<ets_key_usage_type ETS_key_type>
void* ets_base<ETS_key_type>::table_lookup( bool& exists ) {
    const key_type k = key_of_current_thread();

    __TBB_ASSERT(k!=0,NULL);
    void* found;
    size_t h = hash(k);
    for( array* r=my_root; r; r=r->next ) {
        size_t mask=r->mask();
        for(size_t i = r->start(h); ;i=(i+1)&mask) {
            slot& s = r->at(i);
            if( s.empty() ) break;
            if( s.match(k) ) {
                if( r==my_root ) {
                    // Success at top level
                    exists = true;
                    return s.ptr;
                } else {
                    // Success at some other level. Need to insert at top level.
                    exists = true;
                    found = s.ptr;
                    goto insert;
                }
            }
        }
    }
    // Key does not yet exist
    exists = false;
    found = create_local();
    {
        // Grow the top-level array if needed.
        size_t c = ++my_count;
        array* r = my_root;
        if( !r || c>r->size()/2 ) {
            size_t s = r ? r->lg_size : 2;
            while( c>size_t(1)<<(s-1) ) ++s;
            array* a = allocate(s);
            for(;;) {
                a->next = my_root;
                array* new_r = my_root.compare_and_swap(a,r);
                if( new_r==r ) break;
                if( new_r->lg_size>=s ) {
                    // Another thread inserted an equal or bigger array, so our array is superfluous.
                    free(a);
                    break;
                }
                r = new_r;
            }
        }
    }
insert:
    // Guaranteed to be room for it, and it is not present, so search for empty slot and grab it.
    array* ir = my_root;
    size_t mask = ir->mask();
    for(size_t i = ir->start(h);;i=(i+1)&mask) {
        slot& s = ir->at(i);
        if( s.empty() && s.claim(k) ) {
            s.ptr = found;
            return found;
        }
    }
}
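
// Growth policy: the top-level array is replaced as soon as the entry count
// would exceed half its capacity, keeping probe sequences short; the older,
// smaller arrays stay linked behind it so that entries published there remain
// reachable, and a hit found on a lower level is re-inserted at the top so
// later lookups by the same thread stay fast.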

//! Specialization that exploits native TLS
template <>
class ets_base<ets_key_per_instance>: protected ets_base<ets_no_key> {
    typedef ets_base<ets_no_key> super;
#if _WIN32||_WIN64
    typedef DWORD tls_key_t;
    void create_key() { my_key = TlsAlloc(); }
    void destroy_key() { TlsFree(my_key); }
    void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); }
    void* get_tls() { return (void *)TlsGetValue(my_key); }
#else
    typedef pthread_key_t tls_key_t;
    void create_key() { pthread_key_create(&my_key, NULL); }
    void destroy_key() { pthread_key_delete(my_key); }
    void set_tls( void * value ) const { pthread_setspecific(my_key, value); }
    void* get_tls() const { return pthread_getspecific(my_key); }
#endif
    tls_key_t my_key;
    virtual void* create_local() = 0;
    virtual void* create_array(size_t _size) = 0;   // _size in bytes
    virtual void free_array(void* ptr, size_t _size) = 0; // _size in bytes
public:
    ets_base() {create_key();}
    ~ets_base() {destroy_key();}
    void* table_lookup( bool& exists ) {
        void* found = get_tls();
        if( found ) {
            exists = true;
        } else {
            found = super::table_lookup(exists);
            set_tls(found);
        }
        return found;
    }
    void table_clear() {
        destroy_key();
        create_key();
        super::table_clear();
    }
};
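
// Design note: table_lookup() above is the fast path -- a single native TLS
// read per call once a thread's copy exists. The inherited hash table is
// still maintained on the slow path so the container can enumerate every
// thread's copy; TLS alone cannot provide cross-thread iteration.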

//! Random access iterator for traversing the thread local copies.
template< typename Container, typename Value >
class enumerable_thread_specific_iterator
#if defined(_WIN64) && defined(_MSC_VER)
    // Ensure that Microsoft's internal template function _Val_type works correctly.
    : public std::iterator<std::random_access_iterator_tag,Value>
#endif /* defined(_WIN64) && defined(_MSC_VER) */
{
    //! current position in the concurrent_vector

    Container *my_container;
    typename Container::size_type my_index;
    mutable Value *my_value;

    template<typename C, typename T>
    friend enumerable_thread_specific_iterator<C,T> operator+( ptrdiff_t offset,
                const enumerable_thread_specific_iterator<C,T>& v );

    template<typename C, typename T, typename U>
    friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i,
                const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i,
                const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i,
                const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename U>
    friend class enumerable_thread_specific_iterator;

public:

    enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) :
        my_container(&const_cast<Container &>(container)), my_index(index), my_value(NULL) {}

    //! Default constructor
    enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}

    template<typename U>
    enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator<Container, U>& other ) :
        my_container( other.my_container ), my_index( other.my_index ), my_value( const_cast<Value *>(other.my_value) ) {}

    enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const {
        return enumerable_thread_specific_iterator(*my_container, my_index + offset);
    }

    enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) {
        my_index += offset; my_value = NULL; return *this;
    }

    enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const {
        return enumerable_thread_specific_iterator( *my_container, my_index-offset );
    }

    enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) {
        my_index -= offset; my_value = NULL; return *this;
    }

    Value& operator*() const {
        Value* value = my_value;
        if( !value ) {
            value = my_value = reinterpret_cast<Value *>(&(*my_container)[my_index].value);
        }
        __TBB_ASSERT( value==reinterpret_cast<Value *>(&(*my_container)[my_index].value), "corrupt cache" );
        return *value;
    }

    Value& operator[]( ptrdiff_t k ) const {
        return (*my_container)[my_index + k].value;
    }

    Value* operator->() const {return &operator*();}

    enumerable_thread_specific_iterator& operator++() {
        ++my_index; my_value = NULL; return *this;
    }

    enumerable_thread_specific_iterator& operator--() {
        --my_index; my_value = NULL; return *this;
    }

    //! Post increment
    enumerable_thread_specific_iterator operator++(int) {
        enumerable_thread_specific_iterator result = *this;
        ++my_index; my_value = NULL;
        return result;
    }

    //! Post decrement
    enumerable_thread_specific_iterator operator--(int) {
        enumerable_thread_specific_iterator result = *this;
        --my_index; my_value = NULL;
        return result;
    }

    // STL support
    typedef ptrdiff_t difference_type;
    typedef Value value_type;
    typedef Value* pointer;
    typedef Value& reference;
    typedef std::random_access_iterator_tag iterator_category;
};

template<typename Container, typename T>
enumerable_thread_specific_iterator<Container,T> operator+( ptrdiff_t offset,
            const enumerable_thread_specific_iterator<Container,T>& v ) {
    return enumerable_thread_specific_iterator<Container,T>( *v.my_container, v.my_index + offset );
}

template<typename Container, typename T, typename U>
bool operator==( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return i.my_index==j.my_index && i.my_container == j.my_container;
}

template<typename Container, typename T, typename U>
bool operator!=( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return !(i==j);
}

template<typename Container, typename T, typename U>
bool operator<( const enumerable_thread_specific_iterator<Container,T>& i,
                const enumerable_thread_specific_iterator<Container,U>& j ) {
    return i.my_index<j.my_index;
}

template<typename Container, typename T, typename U>
bool operator>( const enumerable_thread_specific_iterator<Container,T>& i,
                const enumerable_thread_specific_iterator<Container,U>& j ) {
    return j<i;
}

template<typename Container, typename T, typename U>
bool operator>=( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return !(i<j);
}

template<typename Container, typename T, typename U>
bool operator<=( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return !(j<i);
}

template<typename Container, typename T, typename U>
ptrdiff_t operator-( const enumerable_thread_specific_iterator<Container,T>& i,
                     const enumerable_thread_specific_iterator<Container,U>& j ) {
    return i.my_index-j.my_index;
}

template<typename SegmentedContainer, typename Value >
class segmented_iterator
#if defined(_WIN64) && defined(_MSC_VER)
    : public std::iterator<std::input_iterator_tag, Value>
#endif
{
    template<typename C, typename T, typename U>
    friend bool operator==(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);

    template<typename C, typename T, typename U>
    friend bool operator!=(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);

    template<typename C, typename U>
    friend class segmented_iterator;

public:

    segmented_iterator() {my_segcont = NULL;}

    segmented_iterator( const SegmentedContainer& _segmented_container ) :
        my_segcont(const_cast<SegmentedContainer*>(&_segmented_container)),
        outer_iter(my_segcont->end()) { }

    ~segmented_iterator() {}

    typedef typename SegmentedContainer::iterator outer_iterator;
    typedef typename SegmentedContainer::value_type InnerContainer;
    typedef typename InnerContainer::iterator inner_iterator;

    // STL support
    typedef ptrdiff_t difference_type;
    typedef Value value_type;
    typedef typename SegmentedContainer::size_type size_type;
    typedef Value* pointer;
    typedef Value& reference;
    typedef std::input_iterator_tag iterator_category;

    // Copy constructor
    template<typename U>
    segmented_iterator(const segmented_iterator<SegmentedContainer, U>& other) :
        my_segcont(other.my_segcont),
        outer_iter(other.outer_iter),
        // can we assign a default-constructed iterator to inner if we're at the end?
        inner_iter(other.inner_iter)
    {}

    // Assignment
    template<typename U>
    segmented_iterator& operator=( const segmented_iterator<SegmentedContainer, U>& other) {
        my_segcont = other.my_segcont;
        outer_iter = other.outer_iter;
        if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter;
        return *this;
    }

    // allow assignment of outer iterator to segmented iterator. Once it is
    // assigned, move forward until a non-empty inner container is found or
    // the end of the outer container is reached.
    segmented_iterator& operator=(const outer_iterator& new_outer_iter) {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        // check that this iterator points to something inside the segmented container
        for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) {
            if( !outer_iter->empty() ) {
                inner_iter = outer_iter->begin();
                break;
            }
        }
        return *this;
    }

    // pre-increment
    segmented_iterator& operator++() {
        advance_me();
        return *this;
    }

    // post-increment
    segmented_iterator operator++(int) {
        segmented_iterator tmp = *this;
        operator++();
        return tmp;
    }

    bool operator==(const outer_iterator& other_outer) const {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        return (outer_iter == other_outer &&
                (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin()));
    }

    bool operator!=(const outer_iterator& other_outer) const {
        return !operator==(other_outer);
    }

    // dereference
    reference operator*() const {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing an iterator at end of container");
        __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // should never happen
        return *inner_iter;
    }

    pointer operator->() const { return &operator*();}

private:
    SegmentedContainer* my_segcont;
    outer_iterator outer_iter;
    inner_iterator inner_iter;

    void advance_me() {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers
        __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty.
        ++inner_iter;
        while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) {
            inner_iter = outer_iter->begin();
        }
    }
}; // segmented_iterator
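
// Invariant maintained by operator=(outer_iterator) and advance_me():
// whenever outer_iter != my_segcont->end(), inner_iter refers into a
// non-empty inner container. Empty inner containers are skipped over, so
// dereferencing a non-end segmented_iterator is always safe.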

template<typename SegmentedContainer, typename T, typename U>
bool operator==( const segmented_iterator<SegmentedContainer,T>& i,
                 const segmented_iterator<SegmentedContainer,U>& j ) {
    if(i.my_segcont != j.my_segcont) return false;
    if(i.my_segcont == NULL) return true;
    if(i.outer_iter != j.outer_iter) return false;
    if(i.outer_iter == i.my_segcont->end()) return true;
    return i.inner_iter == j.inner_iter;
}

template<typename SegmentedContainer, typename T, typename U>
bool operator!=( const segmented_iterator<SegmentedContainer,T>& i,
                 const segmented_iterator<SegmentedContainer,U>& j ) {
    return !(i==j);
}

template<typename T>
struct destruct_only: tbb::internal::no_copy {
    tbb::aligned_space<T,1> value;
    ~destruct_only() {value.begin()[0].~T();}
};

template<typename T>
struct construct_by_default: tbb::internal::no_assign {
    void construct(void* where) {new(where) T();} // C++ note: the () in T() ensures zero initialization.
    construct_by_default( int ) {}
};

template<typename T>
struct construct_by_exemplar: tbb::internal::no_assign {
    const T exemplar;
    void construct(void* where) {new(where) T(exemplar);}
    construct_by_exemplar( const T& t ) : exemplar(t) {}
};

template<typename T, typename Finit>
struct construct_by_finit: tbb::internal::no_assign {
    Finit f;
    void construct(void* where) {new(where) T(f());}
    construct_by_finit( const Finit& f_ ) : f(f_) {}
};
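
// These three construct_by_* policies back the three public constructors of
// enumerable_thread_specific below: default construction, copy construction
// from an exemplar value, and construction from the result of a
// user-supplied functor (finit).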

// storage for initialization function pointer
template<typename T>
class callback_base {
public:
    // Clone *this
    virtual callback_base* clone() = 0;
    // Destruct and free *this
    virtual void destroy() = 0;
    // Need virtual destructor to satisfy GCC compiler warning
    virtual ~callback_base() { }
    // Construct T at where
    virtual void construct(void* where) = 0;
};

template <typename T, typename Constructor>
class callback_leaf: public callback_base<T>, Constructor {
    template<typename X> callback_leaf( const X& x ) : Constructor(x) {}

    typedef typename tbb::tbb_allocator<callback_leaf> my_allocator_type;

    /*override*/ callback_base<T>* clone() {
        void* where = my_allocator_type().allocate(1);
        return new(where) callback_leaf(*this);
    }

    /*override*/ void destroy() {
        my_allocator_type().destroy(this);
        my_allocator_type().deallocate(this,1);
    }

    /*override*/ void construct(void* where) {
        Constructor::construct(where);
    }
public:
    template<typename X>
    static callback_base<T>* make( const X& x ) {
        void* where = my_allocator_type().allocate(1);
        return new(where) callback_leaf(x);
    }
};
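
// Design note: Constructor is a private base rather than a data member, so a
// stateless policy such as construct_by_default adds no size to callback_leaf
// (empty-base optimization). make() and destroy() pair the object's lifetime
// with tbb::tbb_allocator, which uses the TBB scalable allocator when it is
// available and falls back on std::malloc otherwise.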

//! Template for adding padding in order to avoid false sharing
/** ModularSize should be sizeof(U) modulo the cache line size.
    All maintenance of the space will be done explicitly on push_back,
    and all thread local copies must be destroyed before the concurrent
    vector is deleted. */
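// Worked example (illustrative): with tbb::internal::NFS_MaxLineSize == 128
// and sizeof(U) == 40, ModularSize is 40, so value[] occupies
// 40 + (128-40) == 128 bytes and consecutive elements never share a cache
// line. When sizeof(U) is already a multiple of the line size
// (ModularSize == 0), no padding is added.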
template<typename U, size_t ModularSize>
struct ets_element {
    char value[ModularSize==0 ? sizeof(U) : sizeof(U)+(tbb::internal::NFS_MaxLineSize-ModularSize)];
    void unconstruct() {
        tbb::internal::punned_cast<U*>(&value)->~U();
    }
};

} // namespace internal

//! The enumerable_thread_specific container
/** enumerable_thread_specific has the following properties:
    - thread-local copies are lazily created, with default, exemplar or function initialization.
    - thread-local copies do not move (during lifetime, and excepting clear()) so the address of a copy is invariant.
    - the contained objects need not have operator=() defined if combine is not used.
    - enumerable_thread_specific containers may be copy-constructed or assigned.
    - thread-local copies can be managed by hash-table, or can be accessed via TLS storage for speed.
    - outside of parallel contexts, the contents of all thread-local copies are accessible by iterator or using combine or combine_each methods

    @par Segmented iterator
    When the thread-local objects are containers with input_iterators defined, a segmented iterator may
    be used to iterate over all the elements of all thread-local copies.

    @par combine and combine_each
    - Both methods are defined for enumerable_thread_specific.
    - combine() requires that the type T have operator=() defined.
    - neither method modifies the contents of the object (though there is no guarantee that the applied methods do not modify the object.)
    - Both are evaluated in serial context (the methods are assumed to be non-benign.)

    @ingroup containers */
template <typename T,
          typename Allocator=cache_aligned_allocator<T>,
          ets_key_usage_type ETS_key_type=ets_no_key >
class enumerable_thread_specific: internal::ets_base<ETS_key_type> {

    template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;

    typedef internal::ets_element<T,sizeof(T)%tbb::internal::NFS_MaxLineSize> padded_element;

    //! A generic range, used to create range objects from the iterators
    template<typename I>
    class generic_range_type: public blocked_range<I> {
    public:
        typedef T value_type;
        typedef T& reference;
        typedef const T& const_reference;
        typedef I iterator;
        typedef ptrdiff_t difference_type;
        generic_range_type( I begin_, I end_, size_t grainsize_ = 1 ) : blocked_range<I>(begin_,end_,grainsize_) {}
        template<typename U>
        generic_range_type( const generic_range_type<U>& r ) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
        generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
    };

    typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type;
    typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type;

    internal::callback_base<T> *my_construct_callback;

    internal_collection_type my_locals;

    /*override*/ void* create_local() {
#if TBB_DEPRECATED
        void* lref = &my_locals[my_locals.push_back(padded_element())];
#else
        void* lref = &*my_locals.push_back(padded_element());
#endif
        my_construct_callback->construct(lref);
        return lref;
    }

    void unconstruct_locals() {
        for(typename internal_collection_type::iterator cvi = my_locals.begin(); cvi != my_locals.end(); ++cvi) {
            cvi->unconstruct();
        }
    }

    typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type;

    // _size is in bytes
    /*override*/ void* create_array(size_t _size) {
        size_t nelements = (_size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);
        return array_allocator_type().allocate(nelements);
    }

    /*override*/ void free_array( void* _ptr, size_t _size ) {
        size_t nelements = (_size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);
        array_allocator_type().deallocate( reinterpret_cast<uintptr_t *>(_ptr), nelements );
    }

public:

    // Basic types
    typedef Allocator allocator_type;
    typedef T value_type;
    typedef T& reference;
    typedef const T& const_reference;
    typedef T* pointer;
    typedef const T* const_pointer;
    typedef typename internal_collection_type::size_type size_type;
    typedef typename internal_collection_type::difference_type difference_type;

    // Iterator types
    typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;
    typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;

    // Parallel range types
    typedef generic_range_type< iterator > range_type;
    typedef generic_range_type< const_iterator > const_range_type;

    //! Default constructor. Each local instance of T is default constructed.
    enumerable_thread_specific() :
        my_construct_callback( internal::callback_leaf<T,internal::construct_by_default<T> >::make(/*dummy argument*/0) )
    {}

    //! Constructor with initializer functor. Each local instance of T is constructed by T(finit()).
    template <typename Finit>
    enumerable_thread_specific( Finit finit ) :
        my_construct_callback( internal::callback_leaf<T,internal::construct_by_finit<T,Finit> >::make( finit ) )
    {}

    //! Constructor with exemplar. Each local instance of T is copy-constructed from the exemplar.
    enumerable_thread_specific( const T& exemplar ) :
        my_construct_callback( internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( exemplar ) )
    {}

    //! Destructor
    ~enumerable_thread_specific() {
        my_construct_callback->destroy();
        this->clear();  // deallocation must happen before the derived class finishes destructing,
                        // so that free(array*) is still accessible
    }

    //! Returns reference to calling thread's local copy, discarding exists
    reference local() {
        bool exists;
        return local(exists);
    }

    //! Returns reference to calling thread's local copy, creating one if necessary
    reference local(bool& exists) {
        void* ptr = this->table_lookup(exists);
        return *(T*)ptr;
    }

    //! Get the number of local copies
    size_type size() const { return my_locals.size(); }

    //! true if there have been no local copies created
    bool empty() const { return my_locals.empty(); }

    //! begin iterator
    iterator begin() { return iterator( my_locals, 0 ); }
    //! end iterator
    iterator end() { return iterator( my_locals, my_locals.size() ); }

    //! begin const iterator
    const_iterator begin() const { return const_iterator(my_locals, 0); }

    //! end const iterator
    const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }

    //! Get range for parallel algorithms
    range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); }

    //! Get const range for parallel algorithms
    const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }

    //! Destroys local copies
    void clear() {
        unconstruct_locals();
        my_locals.clear();
        this->table_clear();
        // callback is not destroyed
        // exemplar is not destroyed
    }

private:

    template<typename U, typename A2, ets_key_usage_type C2>
    void internal_copy( const enumerable_thread_specific<U, A2, C2>& other );

public:

    template<typename U, typename Alloc, ets_key_usage_type Cachetype>
    enumerable_thread_specific( const enumerable_thread_specific<U, Alloc, Cachetype>& other ) : internal::ets_base<ETS_key_type> ()
    {
        internal_copy(other);
    }

    enumerable_thread_specific( const enumerable_thread_specific& other ) : internal::ets_base<ETS_key_type> ()
    {
        internal_copy(other);
    }

private:

    template<typename U, typename A2, ets_key_usage_type C2>
    enumerable_thread_specific &
    internal_assign( const enumerable_thread_specific<U, A2, C2>& other ) {
        if(static_cast<void *>( this ) != static_cast<const void *>( &other )) {
            this->clear();
            my_construct_callback->destroy();
            my_construct_callback = 0;
            internal_copy( other );
        }
        return *this;
    }

public:

    // assignment
    enumerable_thread_specific& operator=( const enumerable_thread_specific& other ) {
        return internal_assign(other);
    }

    template<typename U, typename Alloc, ets_key_usage_type Cachetype>
    enumerable_thread_specific& operator=( const enumerable_thread_specific<U, Alloc, Cachetype>& other )
    {
        return internal_assign(other);
    }

    // combine_func_t has signature T(T,T) or T(const T&, const T&)
    template <typename combine_func_t>
    T combine(combine_func_t f_combine) {
        if( begin() == end() ) {
            internal::destruct_only<T> location;
            my_construct_callback->construct(location.value.begin());
            return *location.value.begin();
        }
        const_iterator ci = begin();
        T my_result = *ci;
        while(++ci != end())
            my_result = f_combine( my_result, *ci );
        return my_result;
    }

    // combine_func_t has signature void(T) or void(const T&)
    template <typename combine_func_t>
    void combine_each(combine_func_t f_combine) {
        for(const_iterator ci = begin(); ci != end(); ++ci) {
            f_combine( *ci );
        }
    }

}; // enumerable_thread_specific
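
// Usage sketch (illustrative only, not part of the original header). Assumes
// tbb/parallel_for.h and a lambda-capable compiler; 'n' and 'data' are
// placeholders:
//
//     tbb::enumerable_thread_specific<int> partial;      // one int per thread
//     tbb::parallel_for( 0, n, [&]( int i ) {
//         partial.local() += data[i];                    // each thread updates
//     } );                                               // only its own copy
//     int total = partial.combine( std::plus<int>() );   // serial reduction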

template <typename T, typename Allocator, ets_key_usage_type ETS_key_type>
template <typename U, typename A2, ets_key_usage_type C2>
void enumerable_thread_specific<T,Allocator,ETS_key_type>::internal_copy( const enumerable_thread_specific<U, A2, C2>& other ) {
    // Initialize my_construct_callback first, so that it is valid even if the rest of this routine throws an exception.
    my_construct_callback = other.my_construct_callback->clone();

    typedef internal::ets_base<ets_no_key> base;
    __TBB_ASSERT(my_locals.size()==0,NULL);
    this->table_reserve_for_copy( other );
    for( base::array* r = other.my_root; r; r = r->next ) {
        for( size_t i = 0; i < r->size(); ++i ) {
            base::slot& s1 = r->at(i);
            if( !s1.empty() ) {
                base::slot& s2 = this->table_find(s1.key);
                if( s2.empty() ) {
#if TBB_DEPRECATED
                    void* lref = &my_locals[my_locals.push_back(padded_element())];
#else
                    void* lref = &*my_locals.push_back(padded_element());
#endif
                    s2.ptr = new(lref) T(*(U*)s1.ptr);
                    s2.key = s1.key;
                } else {
                    // Skip the duplicate
                }
            }
        }
    }
}

template< typename Container >
class flattened2d {

    // This intermediate typedef is to address issues with VC7.1 compilers
    typedef typename Container::value_type conval_type;

public:

    // Basic types
    typedef typename conval_type::size_type size_type;
    typedef typename conval_type::difference_type difference_type;
    typedef typename conval_type::allocator_type allocator_type;
    typedef typename conval_type::value_type value_type;
    typedef typename conval_type::reference reference;
    typedef typename conval_type::const_reference const_reference;
    typedef typename conval_type::pointer pointer;
    typedef typename conval_type::const_pointer const_pointer;

    typedef typename internal::segmented_iterator<Container, value_type> iterator;
    typedef typename internal::segmented_iterator<Container, const value_type> const_iterator;

    flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) :
        my_container(const_cast<Container*>(&c)), my_begin(b), my_end(e) { }

    flattened2d( const Container &c ) :
        my_container(const_cast<Container*>(&c)), my_begin(c.begin()), my_end(c.end()) { }

    iterator begin() { return iterator(*my_container) = my_begin; }
    iterator end() { return iterator(*my_container) = my_end; }
    const_iterator begin() const { return const_iterator(*my_container) = my_begin; }
    const_iterator end() const { return const_iterator(*my_container) = my_end; }

    size_type size() const {
        size_type tot_size = 0;
        for(typename Container::const_iterator i = my_begin; i != my_end; ++i) {
            tot_size += i->size();
        }
        return tot_size;
    }

private:

    Container *my_container;
    typename Container::const_iterator my_begin;
    typename Container::const_iterator my_end;

};

template <typename Container>
flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {
    return flattened2d<Container>(c, b, e);
}

template <typename Container>
flattened2d<Container> flatten2d(const Container &c) {
    return flattened2d<Container>(c);
}
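
// Usage sketch (illustrative only, not part of the original header): each
// thread appends to its own std::vector, and flatten2d later presents all of
// the vectors as one input sequence ('process' is a placeholder):
//
//     typedef tbb::enumerable_thread_specific< std::vector<int> > ets_vec;
//     ets_vec buckets;
//     // ... parallel phase: buckets.local().push_back(x) ...
//     tbb::flattened2d<ets_vec> flat = tbb::flatten2d( buckets );
//     for( tbb::flattened2d<ets_vec>::iterator i = flat.begin(); i != flat.end(); ++i )
//         process(*i);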

} // namespace interface6

namespace internal {
using interface6::internal::segmented_iterator;
} // namespace internal

using interface6::enumerable_thread_specific;
using interface6::flattened2d;
using interface6::flatten2d;

} // namespace tbb

#endif /* __TBB_enumerable_thread_specific_H */