2 Copyright 2005-2011 Intel Corporation. All Rights Reserved.
4 This file is part of Threading Building Blocks.
6 Threading Building Blocks is free software; you can redistribute it
7 and/or modify it under the terms of the GNU General Public License
8 version 2 as published by the Free Software Foundation.
10 Threading Building Blocks is distributed in the hope that it will be
11 useful, but WITHOUT ANY WARRANTY; without even the implied warranty
12 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with Threading Building Blocks; if not, write to the Free Software
17 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 As a special exception, you may use this file as part of a free software
20 library without restriction. Specifically, if other files instantiate
21 templates or use macros or inline functions from this file, or you compile
22 this file and link it with other files to produce an executable, this
23 file does not by itself cause the resulting executable to be covered by
24 the GNU General Public License. This exception does not however
25 invalidate any other reasons why the executable file might be covered by
26 the GNU General Public License.
29 #ifndef __TBB_enumerable_thread_specific_H
30 #define __TBB_enumerable_thread_specific_H
32 #include "concurrent_vector.h"
33 #include "tbb_thread.h"
34 #include "tbb_allocator.h"
35 #include "cache_aligned_allocator.h"
36 #include "aligned_space.h"
37 #include <string.h> // for memcpy
40 #include "machine/windows_api.h"
47 //! enum for selecting between single key and key-per-instance versions
// ets_key_per_instance: each container instance owns a native TLS key (fast
// lookup via TlsGetValue/pthread_getspecific — see the specialization below).
// ets_no_key: all instances share a hash table keyed by the OS thread id.
48 enum ets_key_usage_type { ets_key_per_instance, ets_no_key };
50 namespace interface6 {
// Base class for enumerable_thread_specific when no per-instance native TLS
// key is used: thread-local values live in a chain of hash-table arrays
// keyed by the OS thread id, searched with linear probing.
// NOTE(review): this excerpt is elided — closing braces and several interior
// lines are missing; visible code is kept verbatim (including the fused
// original line numbers).
55 template<ets_key_usage_type ETS_key_type>
56 class ets_base: tbb::internal::no_copy {
// Key is the native thread id: DWORD on Windows, pthread_t elsewhere.
59 typedef DWORD key_type;
61 typedef pthread_t key_type;
63 #if __TBB_GCC_3_3_PROTECTED_BROKEN
// Slots are stored contiguously immediately after the array header.
71 slot& at( size_t k ) {
72 return ((slot*)(void*)(this+1))[k];
74 size_t size() const {return (size_t)1<<lg_size;}
75 size_t mask() const {return size()-1;}
// Initial probe index: taken from the *upper* lg_size bits of the hash,
// matching the multiplicative hash() below.
76 size_t start( size_t h ) const {
77 return h>>(8*sizeof(size_t)-lg_size);
// A slot is empty while key==0; claim() atomically CASes key from 0 to k.
83 bool empty() const {return !key;}
84 bool match( key_type k ) const {return key==k;}
85 bool claim( key_type k ) {
86 __TBB_ASSERT(sizeof(tbb::atomic<key_type>)==sizeof(key_type), NULL);
87 return tbb::internal::punned_cast<tbb::atomic<key_type>*>(&key)->compare_and_swap(k,0)==0;
90 #if __TBB_GCC_3_3_PROTECTED_BROKEN
// Derive the key from the raw bytes of the calling thread's tbb_thread id.
94 static key_type key_of_current_thread() {
95 tbb::tbb_thread::id id = tbb::this_tbb_thread::get_id();
97 memcpy( &k, &id, sizeof(k) );
101 //! Root of linked list of arrays of decreasing size.
102 /** NULL if and only if my_count==0.
103 Each array in the list is half the size of its predecessor. */
104 atomic<array*> my_root;
105 atomic<size_t> my_count;
// Type-aware allocation/construction hooks, implemented by the derived
// enumerable_thread_specific.
106 virtual void* create_local() = 0;
107 virtual void* create_array(size_t _size) = 0; // _size in bytes
108 virtual void free_array(void* ptr, size_t _size) = 0; // _size in bytes
// Allocate an array with 2^lg_size slots, zero-initialized (so all empty).
109 array* allocate( size_t lg_size ) {
110 size_t n = 1<<lg_size;
111 array* a = static_cast<array*>(create_array( sizeof(array)+n*sizeof(slot) ));
112 a->lg_size = lg_size;
113 std::memset( a+1, 0, n*sizeof(slot) );
116 void free(array* a) {
117 size_t n = 1<<(a->lg_size);
118 free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) );
120 static size_t hash( key_type k ) {
121 // Multiplicative hashing. Client should use *upper* bits.
122 // casts required for Mac gcc4.* compiler
123 #if __TBB_WORDSIZE == 4
124 return uintptr_t(k)*0x9E3779B9;
126 return uintptr_t(k)*0x9E3779B97F4A7C15;
130 ets_base() {my_root=NULL; my_count=0;}
131 virtual ~ets_base(); // g++ complains if this is not virtual...
132 void* table_lookup( bool& exists );
// Linear-probe search in the top-level array; stops at a matching or empty slot.
134 slot& table_find( key_type k ) {
137 size_t mask = r->mask();
138 for(size_t i = r->start(h);;i=(i+1)&mask) {
140 if( s.empty() || s.match(k) )
// Pre-size an empty table to match another table, prior to copying its contents.
144 void table_reserve_for_copy( const ets_base& other ) {
145 __TBB_ASSERT(!my_root,NULL);
146 __TBB_ASSERT(!my_count,NULL);
147 if( other.my_root ) {
148 array* a = allocate(other.my_root->lg_size);
151 my_count = other.my_count;
// Out-of-line virtual destructor. The table must already have been cleared by
// the derived class (see ~enumerable_thread_specific), hence the assertion.
156 template<ets_key_usage_type ETS_key_type>
157 ets_base<ETS_key_type>::~ets_base() {
158 __TBB_ASSERT(!my_root, NULL);
// Release every array in the my_root chain. Loop body is elided in this
// excerpt; presumably it unlinks r and calls free(r) — TODO confirm.
161 template<ets_key_usage_type ETS_key_type>
162 void ets_base<ETS_key_type>::table_clear() {
163 while( array* r = my_root ) {
// Find (or lazily create) the calling thread's local element; on return,
// exists tells the caller whether the element pre-existed.
// NOTE(review): excerpt is elided — several statements are missing below;
// visible code kept verbatim.
170 template<ets_key_usage_type ETS_key_type>
171 void* ets_base<ETS_key_type>::table_lookup( bool& exists ) {
172 const key_type k = key_of_current_thread();
174 __TBB_ASSERT(k!=0,NULL);
// Probe each array in the chain for the key.
177 for( array* r=my_root; r; r=r->next ) {
178 size_t mask=r->mask();
179 for(size_t i = r->start(h); ;i=(i+1)&mask) {
181 if( s.empty() ) break;
184 // Success at top level
188 // Success at some other level. Need to insert at top level.
196 // Key does not yet exist
198 found = create_local();
200 size_t c = ++my_count;
// Grow the top-level array when it would become more than half full.
202 if( !r || c>r->size()/2 ) {
203 size_t s = r ? r->lg_size : 2;
204 while( c>size_t(1)<<(s-1) ) ++s;
205 array* a = allocate(s);
// Publish the new array via CAS; on a lost race another thread may have
// installed an equal-or-bigger array, making ours superfluous.
208 array* new_r = my_root.compare_and_swap(a,r);
209 if( new_r==r ) break;
210 if( new_r->lg_size>=s ) {
211 // Another thread inserted an equal or bigger array, so our array is superfluous.
220 // Guaranteed to be room for it, and it is not present, so search for empty slot and grab it.
222 size_t mask = ir->mask();
223 for(size_t i = ir->start(h);;i=(i+1)&mask) {
234 //! Specialization that exploits native TLS
// Caches the thread-local pointer in a per-instance native TLS slot for fast
// lookup; the base-class hash table is still maintained (super::table_lookup)
// so that all locals remain enumerable. Template header / access specifiers
// are elided in this excerpt; visible code kept verbatim.
236 class ets_base<ets_key_per_instance>: protected ets_base<ets_no_key> {
237 typedef ets_base<ets_no_key> super;
// Windows TLS API wrappers.
239 typedef DWORD tls_key_t;
240 void create_key() { my_key = TlsAlloc(); }
241 void destroy_key() { TlsFree(my_key); }
242 void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); }
243 void* get_tls() { return (void *)TlsGetValue(my_key); }
// POSIX pthread TLS wrappers.
245 typedef pthread_key_t tls_key_t;
246 void create_key() { pthread_key_create(&my_key, NULL); }
247 void destroy_key() { pthread_key_delete(my_key); }
248 void set_tls( void * value ) const { pthread_setspecific(my_key, value); }
249 void* get_tls() const { return pthread_getspecific(my_key); }
252 virtual void* create_local() = 0;
253 virtual void* create_array(size_t _size) = 0; // _size in bytes
254 virtual void free_array(void* ptr, size_t _size) = 0; // size in bytes
256 ets_base() {create_key();}
257 ~ets_base() {destroy_key();}
// Fast path: native TLS hit. Slow path (elided): fall back to the shared
// table, then presumably cache the result with set_tls — TODO confirm.
258 void* table_lookup( bool& exists ) {
259 void* found = get_tls();
263 found = super::table_lookup(exists);
271 super::table_clear();
275 //! Random access iterator for traversing the thread local copies.
// Wraps (container pointer, index) and lazily caches a pointer to the
// element's value in the mutable my_value member (see operator*).
// NOTE(review): excerpt is elided — operator bodies and closing braces are
// truncated; visible code kept verbatim.
276 template< typename Container, typename Value >
277 class enumerable_thread_specific_iterator
278 #if defined(_WIN64) && defined(_MSC_VER)
279 // Ensure that Microsoft's internal template function _Val_type works correctly.
280 : public std::iterator<std::random_access_iterator_tag,Value>
281 #endif /* defined(_WIN64) && defined(_MSC_VER) */
283 //! current position in the concurrent_vector
285 Container *my_container;
286 typename Container::size_type my_index;
// Cached pointer to the current element's value; NULL until first dereference.
287 mutable Value *my_value;
289 template<typename C, typename T>
290 friend enumerable_thread_specific_iterator<C,T> operator+( ptrdiff_t offset,
291 const enumerable_thread_specific_iterator<C,T>& v );
293 template<typename C, typename T, typename U>
294 friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i,
295 const enumerable_thread_specific_iterator<C,U>& j );
297 template<typename C, typename T, typename U>
298 friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i,
299 const enumerable_thread_specific_iterator<C,U>& j );
301 template<typename C, typename T, typename U>
302 friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i, const enumerable_thread_specific_iterator<C,U>& j );
304 template<typename C, typename U>
305 friend class enumerable_thread_specific_iterator;
// const_cast lets a const container yield a mutable iterator; constness is
// re-imposed by the Value template argument (const_iterator uses const T).
309 enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) :
310 my_container(&const_cast<Container &>(container)), my_index(index), my_value(NULL) {}
312 //! Default constructor
313 enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}
316 enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator<Container, U>& other ) :
317 my_container( other.my_container ), my_index( other.my_index), my_value( const_cast<Value *>(other.my_value) ) {}
319 enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const {
320 return enumerable_thread_specific_iterator(*my_container, my_index + offset);
323 enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) {
329 enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const {
330 return enumerable_thread_specific_iterator( *my_container, my_index-offset );
333 enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) {
// Dereference: populate/validate the my_value cache, then return *value.
339 Value& operator*() const {
340 Value* value = my_value;
342 value = my_value = reinterpret_cast<Value *>(&(*my_container)[my_index].value);
344 __TBB_ASSERT( value==reinterpret_cast<Value *>(&(*my_container)[my_index].value), "corrupt cache" );
348 Value& operator[]( ptrdiff_t k ) const {
349 return (*my_container)[my_index + k].value;
352 Value* operator->() const {return &operator*();}
354 enumerable_thread_specific_iterator& operator++() {
360 enumerable_thread_specific_iterator& operator--() {
367 enumerable_thread_specific_iterator operator++(int) {
368 enumerable_thread_specific_iterator result = *this;
375 enumerable_thread_specific_iterator operator--(int) {
376 enumerable_thread_specific_iterator result = *this;
// Standard random-access iterator traits.
383 typedef ptrdiff_t difference_type;
384 typedef Value value_type;
385 typedef Value* pointer;
386 typedef Value& reference;
387 typedef std::random_access_iterator_tag iterator_category;
390 template<typename Container, typename T>
391 enumerable_thread_specific_iterator<Container,T> operator+( ptrdiff_t offset,
392 const enumerable_thread_specific_iterator<Container,T>& v ) {
393 return enumerable_thread_specific_iterator<Container,T>( v.my_container, v.my_index + offset );
396 template<typename Container, typename T, typename U>
397 bool operator==( const enumerable_thread_specific_iterator<Container,T>& i,
398 const enumerable_thread_specific_iterator<Container,U>& j ) {
399 return i.my_index==j.my_index && i.my_container == j.my_container;
// Inequality for iterators; body is elided in this excerpt (presumably the
// negation of operator== — TODO confirm against the full source).
402 template<typename Container, typename T, typename U>
403 bool operator!=( const enumerable_thread_specific_iterator<Container,T>& i,
404 const enumerable_thread_specific_iterator<Container,U>& j ) {
408 template<typename Container, typename T, typename U>
409 bool operator<( const enumerable_thread_specific_iterator<Container,T>& i,
410 const enumerable_thread_specific_iterator<Container,U>& j ) {
411 return i.my_index<j.my_index;
// Greater-than for iterators; body is elided in this excerpt (presumably
// defined in terms of operator< — TODO confirm against the full source).
414 template<typename Container, typename T, typename U>
415 bool operator>( const enumerable_thread_specific_iterator<Container,T>& i,
416 const enumerable_thread_specific_iterator<Container,U>& j ) {
// Greater-or-equal for iterators; body is elided in this excerpt (presumably
// the negation of operator< — TODO confirm against the full source).
420 template<typename Container, typename T, typename U>
421 bool operator>=( const enumerable_thread_specific_iterator<Container,T>& i,
422 const enumerable_thread_specific_iterator<Container,U>& j ) {
// Less-or-equal for iterators; body is elided in this excerpt (presumably
// the negation of operator> — TODO confirm against the full source).
426 template<typename Container, typename T, typename U>
427 bool operator<=( const enumerable_thread_specific_iterator<Container,T>& i,
428 const enumerable_thread_specific_iterator<Container,U>& j ) {
432 template<typename Container, typename T, typename U>
433 ptrdiff_t operator-( const enumerable_thread_specific_iterator<Container,T>& i,
434 const enumerable_thread_specific_iterator<Container,U>& j ) {
435 return i.my_index-j.my_index;
// Input iterator that walks a container of containers as one flat sequence:
// outer_iter selects the inner container, inner_iter the element within it.
// Empty inner containers are skipped (see operator=(outer_iterator) and the
// advance helper at the bottom).
// NOTE(review): excerpt is elided — several method bodies and closing braces
// are truncated; visible code kept verbatim.
438 template<typename SegmentedContainer, typename Value >
439 class segmented_iterator
440 #if defined(_WIN64) && defined(_MSC_VER)
441 : public std::iterator<std::input_iterator_tag, Value>
444 template<typename C, typename T, typename U>
445 friend bool operator==(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
447 template<typename C, typename T, typename U>
448 friend bool operator!=(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
450 template<typename C, typename U>
451 friend class segmented_iterator;
// Default-constructed iterator owns no container (my_segcont == NULL).
455 segmented_iterator() {my_segcont = NULL;}
457 segmented_iterator( const SegmentedContainer& _segmented_container ) :
458 my_segcont(const_cast<SegmentedContainer*>(&_segmented_container)),
459 outer_iter(my_segcont->end()) { }
461 ~segmented_iterator() {}
463 typedef typename SegmentedContainer::iterator outer_iterator;
464 typedef typename SegmentedContainer::value_type InnerContainer;
465 typedef typename InnerContainer::iterator inner_iterator;
// Standard input iterator traits.
468 typedef ptrdiff_t difference_type;
469 typedef Value value_type;
470 typedef typename SegmentedContainer::size_type size_type;
471 typedef Value* pointer;
472 typedef Value& reference;
473 typedef std::input_iterator_tag iterator_category;
477 segmented_iterator(const segmented_iterator<SegmentedContainer, U>& other) :
478 my_segcont(other.my_segcont),
479 outer_iter(other.outer_iter),
480 // can we assign a default-constructed iterator to inner if we're at the end?
481 inner_iter(other.inner_iter)
486 segmented_iterator& operator=( const segmented_iterator<SegmentedContainer, U>& other) {
488 my_segcont = other.my_segcont;
489 outer_iter = other.outer_iter;
// inner_iter is only meaningful when not at the outer end.
490 if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter;
495 // allow assignment of outer iterator to segmented iterator. Once it is
496 // assigned, move forward until a non-empty inner container is found or
497 // the end of the outer container is reached.
498 segmented_iterator& operator=(const outer_iterator& new_outer_iter) {
499 __TBB_ASSERT(my_segcont != NULL, NULL);
500 // check that this iterator points to something inside the segmented container
501 for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) {
502 if( !outer_iter->empty() ) {
503 inner_iter = outer_iter->begin();
511 segmented_iterator& operator++() {
517 segmented_iterator operator++(int) {
518 segmented_iterator tmp = *this;
// Compare against a bare outer iterator: equal when at the same segment and
// positioned at that segment's beginning (or both at the outer end).
523 bool operator==(const outer_iterator& other_outer) const {
524 __TBB_ASSERT(my_segcont != NULL, NULL);
525 return (outer_iter == other_outer &&
526 (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin()));
529 bool operator!=(const outer_iterator& other_outer) const {
530 return !operator==(other_outer);
535 reference operator*() const {
536 __TBB_ASSERT(my_segcont != NULL, NULL);
537 __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container");
538 __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // should never happen
543 pointer operator->() const { return &operator*();}
546 SegmentedContainer* my_segcont;
547 outer_iterator outer_iter;
548 inner_iterator inner_iter;
// Advance helper (header elided): skip exhausted/empty inner containers.
551 __TBB_ASSERT(my_segcont != NULL, NULL);
552 __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers
553 __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty.
555 while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) {
556 inner_iter = outer_iter->begin();
559 }; // segmented_iterator
561 template<typename SegmentedContainer, typename T, typename U>
562 bool operator==( const segmented_iterator<SegmentedContainer,T>& i,
563 const segmented_iterator<SegmentedContainer,U>& j ) {
564 if(i.my_segcont != j.my_segcont) return false;
565 if(i.my_segcont == NULL) return true;
566 if(i.outer_iter != j.outer_iter) return false;
567 if(i.outer_iter == i.my_segcont->end()) return true;
568 return i.inner_iter == j.inner_iter;
// Inequality for segmented iterators; body is elided in this excerpt
// (presumably the negation of operator== — TODO confirm against full source).
572 template<typename SegmentedContainer, typename T, typename U>
573 bool operator!=( const segmented_iterator<SegmentedContainer,T>& i,
574 const segmented_iterator<SegmentedContainer,U>& j ) {
// Holds aligned raw storage for exactly one T and runs its destructor (only)
// on scope exit; construction is done externally via placement new (see
// combine()). Template header is elided in this excerpt.
579 struct destruct_only: tbb::internal::no_copy {
580 tbb::aligned_space<T,1> value;
581 ~destruct_only() {value.begin()[0].~T();}
// Initializer functor: value-constructs a T at a given address. Template
// header is elided in this excerpt; the int parameter is a dummy.
585 struct construct_by_default: tbb::internal::no_assign {
586 void construct(void*where) {new(where) T();} // C++ note: the () in T() ensure zero initialization.
587 construct_by_default( int ) {}
// Initializer functor: copy-constructs a T from a stored exemplar at a given
// address. Template header and the exemplar member declaration are elided.
591 struct construct_by_exemplar: tbb::internal::no_assign {
593 void construct(void*where) {new(where) T(exemplar);}
594 construct_by_exemplar( const T& t ) : exemplar(t) {}
// Initializer functor: constructs a T from the result of calling the stored
// functor f. The member declaration of f is elided in this excerpt.
597 template<typename T, typename Finit>
598 struct construct_by_finit: tbb::internal::no_assign {
600 void construct(void* where) {new(where) T(f());}
601 construct_by_finit( const Finit& f_ ) : f(f_) {}
604 // storage for initialization function pointer
// Abstract interface used by enumerable_thread_specific to type-erase the
// per-thread initializer (default / exemplar / finit). Template header is
// elided in this excerpt.
606 class callback_base {
// Make a deep copy of *this (allocated by the leaf's allocator).
609 virtual callback_base* clone() = 0;
610 // Destruct and free *this
611 virtual void destroy() = 0;
612 // Need virtual destructor to satisfy GCC compiler warning
613 virtual ~callback_base() { }
614 // Construct T at where
615 virtual void construct(void* where) = 0;
// Concrete callback: pairs the type-erased interface with a specific
// Constructor policy (construct_by_default/_exemplar/_finit). Instances are
// created/destroyed exclusively through tbb_allocator via make()/destroy().
// NOTE(review): excerpt is elided — closing braces are truncated.
618 template <typename T, typename Constructor>
619 class callback_leaf: public callback_base<T>, Constructor {
620 template<typename X> callback_leaf( const X& x ) : Constructor(x) {}
622 typedef typename tbb::tbb_allocator<callback_leaf> my_allocator_type;
624 /*override*/ callback_base<T>* clone() {
625 void* where = my_allocator_type().allocate(1);
626 return new(where) callback_leaf(*this);
629 /*override*/ void destroy() {
630 my_allocator_type().destroy(this);
631 my_allocator_type().deallocate(this,1);
// Delegate construction to the Constructor policy base.
634 /*override*/ void construct(void* where) {
635 Constructor::construct(where);
// Factory: allocate and placement-construct a leaf from initializer x.
639 static callback_base<T>* make( const X& x ) {
640 void* where = my_allocator_type().allocate(1);
641 return new(where) callback_leaf(x);
645 //! Template for adding padding in order to avoid false sharing
646 /** ModularSize should be sizeof(U) modulo the cache line size.
647 All maintenance of the space will be done explicitly on push_back,
648 and all thread local copies must be destroyed before the concurrent
// Raw storage padded up to the next cache-line multiple (NFS_MaxLineSize);
// a U is placement-constructed into value and destroyed via the punned cast.
651 template<typename U, size_t ModularSize>
653 char value[ModularSize==0 ? sizeof(U) : sizeof(U)+(tbb::internal::NFS_MaxLineSize-ModularSize)];
655 tbb::internal::punned_cast<U*>(&value)->~U();
659 } // namespace internal
662 //! The enumerable_thread_specific container
663 /** enumerable_thread_specific has the following properties:
664 - thread-local copies are lazily created, with default, exemplar or function initialization.
665 - thread-local copies do not move (during lifetime, and excepting clear()) so the address of a copy is invariant.
666 - the contained objects need not have operator=() defined if combine is not used.
667 - enumerable_thread_specific containers may be copy-constructed or assigned.
668 - thread-local copies can be managed by hash-table, or can be accessed via TLS storage for speed.
669 - outside of parallel contexts, the contents of all thread-local copies are accessible by iterator or using combine or combine_each methods
671 @par Segmented iterator
672 When the thread-local objects are containers with input_iterators defined, a segmented iterator may
673 be used to iterate over all the elements of all thread-local copies.
675 @par combine and combine_each
676 - Both methods are defined for enumerable_thread_specific.
676 - combine() requires that the type T have operator=() defined.
678 - neither method modifies the contents of the object (though there is no guarantee that the applied methods do not modify the object.)
679 - Both are evaluated in serial context (the methods are assumed to be non-benign.)
681 @ingroup containers */
// NOTE(review): this excerpt is elided — many interior lines, access
// specifiers, preprocessor branches, and closing braces are missing; visible
// code is kept verbatim (including the fused original line numbers).
682 template <typename T,
683 typename Allocator=cache_aligned_allocator<T>,
684 ets_key_usage_type ETS_key_type=ets_no_key >
685 class enumerable_thread_specific: internal::ets_base<ETS_key_type> {
687 template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;
// Each local copy is cache-line padded to avoid false sharing between threads.
689 typedef internal::ets_element<T,sizeof(T)%tbb::internal::NFS_MaxLineSize> padded_element;
691 //! A generic range, used to create range objects from the iterators
693 class generic_range_type: public blocked_range<I> {
695 typedef T value_type;
696 typedef T& reference;
697 typedef const T& const_reference;
699 typedef ptrdiff_t difference_type;
700 generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
702 generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
703 generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
// Local copies live in a concurrent_vector so addresses stay stable and the
// set can grow concurrently.
706 typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type;
707 typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type;
// Type-erased per-thread initializer (default/exemplar/finit).
709 internal::callback_base<T> *my_construct_callback;
711 internal_collection_type my_locals;
// Hook for ets_base: append a padded slot and run the initializer in it.
// (The two lref lines are alternatives from an elided #if/#else pair.)
713 /*override*/ void* create_local() {
715 void* lref = &my_locals[my_locals.push_back(padded_element())];
717 void* lref = &*my_locals.push_back(padded_element());
719 my_construct_callback->construct(lref);
// Run destructors of all constructed locals (storage itself is kept).
723 void unconstruct_locals() {
724 for(typename internal_collection_type::iterator cvi = my_locals.begin(); cvi != my_locals.end(); ++cvi) {
// Hooks for ets_base: hash-table arrays are allocated as raw uintptr_t blocks.
729 typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type;
732 /*override*/ void* create_array(size_t _size) {
733 size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
734 return array_allocator_type().allocate(nelements);
737 /*override*/ void free_array( void* _ptr, size_t _size) {
738 size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
739 array_allocator_type().deallocate( reinterpret_cast<uintptr_t *>(_ptr),nelements);
// Standard container typedefs.
745 typedef Allocator allocator_type;
746 typedef T value_type;
747 typedef T& reference;
748 typedef const T& const_reference;
750 typedef const T* const_pointer;
751 typedef typename internal_collection_type::size_type size_type;
752 typedef typename internal_collection_type::difference_type difference_type;
755 typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;
756 typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;
758 // Parallel range types
759 typedef generic_range_type< iterator > range_type;
760 typedef generic_range_type< const_iterator > const_range_type;
762 //! Default constructor. Each local instance of T is default constructed.
763 enumerable_thread_specific() :
764 my_construct_callback( internal::callback_leaf<T,internal::construct_by_default<T> >::make(/*dummy argument*/0) )
767 //! Constructor with initializer functor. Each local instance of T is constructed by T(finit()).
768 template <typename Finit>
769 enumerable_thread_specific( Finit finit ) :
770 my_construct_callback( internal::callback_leaf<T,internal::construct_by_finit<T,Finit> >::make( finit ) )
773 //! Constructor with exemplar. Each local instance of T is copied-constructed from the exemplar.
774 enumerable_thread_specific(const T& exemplar) :
775 my_construct_callback( internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( exemplar ) )
779 ~enumerable_thread_specific() {
780 my_construct_callback->destroy();
781 this->clear(); // deallocation before the derived class is finished destructing
782 // So free(array *) is still accessible
785 //! returns reference to local, discarding exists
788 return local(exists);
791 //! Returns reference to calling thread's local copy, creating one if necessary
792 reference local(bool& exists) {
793 void* ptr = this->table_lookup(exists);
797 //! Get the number of local copies
798 size_type size() const { return my_locals.size(); }
800 //! true if there have been no local copies created
801 bool empty() const { return my_locals.empty(); }
// Iteration over all thread-local copies (serial context only — see class doc).
804 iterator begin() { return iterator( my_locals, 0 ); }
806 iterator end() { return iterator(my_locals, my_locals.size() ); }
808 //! begin const iterator
809 const_iterator begin() const { return const_iterator(my_locals, 0); }
811 //! end const iterator
812 const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }
814 //! Get range for parallel algorithms
815 range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); }
817 //! Get const range for parallel algorithms
818 const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }
820 //! Destroys local copies
822 unconstruct_locals();
825 // callback is not destroyed
826 // exemplar is not destroyed
// Copy helper declaration; definition appears out of line below the class.
831 template<typename U, typename A2, ets_key_usage_type C2>
832 void internal_copy( const enumerable_thread_specific<U, A2, C2>& other);
836 template<typename U, typename Alloc, ets_key_usage_type Cachetype>
837 enumerable_thread_specific( const enumerable_thread_specific<U, Alloc, Cachetype>& other ) : internal::ets_base<ETS_key_type> ()
839 internal_copy(other);
842 enumerable_thread_specific( const enumerable_thread_specific& other ) : internal::ets_base<ETS_key_type> ()
844 internal_copy(other);
// Assignment helper shared by both operator= overloads; self-assignment is
// detected through void* comparison because U/A2/C2 may differ from T/A/C.
849 template<typename U, typename A2, ets_key_usage_type C2>
850 enumerable_thread_specific &
851 internal_assign(const enumerable_thread_specific<U, A2, C2>& other) {
852 if(static_cast<void *>( this ) != static_cast<const void *>( &other )) {
854 my_construct_callback->destroy();
855 my_construct_callback = 0;
856 internal_copy( other );
864 enumerable_thread_specific& operator=(const enumerable_thread_specific& other) {
865 return internal_assign(other);
868 template<typename U, typename Alloc, ets_key_usage_type Cachetype>
869 enumerable_thread_specific& operator=(const enumerable_thread_specific<U, Alloc, Cachetype>& other)
871 return internal_assign(other);
874 // combine_func_t has signature T(T,T) or T(const T&, const T&)
875 template <typename combine_func_t>
876 T combine(combine_func_t f_combine) {
// With no locals yet, return a freshly initialized T (destroyed on scope exit
// by destruct_only).
877 if(begin() == end()) {
878 internal::destruct_only<T> location;
879 my_construct_callback->construct(location.value.begin());
880 return *location.value.begin();
882 const_iterator ci = begin();
885 my_result = f_combine( my_result, *ci );
889 // combine_func_t has signature void(T) or void(const T&)
890 template <typename combine_func_t>
891 void combine_each(combine_func_t f_combine) {
892 for(const_iterator ci = begin(); ci != end(); ++ci) {
897 }; // enumerable_thread_specific
// Copy all of other's thread-local elements into this (empty) container:
// reserve a same-sized hash table, then walk every slot of every array in
// other's chain, copy-constructing each element into a fresh padded slot.
// NOTE(review): excerpt is elided — several lines (duplicate detection,
// closing braces) are missing; visible code kept verbatim.
899 template <typename T, typename Allocator, ets_key_usage_type ETS_key_type>
900 template<typename U, typename A2, ets_key_usage_type C2>
901 void enumerable_thread_specific<T,Allocator,ETS_key_type>::internal_copy( const enumerable_thread_specific<U, A2, C2>& other) {
902 // Initialize my_construct_callback first, so that it is valid even if rest of this routine throws an exception.
903 my_construct_callback = other.my_construct_callback->clone();
905 typedef internal::ets_base<ets_no_key> base;
906 __TBB_ASSERT(my_locals.size()==0,NULL);
907 this->table_reserve_for_copy( other );
908 for( base::array* r=other.my_root; r; r=r->next ) {
909 for( size_t i=0; i<r->size(); ++i ) {
910 base::slot& s1 = r->at(i);
912 base::slot& s2 = this->table_find(s1.key);
// The two lref lines are alternatives from an elided #if/#else pair.
915 void* lref = &my_locals[my_locals.push_back(padded_element())];
917 void* lref = &*my_locals.push_back(padded_element());
919 s2.ptr = new(lref) T(*(U*)s1.ptr);
922 // Skip the duplicate
// Adaptor presenting a container of containers as one flat sequence,
// traversed via segmented_iterator. The class header line itself is elided
// in this excerpt; visible code kept verbatim.
929 template< typename Container >
932 // This intermediate typedef is to address issues with VC7.1 compilers
933 typedef typename Container::value_type conval_type;
// Typedefs forwarded from the inner container type.
938 typedef typename conval_type::size_type size_type;
939 typedef typename conval_type::difference_type difference_type;
940 typedef typename conval_type::allocator_type allocator_type;
941 typedef typename conval_type::value_type value_type;
942 typedef typename conval_type::reference reference;
943 typedef typename conval_type::const_reference const_reference;
944 typedef typename conval_type::pointer pointer;
945 typedef typename conval_type::const_pointer const_pointer;
947 typedef typename internal::segmented_iterator<Container, value_type> iterator;
948 typedef typename internal::segmented_iterator<Container, const value_type> const_iterator;
// View over the sub-range [b, e) of outer iterators.
950 flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) :
951 my_container(const_cast<Container*>(&c)), my_begin(b), my_end(e) { }
// View over the whole outer container.
953 flattened2d( const Container &c ) :
954 my_container(const_cast<Container*>(&c)), my_begin(c.begin()), my_end(c.end()) { }
// Assigning an outer iterator to a segmented_iterator positions it at the
// first element of the first non-empty inner container at or after it.
956 iterator begin() { return iterator(*my_container) = my_begin; }
957 iterator end() { return iterator(*my_container) = my_end; }
958 const_iterator begin() const { return const_iterator(*my_container) = my_begin; }
959 const_iterator end() const { return const_iterator(*my_container) = my_end; }
// Total element count: sum of the sizes of the inner containers in range.
961 size_type size() const {
962 size_type tot_size = 0;
963 for(typename Container::const_iterator i = my_begin; i != my_end; ++i) {
964 tot_size += i->size();
971 Container *my_container;
972 typename Container::const_iterator my_begin;
973 typename Container::const_iterator my_end;
977 template <typename Container>
978 flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {
979 return flattened2d<Container>(c, b, e);
982 template <typename Container>
983 flattened2d<Container> flatten2d(const Container &c) {
984 return flattened2d<Container>(c);
990 using interface6::internal::segmented_iterator;
993 using interface6::enumerable_thread_specific;
994 using interface6::flattened2d;
995 using interface6::flatten2d;