2 Copyright 2005-2010 Intel Corporation. All Rights Reserved.
4 This file is part of Threading Building Blocks.
6 Threading Building Blocks is free software; you can redistribute it
7 and/or modify it under the terms of the GNU General Public License
8 version 2 as published by the Free Software Foundation.
10 Threading Building Blocks is distributed in the hope that it will be
11 useful, but WITHOUT ANY WARRANTY; without even the implied warranty
12 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with Threading Building Blocks; if not, write to the Free Software
17 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 As a special exception, you may use this file as part of a free software
20 library without restriction. Specifically, if other files instantiate
21 templates or use macros or inline functions from this file, or you compile
22 this file and link it with other files to produce an executable, this
23 file does not by itself cause the resulting executable to be covered by
24 the GNU General Public License. This exception does not however
25 invalidate any other reasons why the executable file might be covered by
26 the GNU General Public License.
29 #ifndef __TBB_concurrent_vector_H
30 #define __TBB_concurrent_vector_H
32 #include "tbb_stddef.h"
33 #include "tbb_exception.h"
35 #include "cache_aligned_allocator.h"
36 #include "blocked_range.h"
37 #include "tbb_machine.h"
40 #if !TBB_USE_EXCEPTIONS && _MSC_VER
41 // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
42 #pragma warning (push)
43 #pragma warning (disable: 4530)
49 #if !TBB_USE_EXCEPTIONS && _MSC_VER
53 #if _MSC_VER==1500 && !__INTEL_COMPILER
54 // VS2008/VC9 seems to have an issue; limits pull in math.h
55 #pragma warning( push )
56 #pragma warning( disable: 4985 )
58 #include <limits> /* std::numeric_limits */
59 #if _MSC_VER==1500 && !__INTEL_COMPILER
60 #pragma warning( pop )
63 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && defined(_Wp64)
64 // Workaround for overzealous compiler warnings in /Wp64 mode
65 #pragma warning (push)
66 #pragma warning (disable: 4267)
71 template<typename T, class A = cache_aligned_allocator<T> >
72 class concurrent_vector;
77 //! Bad allocation marker
78 static void *const vector_allocation_error_flag = reinterpret_cast<void*>(size_t(63));
80 //! Routine that loads pointer from location pointed to by src without any fence, without causing ITT to report a race.
81 void* __TBB_EXPORTED_FUNC itt_load_pointer_v3( const void* src );
83 //! Base class of concurrent vector implementation.
84 /** @ingroup containers */
85 class concurrent_vector_base_v3 {
88 // Basic types declarations
89 typedef size_t segment_index_t;
90 typedef size_t size_type;
92 // Using enumerations due to Mac linking problems of static const variables
95 default_initial_segments = 1, // 2 initial items
96 //! Number of slots for segment's pointers inside the class
97 pointers_per_short_table = 3, // to fit into 8 words of entire structure
98 pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit
101 // Segment pointer. Can be zero-initialized
106 __TBB_ASSERT( array <= internal::vector_allocation_error_flag, "should have been freed by clear" );
108 #endif /* TBB_USE_ASSERT */
113 //! allocator function pointer
114 void* (*vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t);
116 //! count of segments in the first block
117 atomic<size_type> my_first_block;
119 //! Requested size of vector
120 atomic<size_type> my_early_size;
122 //! Pointer to the segments table
123 atomic<segment_t*> my_segment;
125 //! embedded storage of segment pointers
126 segment_t my_storage[pointers_per_short_table];
130 concurrent_vector_base_v3() {
132 my_first_block = 0; // here is not default_initial_segments
133 for( segment_index_t i = 0; i < pointers_per_short_table; i++)
134 my_storage[i].array = NULL;
135 my_segment = my_storage;
137 __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3();
// NOTE(review): this extraction has dropped lines -- closing braces and the
// return statement of segment_base_index_of are missing between the numbered
// lines below; compare with the upstream header before editing.

//! Map an element index to the index of the segment holding it.
// Segments double in size, so this is floor(log2(index)).  The |1 keeps
// __TBB_Log2 from seeing 0 and makes indices 0 and 1 both map to segment 0.
139 static segment_index_t segment_index_of( size_type index ) {
140 return segment_index_t( __TBB_Log2( index|1 ) );

//! First element index covered by segment k.
// (1<<k) & ~1 is 0 for k==0 and 2^k for k>=1: segment 0 covers indices
// 0..1, segment k>=1 covers [2^k, 2^(k+1)).
143 static segment_index_t segment_base( segment_index_t k ) {
144 return (segment_index_t(1)<<k & ~segment_index_t(1));

//! Split a global index into (segment, offset): reduces 'index' in place to
//! the offset within its segment; presumably returns k (the return line is
//! missing from this extraction -- TODO confirm against upstream source).
147 static inline segment_index_t segment_base_index_of( segment_index_t &index ) {
148 segment_index_t k = segment_index_of( index );
149 index -= segment_base(k);

//! Number of slots in segment k.
// Reports 1 for k==0 although that segment actually stores two items
// (see segment_base above) -- hence the "fake value" note.
153 static size_type segment_size( segment_index_t k ) {
154 return segment_index_t(1)<<k; // fake value for k==0
157 //! An operation on an n-element array starting at begin.
158 typedef void (__TBB_EXPORTED_FUNC *internal_array_op1)(void* begin, size_type n );
160 //! An operation on n-element destination array and n-element source array.
161 typedef void (__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, const void* src, size_type n );
163 //! Internal structure for compact()
164 struct internal_segments_table {
165 segment_index_t first_block;
166 void* table[pointers_per_long_table];
169 void __TBB_EXPORTED_METHOD internal_reserve( size_type n, size_type element_size, size_type max_size );
170 size_type __TBB_EXPORTED_METHOD internal_capacity() const;
171 void internal_grow( size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src );
172 size_type __TBB_EXPORTED_METHOD internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, const void *src );
173 void* __TBB_EXPORTED_METHOD internal_push_back( size_type element_size, size_type& index );
174 segment_index_t __TBB_EXPORTED_METHOD internal_clear( internal_array_op1 destroy );
175 void* __TBB_EXPORTED_METHOD internal_compact( size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy );
176 void __TBB_EXPORTED_METHOD internal_copy( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op2 copy );
177 void __TBB_EXPORTED_METHOD internal_assign( const concurrent_vector_base_v3& src, size_type element_size,
178 internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy );
180 void __TBB_EXPORTED_METHOD internal_throw_exception(size_type) const;
181 void __TBB_EXPORTED_METHOD internal_swap(concurrent_vector_base_v3& v);
183 void __TBB_EXPORTED_METHOD internal_resize( size_type n, size_type element_size, size_type max_size, const void *src,
184 internal_array_op1 destroy, internal_array_op2 init );
185 size_type __TBB_EXPORTED_METHOD internal_grow_to_at_least_with_result( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );
187 //! Deprecated entry point for backwards compatibility to TBB 2.1.
188 void __TBB_EXPORTED_METHOD internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );
190 //! Private functionality
195 typedef concurrent_vector_base_v3 concurrent_vector_base;
197 //! Meets requirements of a forward iterator for STL and a Value for a blocked_range.*/
198 /** Value is either the T or const T type of the container.
199 @ingroup containers */
200 template<typename Container, typename Value>
201 class vector_iterator
203 //! concurrent_vector over which we are iterating.
204 Container* my_vector;
206 //! Index into the vector
209 //! Caches my_vector->internal_subscript(my_index)
210 /** NULL if cached value is not available */
211 mutable Value* my_item;
213 template<typename C, typename T>
214 friend vector_iterator<C,T> operator+( ptrdiff_t offset, const vector_iterator<C,T>& v );
216 template<typename C, typename T, typename U>
217 friend bool operator==( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
219 template<typename C, typename T, typename U>
220 friend bool operator<( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
222 template<typename C, typename T, typename U>
223 friend ptrdiff_t operator-( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
225 template<typename C, typename U>
226 friend class internal::vector_iterator;
228 #if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
229 template<typename T, class A>
230 friend class tbb::concurrent_vector;
232 public: // workaround for MSVC
235 vector_iterator( const Container& vector, size_t index, void *ptr = 0 ) :
236 my_vector(const_cast<Container*>(&vector)),
238 my_item(static_cast<Value*>(ptr))
242 //! Default constructor
243 vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {}
245 vector_iterator( const vector_iterator<Container,typename Container::value_type>& other ) :
246 my_vector(other.my_vector),
247 my_index(other.my_index),
248 my_item(other.my_item)
251 vector_iterator operator+( ptrdiff_t offset ) const {
252 return vector_iterator( *my_vector, my_index+offset );
254 vector_iterator &operator+=( ptrdiff_t offset ) {
259 vector_iterator operator-( ptrdiff_t offset ) const {
260 return vector_iterator( *my_vector, my_index-offset );
262 vector_iterator &operator-=( ptrdiff_t offset ) {
267 Value& operator*() const {
268 Value* item = my_item;
270 item = my_item = &my_vector->internal_subscript(my_index);
272 __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "corrupt cache" );
275 Value& operator[]( ptrdiff_t k ) const {
276 return my_vector->internal_subscript(my_index+k);
278 Value* operator->() const {return &operator*();}
//! Pre-increment.
281 vector_iterator& operator++() {
282 size_t k = ++my_index;
// (k & (k-2))==0 holds exactly when k is 0 or a power of two >= 2, i.e.
// when the new index sits on a segment base (2, 4, 8, ...).  At a segment
// boundary the cached element pointer cannot simply be advanced.
// NOTE(review): the branch body is missing from this extraction.
284 // Following test uses 2's-complement wizardry
285 if( (k& (k-2))==0 ) {
286 // k is a power of two that is at least k-2
//! Pre-decrement.  Not valid at the beginning of the sequence.
296 vector_iterator& operator--() {
297 __TBB_ASSERT( my_index>0, "operator--() applied to iterator already at beginning of concurrent_vector" );
298 size_t k = my_index--;
// Same segment-boundary test as operator++, applied to the old index k.
300 // Following test uses 2's-complement wizardry
301 if( (k& (k-2))==0 ) {
302 // k is a power of two that is at least k-2
//! Post-increment: copy, advance, return the copy.
312 vector_iterator operator++(int) {
313 vector_iterator result = *this;
//! Post-decrement: copy, retreat, return the copy.
319 vector_iterator operator--(int) {
320 vector_iterator result = *this;
327 typedef ptrdiff_t difference_type;
328 typedef Value value_type;
329 typedef Value* pointer;
330 typedef Value& reference;
331 typedef std::random_access_iterator_tag iterator_category;
//! offset + iterator (the member operator+ handles iterator + offset).
334 template<typename Container, typename T>
335 vector_iterator<Container,T> operator+( ptrdiff_t offset, const vector_iterator<Container,T>& v ) {
336 return vector_iterator<Container,T>( *v.my_vector, v.my_index+offset );

//! Equal iff both the index and the underlying vector match.
339 template<typename Container, typename T, typename U>
340 bool operator==( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
341 return i.my_index==j.my_index && i.my_vector == j.my_vector;

// NOTE(review): the bodies of !=, >, >= and <= are missing from this
// extraction; upstream presumably defines them in terms of == and < --
// TODO confirm against the original header.
344 template<typename Container, typename T, typename U>
345 bool operator!=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {

//! Ordering compares indices only; comparing iterators into different
//! vectors is meaningless.
349 template<typename Container, typename T, typename U>
350 bool operator<( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
351 return i.my_index<j.my_index;
354 template<typename Container, typename T, typename U>
355 bool operator>( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
359 template<typename Container, typename T, typename U>
360 bool operator>=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
364 template<typename Container, typename T, typename U>
365 bool operator<=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {

//! Distance in elements (may be negative).
369 template<typename Container, typename T, typename U>
370 ptrdiff_t operator-( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
371 return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index);
//! Base of concurrent_vector that stores its allocator.
// The user's allocator A is rebound so it allocates T objects; the derived
// class reaches the instance as this->my_allocator.
374 template<typename T, class A>
375 class allocator_base {
377 typedef typename A::template
378 rebind<T>::other allocator_type;
379 allocator_type my_allocator;
// Default-constructs the allocator unless one is supplied by the caller.
381 allocator_base(const allocator_type &a = allocator_type() ) : my_allocator(a) {}
384 } // namespace internal
387 //! Concurrent vector container
388 /** concurrent_vector is a container having the following main properties:
389 - It provides random indexed access to its elements. The index of the first element is 0.
390 - It ensures safe concurrent growing its size (different threads can safely append new elements).
391 - Adding new elements does not invalidate existing iterators and does not change indices of existing items.
394 The class meets all Container Requirements and Reversible Container Requirements from
395 C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1). But it doesn't meet
396 Sequence Requirements due to absence of insert() and erase() methods.
398 @par Exception Safety
399 Methods working with memory allocation and/or new elements construction can throw an
400 exception if allocator fails to allocate memory or element's default constructor throws one.
401 Concurrent vector's element of type T must conform to the following requirements:
402 - Throwing an exception is forbidden for destructor of T.
403 - Default constructor of T must not throw an exception OR its non-virtual destructor must safely work when its object memory is zero-initialized.
405 Otherwise, the program's behavior is undefined.
407 If an exception happens inside growth or assignment operation, an instance of the vector becomes invalid unless it is stated otherwise in the method documentation.
409 - There are no guarantees that all items were initialized by a constructor. The rest of the items are zero-filled, including the item where the exception happened.
410 - An invalid vector instance cannot be repaired; it is unable to grow anymore.
411 - Size and capacity reported by the vector are incorrect, and calculated as if the failed operation were successful.
412 - Attempt to access not allocated elements using operator[] or iterators results in access violation or segmentation fault exception, and in case of using at() method a C++ exception is thrown.
414 If a concurrent grow operation successfully completes, all the elements it has added to the vector will remain valid and accessible even if one of subsequent grow operations fails.
417 Unlike an STL vector, a concurrent_vector does not move existing elements if it needs
418 to allocate more memory. The container is divided into a series of contiguous arrays of
419 elements. The first reservation, growth, or assignment operation determines the size of
420 the first array. Using small number of elements as initial size incurs fragmentation that
421 may increase element access time. Internal layout can be optimized by method compact() that
422 merges several smaller arrays into one solid.
424 @par Changes since TBB 2.1
425 - Fixed guarantees of concurrent_vector::size() and grow_to_at_least() methods to assure elements are allocated.
426 - Methods end()/rbegin()/back() are partly thread-safe since they use size() to get the end of vector
427 - Added resize() methods (not thread-safe)
428 - Added cbegin/cend/crbegin/crend methods
429 - Changed return type of methods grow* and push_back to iterator
431 @par Changes since TBB 2.0
432 - Implemented exception-safety guarantees
433 - Added template argument for allocator
434 - Added allocator argument in constructors
435 - Faster index calculation
436 - First growth call specifies a number of segments to be merged in the first allocation.
437 - Fixed memory blow up for swarm of vector's instances of small size
438 - Added grow_by(size_type n, const_reference t) growth using copying constructor to init new items.
439 - Added STL-like constructors.
440 - Added operators ==, < and derivatives
441 - Added at() method, approved for using after an exception was thrown inside the vector
442 - Added get_allocator() method.
443 - Added assign() methods
444 - Added compact() method to defragment first segments
445 - Added swap() method
446 - range() defaults on grainsize = 1 supporting auto grainsize algorithms.
448 @ingroup containers */
449 template<typename T, class A>
450 class concurrent_vector: protected internal::allocator_base<T, A>,
451 private internal::concurrent_vector_base {
454 class generic_range_type: public blocked_range<I> {
456 typedef T value_type;
457 typedef T& reference;
458 typedef const T& const_reference;
460 typedef ptrdiff_t difference_type;
461 generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
463 generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
464 generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
467 template<typename C, typename U>
468 friend class internal::vector_iterator;
470 //------------------------------------------------------------------------
471 // STL compatible types
472 //------------------------------------------------------------------------
473 typedef internal::concurrent_vector_base_v3::size_type size_type;
474 typedef typename internal::allocator_base<T, A>::allocator_type allocator_type;
476 typedef T value_type;
477 typedef ptrdiff_t difference_type;
478 typedef T& reference;
479 typedef const T& const_reference;
481 typedef const T *const_pointer;
483 typedef internal::vector_iterator<concurrent_vector,T> iterator;
484 typedef internal::vector_iterator<concurrent_vector,const T> const_iterator;
486 #if !defined(_MSC_VER) || _CPPLIB_VER>=300
487 // Assume ISO standard definition of std::reverse_iterator
488 typedef std::reverse_iterator<iterator> reverse_iterator;
489 typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
491 // Use non-standard std::reverse_iterator
492 typedef std::reverse_iterator<iterator,T,T&,T*> reverse_iterator;
493 typedef std::reverse_iterator<const_iterator,T,const T&,const T*> const_reverse_iterator;
494 #endif /* defined(_MSC_VER) && (_MSC_VER<1300) */
496 //------------------------------------------------------------------------
497 // Parallel algorithm support
498 //------------------------------------------------------------------------
499 typedef generic_range_type<iterator> range_type;
500 typedef generic_range_type<const_iterator> const_range_type;
502 //------------------------------------------------------------------------
503 // STL compatible constructors & destructors
504 //------------------------------------------------------------------------
506 //! Construct empty vector.
507 explicit concurrent_vector(const allocator_type &a = allocator_type())
508 : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
510 vector_allocator_ptr = &internal_allocator;
513 //! Copying constructor
514 concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() )
515 : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
517 vector_allocator_ptr = &internal_allocator;
519 internal_copy(vector, sizeof(T), ©_array);
521 segment_t *table = my_segment;
522 internal_free_segments( reinterpret_cast<void**>(table), internal_clear(&destroy_array), my_first_block );
527 //! Copying constructor for vector with different allocator type
529 concurrent_vector( const concurrent_vector<T, M>& vector, const allocator_type& a = allocator_type() )
530 : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
532 vector_allocator_ptr = &internal_allocator;
534 internal_copy(vector.internal_vector_base(), sizeof(T), ©_array);
536 segment_t *table = my_segment;
537 internal_free_segments( reinterpret_cast<void**>(table), internal_clear(&destroy_array), my_first_block );
542 //! Construction with initial size specified by argument n
543 explicit concurrent_vector(size_type n)
545 vector_allocator_ptr = &internal_allocator;
547 internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
549 segment_t *table = my_segment;
550 internal_free_segments( reinterpret_cast<void**>(table), internal_clear(&destroy_array), my_first_block );
555 //! Construction with initial size specified by argument n, initialization by copying of t, and given allocator instance
556 concurrent_vector(size_type n, const_reference t, const allocator_type& a = allocator_type())
557 : internal::allocator_base<T, A>(a)
559 vector_allocator_ptr = &internal_allocator;
561 internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
563 segment_t *table = my_segment;
564 internal_free_segments( reinterpret_cast<void**>(table), internal_clear(&destroy_array), my_first_block );
569 //! Construction with copying iteration range and given allocator instance
571 concurrent_vector(I first, I last, const allocator_type &a = allocator_type())
572 : internal::allocator_base<T, A>(a)
574 vector_allocator_ptr = &internal_allocator;
576 internal_assign_range(first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
578 segment_t *table = my_segment;
579 internal_free_segments( reinterpret_cast<void**>(table), internal_clear(&destroy_array), my_first_block );
585 concurrent_vector& operator=( const concurrent_vector& vector ) {
586 if( this != &vector )
587 internal_assign(vector, sizeof(T), &destroy_array, &assign_array, ©_array);
591 //! Assignment for vector with different allocator type
593 concurrent_vector& operator=( const concurrent_vector<T, M>& vector ) {
594 if( static_cast<void*>( this ) != static_cast<const void*>( &vector ) )
595 internal_assign(vector.internal_vector_base(),
596 sizeof(T), &destroy_array, &assign_array, ©_array);
600 //------------------------------------------------------------------------
601 // Concurrent operations
602 //------------------------------------------------------------------------
603 //! Grow by "delta" elements.
605 /** Returns old size. */
606 size_type grow_by( size_type delta ) {
607 return delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size;
610 /** Returns iterator pointing to the first new element. */
611 iterator grow_by( size_type delta ) {
612 return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size);
616 //! Grow by "delta" elements using copying constructor.
618 /** Returns old size. */
619 size_type grow_by( size_type delta, const_reference t ) {
620 return delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) ) : my_early_size;
623 /** Returns iterator pointing to the first new element. */
624 iterator grow_by( size_type delta, const_reference t ) {
625 return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) ) : my_early_size);
629 //! Append minimal sequence of elements such that size()>=n.
631 /** The new elements are default constructed. Blocks until all elements in range [0..n) are allocated.
632 May return while other elements are being constructed by other threads. */
633 void grow_to_at_least( size_type n ) {
634 if( n ) internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL );
637 /** The new elements are default constructed. Blocks until all elements in range [0..n) are allocated.
638 May return while other elements are being constructed by other threads.
639 Returns iterator that points to beginning of appended sequence.
640 If no elements were appended, returns iterator pointing to nth element. */
641 iterator grow_to_at_least( size_type n ) {
644 m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL );
647 return iterator(*this, m);
653 size_type push_back( const_reference item )
655 /** Returns iterator pointing to the new element. */
656 iterator push_back( const_reference item )
660 void *ptr = internal_push_back(sizeof(T),k);
661 internal_loop_guide loop(1, ptr);
666 return iterator(*this, k, ptr);
670 //! Get reference to element at given index.
671 /** This method is thread-safe for concurrent reads, and also while growing the vector,
672 as long as the calling thread has checked that index<size(). */
673 reference operator[]( size_type index ) {
674 return internal_subscript(index);
677 //! Get const reference to element at given index.
678 const_reference operator[]( size_type index ) const {
679 return internal_subscript(index);
682 //! Get reference to element at given index. Throws exceptions on errors.
683 reference at( size_type index ) {
684 return internal_subscript_with_exceptions(index);
687 //! Get const reference to element at given index. Throws exceptions on errors.
688 const_reference at( size_type index ) const {
689 return internal_subscript_with_exceptions(index);
692 //! Get range for iterating with parallel algorithms
693 range_type range( size_t grainsize = 1) {
694 return range_type( begin(), end(), grainsize );
697 //! Get const range for iterating with parallel algorithms
698 const_range_type range( size_t grainsize = 1 ) const {
699 return const_range_type( begin(), end(), grainsize );
701 //------------------------------------------------------------------------
703 //------------------------------------------------------------------------
704 //! Return size of vector. It may include elements under construction
// Clamp the requested size (my_early_size, which grow operations bump before
// storage exists) to the allocated capacity, so size() never reports
// elements whose segments are not yet allocated.
705 size_type size() const {
706 size_type sz = my_early_size, cp = internal_capacity();
707 return cp < sz ? cp : sz;
// Note: tests my_early_size directly, not the clamped size() above, so a
// vector with pending growth is reported non-empty.
710 //! Return true if vector is not empty or has elements under construction at least.
711 bool empty() const {return !my_early_size;}
713 //! Maximum size to which array can grow without allocating more memory. Concurrent allocations are not included in the value.
714 size_type capacity() const {return internal_capacity();}
716 //! Allocate enough space to grow to size n without having to allocate more memory later.
717 /** Like most of the methods provided for STL compatibility, this method is *not* thread safe.
718 The capacity afterwards may be bigger than the requested reservation. */
719 void reserve( size_type n ) {
721 internal_reserve(n, sizeof(T), max_size());
724 //! Resize the vector. Not thread-safe.
725 void resize( size_type n ) {
726 internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
729 //! Resize the vector, copy t for new elements. Not thread-safe.
730 void resize( size_type n, const_reference t ) {
731 internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
735 //! An alias for shrink_to_fit()
736 void compact() {shrink_to_fit();}
737 #endif /* TBB_DEPRECATED */
739 //! Optimize memory usage and fragmentation.
740 void shrink_to_fit();
742 //! Upper bound on argument to reserve.
743 size_type max_size() const {return (~size_type(0))/sizeof(T);}
745 //------------------------------------------------------------------------
747 //------------------------------------------------------------------------
750 iterator begin() {return iterator(*this,0);}
752 iterator end() {return iterator(*this,size());}
753 //! start const iterator
754 const_iterator begin() const {return const_iterator(*this,0);}
755 //! end const iterator
756 const_iterator end() const {return const_iterator(*this,size());}
757 //! start const iterator
758 const_iterator cbegin() const {return const_iterator(*this,0);}
759 //! end const iterator
760 const_iterator cend() const {return const_iterator(*this,size());}
761 //! reverse start iterator
762 reverse_iterator rbegin() {return reverse_iterator(end());}
763 //! reverse end iterator
764 reverse_iterator rend() {return reverse_iterator(begin());}
765 //! reverse start const iterator
766 const_reverse_iterator rbegin() const {return const_reverse_iterator(end());}
767 //! reverse end const iterator
768 const_reverse_iterator rend() const {return const_reverse_iterator(begin());}
769 //! reverse start const iterator
770 const_reverse_iterator crbegin() const {return const_reverse_iterator(end());}
771 //! reverse end const iterator
772 const_reverse_iterator crend() const {return const_reverse_iterator(begin());}
775 __TBB_ASSERT( size()>0, NULL);
776 return static_cast<T*>(my_segment[0].array)[0];
778 //! the first item const
779 const_reference front() const {
780 __TBB_ASSERT( size()>0, NULL);
781 return static_cast<const T*>(my_segment[0].array)[0];
785 __TBB_ASSERT( size()>0, NULL);
786 return internal_subscript( size()-1 );
788 //! the last item const
789 const_reference back() const {
790 __TBB_ASSERT( size()>0, NULL);
791 return internal_subscript( size()-1 );
793 //! return allocator object
794 allocator_type get_allocator() const { return this->my_allocator; }
796 //! assign n items by copying t item
797 void assign(size_type n, const_reference t) {
799 internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
802 //! assign range [first, last)
804 void assign(I first, I last) {
805 clear(); internal_assign_range( first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
808 //! swap two instances
809 void swap(concurrent_vector &vector) {
810 if( this != &vector ) {
811 concurrent_vector_base_v3::internal_swap(static_cast<concurrent_vector_base_v3&>(vector));
812 std::swap(this->my_allocator, vector.my_allocator);
816 //! Clear container while keeping memory allocated.
817 /** To free up the memory, use in conjunction with method compact(). Not thread safe **/
819 internal_clear(&destroy_array);
822 //! Clear and destroy vector.
823 ~concurrent_vector() {
824 segment_t *table = my_segment;
825 internal_free_segments( reinterpret_cast<void**>(table), internal_clear(&destroy_array), my_first_block );
826 // base class destructor call should be then
829 const internal::concurrent_vector_base_v3 &internal_vector_base() const { return *this; }
832 static void *internal_allocator(internal::concurrent_vector_base_v3 &vb, size_t k) {
833 return static_cast<concurrent_vector<T, A>&>(vb).my_allocator.allocate(k);
835 //! Free k segments from table
836 void internal_free_segments(void *table[], segment_index_t k, segment_index_t first_block);
838 //! Get reference to element at given index.
839 T& internal_subscript( size_type index ) const;
841 //! Get reference to element at given index with errors checks
842 T& internal_subscript_with_exceptions( size_type index ) const;
844 //! assign n items by copying t
845 void internal_assign_n(size_type n, const_pointer p) {
846 internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(p), &destroy_array, p? &initialize_array_by : &initialize_array );
850 template<bool B> class is_integer_tag;
852 //! assign integer items by copying when arguments are treated as iterators. See C++ Standard 2003 23.1.1p9
854 void internal_assign_range(I first, I last, is_integer_tag<true> *) {
855 internal_assign_n(static_cast<size_type>(first), &static_cast<T&>(last));
857 //! inline proxy assign by iterators
859 void internal_assign_range(I first, I last, is_integer_tag<false> *) {
860 internal_assign_iterators(first, last);
862 //! assign by iterators
864 void internal_assign_iterators(I first, I last);
866 //! Construct n instances of T, starting at "begin".
867 static void __TBB_EXPORTED_FUNC initialize_array( void* begin, const void*, size_type n );
869 //! Construct n instances of T, starting at "begin".
870 static void __TBB_EXPORTED_FUNC initialize_array_by( void* begin, const void* src, size_type n );
872 //! Construct n instances of T, starting at "begin".
873 static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n );
875 //! Assign n instances of T, starting at "begin".
876 static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n );
878 //! Destroy n instances of T, starting at "begin".
879 static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n );
//! Exception-aware helper class for filling a segment via user-class operations that may throw.
// Tracks progress in i; if an element's constructor/assignment throws, the destructor
// zero-fills the not-yet-initialized tail so later cleanup passes see a defined state.
class internal_loop_guide : internal::no_copy {
    //! Remember target array and element count; no elements are touched yet.
    internal_loop_guide(size_type ntrials, void *ptr)
        : array(static_cast<pointer>(ptr)), n(ntrials), i(0) {}
    void init() { for(; i < n; ++i) new( &array[i] ) T(); } // default-construct elements [i, n)
    void init(const void *src) { for(; i < n; ++i) new( &array[i] ) T(*static_cast<const T*>(src)); } // copy-construct every element from the single value *src
    void copy(const void *src) { for(; i < n; ++i) new( &array[i] ) T(static_cast<const T*>(src)[i]); } // element-wise copy-construction from array src
    void assign(const void *src) { for(; i < n; ++i) array[i] = static_cast<const T*>(src)[i]; } // element-wise copy-assignment from array src
    template<class I> void iterate(I &src) { for(; i < n; ++i, ++src) new( &array[i] ) T( *src ); } // construct from an iterator, advancing it in step
    ~internal_loop_guide() {
        if(i < n) // an exception was raised: zero the rest of the items
            std::memset(array+i, 0, (n-i)*sizeof(value_type));
901 template<typename T, class A>
902 void concurrent_vector<T, A>::shrink_to_fit() {
903 internal_segments_table old;
905 if( internal_compact( sizeof(T), &old, &destroy_array, ©_array ) )
906 internal_free_segments( old.table, pointers_per_long_table, old.first_block ); // free joined and unnecessary segments
908 if( old.first_block ) // free segment allocated for compacting. Only for support of exceptions in ctor of user T[ype]
909 internal_free_segments( old.table, 1, old.first_block );
template<typename T, class A>
void concurrent_vector<T, A>::internal_free_segments(void *table[], segment_index_t k, segment_index_t first_block) {
    // Deallocate segments above first_block one by one, then release table[0] —
    // presumably the joined allocation backing segments [0, first_block) — as one block.
    while( k > first_block ) {
        T* array = static_cast<T*>(table[k]);
        // vector_allocation_error_flag marks a segment whose allocation failed;
        // only pointers strictly above the flag value are real segments.
        if( array > internal::vector_allocation_error_flag ) // check for correct segment pointer
            this->my_allocator.deallocate( array, segment_size(k) );
    T* array = static_cast<T*>(table[0]);
    if( array > internal::vector_allocation_error_flag ) {
        __TBB_ASSERT( first_block > 0, NULL );
        while(k > 0) table[--k] = NULL; // clear remaining entries before freeing the shared block
        this->my_allocator.deallocate( array, segment_size(first_block) );
template<typename T, class A>
T& concurrent_vector<T, A>::internal_subscript( size_type index ) const {
    // Unchecked element access used by operator[]: debug-only asserts, no throwing.
    __TBB_ASSERT( index < my_early_size, "index out of bounds" );
    // NOTE(review): j is declared on a line elided from this chunk (presumably j = index,
    // adjusted in place by segment_base_index_of) — confirm against the full sources.
    segment_index_t k = segment_base_index_of( j );
    __TBB_ASSERT( (segment_t*)my_segment != my_storage || k < pointers_per_short_table, "index is being allocated" );
    // no need in __TBB_load_with_acquire since thread works in own space or gets
#if TBB_USE_THREADING_TOOLS
    T* array = static_cast<T*>( tbb::internal::itt_load_pointer_v3(&my_segment[k].array)); // instrumented load so race-analysis tools observe the read
    T* array = static_cast<T*>(my_segment[k].array);
#endif /* TBB_USE_THREADING_TOOLS */
    __TBB_ASSERT( array != internal::vector_allocation_error_flag, "the instance is broken by bad allocation. Use at() instead" );
    __TBB_ASSERT( array, "index is being allocated" );
template<typename T, class A>
T& concurrent_vector<T, A>::internal_subscript_with_exceptions( size_type index ) const {
    // Checked element access backing at(): validates the index, the segment table
    // state, and the segment pointer itself, throwing instead of asserting.
    if( index >= my_early_size )
        internal::throw_exception(internal::eid_out_of_range); // throw std::out_of_range
    segment_index_t k = segment_base_index_of( j );
    if( (segment_t*)my_segment == my_storage && k >= pointers_per_short_table )
        internal::throw_exception(internal::eid_segment_range_error); // throw std::range_error
    void *array = my_segment[k].array; // no need in __TBB_load_with_acquire
    if( array <= internal::vector_allocation_error_flag ) // check for correct segment pointer
        internal::throw_exception(internal::eid_index_range_error); // throw std::range_error
    return static_cast<T*>(array)[j];
template<typename T, class A> template<class I>
void concurrent_vector<T, A>::internal_assign_iterators(I first, I last) {
    // Fill an empty vector from [first, last); std::distance is taken up front,
    // so the iterators must be at least forward iterators.
    __TBB_ASSERT(my_early_size == 0, NULL);
    size_type n = std::distance(first, last);
    internal_reserve(n, sizeof(T), max_size()); // allocate all needed segments before constructing
    segment_index_t k = 0;
    size_type sz = segment_size( my_first_block );
    internal_loop_guide loop(sz, my_segment[k].array); // construct one segment's worth of elements from the iterators
    if( !k ) k = my_first_block;
    else { ++k; sz <<= 1; } // presumably advancing segment index/size per iteration — loop framing is elided from this chunk
    internal_loop_guide loop(n, my_segment[k].array); // final segment; n presumably holds the remaining element count here
template<typename T, class A>
void concurrent_vector<T, A>::initialize_array( void* begin, const void *, size_type n ) {
    // Default-construct n elements at begin; the second argument is unused and exists
    // only so all internal array-operation callbacks share one signature.
    internal_loop_guide loop(n, begin); loop.init();
template<typename T, class A>
void concurrent_vector<T, A>::initialize_array_by( void* begin, const void *src, size_type n ) {
    // Copy-construct n elements at begin, each one a copy of the single value *src.
    internal_loop_guide loop(n, begin); loop.init(src);
template<typename T, class A>
void concurrent_vector<T, A>::copy_array( void* dst, const void* src, size_type n ) {
    // Copy-construct n elements at dst from the n corresponding source elements at src.
    internal_loop_guide loop(n, dst); loop.copy(src);
template<typename T, class A>
void concurrent_vector<T, A>::assign_array( void* dst, const void* src, size_type n ) {
    // Copy-assign (not construct) n already-constructed elements at dst from src.
    internal_loop_guide loop(n, dst); loop.assign(src);
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
// Workaround for overzealous compiler warning (4189: local variable initialized but not referenced)
#pragma warning (push)
#pragma warning (disable: 4189)
template<typename T, class A>
void concurrent_vector<T, A>::destroy_array( void* begin, size_type n ) {
    // Destroy n elements in reverse of construction order.
    T* array = static_cast<T*>(begin);
    for( size_type j=n; j>0; --j )
        array[j-1].~T(); // destructors are supposed to not throw any exceptions
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning (pop)
#endif // warning 4189 is back
// concurrent_vector's template functions

//! Equality: same size and element-wise equal. Allocator types need not match.
template<typename T, class A1, class A2>
inline bool operator==(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b) {
    // Simply: return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin());
    if(a.size() != b.size()) return false;
    typename concurrent_vector<T, A1>::const_iterator i(a.begin());
    typename concurrent_vector<T, A2>::const_iterator j(b.begin());
    for(; i != a.end(); ++i, ++j)
        if( !(*i == *j) ) return false; // only operator== on T is required, not operator!=
1030 template<typename T, class A1, class A2>
1031 inline bool operator!=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1032 { return !(a == b); }
1034 template<typename T, class A1, class A2>
1035 inline bool operator<(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1036 { return (std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end())); }
//! Greater-than comparison (body elided from this chunk; presumably b < a — confirm against full sources).
template<typename T, class A1, class A2>
inline bool operator>(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1042 template<typename T, class A1, class A2>
1043 inline bool operator<=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1044 { return !(b < a); }
1046 template<typename T, class A1, class A2>
1047 inline bool operator>=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1048 { return !(a < b); }
//! Non-member swap for concurrent_vector (body elided from this chunk; presumably forwards to a.swap(b)).
template<typename T, class A>
inline void swap(concurrent_vector<T, A> &a, concurrent_vector<T, A> &b)
1056 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && defined(_Wp64)
1057 #pragma warning (pop)
1058 #endif // warning 4267 is back
1060 #endif /* __TBB_concurrent_vector_H */