/*
    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
    you can redistribute it and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
    See the GNU General Public License for more details.  You should have received a copy of
    the GNU General Public License along with Threading Building Blocks; if not, write to the
    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    As a special exception, you may use this file as part of a free software library without
    restriction.  Specifically, if other files instantiate templates or use macros or inline
    functions from this file, or you compile this file and link it with other files to produce
    an executable, this file does not by itself cause the resulting executable to be covered
    by the GNU General Public License.  This exception does not however invalidate any other
    reasons why the executable file might be covered by the GNU General Public License.
*/
21 #ifndef __TBB_concurrent_vector_H
22 #define __TBB_concurrent_vector_H
24 #include "tbb_stddef.h"
25 #include "tbb_exception.h"
27 #include "cache_aligned_allocator.h"
28 #include "blocked_range.h"
29 #include "tbb_machine.h"
30 #include "tbb_profiling.h"
32 #include <cstring> // for memset()
34 #if !TBB_USE_EXCEPTIONS && _MSC_VER
35 // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
36 #pragma warning (push)
37 #pragma warning (disable: 4530)
43 #if !TBB_USE_EXCEPTIONS && _MSC_VER
47 #if _MSC_VER==1500 && !__INTEL_COMPILER
48 // VS2008/VC9 seems to have an issue; limits pull in math.h
49 #pragma warning( push )
50 #pragma warning( disable: 4985 )
52 #include <limits> /* std::numeric_limits */
53 #if _MSC_VER==1500 && !__INTEL_COMPILER
54 #pragma warning( pop )
57 #if __TBB_INITIALIZER_LISTS_PRESENT
58 #include <initializer_list>
61 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
62 // Workaround for overzealous compiler warnings in /Wp64 mode
63 #pragma warning (push)
65 #pragma warning (disable: 4267)
67 #pragma warning (disable: 4127) //warning C4127: conditional expression is constant
72 template<typename T, class A = cache_aligned_allocator<T> >
73 class concurrent_vector;
75 template<typename Container, typename Value>
76 class vector_iterator;
81 //! Bad allocation marker
82 static void *const vector_allocation_error_flag = reinterpret_cast<void*>(size_t(63));
//! Exception helper function
/** Zero-fills the memory of n_of_elements objects of type T starting at array.
    Used for elements whose constructors never ran; per the class documentation,
    T's destructor must work safely on zero-initialized memory. */
template<typename T>
void handle_unconstructed_elements(T* array, size_t n_of_elements){
    std::memset( array, 0, n_of_elements * sizeof(T) );
}
90 //! Base class of concurrent vector implementation.
91 /** @ingroup containers */
92 class concurrent_vector_base_v3 {
95 // Basic types declarations
96 typedef size_t segment_index_t;
97 typedef size_t size_type;
99 // Using enumerations due to Mac linking problems of static const variables
102 default_initial_segments = 1, // 2 initial items
103 //! Number of slots for segment pointers inside the class
104 pointers_per_short_table = 3, // to fit into 8 words of entire structure
105 pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit
108 struct segment_not_used {};
109 struct segment_allocated {};
110 struct segment_allocation_failed {};
113 class segment_value_t {
116 //TODO: More elegant way to grant access to selected functions _only_?
117 friend class segment_t;
118 explicit segment_value_t(void* an_array):array(an_array) {}
120 friend bool operator==(segment_value_t const& lhs, segment_not_used ) { return lhs.array == 0;}
121 friend bool operator==(segment_value_t const& lhs, segment_allocated) { return lhs.array > internal::vector_allocation_error_flag;}
122 friend bool operator==(segment_value_t const& lhs, segment_allocation_failed) { return lhs.array == internal::vector_allocation_error_flag;}
123 template<typename argument_type>
124 friend bool operator!=(segment_value_t const& lhs, argument_type arg) { return ! (lhs == arg);}
127 T* pointer() const { return static_cast<T*>(const_cast<void*>(array)); }
134 segment_t(){ store<relaxed>(segment_not_used());}
135 //Copy ctor and assignment operator are defined to ease using of stl algorithms.
136 //These algorithms usually not a synchronization point, so, semantic is
137 //intentionally relaxed here.
138 segment_t(segment_t const& rhs ){ array.store<relaxed>(rhs.array.load<relaxed>());}
140 void swap(segment_t & rhs ){
141 tbb::internal::swap<relaxed>(array, rhs.array);
144 segment_t& operator=(segment_t const& rhs ){
145 array.store<relaxed>(rhs.array.load<relaxed>());
149 template<memory_semantics M>
150 segment_value_t load() const { return segment_value_t(array.load<M>());}
152 template<memory_semantics M>
153 void store(segment_not_used) {
157 template<memory_semantics M>
158 void store(segment_allocation_failed) {
159 __TBB_ASSERT(load<relaxed>() != segment_allocated(),"transition from \"allocated\" to \"allocation failed\" state looks non-logical");
160 array.store<M>(internal::vector_allocation_error_flag);
163 template<memory_semantics M>
164 void store(void* allocated_segment_pointer) __TBB_NOEXCEPT(true) {
165 __TBB_ASSERT(segment_value_t(allocated_segment_pointer) == segment_allocated(),
166 "other overloads of store should be used for marking segment as not_used or allocation_failed" );
167 array.store<M>(allocated_segment_pointer);
172 __TBB_ASSERT(load<relaxed>() != segment_allocated(), "should have been freed by clear" );
174 #endif /* TBB_USE_ASSERT */
176 friend void swap(segment_t & , segment_t & ) __TBB_NOEXCEPT(true);
180 //! allocator function pointer
181 void* (*vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t);
183 //! count of segments in the first block
184 atomic<size_type> my_first_block;
186 //! Requested size of vector
187 atomic<size_type> my_early_size;
189 //! Pointer to the segments table
190 atomic<segment_t*> my_segment;
192 //! embedded storage of segment pointers
193 segment_t my_storage[pointers_per_short_table];
197 concurrent_vector_base_v3() {
198 //Here the semantic is intentionally relaxed.
199 //The reason this is next:
200 //Object that is in middle of construction (i.e. its constructor is not yet finished)
201 //cannot be used concurrently until the construction is finished.
202 //Thus to flag other threads that construction is finished, some synchronization with
203 //acquire-release semantic should be done by the (external) code that uses the vector.
204 //So, no need to do the synchronization inside the vector.
206 my_early_size.store<relaxed>(0);
207 my_first_block.store<relaxed>(0); // here is not default_initial_segments
208 my_segment.store<relaxed>(my_storage);
211 __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3();
213 //these helpers methods use the fact that segments are allocated so
214 //that every segment size is a (increasing) power of 2.
215 //with one exception 0 segment has size of 2 as well segment 1;
216 //e.g. size of segment with index of 3 is 2^3=8;
217 static segment_index_t segment_index_of( size_type index ) {
218 return segment_index_t( __TBB_Log2( index|1 ) );
221 static segment_index_t segment_base( segment_index_t k ) {
222 return (segment_index_t(1)<<k & ~segment_index_t(1));
225 static inline segment_index_t segment_base_index_of( segment_index_t &index ) {
226 segment_index_t k = segment_index_of( index );
227 index -= segment_base(k);
231 static size_type segment_size( segment_index_t k ) {
232 return segment_index_t(1)<<k; // fake value for k==0
236 static bool is_first_element_in_segment(size_type element_index){
237 //check if element_index is a power of 2 that is at least 2.
238 //The idea is to detect if the iterator crosses a segment boundary,
239 //and 2 is the minimal index for which it's true
240 __TBB_ASSERT(element_index, "there should be no need to call "
241 "is_first_element_in_segment for 0th element" );
242 return is_power_of_two_factor( element_index, 2 );
245 //! An operation on an n-element array starting at begin.
246 typedef void (__TBB_EXPORTED_FUNC *internal_array_op1)(void* begin, size_type n );
248 //! An operation on n-element destination array and n-element source array.
249 typedef void (__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, const void* src, size_type n );
251 //! Internal structure for compact()
252 struct internal_segments_table {
253 segment_index_t first_block;
254 segment_t table[pointers_per_long_table];
257 void __TBB_EXPORTED_METHOD internal_reserve( size_type n, size_type element_size, size_type max_size );
258 size_type __TBB_EXPORTED_METHOD internal_capacity() const;
259 void internal_grow( size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src );
260 size_type __TBB_EXPORTED_METHOD internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, const void *src );
261 void* __TBB_EXPORTED_METHOD internal_push_back( size_type element_size, size_type& index );
262 segment_index_t __TBB_EXPORTED_METHOD internal_clear( internal_array_op1 destroy );
263 void* __TBB_EXPORTED_METHOD internal_compact( size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy );
264 void __TBB_EXPORTED_METHOD internal_copy( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op2 copy );
265 void __TBB_EXPORTED_METHOD internal_assign( const concurrent_vector_base_v3& src, size_type element_size,
266 internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy );
268 void __TBB_EXPORTED_METHOD internal_throw_exception(size_type) const;
269 void __TBB_EXPORTED_METHOD internal_swap(concurrent_vector_base_v3& v);
271 void __TBB_EXPORTED_METHOD internal_resize( size_type n, size_type element_size, size_type max_size, const void *src,
272 internal_array_op1 destroy, internal_array_op2 init );
273 size_type __TBB_EXPORTED_METHOD internal_grow_to_at_least_with_result( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );
275 //! Deprecated entry point for backwards compatibility to TBB 2.1.
276 void __TBB_EXPORTED_METHOD internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );
278 //! Private functionality
282 template<typename Container, typename Value>
283 friend class vector_iterator;
287 inline void swap(concurrent_vector_base_v3::segment_t & lhs, concurrent_vector_base_v3::segment_t & rhs) __TBB_NOEXCEPT(true) {
291 typedef concurrent_vector_base_v3 concurrent_vector_base;
293 //! Meets requirements of a forward iterator for STL and a Value for a blocked_range.*/
294 /** Value is either the T or const T type of the container.
295 @ingroup containers */
296 template<typename Container, typename Value>
297 class vector_iterator
299 //! concurrent_vector over which we are iterating.
300 Container* my_vector;
302 //! Index into the vector
305 //! Caches my_vector->internal_subscript(my_index)
306 /** NULL if cached value is not available */
307 mutable Value* my_item;
309 template<typename C, typename T>
310 friend vector_iterator<C,T> operator+( ptrdiff_t offset, const vector_iterator<C,T>& v );
312 template<typename C, typename T, typename U>
313 friend bool operator==( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
315 template<typename C, typename T, typename U>
316 friend bool operator<( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
318 template<typename C, typename T, typename U>
319 friend ptrdiff_t operator-( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
321 template<typename C, typename U>
322 friend class internal::vector_iterator;
324 #if !__TBB_TEMPLATE_FRIENDS_BROKEN
325 template<typename T, class A>
326 friend class tbb::concurrent_vector;
331 vector_iterator( const Container& vector, size_t index, void *ptr = 0 ) :
332 my_vector(const_cast<Container*>(&vector)),
334 my_item(static_cast<Value*>(ptr))
338 //! Default constructor
339 vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {}
341 vector_iterator( const vector_iterator<Container,typename Container::value_type>& other ) :
342 my_vector(other.my_vector),
343 my_index(other.my_index),
344 my_item(other.my_item)
347 vector_iterator operator+( ptrdiff_t offset ) const {
348 return vector_iterator( *my_vector, my_index+offset );
350 vector_iterator &operator+=( ptrdiff_t offset ) {
355 vector_iterator operator-( ptrdiff_t offset ) const {
356 return vector_iterator( *my_vector, my_index-offset );
358 vector_iterator &operator-=( ptrdiff_t offset ) {
363 Value& operator*() const {
364 Value* item = my_item;
366 item = my_item = &my_vector->internal_subscript(my_index);
368 __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "corrupt cache" );
371 Value& operator[]( ptrdiff_t k ) const {
372 return my_vector->internal_subscript(my_index+k);
374 Value* operator->() const {return &operator*();}
377 vector_iterator& operator++() {
378 size_t element_index = ++my_index;
380 //TODO: consider using of knowledge about "first_block optimization" here as well?
381 if( concurrent_vector_base::is_first_element_in_segment(element_index)) {
382 //if the iterator crosses a segment boundary, the pointer become invalid
383 //as possibly next segment is in another memory location
393 vector_iterator& operator--() {
394 __TBB_ASSERT( my_index>0, "operator--() applied to iterator already at beginning of concurrent_vector" );
395 size_t element_index = my_index--;
397 if(concurrent_vector_base::is_first_element_in_segment(element_index)) {
398 //if the iterator crosses a segment boundary, the pointer become invalid
399 //as possibly next segment is in another memory location
409 vector_iterator operator++(int) {
410 vector_iterator result = *this;
416 vector_iterator operator--(int) {
417 vector_iterator result = *this;
424 typedef ptrdiff_t difference_type;
425 typedef Value value_type;
426 typedef Value* pointer;
427 typedef Value& reference;
428 typedef std::random_access_iterator_tag iterator_category;
431 template<typename Container, typename T>
432 vector_iterator<Container,T> operator+( ptrdiff_t offset, const vector_iterator<Container,T>& v ) {
433 return vector_iterator<Container,T>( *v.my_vector, v.my_index+offset );
436 template<typename Container, typename T, typename U>
437 bool operator==( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
438 return i.my_index==j.my_index && i.my_vector == j.my_vector;
441 template<typename Container, typename T, typename U>
442 bool operator!=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
446 template<typename Container, typename T, typename U>
447 bool operator<( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
448 return i.my_index<j.my_index;
451 template<typename Container, typename T, typename U>
452 bool operator>( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
456 template<typename Container, typename T, typename U>
457 bool operator>=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
461 template<typename Container, typename T, typename U>
462 bool operator<=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
466 template<typename Container, typename T, typename U>
467 ptrdiff_t operator-( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
468 return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index);
//! Base class holding the allocator instance for concurrent_vector.
/** Rebinds the user-supplied allocator A to the element type T.  The visible
    span had lost the `public:` access specifier and the closing brace; they are
    restored here — allocator_type is consumed publicly (e.g. by
    concurrent_vector's allocator_type typedef). */
template<typename T, class A>
class allocator_base {
public:
    //! A rebound to allocate objects of type T.
    typedef typename A::template rebind<T>::other allocator_type;
    allocator_type my_allocator;
    allocator_base(const allocator_type &a = allocator_type() ) : my_allocator(a) {}
};
482 } // namespace internal
485 //! Concurrent vector container
486 /** concurrent_vector is a container having the following main properties:
487 - It provides random indexed access to its elements. The index of the first element is 0.
488 - It ensures safe concurrent growing its size (different threads can safely append new elements).
489 - Adding new elements does not invalidate existing iterators and does not change indices of existing items.
492 The class meets all Container Requirements and Reversible Container Requirements from
493 C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1). But it doesn't meet
494 Sequence Requirements due to absence of insert() and erase() methods.
496 @par Exception Safety
497 Methods working with memory allocation and/or new elements construction can throw an
498 exception if allocator fails to allocate memory or element's default constructor throws one.
499 Concurrent vector's element of type T must conform to the following requirements:
500 - Throwing an exception is forbidden for destructor of T.
501 - Default constructor of T must not throw an exception OR its non-virtual destructor must safely work when its object memory is zero-initialized.
503 Otherwise, the program's behavior is undefined.
505 If an exception happens inside growth or assignment operation, an instance of the vector becomes invalid unless it is stated otherwise in the method documentation.
507 - There are no guarantees that all items were initialized by a constructor. The rest of items is zero-filled, including item where exception happens.
508 - An invalid vector instance cannot be repaired; it is unable to grow anymore.
509 - Size and capacity reported by the vector are incorrect, and calculated as if the failed operation were successful.
510 - Attempt to access not allocated elements using operator[] or iterators results in access violation or segmentation fault exception, and in case of using at() method a C++ exception is thrown.
512 If a concurrent grow operation successfully completes, all the elements it has added to the vector will remain valid and accessible even if one of subsequent grow operations fails.
515 Unlike an STL vector, a concurrent_vector does not move existing elements if it needs
516 to allocate more memory. The container is divided into a series of contiguous arrays of
517 elements. The first reservation, growth, or assignment operation determines the size of
518 the first array. Using small number of elements as initial size incurs fragmentation that
519 may increase element access time. Internal layout can be optimized by method compact() that
520 merges several smaller arrays into one solid.
522 @par Changes since TBB 2.1
523 - Fixed guarantees of concurrent_vector::size() and grow_to_at_least() methods to assure elements are allocated.
524 - Methods end()/rbegin()/back() are partly thread-safe since they use size() to get the end of vector
525 - Added resize() methods (not thread-safe)
526 - Added cbegin/cend/crbegin/crend methods
527 - Changed return type of methods grow* and push_back to iterator
529 @par Changes since TBB 2.0
530 - Implemented exception-safety guarantees
531 - Added template argument for allocator
532 - Added allocator argument in constructors
533 - Faster index calculation
534 - First growth call specifies a number of segments to be merged in the first allocation.
535 - Fixed memory blow up for swarm of vector's instances of small size
536 - Added grow_by(size_type n, const_reference t) growth using copying constructor to init new items.
537 - Added STL-like constructors.
538 - Added operators ==, < and derivatives
539 - Added at() method, approved for using after an exception was thrown inside the vector
540 - Added get_allocator() method.
541 - Added assign() methods
542 - Added compact() method to defragment first segments
543 - Added swap() method
544 - range() defaults on grainsize = 1 supporting auto grainsize algorithms.
546 @ingroup containers */
547 template<typename T, class A>
548 class concurrent_vector: protected internal::allocator_base<T, A>,
549 private internal::concurrent_vector_base {
552 class generic_range_type: public blocked_range<I> {
554 typedef T value_type;
555 typedef T& reference;
556 typedef const T& const_reference;
558 typedef ptrdiff_t difference_type;
559 generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
561 generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
562 generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
565 template<typename C, typename U>
566 friend class internal::vector_iterator;
569 //------------------------------------------------------------------------
570 // STL compatible types
571 //------------------------------------------------------------------------
572 typedef internal::concurrent_vector_base_v3::size_type size_type;
573 typedef typename internal::allocator_base<T, A>::allocator_type allocator_type;
575 typedef T value_type;
576 typedef ptrdiff_t difference_type;
577 typedef T& reference;
578 typedef const T& const_reference;
580 typedef const T *const_pointer;
582 typedef internal::vector_iterator<concurrent_vector,T> iterator;
583 typedef internal::vector_iterator<concurrent_vector,const T> const_iterator;
585 #if !defined(_MSC_VER) || _CPPLIB_VER>=300
586 // Assume ISO standard definition of std::reverse_iterator
587 typedef std::reverse_iterator<iterator> reverse_iterator;
588 typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
590 // Use non-standard std::reverse_iterator
591 typedef std::reverse_iterator<iterator,T,T&,T*> reverse_iterator;
592 typedef std::reverse_iterator<const_iterator,T,const T&,const T*> const_reverse_iterator;
593 #endif /* defined(_MSC_VER) && (_MSC_VER<1300) */
595 //------------------------------------------------------------------------
596 // Parallel algorithm support
597 //------------------------------------------------------------------------
598 typedef generic_range_type<iterator> range_type;
599 typedef generic_range_type<const_iterator> const_range_type;
601 //------------------------------------------------------------------------
602 // STL compatible constructors & destructors
603 //------------------------------------------------------------------------
605 //! Construct empty vector.
606 explicit concurrent_vector(const allocator_type &a = allocator_type())
607 : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
609 vector_allocator_ptr = &internal_allocator;
612 //Constructors are not required to have synchronization
613 //(for more details see comment in the concurrent_vector_base constructor).
614 #if __TBB_INITIALIZER_LISTS_PRESENT
615 //! Constructor from initializer_list
616 concurrent_vector(std::initializer_list<T> init_list, const allocator_type &a = allocator_type())
617 : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
619 vector_allocator_ptr = &internal_allocator;
621 internal_assign_iterators(init_list.begin(), init_list.end());
623 segment_t *table = my_segment.load<relaxed>();;
624 internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
629 #endif //# __TBB_INITIALIZER_LISTS_PRESENT
631 //! Copying constructor
632 concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() )
633 : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
635 vector_allocator_ptr = &internal_allocator;
637 internal_copy(vector, sizeof(T), ©_array);
639 segment_t *table = my_segment.load<relaxed>();
640 internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
645 #if __TBB_CPP11_RVALUE_REF_PRESENT
647 //TODO add __TBB_NOEXCEPT(true) and static_assert(std::has_nothrow_move_constructor<A>::value)
648 concurrent_vector( concurrent_vector&& source)
649 : internal::allocator_base<T, A>(std::move(source)), internal::concurrent_vector_base()
651 vector_allocator_ptr = &internal_allocator;
652 concurrent_vector_base_v3::internal_swap(source);
655 concurrent_vector( concurrent_vector&& source, const allocator_type& a)
656 : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
658 vector_allocator_ptr = &internal_allocator;
659 //C++ standard requires instances of an allocator being compared for equality,
660 //which means that memory allocated by one instance is possible to deallocate with the other one.
661 if (a == source.my_allocator) {
662 concurrent_vector_base_v3::internal_swap(source);
665 internal_copy(source, sizeof(T), &move_array);
667 segment_t *table = my_segment.load<relaxed>();
668 internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
676 //! Copying constructor for vector with different allocator type
678 concurrent_vector( const concurrent_vector<T, M>& vector, const allocator_type& a = allocator_type() )
679 : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
681 vector_allocator_ptr = &internal_allocator;
683 internal_copy(vector.internal_vector_base(), sizeof(T), ©_array);
685 segment_t *table = my_segment.load<relaxed>();
686 internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
691 //! Construction with initial size specified by argument n
692 explicit concurrent_vector(size_type n)
694 vector_allocator_ptr = &internal_allocator;
696 internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
698 segment_t *table = my_segment.load<relaxed>();
699 internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
704 //! Construction with initial size specified by argument n, initialization by copying of t, and given allocator instance
705 concurrent_vector(size_type n, const_reference t, const allocator_type& a = allocator_type())
706 : internal::allocator_base<T, A>(a)
708 vector_allocator_ptr = &internal_allocator;
710 internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
712 segment_t *table = my_segment.load<relaxed>();
713 internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
718 //! Construction with copying iteration range and given allocator instance
720 concurrent_vector(I first, I last, const allocator_type &a = allocator_type())
721 : internal::allocator_base<T, A>(a)
723 vector_allocator_ptr = &internal_allocator;
725 internal_assign_range(first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
727 segment_t *table = my_segment.load<relaxed>();
728 internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
734 concurrent_vector& operator=( const concurrent_vector& vector ) {
735 if( this != &vector )
736 internal_assign(vector, sizeof(T), &destroy_array, &assign_array, ©_array);
740 #if __TBB_CPP11_RVALUE_REF_PRESENT
741 //TODO: add __TBB_NOEXCEPT()
743 concurrent_vector& operator=( concurrent_vector&& other ) {
744 __TBB_ASSERT(this != &other, "Move assignment to itself is prohibited ");
745 typedef typename tbb::internal::allocator_traits<A>::propagate_on_container_move_assignment pocma_t;
746 if(pocma_t::value || this->my_allocator == other.my_allocator) {
747 concurrent_vector trash (std::move(*this));
748 internal_swap(other);
749 if (pocma_t::value) {
750 this->my_allocator = std::move(other.my_allocator);
753 internal_assign(other, sizeof(T), &destroy_array, &move_assign_array, &move_array);
758 //TODO: add an template assignment operator? (i.e. with different element type)
760 //! Assignment for vector with different allocator type
762 concurrent_vector& operator=( const concurrent_vector<T, M>& vector ) {
763 if( static_cast<void*>( this ) != static_cast<const void*>( &vector ) )
764 internal_assign(vector.internal_vector_base(),
765 sizeof(T), &destroy_array, &assign_array, ©_array);
769 #if __TBB_INITIALIZER_LISTS_PRESENT
770 //! Assignment for initializer_list
771 concurrent_vector& operator=( std::initializer_list<T> init_list ) {
772 internal_clear(&destroy_array);
773 internal_assign_iterators(init_list.begin(), init_list.end());
776 #endif //#if __TBB_INITIALIZER_LISTS_PRESENT
778 //------------------------------------------------------------------------
779 // Concurrent operations
780 //------------------------------------------------------------------------
781 //! Grow by "delta" elements.
782 /** Returns iterator pointing to the first new element. */
783 iterator grow_by( size_type delta ) {
784 return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size.load());
787 //! Grow by "delta" elements using copying constructor.
788 /** Returns iterator pointing to the first new element. */
789 iterator grow_by( size_type delta, const_reference t ) {
790 return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) ) : my_early_size.load());
793 /** Returns iterator pointing to the first new element. */
795 iterator grow_by( I first, I last ) {
796 typename std::iterator_traits<I>::difference_type delta = std::distance(first, last);
797 __TBB_ASSERT( delta >= 0, NULL);
799 return iterator(*this, delta ? internal_grow_by(delta, sizeof(T), ©_range<I>, static_cast<const void*>(&first)) : my_early_size.load());
802 #if __TBB_INITIALIZER_LISTS_PRESENT
803 /** Returns iterator pointing to the first new element. */
804 iterator grow_by( std::initializer_list<T> init_list ) {
805 return grow_by( init_list.begin(), init_list.end() );
807 #endif //#if __TBB_INITIALIZER_LISTS_PRESENT
809 //! Append minimal sequence of elements such that size()>=n.
810 /** The new elements are default constructed. Blocks until all elements in range [0..n) are allocated.
811 May return while other elements are being constructed by other threads.
812 Returns iterator that points to beginning of appended sequence.
813 If no elements were appended, returns iterator pointing to nth element. */
814 iterator grow_to_at_least( size_type n ) {
817 m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL );
820 return iterator(*this, m);
823 /** Analogous to grow_to_at_least( size_type n ) with exception that the new
824 elements are initialized by copying of t instead of default construction. */
825 iterator grow_to_at_least( size_type n, const_reference t ) {
828 m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array_by, &t);
831 return iterator(*this, m);
835 /** Returns iterator pointing to the new element. */
836 iterator push_back( const_reference item )
838 push_back_helper prolog(*this);
839 new(prolog.internal_push_back_result()) T(item);
840 return prolog.return_iterator_and_dismiss();
843 #if __TBB_CPP11_RVALUE_REF_PRESENT
844 //! Push item, move-aware
845 /** Returns iterator pointing to the new element. */
846 iterator push_back( T&& item )
848 push_back_helper prolog(*this);
849 new(prolog.internal_push_back_result()) T(std::move(item));
850 return prolog.return_iterator_and_dismiss();
852 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
853 //! Push item, create item "in place" with provided arguments
854 /** Returns iterator pointing to the new element. */
855 template<typename... Args>
856 iterator emplace_back( Args&&... args )
858 push_back_helper prolog(*this);
859 new(prolog.internal_push_back_result()) T(std::forward<Args>(args)...);
860 return prolog.return_iterator_and_dismiss();
862 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
863 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
    //! Get reference to element at given index.
    /** This method is thread-safe for concurrent reads, and also while growing the vector,
        as long as the calling thread has checked that index < size(). */
    reference operator[]( size_type index ) {
        return internal_subscript(index);  // unchecked access (asserts in debug builds only)
    //! Get const reference to element at given index.
    const_reference operator[]( size_type index ) const {
        return internal_subscript(index);  // unchecked access
    //! Get reference to element at given index. Throws exceptions on errors.
    reference at( size_type index ) {
        return internal_subscript_with_exceptions(index);  // range-checked variant
    //! Get const reference to element at given index. Throws exceptions on errors.
    const_reference at( size_type index ) const {
        return internal_subscript_with_exceptions(index);  // range-checked variant
    //! Get range for iterating with parallel algorithms
    range_type range( size_t grainsize = 1 ) {
        // Splittable [begin, end) range suitable for tbb::parallel_for and friends.
        return range_type( begin(), end(), grainsize );
    //! Get const range for iterating with parallel algorithms
    const_range_type range( size_t grainsize = 1 ) const {
        return const_range_type( begin(), end(), grainsize );
    //------------------------------------------------------------------------
    // Capacity
    //------------------------------------------------------------------------
    //! Return size of vector. It may include elements under construction
    size_type size() const {
        // During concurrent growth my_early_size can momentarily exceed the
        // allocated capacity; clamp so we never report elements with no storage.
        size_type sz = my_early_size, cp = internal_capacity();
        return cp < sz ? cp : sz;
    //! Return false if vector is not empty or has elements under construction at least.
    bool empty() const {return !my_early_size;}
    //! Maximum size to which array can grow without allocating more memory. Concurrent allocations are not included in the value.
    size_type capacity() const {return internal_capacity();}
    //! Allocate enough space to grow to size n without having to allocate more memory later.
    /** Like most of the methods provided for STL compatibility, this method is *not* thread safe.
        The capacity afterwards may be bigger than the requested reservation. */
    void reserve( size_type n ) {
        // max_size() bounds the request; oversize reservations throw from the implementation.
        internal_reserve(n, sizeof(T), max_size());
    //! Resize the vector. Not thread-safe.
    void resize( size_type n ) {
        // Shrinking destroys tail elements (destroy_array); growing default-constructs
        // new ones (initialize_array). NULL: no source element to copy from.
        internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
    //! Resize the vector, copy t for new elements. Not thread-safe.
    void resize( size_type n, const_reference t ) {
        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
    //! Optimize memory usage and fragmentation.
    /** Defined out of line later in this header. */
    void shrink_to_fit();
    //! Upper bound on argument to reserve.
    size_type max_size() const {return (~size_type(0))/sizeof(T);}
    //------------------------------------------------------------------------
    // STL support: forward, const, and reverse iterator accessors
    //------------------------------------------------------------------------
    //! start iterator
    iterator begin() {return iterator(*this,0);}
    //! end iterator (index == size())
    iterator end() {return iterator(*this,size());}
    //! start const iterator
    const_iterator begin() const {return const_iterator(*this,0);}
    //! end const iterator
    const_iterator end() const {return const_iterator(*this,size());}
    //! start const iterator
    const_iterator cbegin() const {return const_iterator(*this,0);}
    //! end const iterator
    const_iterator cend() const {return const_iterator(*this,size());}
    //! reverse start iterator
    reverse_iterator rbegin() {return reverse_iterator(end());}
    //! reverse end iterator
    reverse_iterator rend() {return reverse_iterator(begin());}
    //! reverse start const iterator
    const_reverse_iterator rbegin() const {return const_reverse_iterator(end());}
    //! reverse end const iterator
    const_reverse_iterator rend() const {return const_reverse_iterator(begin());}
    //! reverse start const iterator
    const_reverse_iterator crbegin() const {return const_reverse_iterator(end());}
    //! reverse end const iterator
    const_reverse_iterator crend() const {return const_reverse_iterator(begin());}
        // front(): NOTE(review): the `reference front() {` signature lines are not
        // visible in this excerpt. Element 0 always lives in the first segment.
        __TBB_ASSERT( size()>0, NULL);
        return (my_segment[0].template load<relaxed>().template pointer<T>())[0];
    //! the first item const
    const_reference front() const {
        __TBB_ASSERT( size()>0, NULL);
        return static_cast<const T*>(my_segment[0].array)[0];
        // back(): NOTE(review): the `reference back() {` signature lines are not
        // visible in this excerpt.
        __TBB_ASSERT( size()>0, NULL);
        return internal_subscript( size()-1 );
    //! the last item const
    const_reference back() const {
        __TBB_ASSERT( size()>0, NULL);
        return internal_subscript( size()-1 );
    //! return allocator object
    allocator_type get_allocator() const { return this->my_allocator; }
    //! assign n items by copying t item
    void assign(size_type n, const_reference t) {
        // NOTE(review): a `clear();` call between the signature and the resize is
        // not visible in this excerpt. Resizes to n copies of t; existing elements
        // are destroyed via destroy_array.
        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
    //! assign range [first, last)
    // NOTE(review): the `template<class I>` header for this overload is not visible
    // in this excerpt.
    void assign(I first, I last) {
        // Tag dispatch per C++03 23.1.1p9: integral "iterators" mean assign(n, value),
        // real iterators mean element-wise copy.
        clear(); internal_assign_range( first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
#if __TBB_INITIALIZER_LISTS_PRESENT
    //! assigns an initializer list
    void assign(std::initializer_list<T> init_list) {
        clear(); internal_assign_iterators( init_list.begin(), init_list.end());
#endif //# __TBB_INITIALIZER_LISTS_PRESENT
    //! swap two instances
    void swap(concurrent_vector &vector) {
        // O(1): exchanges segment tables/counters in the base class, then the allocators.
        if( this != &vector ) {
            concurrent_vector_base_v3::internal_swap(static_cast<concurrent_vector_base_v3&>(vector));
            // NOTE(review): a `using std::swap;` line preceding this call is not
            // visible in this excerpt.
            swap(this->my_allocator, vector.my_allocator);
    //! Clear container while keeping memory allocated.
    /** To free up the memory, use in conjunction with method compact(). Not thread safe **/
        // clear(): NOTE(review): the `void clear() {` signature line is not visible
        // in this excerpt. Destroys elements via destroy_array, keeps segments.
        internal_clear(&destroy_array);
    //! Clear and destroy vector.
    ~concurrent_vector() {
        segment_t *table = my_segment.load<relaxed>();
        // internal_clear returns the segment count; all segments are then released.
        internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
        // base class destructor call should be then
    //! Accessor to the base-class representation (used by the library internals).
    const internal::concurrent_vector_base_v3 &internal_vector_base() const { return *this; }
    //! Allocate k items
    static void *internal_allocator(internal::concurrent_vector_base_v3 &vb, size_t k) {
        // Callback invoked by the base class; vb is always this derived type.
        return static_cast<concurrent_vector<T, A>&>(vb).my_allocator.allocate(k);
    //! Free k segments from table
    void internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block);
    //! Get reference to element at given index.
    T& internal_subscript( size_type index ) const;
    //! Get reference to element at given index with errors checks
    T& internal_subscript_with_exceptions( size_type index ) const;
    //! assign n items by copying t
    void internal_assign_n(size_type n, const_pointer p) {
        // p == NULL selects default construction; otherwise each slot is copy-constructed from *p.
        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(p), &destroy_array, p? &initialize_array_by : &initialize_array );
    //! Tag type for iterator-vs-integer overload dispatch in assign().
    template<bool B> class is_integer_tag;
    //! assign integer items by copying when arguments are treated as iterators. See C++ Standard 2003 23.1.1p9
    // NOTE(review): the `template<class I>` headers for the two overloads below are
    // not visible in this excerpt.
    void internal_assign_range(I first, I last, is_integer_tag<true> *) {
        // `first` is the count and `last` is the fill value in this overload.
        internal_assign_n(static_cast<size_type>(first), &static_cast<T&>(last));
    //! inline proxy assign by iterators
    void internal_assign_range(I first, I last, is_integer_tag<false> *) {
        internal_assign_iterators(first, last);
    //! assign by iterators
    void internal_assign_iterators(I first, I last);
    //these functions are marked __TBB_EXPORTED_FUNC as they are called from within the library
    //! Construct n instances of T, starting at "begin".
    static void __TBB_EXPORTED_FUNC initialize_array( void* begin, const void*, size_type n );
    //! Copy-construct n instances of T, starting at "begin".
    static void __TBB_EXPORTED_FUNC initialize_array_by( void* begin, const void* src, size_type n );
    //! Copy-construct n instances of T by copying single element pointed to by src, starting at "dst".
    static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n );
#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
    //! Either copy- or move-construct n instances of T, starting at "dst", from the corresponding elements of the src array.
    static void __TBB_EXPORTED_FUNC move_array_if_noexcept( void* dst, const void* src, size_type n );
#endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT
#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Move-construct n instances of T, starting at "dst" by copying according element of src array.
    static void __TBB_EXPORTED_FUNC move_array( void* dst, const void* src, size_type n );
    //! Move-assign (using operator=) n instances of T, starting at "dst" by assigning according element of src array.
    static void __TBB_EXPORTED_FUNC move_assign_array( void* dst, const void* src, size_type n );
    //! Copy-construct n instances of T, starting at "dst" by iterator range of [p_type_erased_iterator, p_type_erased_iterator+n).
    template<typename Iterator>
    static void __TBB_EXPORTED_FUNC copy_range( void* dst, const void* p_type_erased_iterator, size_type n );
    //! Assign (using operator=) n instances of T, starting at "dst" by assigning according element of src array.
    static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n );
    //! Destroy n instances of T, starting at "begin".
    static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n );
    //! Exception-aware helper class for filling a segment by exception-danger operators of user class
    class internal_loop_guide : internal::no_copy {
        // NOTE(review): declarations of the trip count `n` and progress index `i`
        // (and any access specifiers) are not visible in this excerpt.
        const pointer array;  // first destination slot
        static const T* as_const_pointer(const void *ptr) { return static_cast<const T *>(ptr); }
        static T* as_pointer(const void *src) { return static_cast<T*>(const_cast<void *>(src)); }
        //! Record destination and trip count; `i` tracks how many elements were constructed.
        internal_loop_guide(size_type ntrials, void *ptr)
            : array(as_pointer(ptr)), n(ntrials), i(0) {}
        void init() { for(; i < n; ++i) new( &array[i] ) T(); }                                       // default-construct each slot
        void init(const void *src) { for(; i < n; ++i) new( &array[i] ) T(*as_const_pointer(src)); } // fill every slot from *src
        void copy(const void *src) { for(; i < n; ++i) new( &array[i] ) T(as_const_pointer(src)[i]); } // element-wise copy-construct
        void assign(const void *src) { for(; i < n; ++i) array[i] = as_const_pointer(src)[i]; }        // element-wise operator=
#if __TBB_CPP11_RVALUE_REF_PRESENT
        void move_assign(const void *src) { for(; i < n; ++i) array[i] = std::move(as_pointer(src)[i]); }
        void move_construct(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move(as_pointer(src)[i]) ); }
#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
        void move_construct_if_noexcept(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move_if_noexcept(as_pointer(src)[i]) ); }
#endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT
        //TODO: rename to construct_range
        template<class I> void iterate(I &src) { for(; i < n; ++i, ++src) new( &array[i] ) T( *src ); }
        ~internal_loop_guide() {
            if(i < n) {// if an exception was raised, fill the rest of items with zeros
                internal::handle_unconstructed_elements(array+i, n-i);
    //! RAII helper that claims one element slot and repairs it if construction fails.
    struct push_back_helper : internal::no_copy{
        struct element_construction_guard : internal::no_copy{
            // Guards a single raw slot: unless dismissed, the destructor marks the
            // slot as unconstructed so readers never see a half-built element.
            element_construction_guard(pointer an_element) : element (an_element){}
            void dismiss(){ element = NULL; }  // construction succeeded; nothing to repair
            ~element_construction_guard(){
                // NOTE(review): a null check of `element` preceding this call is not
                // visible in this excerpt.
                internal::handle_unconstructed_elements(element, 1);
        concurrent_vector & v;          // the vector being appended to
        element_construction_guard g;   // guard for the freshly claimed slot
        // NOTE(review): the member declaration of index `k` and the `v(vector),`
        // initializer line are not visible in this excerpt.
        push_back_helper(concurrent_vector & vector) :
            g (static_cast<T*>(v.internal_push_back(sizeof(T),k)))
        pointer internal_push_back_result(){ return g.element;}
        iterator return_iterator_and_dismiss(){
            return iterator(v, k, g.element);
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning (push)
#pragma warning (disable: 4701) // potentially uninitialized local variable "old"
// Compacts the vector: joins segments into fewer larger ones, freeing unused memory.
template<typename T, class A>
void concurrent_vector<T, A>::shrink_to_fit() {
    internal_segments_table old;  // receives the previous segment table if compaction occurred
    // Prefer move_array_if_noexcept when available, so that types with throwing
    // moves fall back to copying during compaction.
    internal_array_op2 copy_or_move_array =
#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
        &move_array_if_noexcept
    // NOTE(review): the #else branch (selecting &copy_array), #endif, and the
    // __TBB_TRY line are not visible in this excerpt.
    if( internal_compact( sizeof(T), &old, &destroy_array, copy_or_move_array ) )
        internal_free_segments( old.table, pointers_per_long_table, old.first_block ); // free joined and unnecessary segments
    } __TBB_CATCH(...) {
        if( old.first_block ) // free segment allocated for compacting. Only for support of exceptions in ctor of user T[ype]
            internal_free_segments( old.table, 1, old.first_block );
// Releases segment storage: frees segments [first_block, k) individually, then the
// single joint allocation that backs the first first_block segments (held in table[0]).
template<typename T, class A>
void concurrent_vector<T, A>::internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block) {
    // Free tail segments, highest index first.
    while( k > first_block ) {
        // NOTE(review): the `--k;` line preceding this load is not visible in this excerpt.
        segment_value_t segment_value = table[k].load<relaxed>();
        table[k].store<relaxed>(segment_not_used());
        if( segment_value == segment_allocated() ) // check for correct segment pointer
            this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(k) );
    // The first first_block segments share one allocation recorded in table[0].
    segment_value_t segment_value = table[0].load<relaxed>();
    if( segment_value == segment_allocated() ) {
        __TBB_ASSERT( first_block > 0, NULL );
        while(k > 0) table[--k].store<relaxed>(segment_not_used());
        this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(first_block) );
// Unchecked element access: asserts (debug only) instead of throwing.
template<typename T, class A>
T& concurrent_vector<T, A>::internal_subscript( size_type index ) const {
    //TODO: unify both versions of internal_subscript
    __TBB_ASSERT( index < my_early_size, "index out of bounds" );
    size_type j = index;
    // Maps j to its segment index k and rebases j to an offset within that segment.
    segment_index_t k = segment_base_index_of( j );
    __TBB_ASSERT( my_segment.load<acquire>() != my_storage || k < pointers_per_short_table, "index is being allocated" );
    //no need in load with acquire (load<acquire>) since thread works in own space or gets
    //the information about added elements via some form of external synchronization
    //TODO: why not make a load of my_segment relaxed as well ?
    //TODO: add an assertion that my_segment[k] is properly aligned to please ITT
    segment_value_t segment_value = my_segment[k].template load<relaxed>();
    __TBB_ASSERT( segment_value != segment_allocation_failed(), "the instance is broken by bad allocation. Use at() instead" );
    __TBB_ASSERT( segment_value != segment_not_used(), "index is being allocated" );
    return (( segment_value.pointer<T>()))[j];
// Checked element access: throws instead of asserting; backs at().
template<typename T, class A>
T& concurrent_vector<T, A>::internal_subscript_with_exceptions( size_type index ) const {
    if( index >= my_early_size )
        internal::throw_exception(internal::eid_out_of_range); // throw std::out_of_range
    size_type j = index;
    segment_index_t k = segment_base_index_of( j );
    //TODO: refactor this condition into separate helper function, e.g. fits_into_small_table
    if( my_segment.load<acquire>() == my_storage && k >= pointers_per_short_table )
        internal::throw_exception(internal::eid_segment_range_error); // throw std::range_error
    // no need in load with acquire (load<acquire>) since thread works in own space or gets
    //the information about added elements via some form of external synchronization
    //TODO: why not make a load of my_segment relaxed as well ?
    //TODO: add an assertion that my_segment[k] is properly aligned to please ITT
    segment_value_t segment_value = my_segment[k].template load<relaxed>();
    if( segment_value != segment_allocated() ) // check for correct segment pointer
        internal::throw_exception(internal::eid_index_range_error); // throw std::range_error
    return (segment_value.pointer<T>())[j];
// Constructs elements from [first, last) into a (previously empty) vector,
// filling segment by segment.
template<typename T, class A> template<class I>
void concurrent_vector<T, A>::internal_assign_iterators(I first, I last) {
    __TBB_ASSERT(my_early_size == 0, NULL);  // precondition: vector must be empty
    size_type n = std::distance(first, last);
    internal_reserve(n, sizeof(T), max_size());
    segment_index_t k = 0;
    //TODO: unify segment iteration code with concurrent_base_v3::helper
    size_type sz = segment_size( my_first_block );
        // NOTE(review): the surrounding loop control (and any adjustment of n between
        // segments) is not visible in this excerpt.
        internal_loop_guide loop(sz, my_segment[k].template load<relaxed>().template pointer<void>());
        loop.iterate(first);
        // Segment 0 stands in for the whole first block; afterwards segments double in size.
        if( !k ) k = my_first_block;
        else { ++k; sz <<= 1; }
        // Construct the remaining elements into the final segment.
        internal_loop_guide loop(n, my_segment[k].template load<relaxed>().template pointer<void>());
        loop.iterate(first);
// Default-construct n elements at begin (second argument is unused for this op).
template<typename T, class A>
void concurrent_vector<T, A>::initialize_array( void* begin, const void *, size_type n ) {
    internal_loop_guide loop(n, begin); loop.init();
// Copy-construct n elements at begin, every one from the single element *src.
template<typename T, class A>
void concurrent_vector<T, A>::initialize_array_by( void* begin, const void *src, size_type n ) {
    internal_loop_guide loop(n, begin); loop.init(src);
// Copy-construct n elements at dst from the corresponding elements of the src array.
template<typename T, class A>
void concurrent_vector<T, A>::copy_array( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.copy(src);
#if __TBB_CPP11_RVALUE_REF_PRESENT
// Move-construct n elements at dst from the corresponding elements of src.
template<typename T, class A>
void concurrent_vector<T, A>::move_array( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.move_construct(src);
// Move-assign (operator=) n elements at dst from the corresponding elements of src.
template<typename T, class A>
void concurrent_vector<T, A>::move_assign_array( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.move_assign(src);
#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
// Moves when T's move constructor is noexcept, otherwise copies (std::move_if_noexcept).
template<typename T, class A>
void concurrent_vector<T, A>::move_array_if_noexcept( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.move_construct_if_noexcept(src);
#endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT
// Copy-construct n elements at dst by advancing a type-erased iterator.
template<typename T, class A>
template<typename I>
void concurrent_vector<T, A>::copy_range( void* dst, const void* p_type_erased_iterator, size_type n ){
    // const_cast is required because the type-erased callback signature passes the
    // iterator as const void*, yet the iterator must be mutable to be advanced.
    I & iterator ((*const_cast<I*>(static_cast<const I*>(p_type_erased_iterator))));
    internal_loop_guide loop(n, dst); loop.iterate(iterator);
// Copy-assign (operator=) n elements at dst from the corresponding elements of src.
template<typename T, class A>
void concurrent_vector<T, A>::assign_array( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.assign(src);
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
// Workaround for overzealous compiler warning
#pragma warning (push)
#pragma warning (disable: 4189)
// Destroy n elements starting at begin, in reverse order of construction.
template<typename T, class A>
void concurrent_vector<T, A>::destroy_array( void* begin, size_type n ) {
    T* array = static_cast<T*>(begin);
    for( size_type j=n; j>0; --j )
        array[j-1].~T(); // destructors are supposed to not throw any exceptions
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning (pop)
#endif // warning 4189 is back
1322 // concurrent_vector's template functions
1323 template<typename T, class A1, class A2>
1324 inline bool operator==(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b) {
1325 //TODO: call size() only once per vector (in operator==)
1326 // Simply: return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin());
1327 if(a.size() != b.size()) return false;
1328 typename concurrent_vector<T, A1>::const_iterator i(a.begin());
1329 typename concurrent_vector<T, A2>::const_iterator j(b.begin());
1330 for(; i != a.end(); ++i, ++j)
1331 if( !(*i == *j) ) return false;
1335 template<typename T, class A1, class A2>
1336 inline bool operator!=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1337 { return !(a == b); }
1339 template<typename T, class A1, class A2>
1340 inline bool operator<(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1341 { return (std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end())); }
1343 template<typename T, class A1, class A2>
1344 inline bool operator>(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1347 template<typename T, class A1, class A2>
1348 inline bool operator<=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1349 { return !(b < a); }
1351 template<typename T, class A1, class A2>
1352 inline bool operator>=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1353 { return !(a < b); }
1355 template<typename T, class A>
1356 inline void swap(concurrent_vector<T, A> &a, concurrent_vector<T, A> &b)
1361 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1362 #pragma warning (pop)
1363 #endif // warning 4267,4127 are back
1365 #endif /* __TBB_concurrent_vector_H */