Copyright 2005-2010 Intel Corporation. All Rights Reserved.

This file is part of Threading Building Blocks.

Threading Building Blocks is free software; you can redistribute it
and/or modify it under the terms of the GNU General Public License
version 2 as published by the Free Software Foundation.

Threading Building Blocks is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Threading Building Blocks; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

As a special exception, you may use this file as part of a free software
library without restriction. Specifically, if other files instantiate
templates or use macros or inline functions from this file, or you compile
this file and link it with other files to produce an executable, this
file does not by itself cause the resulting executable to be covered by
the GNU General Public License. This exception does not however
invalidate any other reasons why the executable file might be covered by
the GNU General Public License.
#ifndef __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H

#include "tbb_stddef.h"

#if !TBB_USE_EXCEPTIONS && _MSC_VER
// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
#pragma warning (push)
#pragma warning (disable: 4530)

#include <utility>      // Need std::pair
#include <cstring>      // Need std::memset

#if !TBB_USE_EXCEPTIONS && _MSC_VER

#include "cache_aligned_allocator.h"
#include "tbb_allocator.h"
#include "spin_rw_mutex.h"
#include "aligned_space.h"
#include "tbb_exception.h"
#include "_concurrent_unordered_internal.h" // Need tbb_hasher
#if TBB_USE_PERFORMANCE_WARNINGS
    //! ITT instrumented routine that loads pointer from location pointed to by src.
    void* __TBB_EXPORTED_FUNC itt_load_pointer_with_acquire_v3( const void* src );
    //! ITT instrumented routine that stores src into location pointed to by dst.
    void __TBB_EXPORTED_FUNC itt_store_pointer_with_release_v3( void* dst, void* src );
    //! Routine that loads pointer from location pointed to by src without causing ITT to report a race.
    void* __TBB_EXPORTED_FUNC itt_load_pointer_v3( const void* src );
//! hash_compare that is default argument for concurrent_hash_map
template<typename Key>
struct tbb_hash_compare {
    static size_t hash( const Key& a ) { return tbb_hasher(a); }
    static bool equal( const Key& a, const Key& b ) { return a == b; }
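// A user-supplied HashCompare passed as the third template argument must provide the
// same two operations shown above. A hypothetical sketch for a custom key type follows
// (MyKey and its 'id' member are assumptions for illustration, not part of this header):
//
//     struct MyKeyHashCompare {
//         static size_t hash( const MyKey& k ) { return tbb::tbb_hasher( k.id ); }
//         static bool equal( const MyKey& a, const MyKey& b ) { return a.id == b.id; }
//     };
//     typedef tbb::concurrent_hash_map<MyKey, int, MyKeyHashCompare> my_table_t;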
namespace interface4 {
    template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<Key, T> > >
    class concurrent_hash_map;

    //! Type of a hash code.
    typedef size_t hashcode_t;
    struct hash_map_node_base : tbb::internal::no_copy {
        typedef spin_rw_mutex mutex_t;
        //! Scoped lock type for mutex
        typedef mutex_t::scoped_lock scoped_t;
        //! Next node in chain
        hash_map_node_base *next;
    //! Incompleteness flag value
    static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
    //! Rehashed empty bucket flag
    static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));
    //! base class of concurrent_hash_map
    class hash_map_base {
        typedef size_t size_type;
        //! Type of a hash code.
        typedef size_t hashcode_t;
        //! Segment index type
        typedef size_t segment_index_t;
        typedef hash_map_node_base node_base;
        struct bucket : tbb::internal::no_copy {
            //! Mutex type for buckets
            typedef spin_rw_mutex mutex_t;
            //! Scoped lock type for mutex
            typedef mutex_t::scoped_lock scoped_t;
            node_base *node_list;
125 static size_type const embedded_block = 1;
126 //! Count of segments in the first block
127 static size_type const embedded_buckets = 1<<embedded_block;
128 //! Count of segments in the first block
129 static size_type const first_block = 8; //including embedded_block. perfect with bucket size 16, so the allocations are power of 4096
130 //! Size of a pointer / table size
131 static size_type const pointers_per_table = sizeof(segment_index_t) * 8; // one segment per bit
133 typedef bucket *segment_ptr_t;
134 //! Segment pointers table type
135 typedef segment_ptr_t segments_table_t[pointers_per_table];
136 //! Hash mask = sum of allocated segments sizes - 1
137 atomic<hashcode_t> my_mask;
138 //! Segment pointers table. Also prevents false sharing between my_mask and my_size
139 segments_table_t my_table;
140 //! Size of container in stored items
141 atomic<size_type> my_size; // It must be in separate cache line from my_mask due to performance effects
143 bucket my_embedded_segment[embedded_buckets];
            std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t) // 32*4=128 or 64*8=512
                + sizeof(my_size) + sizeof(my_mask)  // 4+4 or 8+8
                + embedded_buckets*sizeof(bucket) ); // n*8 or n*16
            for( size_type i = 0; i < embedded_block; i++ ) // fill the table
                my_table[i] = my_embedded_segment + segment_base(i);
            my_mask = embedded_buckets - 1;
            __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
        //! @return segment index of given index in the array
        static segment_index_t segment_index_of( size_type index ) {
            return segment_index_t( __TBB_Log2( index|1 ) );
        //! @return the first array index of given segment
        static segment_index_t segment_base( segment_index_t k ) {
            return (segment_index_t(1)<<k & ~segment_index_t(1));
        //! @return segment size except for @arg k == 0
        static size_type segment_size( segment_index_t k ) {
            return size_type(1)<<k; // fake value for k==0
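        // Worked example of the segment arithmetic above (derived from the code, not
        // from the original comments): bucket index 5 gives
        // segment_index_of(5) == __TBB_Log2(5|1) == 2, segment_base(2) == 4 and
        // segment_size(2) == 4, so bucket 5 is element 5-4 == 1 of my_table[2];
        // indices 0 and 1 fall into segment 0, which aliases the embedded buckets.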
        //! @return true if @arg ptr is a valid pointer
        static bool is_valid( void *ptr ) {
            return reinterpret_cast<size_t>(ptr) > size_t(63);
        //! Initialize buckets
        static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
            if( is_initial ) std::memset(ptr, 0, sz*sizeof(bucket) );
            else for(size_type i = 0; i < sz; i++, ptr++) {
                *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
                ptr->node_list = rehash_req;
        //! Add node @arg n to bucket @arg b
        static void add_to_bucket( bucket *b, node_base *n ) {
            __TBB_ASSERT(b->node_list != rehash_req, NULL);
            n->next = b->node_list;
            b->node_list = n; // it's under lock and the flag is set
        //! Exception safety helper
        struct enable_segment_failsafe {
            segment_ptr_t *my_segment_ptr;
            enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}
            ~enable_segment_failsafe() {
                if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
        void enable_segment( segment_index_t k, bool is_initial = false ) {
            __TBB_ASSERT( k, "Zero segment must be embedded" );
            enable_segment_failsafe watchdog( my_table, k );
            cache_aligned_allocator<bucket> alloc;
            __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
            if( k >= first_block ) {
                sz = segment_size( k );
                segment_ptr_t ptr = alloc.allocate( sz );
                init_buckets( ptr, sz, is_initial );
#if TBB_USE_THREADING_TOOLS
                // TODO: actually, fence and notification are unnecessary here and below
                itt_store_pointer_with_release_v3( my_table + k, ptr );
                my_table[k] = ptr; // my_mask has release fence
                sz <<= 1; // double it to get entire capacity of the container
            } else { // the first block
                __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
                sz = segment_size( first_block );
                segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets );
                init_buckets( ptr, sz - embedded_buckets, is_initial );
                ptr -= segment_base(embedded_block);
                for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
#if TBB_USE_THREADING_TOOLS
                    itt_store_pointer_with_release_v3( my_table + i, ptr + segment_base(i) );
                    my_table[i] = ptr + segment_base(i);
#if TBB_USE_THREADING_TOOLS
            itt_store_pointer_with_release_v3( &my_mask, (void*)(sz-1) );
            watchdog.my_segment_ptr = 0;
        //! Get bucket by (masked) hashcode
        bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere?
            segment_index_t s = segment_index_of( h );
            h -= segment_base(s);
            segment_ptr_t seg = my_table[s];
            __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
        // internal serial rehashing helper
        void mark_rehashed_levels( hashcode_t h ) throw () {
            segment_index_t s = segment_index_of( h );
            while( segment_ptr_t seg = my_table[++s] )
                if( seg[h].node_list == rehash_req ) {
                    seg[h].node_list = empty_rehashed;
                    mark_rehashed_levels( h + segment_base(s) );
        //! Check for mask race
        // Splitting into two functions should help inlining
        inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
            hashcode_t m_now, m_old = m;
#if TBB_USE_THREADING_TOOLS
            m_now = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
            return check_rehashing_collision( h, m_old, m = m_now );
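        // Paraphrasing the logic above and below: the caller hashed the key against an
        // older value of my_mask; if another thread grew the table in the meantime, the
        // key may now belong to a different bucket. When the masked hash changes,
        // check_rehashing_collision() decides whether the caller has to restart its
        // lookup against the new mask.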
        //! Process mask race, check for rehashing collision
        bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
            __TBB_ASSERT(m_old != m, NULL); // TODO?: m arg could be optimized out by passing h = h&m
            if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
                // condition above proves that 'h' has some other bits set beside 'm_old'
                // find next applicable mask after m_old //TODO: look at bsl instruction
                for( ++m_old; !(h & m_old); m_old <<= 1 ) // at most a few rounds depending on the first block size
                m_old = (m_old<<1) - 1; // get full mask from a bit
                __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
                // check whether it is rehashing/ed
#if TBB_USE_THREADING_TOOLS
                if( itt_load_pointer_with_acquire_v3(&( get_bucket(h & m_old)->node_list )) != rehash_req )
                if( __TBB_load_with_acquire(get_bucket( h & m_old )->node_list) != rehash_req )
        //! Insert a node and check for load factor. @return segment index to enable.
        segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
            size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
            add_to_bucket( b, n );
            if( sz >= mask ) { // TODO: add custom load_factor
                segment_index_t new_seg = segment_index_of( mask+1 );
                __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
#if TBB_USE_THREADING_TOOLS
                if( !itt_load_pointer_v3(my_table+new_seg)
                if( !my_table[new_seg]
                  && __TBB_CompareAndSwapW(&my_table[new_seg], 2, 0) == 0 )
                    return new_seg; // The value must be processed
        //! Prepare enough segments for number of buckets
        void reserve(size_type buckets) {
            if( !buckets-- ) return;
            bool is_initial = !my_size;
            for( size_type m = my_mask; buckets > m; m = my_mask )
                enable_segment( segment_index_of( m+1 ), is_initial );
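            // For example (reading the loop above together with the constants defined
            // earlier): reserve(100) decrements to 99 and keeps enabling segments until
            // my_mask covers that value; with first_block == 8 the first enable_segment()
            // call publishes the whole first block, leaving my_mask == 255 (256 buckets).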
        //! Swap hash_map_bases
        void internal_swap(hash_map_base &table) {
            std::swap(this->my_mask, table.my_mask);
            std::swap(this->my_size, table.my_size);
            for(size_type i = 0; i < embedded_buckets; i++)
                std::swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
            for(size_type i = embedded_block; i < pointers_per_table; i++)
                std::swap(this->my_table[i], table.my_table[i]);
    template<typename Iterator>
    class hash_map_range;

    //! Meets requirements of a forward iterator for STL
    /** Value is either the T or const T type of the container.
        @ingroup containers */
    template<typename Container, typename Value>
    class hash_map_iterator
        : public std::iterator<std::forward_iterator_tag,Value>
        typedef Container map_type;
        typedef typename Container::node node;
        typedef hash_map_base::node_base node_base;
        typedef hash_map_base::bucket bucket;
        template<typename C, typename T, typename U>
        friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
        template<typename C, typename T, typename U>
        friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
        template<typename C, typename T, typename U>
        friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
        template<typename C, typename U>
        friend class hash_map_iterator;
        friend class hash_map_range;
        void advance_to_next_bucket() { // TODO?: refactor to iterator_base class
            size_t k = my_index+1;
            while( my_bucket && k <= my_map->my_mask ) {
                // Following test uses 2's-complement wizardry
                if( k & (k-2) ) // not the beginning of a segment
                else my_bucket = my_map->get_bucket( k );
                my_node = static_cast<node*>( my_bucket->node_list );
                if( hash_map_base::is_valid(my_node) ) {
                    my_index = k; return;
            my_bucket = 0; my_node = 0; my_index = k; // the end
#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
        template<typename Key, typename T, typename HashCompare, typename A>
        friend class interface4::concurrent_hash_map;
    public: // workaround
        //! concurrent_hash_map over which we are iterating.
        const Container *my_map;
        //! Index in hash table for current item
        //! Pointer to bucket
        const bucket *my_bucket;
        //! Pointer to node that has current item
        hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );
        //! Construct undefined iterator
        hash_map_iterator() {}
        hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :
            my_map(other.my_map),
            my_index(other.my_index),
            my_bucket(other.my_bucket),
            my_node(other.my_node)
        Value& operator*() const {
            __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
            return my_node->item;
        Value* operator->() const {return &operator*();}
        hash_map_iterator& operator++();
        Value* operator++(int) {
            Value* result = &operator*();
    template<typename Container, typename Value>
    hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
        my_node( static_cast<node*>(n) )
        if( b && !hash_map_base::is_valid(n) )
            advance_to_next_bucket();
    template<typename Container, typename Value>
    hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
        my_node = static_cast<node*>( my_node->next );
        if( !my_node ) advance_to_next_bucket();
    template<typename Container, typename T, typename U>
    bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
        return i.my_node == j.my_node && i.my_map == j.my_map;
    template<typename Container, typename T, typename U>
    bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
        return i.my_node != j.my_node || i.my_map != j.my_map;
    //! Range class used with concurrent_hash_map
    /** @ingroup containers */
    template<typename Iterator>
    class hash_map_range {
        typedef typename Iterator::map_type map_type;
        mutable Iterator my_midpoint;
        //! Set my_midpoint to point approximately half way between my_begin and my_end.
        void set_midpoint() const;
        template<typename U> friend class hash_map_range;
        //! Type for size of a range
        typedef std::size_t size_type;
        typedef typename Iterator::value_type value_type;
        typedef typename Iterator::reference reference;
        typedef typename Iterator::difference_type difference_type;
        typedef Iterator iterator;
        //! True if range is empty.
        bool empty() const {return my_begin==my_end;}
        //! True if range can be partitioned into two subranges.
        bool is_divisible() const {
            return my_midpoint!=my_end;
        hash_map_range( hash_map_range& r, split ) :
            my_grainsize(r.my_grainsize)
            r.my_end = my_begin = r.my_midpoint;
            __TBB_ASSERT( !empty(), "Splitting despite the range not being divisible" );
            __TBB_ASSERT( !r.empty(), "Splitting despite the range not being divisible" );
        hash_map_range( hash_map_range<U>& r) :
            my_begin(r.my_begin),
            my_midpoint(r.my_midpoint),
            my_grainsize(r.my_grainsize)
        //! Init range with iterators and grainsize specified
        hash_map_range( const Iterator& begin_, const Iterator& end_, size_type grainsize_ = 1 ) :
            my_grainsize(grainsize_)
            if(!my_end.my_index && !my_end.my_bucket) // end
                my_end.my_index = my_end.my_map->my_mask + 1;
            __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
        //! Init range with container and grainsize specified
        hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
            my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
            my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
            my_grainsize( grainsize_ )
            __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
        const Iterator& begin() const {return my_begin;}
        const Iterator& end() const {return my_end;}
        //! The grain size for this range.
        size_type grainsize() const {return my_grainsize;}
    template<typename Iterator>
    void hash_map_range<Iterator>::set_midpoint() const {
        // Split by groups of nodes
        size_t m = my_end.my_index-my_begin.my_index;
        if( m > my_grainsize ) {
            m = my_begin.my_index + m/2u;
            hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
            my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
            my_midpoint = my_end;
        __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
            "my_begin is after my_midpoint" );
        __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
            "my_midpoint is after my_end" );
        __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
            "[my_begin, my_midpoint) range should not be empty" );
    //! Unordered map from Key to T.
    /** concurrent_hash_map is an associative container with concurrent access.

        The class meets all Container Requirements from the C++ Standard (see ISO/IEC 14882:2003(E), clause 23.1).

        @par Exception Safety
        - The hash function is not permitted to throw an exception. The user-defined types Key and T are forbidden from throwing an exception in their destructors.
        - If an exception happens during an insert() operation, it has no effect (unless the exception is raised by the HashCompare::hash() function during grow_segment).
        - If an exception happens during an operator=() operation, the container may hold only a part of the source items, and the methods size() and empty() can return wrong results.

        @par Changes since TBB 2.1
        - Replaced internal algorithm and data structure. Patent is pending.
        - Added bucket count argument to the constructor
        @par Changes since TBB 2.0
        - Fixed exception-safety
        - Added template argument for allocator
        - Added allocator argument in constructors
        - Added constructor from a range of iterators
        - Added several new overloaded insert() methods
        - Added get_allocator()
        - Added overloaded erase(accessor &) and erase(const_accessor&)
        - Added equal_range() [const]
        - Added [const_]pointer, [const_]reference, and allocator_type types
        - Added global functions: operator==(), operator!=(), and swap()

        @ingroup containers */
    template<typename Key, typename T, typename HashCompare, typename Allocator>
    class concurrent_hash_map : protected internal::hash_map_base {
        template<typename Container, typename Value>
        friend class internal::hash_map_iterator;
        friend class internal::hash_map_range;
        typedef Key key_type;
        typedef T mapped_type;
        typedef std::pair<const Key,T> value_type;
        typedef hash_map_base::size_type size_type;
        typedef ptrdiff_t difference_type;
        typedef value_type *pointer;
        typedef const value_type *const_pointer;
        typedef value_type &reference;
        typedef const value_type &const_reference;
        typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
        typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
        typedef internal::hash_map_range<iterator> range_type;
        typedef internal::hash_map_range<const_iterator> const_range_type;
        typedef Allocator allocator_type;
        friend class const_accessor;
        typedef typename Allocator::template rebind<node>::other node_allocator_type;
        node_allocator_type my_allocator;
        HashCompare my_hash_compare;
        struct node : public node_base {
            node( const Key &key ) : item(key, T()) {}
            node( const Key &key, const T &t ) : item(key, t) {}
            // exception-safe allocation, see C++ Standard 2003, clause 5.3.4p17
            void *operator new( size_t /*size*/, node_allocator_type &a ) {
                void *ptr = a.allocate(1);
                    tbb::internal::throw_exception(tbb::internal::eid_bad_alloc);
            // match placement-new form above to be called if exception thrown in constructor
            void operator delete( void *ptr, node_allocator_type &a ) {return a.deallocate(static_cast<node*>(ptr),1); }
        void delete_node( node_base *n ) {
            my_allocator.destroy( static_cast<node*>(n) );
            my_allocator.deallocate( static_cast<node*>(n), 1);
        node *search_bucket( const key_type &key, bucket *b ) const {
            node *n = static_cast<node*>( b->node_list );
            while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) )
                n = static_cast<node*>( n->next );
            __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
        //! A bucket accessor finds a bucket, optionally rehashes it, acquires a lock on it, and provides access to it
        class bucket_accessor : public bucket::scoped_t {
            bool my_is_writer; // TODO: use it from base type
            bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
            //! find a bucket by masked hashcode, optionally rehash, and acquire the lock
            inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
                my_b = base->get_bucket( h );
#if TBB_USE_THREADING_TOOLS
                // TODO: actually, notification is unnecessary here, just hiding double-check
                if( itt_load_pointer_with_acquire_v3(&my_b->node_list) == internal::rehash_req
                if( __TBB_load_with_acquire(my_b->node_list) == internal::rehash_req
                    && try_acquire( my_b->mutex, /*write=*/true ) )
                    if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); // recursive rehashing
                else bucket::scoped_t::acquire( my_b->mutex, /*write=*/my_is_writer = writer );
                __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);
            //! check whether bucket is locked for write
            bool is_writer() { return my_is_writer; }
            //! get bucket pointer
            bucket *operator() () { return my_b; }
            // TODO: optimize out
            bool upgrade_to_writer() { my_is_writer = true; return bucket::scoped_t::upgrade_to_writer(); }
        // TODO refactor to hash_base
        void rehash_bucket( bucket *b_new, const hashcode_t h ) {
            __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
            __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
            __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed
            hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
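            // Worked example of the parent computation above (derived from the code):
            // for h == 13 (binary 1101) __TBB_Log2 gives 3, so mask == 7 and the parent
            // bucket that may still hold this key is 13 & 7 == 5; applying the same step
            // to 5 yields its parent 1, one of the embedded buckets.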
            bucket_accessor b_old( this, h & mask );
            mask = (mask<<1) | 1; // get full mask for new bucket
            __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
            for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
                hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->item.first );
                hashcode_t bmask = h & (mask>>1);
                bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
                __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
                if( (c & mask) == h ) {
                    if( !b_old.is_writer() )
                        if( !b_old.upgrade_to_writer() ) {
                            goto restart; // node ptr can be invalid due to concurrent erase
                    *p = n->next; // exclude from b_old
                    add_to_bucket( b_new, n );
                } else p = &n->next; // iterate to next item
        //! Combines data access, locking, and garbage collection.
        class const_accessor {
            friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
            friend class accessor;
            void operator=( const accessor & ) const; // Deny access
            const_accessor( const accessor & );       // Deny access
            typedef const typename concurrent_hash_map::value_type value_type;
            //! True if result is empty.
            bool empty() const {return !my_node;}
            //! Return reference to associated value in hash table.
            const_reference operator*() const {
                __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
                return my_node->item;
            //! Return pointer to associated value in hash table.
            const_pointer operator->() const {
            //! Create empty result
            const_accessor() : my_node(NULL) {}
            //! Destroy result after releasing the underlying reference.
                my_node = NULL; // my_lock.release() is called in scoped_lock destructor
            typename node::scoped_t my_lock;
        //! Allows write access to elements and combines data access, locking, and garbage collection.
        class accessor: public const_accessor {
            typedef typename concurrent_hash_map::value_type value_type;
            //! Return reference to associated value in hash table.
            reference operator*() const {
                __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
                return this->my_node->item;
            //! Return pointer to associated value in hash table.
            pointer operator->() const {
        //! Construct empty table.
        concurrent_hash_map(const allocator_type &a = allocator_type())
            : internal::hash_map_base(), my_allocator(a)
        //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
        concurrent_hash_map(size_type n, const allocator_type &a = allocator_type())
        concurrent_hash_map( const concurrent_hash_map& table, const allocator_type &a = allocator_type())
            : internal::hash_map_base(), my_allocator(a)
            internal_copy(table);
        //! Construction with copying iteration range and given allocator instance
        concurrent_hash_map(I first, I last, const allocator_type &a = allocator_type())
            reserve( std::distance(first, last) ); // TODO: load_factor?
            internal_copy(first, last);
        concurrent_hash_map& operator=( const concurrent_hash_map& table ) {
            internal_copy(table);
        //! Rehashes and optionally resizes the whole table.
        /** Useful to optimize performance before or after concurrent operations.
            Also enables the use of the concurrent methods find() and count() in a serial context. */
        void rehash(size_type n = 0);
        //! Clear table and destroy it.
        ~concurrent_hash_map() { clear(); }
        //------------------------------------------------------------------------
        // Parallel algorithm support
        //------------------------------------------------------------------------
        range_type range( size_type grainsize=1 ) {
            return range_type( *this, grainsize );
        const_range_type range( size_type grainsize=1 ) const {
            return const_range_type( *this, grainsize );
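        // Illustrative usage sketch (not part of the original header): traversing the
        // table in parallel through its range type, assuming <int,std::string> content,
        // a user-supplied Body functor, and that tbb/parallel_for.h is included:
        //
        //     typedef tbb::concurrent_hash_map<int,std::string> table_t;
        //     struct Body {
        //         void operator()( const table_t::range_type &r ) const {
        //             for( table_t::iterator i = r.begin(); i != r.end(); ++i )
        //                 use( i->first, i->second ); // 'use' is a placeholder
        //         }
        //     };
        //     tbb::parallel_for( table.range(), Body() );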
        //------------------------------------------------------------------------
        // STL support - not thread-safe methods
        //------------------------------------------------------------------------
        iterator begin() {return iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);}
        iterator end() {return iterator(*this,0,0,0);}
        const_iterator begin() const {return const_iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);}
        const_iterator end() const {return const_iterator(*this,0,0,0);}
        std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range(key, end()); }
        std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range(key, end()); }
        //! Number of items in table.
        size_type size() const { return my_size; }
        //! True if size()==0.
        bool empty() const { return my_size == 0; }
        //! Upper bound on size.
        size_type max_size() const {return (~size_type(0))/sizeof(node);}
        //! Returns the current number of buckets
        size_type bucket_count() const { return my_mask+1; }
        //! return allocator object
        allocator_type get_allocator() const { return this->my_allocator; }
        //! swap two instances. Iterators are invalidated
        void swap(concurrent_hash_map &table);
        //------------------------------------------------------------------------
        // concurrent map operations
        //------------------------------------------------------------------------
        //! Return count of items (0 or 1)
        size_type count( const Key &key ) const {
            return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false );
        //! Find item and acquire a read lock on the item.
        /** Return true if item is found, false otherwise. */
        bool find( const_accessor &result, const Key &key ) const {
            return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false );
        //! Find item and acquire a write lock on the item.
        /** Return true if item is found, false otherwise. */
        bool find( accessor &result, const Key &key ) {
            return lookup(/*insert*/false, key, NULL, &result, /*write=*/true );
        //! Insert item (if not already present) and acquire a read lock on the item.
        /** Returns true if item is new. */
        bool insert( const_accessor &result, const Key &key ) {
            return lookup(/*insert*/true, key, NULL, &result, /*write=*/false );
        //! Insert item (if not already present) and acquire a write lock on the item.
        /** Returns true if item is new. */
        bool insert( accessor &result, const Key &key ) {
            return lookup(/*insert*/true, key, NULL, &result, /*write=*/true );
        //! Insert item by copying if there is no such key present already and acquire a read lock on the item.
        /** Returns true if item is new. */
        bool insert( const_accessor &result, const value_type &value ) {
            return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false );
        //! Insert item by copying if there is no such key present already and acquire a write lock on the item.
        /** Returns true if item is new. */
        bool insert( accessor &result, const value_type &value ) {
            return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true );
        //! Insert item by copying if there is no such key present already
        /** Returns true if item is inserted. */
        bool insert( const value_type &value ) {
            return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false );
        //! Insert range [first, last)
        void insert(I first, I last) {
            for(; first != last; ++first)
        /** Return true if item was erased by this particular call. */
        bool erase( const Key& key );
        //! Erase item by const_accessor.
        /** Return true if item was erased by this particular call. */
        bool erase( const_accessor& item_accessor ) {
            return exclude( item_accessor, /*readonly=*/ true );
        //! Erase item by accessor.
        /** Return true if item was erased by this particular call. */
        bool erase( accessor& item_accessor ) {
            return exclude( item_accessor, /*readonly=*/ false );
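        // Illustrative usage sketch of the concurrent operations above (not part of the
        // original header); 'word' and 'counts' are hypothetical names:
        //
        //     tbb::concurrent_hash_map<std::string,int> counts;
        //     {
        //         tbb::concurrent_hash_map<std::string,int>::accessor a;
        //         counts.insert( a, word );               // insert if absent, write-lock the item
        //         a->second += 1;                         // safe update while 'a' holds the lock
        //     }                                           // lock released when 'a' is destroyed
        //     {
        //         tbb::concurrent_hash_map<std::string,int>::const_accessor ca;
        //         if( counts.find( ca, word ) )           // read-lock the item if present
        //             std::printf( "%s: %d\n", word.c_str(), ca->second );
        //     }
        //     counts.erase( word );                       // erase by key, if still present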
        //! Insert or find item and optionally acquire a lock on the item.
        bool lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write );
        //! delete item by accessor
        bool exclude( const_accessor &item_accessor, bool readonly );
        //! Returns an iterator for an item defined by the key, or for the next item after it (if upper==true)
        std::pair<I, I> internal_equal_range( const Key& key, I end ) const;
        //! Copy "source" to *this, where *this must start out empty.
        void internal_copy( const concurrent_hash_map& source );
        void internal_copy(I first, I last);
        //! Fast find when no concurrent erasure is used. For internal use inside TBB only!
        /** Return pointer to item with given key, or NULL if no such item exists.
            Must not be called concurrently with erasure operations. */
        const_pointer internal_fast_find( const Key& key ) const {
            hashcode_t h = my_hash_compare.hash( key );
#if TBB_USE_THREADING_TOOLS
            hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
            hashcode_t m = my_mask;
            __TBB_ASSERT((m&(m+1))==0, NULL);
            bucket *b = get_bucket( h & m );
#if TBB_USE_THREADING_TOOLS
            // TODO: actually, notification is unnecessary here, just hiding double-check
            if( itt_load_pointer_with_acquire_v3(&b->node_list) == internal::rehash_req )
            if( __TBB_load_with_acquire(b->node_list) == internal::rehash_req )
                bucket::scoped_t lock;
                if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
                    if( b->node_list == internal::rehash_req)
                        const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); // recursive rehashing
                else lock.acquire( b->mutex, /*write=*/false );
                __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL);
            n = search_bucket( key, b );
            else if( check_mask_race( h, m ) )
#if _MSC_VER && !defined(__INTEL_COMPILER)
    // Suppress "conditional expression is constant" warning.
    #pragma warning( push )
    #pragma warning( disable: 4127 )
    template<typename Key, typename T, typename HashCompare, typename A>
    bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write ) {
        __TBB_ASSERT( !result || !result->my_node, NULL );
        segment_index_t grow_segment;
        hashcode_t const h = my_hash_compare.hash( key );
#if TBB_USE_THREADING_TOOLS
        hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
        hashcode_t m = my_mask;
        __TBB_ASSERT((m&(m+1))==0, NULL);
        return_value = false;
        bucket_accessor b( this, h & m );
        n = search_bucket( key, b() );
        // [opt] insert a key
        if(t) tmp_n = new( my_allocator ) node(key, *t);
        else tmp_n = new( my_allocator ) node(key);
        if( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion
            // Rerun search_bucket, in case another thread inserted the item during the upgrade.
            n = search_bucket( key, b() );
            if( is_valid(n) ) { // unfortunately, it did
                b.downgrade_to_reader();
        if( check_mask_race(h, m) )
            goto restart; // b.release() is done in ~b().
        // insert and set flag to grow the container
        grow_segment = insert_new_node( b(), n = tmp_n, m );
        return_value = true;
    exists: grow_segment = 0;
        } else { // find or count
            if( check_mask_race( h, m ) )
                goto restart; // b.release() is done in ~b(). TODO: replace by continue
            return_value = true;
        if( !result ) goto check_growth;
        // TODO: the following seems as generic/regular operation
        if( !result->my_lock.try_acquire( n->mutex, write ) ) {
            // we are unlucky, prepare for longer wait
            tbb::internal::atomic_backoff trials;
            if( !trials.bounded_pause() ) {
                // the wait takes really long, restart the operation
                __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
#if TBB_USE_THREADING_TOOLS
                m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
            } while( !result->my_lock.try_acquire( n->mutex, write ) );
        result->my_node = n;
        result->my_hash = h;
        // [opt] grow the container
        enable_segment( grow_segment );
        if( tmp_n ) // if op_insert only
            delete_node( tmp_n );
        return return_value;
    template<typename Key, typename T, typename HashCompare, typename A>
    template<typename I>
    std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
        hashcode_t h = my_hash_compare.hash( key );
        hashcode_t m = my_mask;
        __TBB_ASSERT((m&(m+1))==0, NULL);
        bucket *b = get_bucket( h );
        while( b->node_list == internal::rehash_req ) {
            m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
            b = get_bucket( h &= m );
        node *n = search_bucket( key, b );
            return std::make_pair(end_, end_);
        iterator lower(*this, h, b, n), upper(lower);
        return std::make_pair(lower, ++upper);
    template<typename Key, typename T, typename HashCompare, typename A>
    bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor, bool readonly ) {
        __TBB_ASSERT( item_accessor.my_node, NULL );
        node_base *const n = item_accessor.my_node;
        item_accessor.my_node = NULL; // we ought to release the accessor anyway
        hashcode_t const h = item_accessor.my_hash;
#if TBB_USE_THREADING_TOOLS
        hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
        hashcode_t m = my_mask;
        bucket_accessor b( this, h & m, /*writer=*/true );
        node_base **p = &b()->node_list;
        while( *p && *p != n )
        if( !*p ) { // someone else was the first
            if( check_mask_race( h, m ) )
            item_accessor.my_lock.release();
        __TBB_ASSERT( *p == n, NULL );
        *p = n->next; // remove from container
        if( readonly ) // need to get exclusive lock
            item_accessor.my_lock.upgrade_to_writer(); // return value means nothing here
        item_accessor.my_lock.release();
        delete_node( n ); // Only one thread can delete it due to the write lock on the bucket
    template<typename Key, typename T, typename HashCompare, typename A>
    bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
        hashcode_t const h = my_hash_compare.hash( key );
#if TBB_USE_THREADING_TOOLS
        hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
        hashcode_t m = my_mask;
        bucket_accessor b( this, h & m );
        node_base **p = &b()->node_list;
        while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->item.first ) ) {
        if( !n ) { // not found, but mask could be changed
            if( check_mask_race( h, m ) )
        else if( !b.is_writer() && !b.upgrade_to_writer() ) {
            if( check_mask_race( h, m ) ) // contended upgrade, check mask
        typename node::scoped_t item_locker( n->mutex, /*write=*/true );
        // note: there should be no threads attempting to acquire this mutex again; do not try to upgrade const_accessor!
        delete_node( n ); // Only one thread can delete it due to the write lock on the bucket
    template<typename Key, typename T, typename HashCompare, typename A>
    void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
        std::swap(this->my_allocator, table.my_allocator);
        std::swap(this->my_hash_compare, table.my_hash_compare);
        internal_swap(table);
    template<typename Key, typename T, typename HashCompare, typename A>
    void concurrent_hash_map<Key,T,HashCompare,A>::rehash(size_type sz) {
        reserve( sz ); // TODO: add reduction of number of buckets as well
        hashcode_t mask = my_mask;
        hashcode_t b = (mask+1)>>1; // size or first index of the last segment
        __TBB_ASSERT((b&(b-1))==0, NULL);
        bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
        for(; b <= mask; b++, bp++ ) {
            node_base *n = bp->node_list;
            __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
            __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
            if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one
                hashcode_t h = b; bucket *b_old = bp;
                    __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
                    hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
                    b_old = get_bucket( h &= m );
                } while( b_old->node_list == internal::rehash_req );
                // now h is the index of the root rehashed bucket b_old
                mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
                for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
                    hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->item.first );
                    if( (c & mask) != h ) { // should be rehashed
                        *p = q->next; // exclude from b_old
                        bucket *b_new = get_bucket( c & mask );
                        __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
                        add_to_bucket( b_new, q );
                    } else p = &q->next; // iterate to next item
#if TBB_USE_PERFORMANCE_WARNINGS
        int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
        static bool reported = false;
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
        for( b = 0; b <= mask; b++ ) { // only last segment should be scanned for rehashing
            if( b & (b-2) ) ++bp; // not the beginning of a segment
            else bp = get_bucket( b );
            node_base *n = bp->node_list;
            __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
            __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
#if TBB_USE_PERFORMANCE_WARNINGS
            if( n == internal::empty_rehashed ) empty_buckets++;
            else if( n->next ) overpopulated_buckets++;
            for( ; is_valid(n); n = n->next ) {
                hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first ) & mask;
                __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
        if( buckets > current_size) empty_buckets -= buckets - current_size;
        else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
        if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
            tbb::internal::runtime_warning(
                "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
                typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
    template<typename Key, typename T, typename HashCompare, typename A>
    void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
        hashcode_t m = my_mask;
        __TBB_ASSERT((m&(m+1))==0, NULL);
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
        int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
        static bool reported = false;
        // check consistency
        for( segment_index_t b = 0; b <= m; b++ ) {
            if( b & (b-2) ) ++bp; // not the beginning of a segment
            else bp = get_bucket( b );
            node_base *n = bp->node_list;
            __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
            __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
#if TBB_USE_PERFORMANCE_WARNINGS
            if( n == internal::empty_rehashed ) empty_buckets++;
            else if( n == internal::rehash_req ) buckets--;
            else if( n->next ) overpopulated_buckets++;
#if __TBB_EXTRA_DEBUG
            for(; is_valid(n); n = n->next ) {
                hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first );
                __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
#if TBB_USE_PERFORMANCE_WARNINGS
        if( buckets > current_size) empty_buckets -= buckets - current_size;
        else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
        if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
            tbb::internal::runtime_warning(
                "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
                typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
        segment_index_t s = segment_index_of( m );
        __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
        cache_aligned_allocator<bucket> alloc;
        __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
        segment_ptr_t buckets_ptr = my_table[s];
        size_type sz = segment_size( s ? s : 1 );
        for( segment_index_t i = 0; i < sz; i++ )
            for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
                buckets_ptr[i].node_list = n->next;
        if( s >= first_block) // the first segment or the next
            alloc.deallocate( buckets_ptr, sz );
        else if( s == embedded_block && embedded_block != first_block )
            alloc.deallocate( buckets_ptr, segment_size(first_block)-embedded_buckets );
        if( s >= embedded_block ) my_table[s] = 0;
        my_mask = embedded_buckets - 1;
    template<typename Key, typename T, typename HashCompare, typename A>
    void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
        reserve( source.my_size ); // TODO: load_factor?
        hashcode_t mask = source.my_mask;
        if( my_mask == mask ) { // optimized version
            bucket *dst = 0, *src = 0;
            bool rehash_required = false;
            for( hashcode_t k = 0; k <= mask; k++ ) {
                if( k & (k-2) ) ++dst,src++; // not the beginning of a segment
                else { dst = get_bucket( k ); src = source.get_bucket( k ); }
                __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
                node *n = static_cast<node*>( src->node_list );
                if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets
                    rehash_required = true;
                    dst->node_list = internal::rehash_req;
                } else for(; n; n = static_cast<node*>( n->next ) ) {
                    add_to_bucket( dst, new( my_allocator ) node(n->item.first, n->item.second) );
                    ++my_size; // TODO: replace by non-atomic op
            if( rehash_required ) rehash();
        } else internal_copy( source.begin(), source.end() );
    template<typename Key, typename T, typename HashCompare, typename A>
    template<typename I>
    void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last) {
        hashcode_t m = my_mask;
        for(; first != last; ++first) {
            hashcode_t h = my_hash_compare.hash( first->first );
            bucket *b = get_bucket( h & m );
            __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
            node *n = new( my_allocator ) node(first->first, first->second);
            add_to_bucket( b, n );
            ++my_size; // TODO: replace by non-atomic op
} // namespace interface4

using interface4::concurrent_hash_map;

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
    if(a.size() != b.size()) return false;
    typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
    typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
    for(; i != i_end; ++i) {
        j = b.equal_range(i->first).first;
        if( j == j_end || !(i->second == j->second) ) return false;

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
{   return !(a == b); }

template<typename Key, typename T, typename HashCompare, typename A>
inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)

#if _MSC_VER && !defined(__INTEL_COMPILER)
#pragma warning( pop )
#endif // warning 4127 is back

#endif /* __TBB_concurrent_hash_map_H */