/*
    Copyright 2005-2011 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

    As a special exception, you may use this file as part of a free software
    library without restriction.  Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License.  This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/
#ifndef __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H

#include "tbb_stddef.h"

#if !TBB_USE_EXCEPTIONS && _MSC_VER
// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
#pragma warning (push)
#pragma warning (disable: 4530)
#endif

#include <utility>      // Need std::pair
#include <cstring>      // Need std::memset

#if !TBB_USE_EXCEPTIONS && _MSC_VER
#pragma warning (pop)
#endif

#include "cache_aligned_allocator.h"
#include "tbb_allocator.h"
#include "spin_rw_mutex.h"
#include "atomic.h"
#include "aligned_space.h"
#include "tbb_exception.h"
#include "tbb_profiling.h"
#include "_concurrent_unordered_internal.h" // Need tbb_hasher
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#include <typeinfo>
#endif
#if __TBB_STATISTICS
#include <stdio.h>
#endif

namespace tbb {
//! hash_compare that is the default argument for concurrent_hash_map
template<typename Key>
struct tbb_hash_compare {
    static size_t hash( const Key& a ) { return tbb_hasher(a); }
    static bool equal( const Key& a, const Key& b ) { return a == b; }
};
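// An illustrative sketch (editor's addition, not part of the original header):
// a user-defined HashCompare must provide the same two static functions as
// tbb_hash_compare above. For example, a case-insensitive variant for
// std::string keys could look like this; the struct name is hypothetical:
//
//     #include <string>
//     #include <cctype>
//
//     struct istring_hash_compare {
//         static size_t hash( const std::string& s ) {
//             size_t h = 0;
//             for( size_t i = 0; i < s.size(); ++i )
//                 h = h*31 + size_t( std::tolower( (unsigned char)s[i] ) );
//             return h;
//         }
//         static bool equal( const std::string& a, const std::string& b ) {
//             if( a.size() != b.size() ) return false;
//             for( size_t i = 0; i < a.size(); ++i )
//                 if( std::tolower((unsigned char)a[i]) != std::tolower((unsigned char)b[i]) )
//                     return false;
//             return true;
//         }
//     };
//
//     // usage: tbb::concurrent_hash_map<std::string,int,istring_hash_compare>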
namespace interface5 {

    template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<Key, T> > >
    class concurrent_hash_map;

    //! @cond INTERNAL
    namespace internal {

    //! Type of a hash code.
    typedef size_t hashcode_t;
    //! Node base type
    struct hash_map_node_base : tbb::internal::no_copy {
        //! Mutex type
        typedef spin_rw_mutex mutex_t;
        //! Scoped lock type for mutex
        typedef mutex_t::scoped_lock scoped_t;
        //! Next node in chain
        hash_map_node_base *next;
        mutex_t mutex;
    };

    //! Incompleteness flag value
    static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
    //! Rehashed empty bucket flag
    static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));
    //! base class of concurrent_hash_map
    class hash_map_base {
    public:
        //! Size type
        typedef size_t size_type;
        //! Type of a hash code.
        typedef size_t hashcode_t;
        //! Segment index type
        typedef size_t segment_index_t;
        //! Node base type
        typedef hash_map_node_base node_base;
        //! Bucket type
        struct bucket : tbb::internal::no_copy {
            //! Mutex type for buckets
            typedef spin_rw_mutex mutex_t;
            //! Scoped lock type for mutex
            typedef mutex_t::scoped_lock scoped_t;
            mutex_t mutex;
            node_base *node_list;
        };

        //! Count of segments in the first block
        static size_type const embedded_block = 1;
        //! Count of buckets in the embedded segments
        static size_type const embedded_buckets = 1<<embedded_block;
        //! Count of segments in the first allocation block, including embedded_block; perfect with bucket size 16, so the allocations are powers of 4096
        static size_type const first_block = 8;
        //! Size of a pointer / table size
        static size_type const pointers_per_table = sizeof(segment_index_t) * 8; // one segment per bit
        //! Segment pointer
        typedef bucket *segment_ptr_t;
        //! Segment pointers table type
        typedef segment_ptr_t segments_table_t[pointers_per_table];
        //! Hash mask = sum of allocated segment sizes - 1
        atomic<hashcode_t> my_mask;
        //! Segment pointers table. Also prevents false sharing between my_mask and my_size
        segments_table_t my_table;
        //! Size of container in stored items
        atomic<size_type> my_size; // It must be in a separate cache line from my_mask due to performance effects
        //! Zero segment
        bucket my_embedded_segment[embedded_buckets];
#if __TBB_STATISTICS
        atomic<unsigned> my_info_resizes; // concurrent ones
        mutable atomic<unsigned> my_info_restarts; // race collisions
        atomic<unsigned> my_info_rehashes;  // invocations of rehash_bucket
#endif
        //! Constructor
        hash_map_base() {
            std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t) // 32*4=128 or 64*8=512
                + sizeof(my_size) + sizeof(my_mask)  // 4+4 or 8+8
                + embedded_buckets*sizeof(bucket) ); // n*8 or n*16
            for( size_type i = 0; i < embedded_block; i++ ) // fill the table
                my_table[i] = my_embedded_segment + segment_base(i);
            my_mask = embedded_buckets - 1;
            __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
#if __TBB_STATISTICS
            my_info_resizes = 0; // concurrent ones
            my_info_restarts = 0; // race collisions
            my_info_rehashes = 0; // invocations of rehash_bucket
#endif
        }
        //! @return segment index of given index in the array
        static segment_index_t segment_index_of( size_type index ) {
            return segment_index_t( __TBB_Log2( index|1 ) );
        }

        //! @return the first array index of given segment
        static segment_index_t segment_base( segment_index_t k ) {
            return (segment_index_t(1)<<k & ~segment_index_t(1));
        }

        //! @return segment size except for @arg k == 0
        static size_type segment_size( segment_index_t k ) {
            return size_type(1)<<k; // fake value for k==0
        }
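        // A worked example of the three helpers above (editor's addition; the
        // values follow directly from the definitions): bucket indices map to
        // segments as
        //     index:            0  1  2  3  4  5  6  7  8 ... 15
        //     segment_index_of: 0  0  1  1  2  2  2  2  3 ...  3
        // and segment_base(k) gives 0, 2, 4, 8, 16, ..., so segment k>0 spans
        // bucket indices [1<<k, 1<<(k+1)) and holds segment_size(k) == 1<<k
        // buckets, while segment 0 is the embedded two-bucket special case
        // (hence the "fake value" note for k==0).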
        //! @return true if @arg ptr is a valid pointer
        static bool is_valid( void *ptr ) {
            return reinterpret_cast<size_t>(ptr) > size_t(63);
        }
        //! Initialize buckets
        static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
            if( is_initial ) std::memset(ptr, 0, sz*sizeof(bucket) );
            else for(size_type i = 0; i < sz; i++, ptr++) {
                *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
                ptr->node_list = rehash_req;
            }
        }

        //! Add node @arg n to bucket @arg b
        static void add_to_bucket( bucket *b, node_base *n ) {
            __TBB_ASSERT(b->node_list != rehash_req, NULL);
            n->next = b->node_list;
            b->node_list = n; // it's under lock and the flag is set
        }
        //! Exception safety helper
        struct enable_segment_failsafe {
            segment_ptr_t *my_segment_ptr;
            enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}
            ~enable_segment_failsafe() {
                if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
            }
        };
        //! Enable segment
        void enable_segment( segment_index_t k, bool is_initial = false ) {
            __TBB_ASSERT( k, "Zero segment must be embedded" );
            enable_segment_failsafe watchdog( my_table, k );
            cache_aligned_allocator<bucket> alloc;
            size_type sz;
            __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
            if( k >= first_block ) {
                sz = segment_size( k );
                segment_ptr_t ptr = alloc.allocate( sz );
                init_buckets( ptr, sz, is_initial );
                itt_hide_store_word( my_table[k], ptr );
                sz <<= 1; // double it to get the entire capacity of the container
            } else { // the first block
                __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
                sz = segment_size( first_block );
                segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets );
                init_buckets( ptr, sz - embedded_buckets, is_initial );
                ptr -= segment_base(embedded_block);
                for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
                    itt_hide_store_word( my_table[i], ptr + segment_base(i) );
            }
            itt_store_word_with_release( my_mask, sz-1 );
            watchdog.my_segment_ptr = 0;
        }
        //! Get bucket by (masked) hashcode
        bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere?
            segment_index_t s = segment_index_of( h );
            h -= segment_base(s);
            segment_ptr_t seg = my_table[s];
            __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
            return &seg[h];
        }
        // internal serial rehashing helper
        void mark_rehashed_levels( hashcode_t h ) throw () {
            segment_index_t s = segment_index_of( h );
            while( segment_ptr_t seg = my_table[++s] )
                if( seg[h].node_list == rehash_req ) {
                    seg[h].node_list = empty_rehashed;
                    mark_rehashed_levels( h + ((hashcode_t)1<<s) ); // optimized segment_base(s)
                }
        }
        //! Check for mask race
        // Splitting into two functions should help inlining
        inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
            hashcode_t m_now, m_old = m;
            m_now = (hashcode_t) itt_load_word_with_acquire( my_mask );
            if( m_old != m_now )
                return check_rehashing_collision( h, m_old, m = m_now );
            return false;
        }
        //! Process mask race, check for rehashing collision
        bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
            __TBB_ASSERT(m_old != m, NULL); // TODO?: m arg could be optimized out by passing h = h&m
            if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
                // condition above proves that 'h' has some other bits set besides 'm_old'
                // find next applicable mask after m_old    //TODO: look at bsl instruction
                for( ++m_old; !(h & m_old); m_old <<= 1 ) // at most a few rounds depending on the first block size
                    ;
                m_old = (m_old<<1) - 1; // get full mask from a bit
                __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
                // check whether it is rehashing/ed
                if( itt_load_word_with_acquire(get_bucket(h & m_old)->node_list) != rehash_req )
                {
#if __TBB_STATISTICS
                    my_info_restarts++; // race collisions
#endif
                    return true;
                }
            }
            return false;
        }
        //! Insert a node and check for load factor. @return segment index to enable.
        segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
            size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
            add_to_bucket( b, n );
            // check load factor
            if( sz >= mask ) { // TODO: add custom load_factor
                segment_index_t new_seg = __TBB_Log2( mask+1 ); //optimized segment_index_of
                __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
                if( !itt_hide_load_word(my_table[new_seg])
                  && __TBB_CompareAndSwapW(&my_table[new_seg], 2, 0) == 0 )
                    return new_seg; // The value must be processed
            }
            return 0;
        }
        //! Prepare enough segments for the given number of buckets
        void reserve(size_type buckets) {
            if( !buckets-- ) return;
            bool is_initial = !my_size;
            for( size_type m = my_mask; buckets > m; m = my_mask )
                enable_segment( segment_index_of( m+1 ), is_initial );
        }
        //! Swap hash_map_bases
        void internal_swap(hash_map_base &table) {
            std::swap(this->my_mask, table.my_mask);
            std::swap(this->my_size, table.my_size);
            for(size_type i = 0; i < embedded_buckets; i++)
                std::swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
            for(size_type i = embedded_block; i < pointers_per_table; i++)
                std::swap(this->my_table[i], table.my_table[i]);
        }
    };

    template<typename Iterator>
    class hash_map_range;
    //! Meets requirements of a forward iterator for STL
    /** Value is either the T or const T type of the container.
        @ingroup containers */
    template<typename Container, typename Value>
    class hash_map_iterator
        : public std::iterator<std::forward_iterator_tag,Value>
    {
        typedef Container map_type;
        typedef typename Container::node node;
        typedef hash_map_base::node_base node_base;
        typedef hash_map_base::bucket bucket;

        template<typename C, typename T, typename U>
        friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename T, typename U>
        friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename T, typename U>
        friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

        template<typename C, typename U>
        friend class hash_map_iterator;

        template<typename I>
        friend class hash_map_range;
        void advance_to_next_bucket() { // TODO?: refactor to iterator_base class
            size_t k = my_index+1;
            while( my_bucket && k <= my_map->my_mask ) {
                // Following test uses 2's-complement wizardry
                if( k & (k-2) ) // not the beginning of a segment
                    ++my_bucket;
                else my_bucket = my_map->get_bucket( k );
                my_node = static_cast<node*>( my_bucket->node_list );
                if( hash_map_base::is_valid(my_node) ) {
                    my_index = k; return;
                }
                ++k;
            }
            my_bucket = 0; my_node = 0; my_index = k; // the end
        }
#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
        template<typename Key, typename T, typename HashCompare, typename A>
        friend class interface5::concurrent_hash_map;
#else
    public: // workaround
#endif
        //! concurrent_hash_map over which we are iterating.
        const Container *my_map;

        //! Index in hash table for current item
        size_t my_index;

        //! Pointer to bucket
        const bucket *my_bucket;

        //! Pointer to node that has current item
        node *my_node;

        hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );

    public:
        //! Construct undefined iterator
        hash_map_iterator() {}
        hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :
            my_map(other.my_map),
            my_index(other.my_index),
            my_bucket(other.my_bucket),
            my_node(other.my_node)
        {}
        Value& operator*() const {
            __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
            return my_node->item;
        }
        Value* operator->() const {return &operator*();}
        hash_map_iterator& operator++();

        //! Post increment
        hash_map_iterator operator++(int) {
            hash_map_iterator old(*this);
            operator++();
            return old;
        }
    };
    template<typename Container, typename Value>
    hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
        my_map(&map),
        my_index(index),
        my_bucket(b),
        my_node( static_cast<node*>(n) )
    {
        if( b && !hash_map_base::is_valid(n) )
            advance_to_next_bucket();
    }

    template<typename Container, typename Value>
    hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
        my_node = static_cast<node*>( my_node->next );
        if( !my_node ) advance_to_next_bucket();
        return *this;
    }
    template<typename Container, typename T, typename U>
    bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
        return i.my_node == j.my_node && i.my_map == j.my_map;
    }

    template<typename Container, typename T, typename U>
    bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
        return i.my_node != j.my_node || i.my_map != j.my_map;
    }
    //! Range class used with concurrent_hash_map
    /** @ingroup containers */
    template<typename Iterator>
    class hash_map_range {
        typedef typename Iterator::map_type map_type;
        Iterator my_begin;
        Iterator my_end;
        mutable Iterator my_midpoint;
        size_t my_grainsize;
        //! Set my_midpoint to point approximately half way between my_begin and my_end.
        void set_midpoint() const;
        template<typename U> friend class hash_map_range;
    public:
        //! Type for size of a range
        typedef std::size_t size_type;
        typedef typename Iterator::value_type value_type;
        typedef typename Iterator::reference reference;
        typedef typename Iterator::difference_type difference_type;
        typedef Iterator iterator;

        //! True if range is empty.
        bool empty() const {return my_begin==my_end;}

        //! True if range can be partitioned into two subranges.
        bool is_divisible() const {
            return my_midpoint!=my_end;
        }
        //! Split range.
        hash_map_range( hash_map_range& r, split ) :
            my_end(r.my_end),
            my_grainsize(r.my_grainsize)
        {
            r.my_end = my_begin = r.my_midpoint;
            __TBB_ASSERT( !empty(), "Splitting a range that is not divisible" );
            __TBB_ASSERT( !r.empty(), "Splitting a range that is not divisible" );
            set_midpoint();
            r.set_midpoint();
        }
        //! Type conversion
        template<typename U>
        hash_map_range( hash_map_range<U>& r) :
            my_begin(r.my_begin),
            my_end(r.my_end),
            my_midpoint(r.my_midpoint),
            my_grainsize(r.my_grainsize)
        {}
        //! Init range with iterators and grainsize specified
        hash_map_range( const Iterator& begin_, const Iterator& end_, size_type grainsize_ = 1 ) :
            my_begin(begin_),
            my_end(end_),
            my_grainsize(grainsize_)
        {
            if(!my_end.my_index && !my_end.my_bucket) // end
                my_end.my_index = my_end.my_map->my_mask + 1;
            set_midpoint();
            __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
        }

        //! Init range with container and grainsize specified
        hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
            my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
            my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
            my_grainsize( grainsize_ )
        {
            __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
            set_midpoint();
        }

        const Iterator& begin() const {return my_begin;}
        const Iterator& end() const {return my_end;}
        //! The grain size for this range.
        size_type grainsize() const {return my_grainsize;}
    };
    template<typename Iterator>
    void hash_map_range<Iterator>::set_midpoint() const {
        // Split by groups of nodes
        size_t m = my_end.my_index-my_begin.my_index;
        if( m > my_grainsize ) {
            m = my_begin.my_index + m/2u;
            hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
            my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
        } else {
            my_midpoint = my_end;
        }
        __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
            "my_begin is after my_midpoint" );
        __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
            "my_midpoint is after my_end" );
        __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
            "[my_begin, my_midpoint) range should not be empty" );
    }

    } // namespace internal
    //! @endcond
//! Unordered map from Key to T.
/** concurrent_hash_map is an associative container with concurrent access.

@par Compatibility
    The class meets all Container Requirements from the C++ Standard (see ISO/IEC 14882:2003(E), clause 23.1).

@par Exception Safety
    - The hash function is not permitted to throw an exception. The user-defined types Key and T are forbidden from throwing an exception in destructors.
    - If an exception happens during an insert() operation, it has no effect (unless the exception is raised by HashCompare::hash() while growing a segment).
    - If an exception happens during an operator=() operation, the container can hold only part of the source items, and methods size() and empty() can return wrong results.

@par Changes since TBB 2.1
    - Replaced internal algorithm and data structure. Patent is pending.
    - Added buckets number argument for constructor

@par Changes since TBB 2.0
    - Fixed exception-safety
    - Added template argument for allocator
    - Added allocator argument in constructors
    - Added constructor from a range of iterators
    - Added several new overloaded insert() methods
    - Added get_allocator()
    - Added swap()
    - Added count()
    - Added overloaded erase(accessor &) and erase(const_accessor&)
    - Added equal_range() [const]
    - Added [const_]pointer, [const_]reference, and allocator_type types
    - Added global functions: operator==(), operator!=(), and swap()

@ingroup containers */
template<typename Key, typename T, typename HashCompare, typename Allocator>
class concurrent_hash_map : protected internal::hash_map_base {
    template<typename Container, typename Value>
    friend class internal::hash_map_iterator;

    template<typename I>
    friend class internal::hash_map_range;

public:
    typedef Key key_type;
    typedef T mapped_type;
    typedef std::pair<const Key,T> value_type;
    typedef hash_map_base::size_type size_type;
    typedef ptrdiff_t difference_type;
    typedef value_type *pointer;
    typedef const value_type *const_pointer;
    typedef value_type &reference;
    typedef const value_type &const_reference;
    typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
    typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
    typedef internal::hash_map_range<iterator> range_type;
    typedef internal::hash_map_range<const_iterator> const_range_type;
    typedef Allocator allocator_type;

protected:
    friend class const_accessor;
    struct node;
    typedef typename Allocator::template rebind<node>::other node_allocator_type;
    node_allocator_type my_allocator;
    HashCompare my_hash_compare;
    struct node : public node_base {
        value_type item;
        node( const Key &key ) : item(key, T()) {}
        node( const Key &key, const T &t ) : item(key, t) {}
        // exception-safe allocation, see C++ Standard 2003, clause 5.3.4p17
        void *operator new( size_t /*size*/, node_allocator_type &a ) {
            void *ptr = a.allocate(1);
            if( !ptr )
                tbb::internal::throw_exception(tbb::internal::eid_bad_alloc);
            return ptr;
        }
        // match placement-new form above to be called if exception thrown in constructor
        void operator delete( void *ptr, node_allocator_type &a ) { a.deallocate(static_cast<node*>(ptr),1); }
    };
    void delete_node( node_base *n ) {
        my_allocator.destroy( static_cast<node*>(n) );
        my_allocator.deallocate( static_cast<node*>(n), 1);
    }

    node *search_bucket( const key_type &key, bucket *b ) const {
        node *n = static_cast<node*>( b->node_list );
        while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) )
            n = static_cast<node*>( n->next );
        __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
        return n;
    }
    //! bucket accessor is to find, rehash, acquire a lock, and access a bucket
    class bucket_accessor : public bucket::scoped_t {
        bucket *my_b;
    public:
        bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
        //! find a bucket by masked hashcode, optionally rehash, and acquire the lock
        inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
            my_b = base->get_bucket( h );
            // TODO: actually, notification is unnecessary here, just hiding double-check
            if( itt_load_word_with_acquire(my_b->node_list) == internal::rehash_req
                && try_acquire( my_b->mutex, /*write=*/true ) )
            {
                if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); // recursive rehashing
            }
            else bucket::scoped_t::acquire( my_b->mutex, writer );
            __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);
        }
        //! check whether bucket is locked for write
        bool is_writer() { return bucket::scoped_t::is_writer; }
        //! get bucket pointer
        bucket *operator() () { return my_b; }
    };
    // TODO: refactor to hash_base
    void rehash_bucket( bucket *b_new, const hashcode_t h ) {
        __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
        __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
        __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed
        hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
#if __TBB_STATISTICS
        my_info_rehashes++; // invocations of rehash_bucket
#endif

        bucket_accessor b_old( this, h & mask );

        mask = (mask<<1) | 1; // get full mask for new bucket
        __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
    restart:
        for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
            hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->item.first );
#if TBB_USE_ASSERT
            hashcode_t bmask = h & (mask>>1);
            bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
            __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
#endif
            if( (c & mask) == h ) {
                if( !b_old.is_writer() )
                    if( !b_old.upgrade_to_writer() ) {
                        goto restart; // node ptr can be invalid due to concurrent erase
                    }
                *p = n->next; // exclude from b_old
                add_to_bucket( b_new, n );
            } else p = &n->next; // iterate to next item
        }
    }

public:
    //! Combines data access, locking, and garbage collection.
    class const_accessor : private node::scoped_t /*which derived from no_copy*/ {
        friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
        friend class accessor;
    public:
        //! Type of value
        typedef const typename concurrent_hash_map::value_type value_type;

        //! True if result is empty.
        bool empty() const {return !my_node;}

        //! Set to null
        void release() {
            if( my_node ) {
                node::scoped_t::release();
                my_node = 0;
            }
        }

        //! Return reference to associated value in hash table.
        const_reference operator*() const {
            __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
            return my_node->item;
        }

        //! Return pointer to associated value in hash table.
        const_pointer operator->() const {
            return &operator*();
        }

        //! Create empty result
        const_accessor() : my_node(NULL) {}

        //! Destroy result after releasing the underlying reference.
        ~const_accessor() {
            my_node = NULL; // scoped lock's release() is called in its destructor
        }
    protected:
        bool is_writer() { return node::scoped_t::is_writer; }
        node *my_node;
        hashcode_t my_hash;
    };
    //! Allows write access to elements and combines data access, locking, and garbage collection.
    class accessor: public const_accessor {
    public:
        //! Type of value
        typedef typename concurrent_hash_map::value_type value_type;

        //! Return reference to associated value in hash table.
        reference operator*() const {
            __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
            return this->my_node->item;
        }

        //! Return pointer to associated value in hash table.
        pointer operator->() const {
            return &operator*();
        }
    };
    //! Construct empty table.
    concurrent_hash_map(const allocator_type &a = allocator_type())
        : internal::hash_map_base(), my_allocator(a)
    {}

    //! Construct empty table with n preallocated buckets. This number also serves as the initial concurrency level.
    concurrent_hash_map(size_type n, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        reserve( n );
    }

    //! Copy constructor
    concurrent_hash_map( const concurrent_hash_map& table, const allocator_type &a = allocator_type())
        : internal::hash_map_base(), my_allocator(a)
    {
        internal_copy(table);
    }

    //! Construction with copying iteration range and given allocator instance
    template<typename I>
    concurrent_hash_map(I first, I last, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        reserve( std::distance(first, last) ); // TODO: load_factor?
        internal_copy(first, last);
    }
    //! Assignment
    concurrent_hash_map& operator=( const concurrent_hash_map& table ) {
        if( this!=&table ) {
            clear();
            internal_copy(table);
        }
        return *this;
    }

    //! Rehashes and optionally resizes the whole table.
    /** Useful to optimize performance before or after concurrent operations.
        Also enables using find() and count() concurrent methods in a serial context. */
    void rehash(size_type n = 0);

    //! Clear table
    void clear();

    //! Clear table and destroy it.
    ~concurrent_hash_map() { clear(); }
    //------------------------------------------------------------------------
    // Parallel algorithm support
    //------------------------------------------------------------------------
    range_type range( size_type grainsize=1 ) {
        return range_type( *this, grainsize );
    }
    const_range_type range( size_type grainsize=1 ) const {
        return const_range_type( *this, grainsize );
    }
    //------------------------------------------------------------------------
    // STL support - not thread-safe methods
    //------------------------------------------------------------------------
    iterator begin() {return iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);}
    iterator end() {return iterator(*this,0,0,0);}
    const_iterator begin() const {return const_iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);}
    const_iterator end() const {return const_iterator(*this,0,0,0);}
    std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range(key, end()); }
    std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range(key, end()); }

    //! Number of items in table.
    size_type size() const { return my_size; }

    //! True if size()==0.
    bool empty() const { return my_size == 0; }

    //! Upper bound on size.
    size_type max_size() const {return (~size_type(0))/sizeof(node);}

    //! Returns the current number of buckets
    size_type bucket_count() const { return my_mask+1; }

    //! Return allocator object
    allocator_type get_allocator() const { return this->my_allocator; }

    //! Swap two instances. Iterators are invalidated.
    void swap(concurrent_hash_map &table);
    //------------------------------------------------------------------------
    // concurrent map operations
    //------------------------------------------------------------------------

    //! Return count of items (0 or 1)
    size_type count( const Key &key ) const {
        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false );
    }

    //! Find item and acquire a read lock on the item.
    /** Return true if item is found, false otherwise. */
    bool find( const_accessor &result, const Key &key ) const {
        result.release();
        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false );
    }

    //! Find item and acquire a write lock on the item.
    /** Return true if item is found, false otherwise. */
    bool find( accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/false, key, NULL, &result, /*write=*/true );
    }

    //! Insert item (if not already present) and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/true, key, NULL, &result, /*write=*/false );
    }

    //! Insert item (if not already present) and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/true, key, NULL, &result, /*write=*/true );
    }

    //! Insert item by copying if there is no such key present already and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const value_type &value ) {
        result.release();
        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false );
    }

    //! Insert item by copying if there is no such key present already and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const value_type &value ) {
        result.release();
        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true );
    }

    //! Insert item by copying if there is no such key present already
    /** Returns true if item is inserted. */
    bool insert( const value_type &value ) {
        return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false );
    }
    //! Insert range [first, last)
    template<typename I>
    void insert(I first, I last) {
        for(; first != last; ++first)
            insert( *first );
    }

    //! Erase item.
    /** Return true if the item was erased by this call. */
    bool erase( const Key& key );

    //! Erase item by const_accessor.
    /** Return true if the item was erased by this call. */
    bool erase( const_accessor& item_accessor ) {
        return exclude( item_accessor );
    }

    //! Erase item by accessor.
    /** Return true if the item was erased by this call. */
    bool erase( accessor& item_accessor ) {
        return exclude( item_accessor );
    }
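    // An illustrative usage sketch of the operations above (editor's addition,
    // not part of the original header); the table type and function names are
    // hypothetical:
    //
    //     typedef tbb::concurrent_hash_map<std::string,int> counter_table;
    //
    //     void bump( counter_table &table, const std::string &key ) {
    //         counter_table::accessor a;   // grants a write lock on the element
    //         table.insert( a, key );      // inserts <key, int()> if key is absent
    //         ++a->second;                 // element stays locked until 'a' is destroyed
    //     }
    //
    //     bool peek( const counter_table &table, const std::string &key, int &out ) {
    //         counter_table::const_accessor ca; // read lock permits concurrent readers
    //         if( !table.find( ca, key ) ) return false;
    //         out = ca->second;
    //         return true;
    //     }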
protected:
    //! Insert or find item and optionally acquire a lock on the item.
    bool lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write );

    //! Delete item by accessor
    bool exclude( const_accessor &item_accessor );

    //! Returns an iterator for an item defined by the key, or for the next item after it (if upper==true)
    template<typename I>
    std::pair<I, I> internal_equal_range( const Key& key, I end ) const;

    //! Copy "source" to *this, where *this must start out empty.
    void internal_copy( const concurrent_hash_map& source );

    template<typename I>
    void internal_copy(I first, I last);
    //! Fast find when no concurrent erasure is used. For internal use inside TBB only!
    /** Return pointer to item with given key, or NULL if no such item exists.
        Must not be called concurrently with erasure operations. */
    const_pointer internal_fast_find( const Key& key ) const {
        hashcode_t h = my_hash_compare.hash( key );
        hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
        node *n;
    restart:
        __TBB_ASSERT((m&(m+1))==0, NULL);
        bucket *b = get_bucket( h & m );
        // TODO: actually, notification is unnecessary here, just hiding double-check
        if( itt_load_word_with_acquire(b->node_list) == internal::rehash_req )
        {
            bucket::scoped_t lock;
            if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
                if( b->node_list == internal::rehash_req)
                    const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); // recursive rehashing
            }
            else lock.acquire( b->mutex, /*write=*/false );
            __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL);
        }
        n = search_bucket( key, b );
        if( n )
            return &n->item;
        else if( check_mask_race( h, m ) )
            goto restart;
        return 0;
    }
};
#if _MSC_VER && !defined(__INTEL_COMPILER)
// Suppress "conditional expression is constant" warning.
#pragma warning( push )
#pragma warning( disable: 4127 )
#endif
template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write ) {
    __TBB_ASSERT( !result || !result->my_node, NULL );
    bool return_value;
    hashcode_t const h = my_hash_compare.hash( key );
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
    segment_index_t grow_segment = 0;
    node *n, *tmp_n = 0;
restart:
    {//lock scope
        __TBB_ASSERT((m&(m+1))==0, NULL);
        return_value = false;
        // get bucket
        bucket_accessor b( this, h & m );

        // find a node
        n = search_bucket( key, b() );
        if( op_insert ) {
            // [opt] insert a key
            if( !n ) {
                if( !tmp_n ) {
                    if(t) tmp_n = new( my_allocator ) node(key, *t);
                    else  tmp_n = new( my_allocator ) node(key);
                }
                if( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion
                    // Rerun search_bucket, in case another thread inserted the item during the upgrade.
                    n = search_bucket( key, b() );
                    if( is_valid(n) ) { // unfortunately, it did
                        b.downgrade_to_reader();
                        goto exists;
                    }
                }
                if( check_mask_race(h, m) )
                    goto restart; // b.release() is done in ~b().
                // insert and set flag to grow the container
                grow_segment = insert_new_node( b(), n = tmp_n, m );
                tmp_n = 0;
                return_value = true;
            }
        } else { // find or count
            if( !n ) {
                if( check_mask_race( h, m ) )
                    goto restart; // b.release() is done in ~b(). TODO: replace by continue
                return false;
            }
            return_value = true;
        }
    exists:
        if( !result ) goto check_growth;
        // TODO: the following seems like a generic/regular operation
        // acquire the item
        if( !result->try_acquire( n->mutex, write ) ) {
            // we are unlucky, prepare for a longer wait
            tbb::internal::atomic_backoff trials;
            do {
                if( !trials.bounded_pause() ) {
                    // the wait takes really long, restart the operation
                    b.release();
                    __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
                    __TBB_Yield();
                    m = (hashcode_t) itt_load_word_with_acquire( my_mask );
                    goto restart;
                }
            } while( !result->try_acquire( n->mutex, write ) );
        }
    }//lock scope
    result->my_node = n;
    result->my_hash = h;
check_growth:
    // [opt] grow the container
    if( grow_segment ) {
#if __TBB_STATISTICS
        my_info_resizes++; // concurrent ones
#endif
        enable_segment( grow_segment );
    }
    if( tmp_n ) // if op_insert only
        delete_node( tmp_n );
    return return_value;
}
template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
    hashcode_t h = my_hash_compare.hash( key );
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, NULL);
    h &= m;
    bucket *b = get_bucket( h );
    while( b->node_list == internal::rehash_req ) {
        m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
        b = get_bucket( h &= m );
    }
    node *n = search_bucket( key, b );
    if( !n )
        return std::make_pair(end_, end_);
    iterator lower(*this, h, b, n), upper(lower);
    return std::make_pair(lower, ++upper);
}
template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor ) {
    __TBB_ASSERT( item_accessor.my_node, NULL );
    node_base *const n = item_accessor.my_node;
    hashcode_t const h = item_accessor.my_hash;
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
    do {
        // get bucket
        bucket_accessor b( this, h & m, /*writer=*/true );
        node_base **p = &b()->node_list;
        while( *p && *p != n )
            p = &(*p)->next;
        if( !*p ) { // someone else was the first
            if( check_mask_race( h, m ) )
                continue;
            item_accessor.release();
            return false;
        }
        __TBB_ASSERT( *p == n, NULL );
        *p = n->next; // remove from container
        my_size--;
        break;
    } while(true);
    if( !item_accessor.is_writer() ) // need to get exclusive lock
        item_accessor.upgrade_to_writer(); // return value means nothing here
    item_accessor.release();
    delete_node( n ); // Only one thread can delete it
    return true;
}
template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
    node_base *n;
    hashcode_t const h = my_hash_compare.hash( key );
    hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
restart:
    {//lock scope
        // get bucket
        bucket_accessor b( this, h & m );
    search:
        node_base **p = &b()->node_list;
        n = *p;
        while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->item.first ) ) {
            p = &n->next;
            n = *p;
        }
        if( !n ) { // not found, but mask could be changed
            if( check_mask_race( h, m ) )
                goto restart;
            return false;
        }
        else if( !b.is_writer() && !b.upgrade_to_writer() ) {
            if( check_mask_race( h, m ) ) // contended upgrade, check mask
                goto restart;
            goto search;
        }
        *p = n->next;
        my_size--;
    }
    {
        typename node::scoped_t item_locker( n->mutex, /*write=*/true );
    }
    // note: there should be no threads pretending to acquire this mutex again; do not try to upgrade const_accessor!
    delete_node( n ); // Only one thread can delete it due to the write lock on the bucket
    return true;
}
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
    std::swap(this->my_allocator, table.my_allocator);
    std::swap(this->my_hash_compare, table.my_hash_compare);
    internal_swap(table);
}
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::rehash(size_type sz) {
    reserve( sz ); // TODO: add reduction of number of buckets as well
    hashcode_t mask = my_mask;
    hashcode_t b = (mask+1)>>1; // size or first index of the last segment
    __TBB_ASSERT((b&(b-1))==0, NULL);
    bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
    for(; b <= mask; b++, bp++ ) {
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one
            hashcode_t h = b; bucket *b_old = bp;
            do {
                __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
                hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
                b_old = get_bucket( h &= m );
            } while( b_old->node_list == internal::rehash_req );
            // now h is the index of the root rehashed bucket b_old
            mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
            for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
                hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->item.first );
                if( (c & mask) != h ) { // should be rehashed
                    *p = q->next; // exclude from b_old
                    bucket *b_new = get_bucket( c & mask );
                    __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
                    add_to_bucket( b_new, q );
                } else p = &q->next; // iterate to next item
            }
        }
    }
#if TBB_USE_PERFORMANCE_WARNINGS
    int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
    static bool reported = false;
#endif
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
    for( b = 0; b <= mask; b++ ) { // only the last segment should be scanned for rehashing
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
#if TBB_USE_PERFORMANCE_WARNINGS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n->next ) overpopulated_buckets++;
#endif
#if TBB_USE_ASSERT
        for( ; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first ) & mask;
            __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d  Empties: %d  Overlaps: %d",
            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
}
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, NULL);
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
    int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
    static bool reported = false;
#endif
    bucket *bp = 0;
    // check consistency
    for( segment_index_t b = 0; b <= m; b++ ) {
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n == internal::rehash_req ) buckets--;
        else if( n->next ) overpopulated_buckets++;
#endif
#if __TBB_EXTRA_DEBUG
        for(; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first );
            h &= m;
            __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#if __TBB_STATISTICS
    printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
        " concurrent: resizes=%u rehashes=%u restarts=%u\n",
        current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
        unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
    my_info_resizes = 0; // concurrent ones
    my_info_restarts = 0; // race collisions
    my_info_rehashes = 0; // invocations of rehash_bucket
#endif
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d  Empties: %d  Overlaps: %d",
            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
    my_size = 0;
    segment_index_t s = segment_index_of( m );
    __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
    cache_aligned_allocator<bucket> alloc;
    do {
        __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
        segment_ptr_t buckets_ptr = my_table[s];
        size_type sz = segment_size( s ? s : 1 );
        for( segment_index_t i = 0; i < sz; i++ )
            for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
                buckets_ptr[i].node_list = n->next;
                delete_node( n );
            }
        if( s >= first_block) // the first segment or the next
            alloc.deallocate( buckets_ptr, sz );
        else if( s == embedded_block && embedded_block != first_block )
            alloc.deallocate( buckets_ptr, segment_size(first_block)-embedded_buckets );
        if( s >= embedded_block ) my_table[s] = 0;
    } while(s-- > 0);
    my_mask = embedded_buckets - 1;
}
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
    reserve( source.my_size ); // TODO: load_factor?
    hashcode_t mask = source.my_mask;
    if( my_mask == mask ) { // optimized version
        bucket *dst = 0, *src = 0;
        bool rehash_required = false;
        for( hashcode_t k = 0; k <= mask; k++ ) {
            if( k & (k-2) ) ++dst,src++; // not the beginning of a segment
            else { dst = get_bucket( k ); src = source.get_bucket( k ); }
            __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
            node *n = static_cast<node*>( src->node_list );
            if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets
                rehash_required = true;
                dst->node_list = internal::rehash_req;
            } else for(; n; n = static_cast<node*>( n->next ) ) {
                add_to_bucket( dst, new( my_allocator ) node(n->item.first, n->item.second) );
                ++my_size; // TODO: replace by non-atomic op
            }
        }
        if( rehash_required ) rehash();
    } else internal_copy( source.begin(), source.end() );
}
template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last) {
    hashcode_t m = my_mask;
    for(; first != last; ++first) {
        hashcode_t h = my_hash_compare.hash( first->first );
        bucket *b = get_bucket( h & m );
        __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
        node *n = new( my_allocator ) node(first->first, first->second);
        add_to_bucket( b, n );
        ++my_size; // TODO: replace by non-atomic op
    }
}
} // namespace interface5

using interface5::concurrent_hash_map;
template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
    if(a.size() != b.size()) return false;
    typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
    typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
    for(; i != i_end; ++i) {
        j = b.equal_range(i->first).first;
        if( j == j_end || !(i->second == j->second) ) return false;
    }
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
{   return !(a == b); }
template<typename Key, typename T, typename HashCompare, typename A>
inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
{   a.swap( b ); }

#if _MSC_VER && !defined(__INTEL_COMPILER)
#pragma warning( pop )
#endif // warning 4127 is back

} // namespace tbb

#endif /* __TBB_concurrent_hash_map_H */