2 Copyright 2005-2011 Intel Corporation. All Rights Reserved.
4 This file is part of Threading Building Blocks.
6 Threading Building Blocks is free software; you can redistribute it
7 and/or modify it under the terms of the GNU General Public License
8 version 2 as published by the Free Software Foundation.
10 Threading Building Blocks is distributed in the hope that it will be
11 useful, but WITHOUT ANY WARRANTY; without even the implied warranty
12 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with Threading Building Blocks; if not, write to the Free Software
17 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 As a special exception, you may use this file as part of a free software
20 library without restriction. Specifically, if other files instantiate
21 templates or use macros or inline functions from this file, or you compile
22 this file and link it with other files to produce an executable, this
23 file does not by itself cause the resulting executable to be covered by
24 the GNU General Public License. This exception does not however
25 invalidate any other reasons why the executable file might be covered by
26 the GNU General Public License.
29 /* Container implementations in this header are based on PPL implementations
30 provided by Microsoft. */
32 #ifndef __TBB__concurrent_unordered_impl_H
33 #define __TBB__concurrent_unordered_impl_H
34 #if !defined(__TBB_concurrent_unordered_map_H) && !defined(__TBB_concurrent_unordered_set_H) && !defined(__TBB_concurrent_hash_map_H)
35 #error Do not #include this internal file directly; use public TBB headers instead.
38 #include "../tbb_stddef.h"
40 #if !TBB_USE_EXCEPTIONS && _MSC_VER
41 // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
42 #pragma warning (push)
43 #pragma warning (disable: 4530)
47 #include <utility> // Need std::pair
49 #include <string> // For tbb_hasher
50 #include <cstring> // Need std::memset
52 #if !TBB_USE_EXCEPTIONS && _MSC_VER
56 #include "../atomic.h"
57 #include "../tbb_exception.h"
58 #include "../tbb_allocator.h"
61 namespace interface5 {
65 template <typename T, typename Allocator>
66 class split_ordered_list;
67 template <typename Traits>
68 class concurrent_unordered_base;
70 // Forward list iterators (without skipping dummy elements)
// Raw forward iterator over the split-ordered list: visits EVERY node,
// including the even-keyed dummy (bucket marker) nodes.
template<class Solist, typename Value>
class flist_iterator : public std::iterator<std::forward_iterator_tag, Value>
{
    template <typename T, typename Allocator>
    friend class split_ordered_list;
    template <typename Traits>
    friend class concurrent_unordered_base;
    template<class M, typename V>
    friend class flist_iterator;

    typedef typename Solist::nodeptr_t nodeptr_t;
public:
    typedef typename Solist::value_type value_type;
    typedef typename Solist::difference_type difference_type;
    typedef typename Solist::pointer pointer;
    typedef typename Solist::reference reference;

    // A default-constructed iterator holds a null node pointer (end/singular).
    flist_iterator() : my_node_ptr(0) {}
    // Converting constructor: enables iterator -> const_iterator conversion.
    flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )
        : my_node_ptr(other.my_node_ptr) {}

    reference operator*() const { return my_node_ptr->my_element; }
    pointer operator->() const { return &**this; }

    // Pre-increment: step to the immediate successor; dummies are NOT skipped.
    flist_iterator& operator++() {
        my_node_ptr = my_node_ptr->my_next;
        return *this;
    }

    flist_iterator operator++(int) {
        flist_iterator tmp = *this;
        ++*this;
        return tmp;
    }

protected:
    // Containers construct raw iterators directly from node pointers.
    flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
    nodeptr_t get_node_ptr() const { return my_node_ptr; }

    nodeptr_t my_node_ptr; // current node; NULL means end of list

    template<typename M, typename T, typename U>
    friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );
    template<typename M, typename T, typename U>
    friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );
};
118 template<typename Solist, typename T, typename U>
119 bool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {
120 return i.my_node_ptr == j.my_node_ptr;
122 template<typename Solist, typename T, typename U>
123 bool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {
124 return i.my_node_ptr != j.my_node_ptr;
127 // Split-order list iterators, needed to skip dummy elements
128 template<class Solist, typename Value>
129 class solist_iterator : public flist_iterator<Solist, Value>
131 typedef flist_iterator<Solist, Value> base_type;
132 typedef typename Solist::nodeptr_t nodeptr_t;
133 using base_type::get_node_ptr;
134 template <typename T, typename Allocator>
135 friend class split_ordered_list;
136 template<class M, typename V>
137 friend class solist_iterator;
138 template<typename M, typename T, typename U>
139 friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );
140 template<typename M, typename T, typename U>
141 friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
143 const Solist *my_list_ptr;
144 solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}
147 typedef typename Solist::value_type value_type;
148 typedef typename Solist::difference_type difference_type;
149 typedef typename Solist::pointer pointer;
150 typedef typename Solist::reference reference;
153 solist_iterator(const solist_iterator<Solist, typename Solist::value_type> &other )
154 : base_type(other), my_list_ptr(other.my_list_ptr) {}
156 reference operator*() const {
157 return this->base_type::operator*();
160 pointer operator->() const {
164 solist_iterator& operator++() {
165 do ++(*(base_type *)this);
166 while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
171 solist_iterator operator++(int) {
172 solist_iterator tmp = *this;
174 while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
180 template<typename Solist, typename T, typename U>
181 bool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {
182 return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;
184 template<typename Solist, typename T, typename U>
185 bool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {
186 return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
189 // Forward type and class definitions
190 typedef size_t sokey_t;
192 // Forward list in which elements are sorted in a split-order
// split_ordered_list: a forward list whose nodes are kept sorted by a
// "split-order" key (sokey_t).  Real elements carry odd keys and dummy bucket
// markers carry even keys (see node::is_dummy).  Insertion is lock-free via a
// CAS on the predecessor's next pointer; there is NO concurrency-safe erase
// (see the comment inside insert_dummy).
// NOTE(review): this span was captured with original line numbers embedded and
// several physical lines (braces, signatures, return statements) elided; the
// surviving text below is preserved byte-for-byte, only comments were added.
193 template <typename T, typename Allocator>
194 class split_ordered_list
197 typedef split_ordered_list<T, Allocator> self_type;
198 typedef typename Allocator::template rebind<T>::other allocator_type;
// Pointer to the node type declared below.
200 typedef node *nodeptr_t;
202 typedef typename allocator_type::size_type size_type;
203 typedef typename allocator_type::difference_type difference_type;
204 typedef typename allocator_type::pointer pointer;
205 typedef typename allocator_type::const_pointer const_pointer;
206 typedef typename allocator_type::reference reference;
207 typedef typename allocator_type::const_reference const_reference;
208 typedef typename allocator_type::value_type value_type;
// Public iterators skip dummies; raw iterators visit every node.
210 typedef solist_iterator<self_type, const value_type> const_iterator;
211 typedef solist_iterator<self_type, value_type> iterator;
212 typedef flist_iterator<self_type, const value_type> raw_const_iterator;
213 typedef flist_iterator<self_type, value_type> raw_iterator;
215 // Node that holds the element in a split-ordered list
216 struct node : tbb::internal::no_assign
218 // Initialize the node with the given order key
219 void init(sokey_t order_key) {
220 my_order_key = order_key;
224 // Return the order key (needed for hashing)
225 sokey_t get_order_key() const { // TODO: remove
229 // Inserts the new element in the list in an atomic fashion
230 nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
232 // Try to change the next pointer on the current element to a new element, only if it still points to the cached next
233 nodeptr_t exchange_node = (nodeptr_t) __TBB_CompareAndSwapW((void *) &my_next, (uintptr_t)new_node, (uintptr_t)current_node);
// CAS returns the previously stored next pointer: equal to current_node on
// success, otherwise the interfering node installed by another thread.
235 if (exchange_node == current_node) // TODO: why this branch?
237 // Operation succeeded, return the new node
242 // Operation failed, return the "interfering" node
243 return exchange_node;
247 // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets
248 // in the hash table to quickly index into the right subsection of the split-ordered list.
249 bool is_dummy() const {
// Even order key == dummy; real elements always have the low bit set.
250 return (my_order_key & 0x1) == 0;
254 nodeptr_t my_next; // Next element in the list
255 value_type my_element; // Element storage
256 sokey_t my_order_key; // Order key for this element
259 // Allocate a new node with the given order key and value
// NOTE(review): the elided lines around the placement-new held the
// exception-handling frame; the deallocate below is the cleanup path taken
// when T's copy constructor throws — confirm against the upstream header.
260 nodeptr_t create_node(sokey_t order_key, const T &value) {
261 nodeptr_t pnode = my_node_allocator.allocate(1);
264 new(static_cast<void*>(&pnode->my_element)) T(value);
265 pnode->init(order_key);
267 my_node_allocator.deallocate(pnode, 1);
274 // Allocate a new node with the given order key; used to allocate dummy nodes
// Same allocate / placement-default-construct / init pattern as above,
// with the same cleanup-on-throw deallocate.
275 nodeptr_t create_node(sokey_t order_key) {
276 nodeptr_t pnode = my_node_allocator.allocate(1);
279 new(static_cast<void*>(&pnode->my_element)) T();
280 pnode->init(order_key);
282 my_node_allocator.deallocate(pnode, 1);
// Construct an empty list: one permanent dummy head node with order key 0.
289 split_ordered_list(allocator_type a = allocator_type())
290 : my_node_allocator(a), my_element_count(0)
292 // Immediately allocate a dummy node with order key of 0. This node
293 // will always be the head of the list.
294 my_head = create_node(0);
297 ~split_ordered_list()
302 // Remove the head element which is not cleared by clear()
303 nodeptr_t pnode = my_head;
306 __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node");
311 // Common forward list functions
313 allocator_type get_allocator() const {
314 return (my_node_allocator);
// clear(): detach the whole chain from the head, then walk it and destroy
// every node.  (The signature line and loop frame were elided in extraction.)
319 nodeptr_t pnode = my_head;
321 __TBB_ASSERT(my_head != NULL, "Invalid head list node");
322 pnext = pnode->my_next;
323 pnode->my_next = NULL;
326 while (pnode != NULL)
328 pnext = pnode->my_next;
333 my_element_count = 0;
336 // Returns a first non-dummy element in the SOL
338 return first_real_iterator(raw_begin());
341 // Returns a first non-dummy element in the SOL
342 const_iterator begin() const {
343 return first_real_iterator(raw_begin());
// end(): a null-node public iterator bound to this list.
347 return (iterator(0, this));
350 const_iterator end() const {
351 return (const_iterator(0, this));
354 const_iterator cbegin() const {
355 return (((const self_type *)this)->begin());
358 const_iterator cend() const {
359 return (((const self_type *)this)->end());
362 // Checks if the number of elements (non-dummy) is 0
364 return (my_element_count == 0);
367 // Returns the number of non-dummy elements in the list
368 size_type size() const {
369 return my_element_count;
372 // Returns the maximum size of the list, determined by the allocator
373 size_type max_size() const {
374 return my_node_allocator.max_size();
377 // Swaps 'this' list with the passed in one
// NOTE(review): only count and head are swapped here; not thread-safe.
378 void swap(self_type& other)
386 std::swap(my_element_count, other.my_element_count);
387 std::swap(my_head, other.my_head);
390 // Split-order list functions
392 // Returns a first element in the SOL, which is always a dummy
393 raw_iterator raw_begin() {
394 return raw_iterator(my_head);
397 // Returns a first element in the SOL, which is always a dummy
398 raw_const_iterator raw_begin() const {
399 return raw_const_iterator(my_head);
402 raw_iterator raw_end() {
403 return raw_iterator(0);
406 raw_const_iterator raw_end() const {
407 return raw_const_iterator(0);
410 static sokey_t get_order_key(const raw_const_iterator& it) {
411 return it.get_node_ptr()->get_order_key();
// "Safe" variant: maps the end (null) iterator to the maximum key value.
414 static sokey_t get_safe_order_key(const raw_const_iterator& it) {
415 if( !it.get_node_ptr() ) return sokey_t(~0U);
416 return it.get_node_ptr()->get_order_key();
419 // Returns a public iterator version of the internal iterator. Public iterator must not
420 // be a dummy private iterator.
421 iterator get_iterator(raw_iterator it) {
422 __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
423 return iterator(it.get_node_ptr(), this);
426 // Returns a public iterator version of the internal iterator. Public iterator must not
427 // be a dummy private iterator.
428 const_iterator get_iterator(raw_const_iterator it) const {
429 __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
430 return const_iterator(it.get_node_ptr(), this);
433 // Returns a non-const version of the raw_iterator
434 raw_iterator get_iterator(raw_const_iterator it) {
435 return raw_iterator(it.get_node_ptr());
438 // Returns a non-const version of the iterator
439 static iterator get_iterator(const_iterator it) {
440 return iterator(it.my_node_ptr, it.my_list_ptr);
443 // Returns a public iterator version of a first non-dummy internal iterator at or after
444 // the passed in internal iterator.
445 iterator first_real_iterator(raw_iterator it)
447 // Skip all dummy, internal only iterators
448 while (it != raw_end() && it.get_node_ptr()->is_dummy())
451 return iterator(it.get_node_ptr(), this);
454 // Returns a public iterator version of a first non-dummy internal iterator at or after
455 // the passed in internal iterator.
456 const_iterator first_real_iterator(raw_const_iterator it) const
458 // Skip all dummy, internal only iterators
459 while (it != raw_end() && it.get_node_ptr()->is_dummy())
462 return const_iterator(it.get_node_ptr(), this);
465 // Erase an element using the allocator
466 void destroy_node(nodeptr_t pnode) {
467 my_node_allocator.destroy(pnode);
468 my_node_allocator.deallocate(pnode, 1);
471 // Try to insert a new element in the list. If insert fails, return the node that
472 // was inserted instead.
473 nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
474 new_node->my_next = current_node;
475 return previous->atomic_set_next(new_node, current_node);
478 // Insert a new element between passed in iterators
// On success the element count is bumped atomically and its new value is
// reported through *new_count (used by the caller to grow the bucket table).
479 std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, const value_type &value, sokey_t order_key, size_type *new_count)
481 nodeptr_t pnode = create_node(order_key, value);
482 nodeptr_t inserted_node = try_insert(it.get_node_ptr(), pnode, next.get_node_ptr());
484 if (inserted_node == pnode)
486 // If the insert succeeded, check that the order is correct and increment the element count
488 *new_count = __TBB_FetchAndAddW((uintptr_t*)&my_element_count, uintptr_t(1));
489 return std::pair<iterator, bool>(iterator(pnode, this), true);
493 // If the insert failed (element already there), then delete the new one
495 return std::pair<iterator, bool>(end(), false);
499 // Insert a new dummy element, starting search at a parent dummy element
// Retry loop: scan forward from the parent dummy until the ordered position
// for order_key is found, then CAS the pre-built dummy node in.  On CAS
// failure the scan resumes from the last known-smaller position.
500 raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
502 raw_iterator last = raw_end();
503 raw_iterator where = it;
505 __TBB_ASSERT(where != last, "Invalid head node");
509 // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
510 nodeptr_t dummy_node = create_node(order_key);
514 __TBB_ASSERT(it != last, "Invalid head list node");
516 // If the head iterator is at the end of the list, or past the point where this dummy
517 // node needs to be inserted, then try to insert it.
518 if (where == last || get_order_key(where) > order_key)
520 __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");
522 // Try to insert it in the right place
523 nodeptr_t inserted_node = try_insert(it.get_node_ptr(), dummy_node, where.get_node_ptr());
525 if (inserted_node == dummy_node)
527 // Insertion succeeded, check the list for order violations
529 return raw_iterator(dummy_node);
533 // Insertion failed: either dummy node was inserted by another thread, or
534 // a real element was inserted at exactly the same place as dummy node.
535 // Proceed with the search from the previous location where order key was
536 // known to be larger (note: this is legal only because there is no safe
537 // concurrent erase operation supported).
543 else if (get_order_key(where) == order_key)
545 // Another dummy node with the same value found, discard the new one.
546 destroy_node(dummy_node);
550 // Move the iterator forward
557 // This erase function can handle both real and dummy nodes
// Unlinks *where from its predecessor; NOT safe against concurrent access
// (see the note at line 536 above).
558 void erase_node(raw_iterator previous, raw_const_iterator& where)
560 nodeptr_t pnode = (where++).get_node_ptr();
561 nodeptr_t prevnode = previous.get_node_ptr();
562 __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
563 prevnode->my_next = pnode->my_next;
568 // Erase the element (previous node needs to be passed because this is a forward only list)
569 iterator erase_node(raw_iterator previous, const_iterator where)
571 raw_const_iterator it = where;
572 erase_node(previous, it);
575 return get_iterator(first_real_iterator(it));
578 // Move all elements from the passed in split-ordered list to this one
// Copies each source node (dummy or real) into this list in order, erasing
// the original from the source as it goes.
579 void move_all(self_type& source)
581 raw_const_iterator first = source.raw_begin();
582 raw_const_iterator last = source.raw_end();
587 nodeptr_t previous_node = my_head;
588 raw_const_iterator begin_iterator = first++;
590 // Move all elements one by one, including dummy ones
591 for (raw_const_iterator it = first; it != last;)
593 nodeptr_t pnode = it.get_node_ptr();
595 nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
596 previous_node = try_insert(previous_node, dummy_node, NULL);
597 __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
598 raw_const_iterator where = it++;
599 source.erase_node(get_iterator(begin_iterator), where);
// Debug-only sanity walk: asserts that order keys are non-decreasing.
607 // Check the list for order violations
611 for (raw_iterator it = raw_begin(); it != raw_end(); ++it)
613 raw_iterator next_iterator = it;
616 __TBB_ASSERT(next_iterator == end() || next_iterator.get_node_ptr()->get_order_key() >= it.get_node_ptr()->get_order_key(), "!!! List order inconsistency !!!");
621 typename allocator_type::template rebind<node>::other my_node_allocator; // allocator object for nodes
622 size_type my_element_count; // Total item count, not counting dummy nodes
623 nodeptr_t my_head; // pointer to head node
// hash_compare: bundles the user's hash functor and key-equality functor into
// a single object.  operator()(key) hashes; operator()(k1, k2) returns the
// NEGATION of equality (true when the keys differ), matching the PPL
// convention this implementation is based on.
template<typename Key, typename Hasher, typename Key_equality>
class hash_compare
{
public:
    hash_compare() {}

    hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {}

    hash_compare(Hasher a_hasher, Key_equality a_keyeq)
        : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {}

    // Hash a key, widening the functor's result to size_t.
    size_t operator()(const Key& key) const {
        return ((size_t)my_hash_object(key));
    }

    // NOTE: inverted equality — yields true when key1 and key2 are NOT equal.
    bool operator()(const Key& key1, const Key& key2) const {
        return (!my_key_compare_object(key1, key2));
    }

    Hasher my_hash_object;              // The hash object
    Key_equality my_key_compare_object; // The equality comparator object
};
650 #pragma warning(push)
651 #pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it (for allow_multimapping)
654 template <typename Traits>
655 class concurrent_unordered_base : public Traits
659 typedef concurrent_unordered_base<Traits> self_type;
660 typedef typename Traits::value_type value_type;
661 typedef typename Traits::key_type key_type;
662 typedef typename Traits::hash_compare hash_compare;
663 typedef typename Traits::value_compare value_compare;
664 typedef typename Traits::allocator_type allocator_type;
665 typedef typename allocator_type::pointer pointer;
666 typedef typename allocator_type::const_pointer const_pointer;
667 typedef typename allocator_type::reference reference;
668 typedef typename allocator_type::const_reference const_reference;
669 typedef typename allocator_type::size_type size_type;
670 typedef typename allocator_type::difference_type difference_type;
671 typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
672 typedef typename solist_t::nodeptr_t nodeptr_t;
673 // Iterators that walk the entire split-order list, including dummy nodes
674 typedef typename solist_t::raw_iterator raw_iterator;
675 typedef typename solist_t::raw_const_iterator raw_const_iterator;
676 typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
677 typedef typename solist_t::const_iterator const_iterator;
678 typedef iterator local_iterator;
679 typedef const_iterator const_local_iterator;
680 using Traits::my_hash_compare;
681 using Traits::get_key;
682 using Traits::allow_multimapping;
685 typedef std::pair<iterator, iterator> pairii_t;
686 typedef std::pair<const_iterator, const_iterator> paircc_t;
688 static size_type const pointers_per_table = sizeof(size_type) * 8; // One bucket segment per bit
689 static const size_type initial_bucket_number = 8; // Initial number of buckets
690 static const size_type initial_bucket_load = 4; // Initial maximum number of elements per bucket
693 // Constructors/Destructors
694 concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
695 const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
696 : Traits(hc), my_solist(a),
697 my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
699 if( n_of_buckets == 0) ++n_of_buckets;
700 my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2
704 concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)
705 : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
708 internal_copy(right);
711 concurrent_unordered_base(const concurrent_unordered_base& right)
712 : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
715 internal_copy(right);
718 concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
720 internal_copy(right);
724 ~concurrent_unordered_base() {
725 // Delete all node segments
730 allocator_type get_allocator() const {
731 return my_solist.get_allocator();
734 // Size and capacity function
736 return my_solist.empty();
739 size_type size() const {
740 return my_solist.size();
743 size_type max_size() const {
744 return my_solist.max_size();
749 return my_solist.begin();
752 const_iterator begin() const {
753 return my_solist.begin();
757 return my_solist.end();
760 const_iterator end() const {
761 return my_solist.end();
764 const_iterator cbegin() const {
765 return my_solist.cbegin();
768 const_iterator cend() const {
769 return my_solist.cend();
772 // Parallel traversal support
773 class const_range_type : tbb::internal::no_assign {
774 const concurrent_unordered_base &my_table;
775 raw_const_iterator my_begin_node;
776 raw_const_iterator my_end_node;
777 mutable raw_const_iterator my_midpoint_node;
779 //! Type for size of a range
780 typedef typename concurrent_unordered_base::size_type size_type;
781 typedef typename concurrent_unordered_base::value_type value_type;
782 typedef typename concurrent_unordered_base::reference reference;
783 typedef typename concurrent_unordered_base::difference_type difference_type;
784 typedef typename concurrent_unordered_base::const_iterator iterator;
786 //! True if range is empty.
787 bool empty() const {return my_begin_node == my_end_node;}
789 //! True if range can be partitioned into two subranges.
790 bool is_divisible() const {
791 return my_midpoint_node != my_end_node;
794 const_range_type( const_range_type &r, split ) :
795 my_table(r.my_table), my_end_node(r.my_end_node)
797 r.my_end_node = my_begin_node = r.my_midpoint_node;
798 __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
799 __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
803 //! Init range with container and grainsize specified
804 const_range_type( const concurrent_unordered_base &a_table ) :
805 my_table(a_table), my_begin_node(a_table.my_solist.begin()),
806 my_end_node(a_table.my_solist.end())
810 iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); }
811 iterator end() const { return my_table.my_solist.get_iterator(my_end_node); }
812 //! The grain size for this range.
813 size_type grainsize() const { return 1; }
815 //! Set my_midpoint_node to point approximately half way between my_begin_node and my_end_node.
816 void set_midpoint() const {
817 if( my_begin_node == my_end_node ) // not divisible
818 my_midpoint_node = my_end_node;
820 sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);
821 sokey_t end_key = solist_t::get_safe_order_key(my_end_node);
822 size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
823 while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
824 my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket ));
825 if( my_midpoint_node == my_begin_node )
826 my_midpoint_node = my_end_node;
829 sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);
830 __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" );
831 __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" );
833 #endif // TBB_USE_ASSERT
838 class range_type : public const_range_type {
840 typedef typename concurrent_unordered_base::iterator iterator;
842 range_type( range_type &r, split ) : const_range_type( r, split() ) {}
843 //! Init range with container and grainsize specified
844 range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {}
846 iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); }
847 iterator end() const { return solist_t::get_iterator( const_range_type::end() ); }
851 return range_type( *this );
854 const_range_type range() const {
855 return const_range_type( *this );
859 std::pair<iterator, bool> insert(const value_type& value) {
860 return internal_insert(value);
863 iterator insert(const_iterator, const value_type& value) {
865 return insert(value).first;
868 template<class Iterator>
869 void insert(Iterator first, Iterator last) {
870 for (Iterator it = first; it != last; ++it)
874 iterator unsafe_erase(const_iterator where) {
875 return internal_erase(where);
878 iterator unsafe_erase(const_iterator first, const_iterator last) {
879 while (first != last)
880 unsafe_erase(first++);
881 return my_solist.get_iterator(first);
884 size_type unsafe_erase(const key_type& key) {
885 pairii_t where = equal_range(key);
886 size_type item_count = internal_distance(where.first, where.second);
887 unsafe_erase(where.first, where.second);
891 void swap(concurrent_unordered_base& right) {
892 if (this != &right) {
893 std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here
894 my_solist.swap(right.my_solist);
895 internal_swap_buckets(right);
896 std::swap(my_number_of_buckets, right.my_number_of_buckets);
897 std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);
909 // Initialize bucket 0
910 __TBB_ASSERT(my_buckets[0] == NULL, NULL);
911 raw_iterator dummy_node = my_solist.raw_begin();
912 set_bucket(0, dummy_node);
916 iterator find(const key_type& key) {
917 return internal_find(key);
920 const_iterator find(const key_type& key) const {
921 return const_cast<self_type*>(this)->internal_find(key);
924 size_type count(const key_type& key) const {
925 if(allow_multimapping) {
926 paircc_t answer = equal_range(key);
927 size_type item_count = internal_distance(answer.first, answer.second);
930 return const_cast<self_type*>(this)->internal_find(key) == end()?0:1;
934 std::pair<iterator, iterator> equal_range(const key_type& key) {
935 return internal_equal_range(key);
938 std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
939 return const_cast<self_type*>(this)->internal_equal_range(key);
942 // Bucket interface - for debugging
943 size_type unsafe_bucket_count() const {
944 return my_number_of_buckets;
947 size_type unsafe_max_bucket_count() const {
948 return segment_size(pointers_per_table-1);
951 size_type unsafe_bucket_size(size_type bucket) {
952 size_type item_count = 0;
953 if (is_initialized(bucket)) {
954 raw_iterator it = get_bucket(bucket);
956 for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)
962 size_type unsafe_bucket(const key_type& key) const {
963 sokey_t order_key = (sokey_t) my_hash_compare(key);
964 size_type bucket = order_key % my_number_of_buckets;
968 // If the bucket is initialized, return a first non-dummy element in it
969 local_iterator unsafe_begin(size_type bucket) {
970 if (!is_initialized(bucket))
973 raw_iterator it = get_bucket(bucket);
974 return my_solist.first_real_iterator(it);
977 // If the bucket is initialized, return a first non-dummy element in it
978 const_local_iterator unsafe_begin(size_type bucket) const
980 if (!is_initialized(bucket))
983 raw_const_iterator it = get_bucket(bucket);
984 return my_solist.first_real_iterator(it);
987 // @REVIEW: Takes O(n)
988 // Returns the iterator after the last non-dummy element in the bucket
989 local_iterator unsafe_end(size_type bucket)
991 if (!is_initialized(bucket))
994 raw_iterator it = get_bucket(bucket);
996 // Find the end of the bucket, denoted by the dummy element
998 while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
1000 // Return the first real element past the end of the bucket
1001 return my_solist.first_real_iterator(it);
1004 // @REVIEW: Takes O(n)
1005 // Returns the iterator after the last non-dummy element in the bucket
1006 const_local_iterator unsafe_end(size_type bucket) const
1008 if (!is_initialized(bucket))
1011 raw_const_iterator it = get_bucket(bucket);
1013 // Find the end of the bucket, denoted by the dummy element
1015 while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
1017 // Return the first real element past the end of the bucket
1018 return my_solist.first_real_iterator(it);
1021 const_local_iterator unsafe_cbegin(size_type bucket) const {
1022 return ((const self_type *) this)->begin();
1025 const_local_iterator unsafe_cend(size_type bucket) const {
1026 return ((const self_type *) this)->end();
1030 float load_factor() const {
1031 return (float) size() / (float) unsafe_bucket_count();
1034 float max_load_factor() const {
1035 return my_maximum_bucket_size;
1038 void max_load_factor(float newmax) {
1039 if (newmax != newmax || newmax < 0)
1040 tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
1041 my_maximum_bucket_size = newmax;
1044 // This function is a noop, because the underlying split-ordered list
1045 // is already sorted, so an increase in the bucket number will be
1046 // reflected next time this bucket is touched.
1047 void rehash(size_type buckets) {
1048 size_type current_buckets = my_number_of_buckets;
1049 if (current_buckets >= buckets)
1051 my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2
1056 // Initialize the hash and keep the first bucket open
1057 void internal_init() {
1058 // Allocate an array of segment pointers
1059 memset(my_buckets, 0, pointers_per_table * sizeof(void *));
1061 // Initialize bucket 0
1062 raw_iterator dummy_node = my_solist.raw_begin();
1063 set_bucket(0, dummy_node);
1066 void internal_clear() {
1067 for (size_type index = 0; index < pointers_per_table; ++index) {
1068 if (my_buckets[index] != NULL) {
1069 size_type sz = segment_size(index);
1070 for (size_type index2 = 0; index2 < sz; ++index2)
1071 my_allocator.destroy(&my_buckets[index][index2]);
1072 my_allocator.deallocate(my_buckets[index], sz);
1073 my_buckets[index] = 0;
// Copies configuration and all elements from 'right' into *this.
// The __TBB_TRY opening and the body of the __TBB_CATCH handler are elided
// in this view; presumably the handler cleans up a partial copy — confirm
// against the full file.
1078 void internal_copy(const self_type& right) {
1081 my_maximum_bucket_size = right.my_maximum_bucket_size;
1082 my_number_of_buckets = right.my_number_of_buckets;
1085 insert(right.begin(), right.end());
1086 my_hash_compare = right.my_hash_compare;
1087 } __TBB_CATCH(...) {
// Swaps the segment-pointer tables of the two containers, segment by
// segment. Not concurrency safe (no synchronization on either table).
1093 void internal_swap_buckets(concurrent_unordered_base& right)
1095 // Swap all node segments
1096 for (size_type index = 0; index < pointers_per_table; ++index)
1098 raw_iterator * iterator_pointer = my_buckets[index];
1099 my_buckets[index] = right.my_buckets[index];
1100 right.my_buckets[index] = iterator_pointer;
// Counts the elements in [first, last) by linear traversal — O(n).
// The counter declaration and the return statement are elided in this view.
1105 size_type internal_distance(const_iterator first, const_iterator last) const
1109 for (const_iterator it = first; it != last; ++it)
1115 // Insert an element in the hash given its value
// Returns (iterator, true) on success, or (iterator-to-existing, false)
// when allow_multimapping is off and an equal key is already present.
// NOTE(review): the search loop header and iterator-advance statements are
// elided in this view — the visible branches are the loop body; confirm the
// loop structure against the full file.
1116 std::pair<iterator, bool> internal_insert(const value_type& value)
// Hash the key, then map it to a bucket by modulo of the current table size.
1118 sokey_t order_key = (sokey_t) my_hash_compare(get_key(value));
1119 size_type bucket = order_key % my_number_of_buckets;
1121 // If bucket is empty, initialize it first
1122 if (!is_initialized(bucket))
1123 init_bucket(bucket);
1125 size_type new_count;
// Convert the raw hash into a split-order "regular" key (bit-reversed,
// low bit set) so real elements sort between the dummy bucket nodes.
1126 order_key = split_order_key_regular(order_key);
1127 raw_iterator it = get_bucket(bucket);
1128 raw_iterator last = my_solist.raw_end();
1129 raw_iterator where = it;
1131 __TBB_ASSERT(where != last, "Invalid head node");
1133 // First node is a dummy node
// Reached a position whose order key is larger (or the list end):
// this is the insertion point.
1138 if (where == last || solist_t::get_order_key(where) > order_key)
1140 // Try to insert it in the right place
1141 std::pair<iterator, bool> result = my_solist.try_insert(it, where, value, order_key, &new_count);
1145 // Insertion succeeded, adjust the table size, if needed
1146 adjust_table_size(new_count, my_number_of_buckets);
1151 // Insertion failed: either the same node was inserted by another thread, or
1152 // another element was inserted at exactly the same place as this node.
1153 // Proceed with the search from the previous location where order key was
1154 // known to be larger (note: this is legal only because there is no safe
1155 // concurrent erase operation supported).
// Duplicate-key check: same order key AND keys compare equal.
1161 else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && my_hash_compare(get_key(*where), get_key(value)) == 0)
1163 // Element already in the list, return it
1164 return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
1167 // Move the iterator forward
1173 // Find the element in the split-ordered list
// Returns an iterator to the element with the given key, or (per the elided
// fall-through, presumably) end() when not found — confirm against the full file.
1174 iterator internal_find(const key_type& key)
1176 sokey_t order_key = (sokey_t) my_hash_compare(key);
1177 size_type bucket = order_key % my_number_of_buckets;
1179 // If bucket is empty, initialize it first
1180 if (!is_initialized(bucket))
1181 init_bucket(bucket);
1183 order_key = split_order_key_regular(order_key);
1184 raw_iterator last = my_solist.raw_end();
// Scan forward from the bucket's dummy node; the list is ordered by
// split-order key, so the scan can stop as soon as a larger key is seen.
1186 for (raw_iterator it = get_bucket(bucket); it != last; ++it)
1188 if (solist_t::get_order_key(it) > order_key)
1190 // If the order key is smaller than the current order key, the element
1191 // is not in the hash.
1194 else if (solist_t::get_order_key(it) == order_key)
1196 // The fact that order keys match does not mean that the element is found.
1197 // Key function comparison has to be performed to check whether this is the
1198 // right element. If not, keep searching while order key is the same.
1199 if (!my_hash_compare(get_key(*it), key))
1200 return my_solist.get_iterator(it);
1207 // Erase an element from the list. This is not a concurrency safe function.
// Locates the node for 'it' by walking from its bucket's dummy head (the
// previous-node handle is needed for unlinking), then erases it.
// NOTE(review): the traversal loop header and advance statements are elided
// in this view; confirm against the full file.
1208 iterator internal_erase(const_iterator it)
1210 key_type key = get_key(*it);
1211 sokey_t order_key = (sokey_t) my_hash_compare(key);
1212 size_type bucket = order_key % my_number_of_buckets;
1214 // If bucket is empty, initialize it first
1215 if (!is_initialized(bucket))
1216 init_bucket(bucket);
1218 order_key = split_order_key_regular(order_key);
1220 raw_iterator previous = get_bucket(bucket);
1221 raw_iterator last = my_solist.raw_end();
1222 raw_iterator where = previous;
1224 __TBB_ASSERT(where != last, "Invalid head node");
1226 // First node is a dummy node
// Found the target node: unlink it using the tracked predecessor.
1232 else if (my_solist.get_iterator(where) == it)
1233 return my_solist.erase_node(previous, it);
1235 // Move the iterator forward
1241 // Return the [begin, end) pair of iterators with the same key values.
1242 // This operation makes sense only if mapping is many-to-one.
1243 pairii_t internal_equal_range(const key_type& key)
1245 sokey_t order_key = (sokey_t) my_hash_compare(key);
1246 size_type bucket = order_key % my_number_of_buckets;
1248 // If bucket is empty, initialize it first
1249 if (!is_initialized(bucket))
1250 init_bucket(bucket);
1252 order_key = split_order_key_regular(order_key);
1253 raw_iterator end_it = my_solist.raw_end();
// Ordered scan from the bucket head; stop early once keys exceed the target.
1255 for (raw_iterator it = get_bucket(bucket); it != end_it; ++it)
1257 if (solist_t::get_order_key(it) > order_key)
1259 // There is no element with the given key
1260 return pairii_t(end(), end());
1262 else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))
// First match found; extend 'last' over the run of equal keys
// (only when multimapping allows duplicates).
1264 iterator first = my_solist.get_iterator(it);
1265 iterator last = first;
1266 do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) );
1267 return pairii_t(first, last);
// Reached the end of the list without finding the key.
1271 return pairii_t(end(), end());
// Lazily initializes a bucket: recursively ensures the parent bucket exists,
// then splices a dummy node for this bucket into the split-ordered list
// after the parent's dummy node. Recursion depth is bounded by the number of
// set bits in the bucket index (each step clears the most significant bit).
1275 void init_bucket(size_type bucket)
1277 // Bucket 0 has no parent.
1278 __TBB_ASSERT( bucket != 0, "The first bucket must always be initialized");
1280 size_type parent_bucket = get_parent(bucket);
1282 // All parent_bucket buckets have to be initialized before this bucket is
1283 if (!is_initialized(parent_bucket))
1284 init_bucket(parent_bucket);
1286 raw_iterator parent = get_bucket(parent_bucket);
1288 // Create a dummy first node in this bucket
1289 raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
1290 set_bucket(bucket, dummy_node);
// Doubles the bucket count when the observed load factor exceeds the
// configured maximum. The CAS succeeds only if no other thread resized in
// the meantime; a failed CAS is simply ignored (someone else already grew it).
1293 void adjust_table_size(size_type total_elements, size_type current_size)
1295 // Grow the table by a factor of 2 if possible and needed
1296 if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
1298 // Double the size of the hash only if size has not changed inbetween loads
1299 __TBB_CompareAndSwapW((uintptr_t*)&my_number_of_buckets, uintptr_t(2u*current_size), uintptr_t(current_size) );
1300 //Simple "my_number_of_buckets.compare_and_swap( current_size<<1, current_size );" does not work for VC8
1301 //due to overzealous compiler warnings in /Wp64 mode
// Computes the parent bucket index by clearing the bucket's most
// significant set bit (split-ordered-list bucket hierarchy).
1305 size_type get_parent(size_type bucket) const
1307 // Unsets bucket's most significant turned-on bit
1308 size_type msb = __TBB_Log2((uintptr_t)bucket);
1309 return bucket & ~(size_type(1) << msb);
1313 // Dynamic sized array (segments)
1314 //! @return segment index of given index in the array
// index|1 makes indices 0 and 1 both map to segment 0 (segment 0 holds two slots).
1315 static size_type segment_index_of( size_type index ) {
1316 return size_type( __TBB_Log2( uintptr_t(index|1) ) );
1319 //! @return the first array index of given segment
// 2^k with the low bit cleared, so segment 0 starts at index 0 and
// segment k>0 starts at 2^k.
1320 static size_type segment_base( size_type k ) {
1321 return (size_type(1)<<k & ~size_type(1));
1324 //! @return segment size
// Segment 0 holds 2 slots (it covers indices 0 and 1); segment k holds 2^k.
1325 static size_type segment_size( size_type k ) {
1326 return k? size_type(1)<<k : 2;
// Returns the dummy-node iterator stored for the given bucket.
// Precondition: the bucket's segment is already allocated (asserted).
1329 raw_iterator get_bucket(size_type bucket) const {
1330 size_type segment = segment_index_of(bucket);
1331 bucket -= segment_base(segment);
1332 __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
1333 return my_buckets[segment][bucket];
// Stores the dummy-node iterator for a bucket, allocating its segment on
// first use. The segment pointer is published with a CAS so that when two
// threads race to allocate the same segment, the loser frees its copy.
1336 void set_bucket(size_type bucket, raw_iterator dummy_head) {
1337 size_type segment = segment_index_of(bucket);
1338 bucket -= segment_base(segment);
1340 if (my_buckets[segment] == NULL) {
1341 size_type sz = segment_size(segment);
1342 raw_iterator * new_segment = my_allocator.allocate(sz);
// Zero-fill so is_initialized() sees NULL node pointers for untouched slots.
1343 std::memset(new_segment, 0, sz*sizeof(raw_iterator));
// Non-zero return means another thread installed a segment first.
1345 if (__TBB_CompareAndSwapW((void *) &my_buckets[segment], (uintptr_t)new_segment, 0) != 0)
1346 my_allocator.deallocate(new_segment, sz);
1349 my_buckets[segment][bucket] = dummy_head;
// True when the bucket's segment is allocated AND its slot holds a real
// dummy node (zero-filled slots have a NULL node pointer).
1352 bool is_initialized(size_type bucket) const {
1353 size_type segment = segment_index_of(bucket);
1354 bucket -= segment_base(segment);
1356 if (my_buckets[segment] == NULL)
1359 raw_iterator it = my_buckets[segment][bucket];
1360 return (it.get_node_ptr() != NULL);
1363 // Utilities for keys
1365 // A regular order key has its original hash value reversed and the last bit set
// Bit-reversal makes buckets recursively split; the set low bit
// distinguishes real elements from dummy bucket nodes.
1366 sokey_t split_order_key_regular(sokey_t order_key) const {
1367 return __TBB_ReverseBits(order_key) | 0x1;
1370 // A dummy order key has its original hash value reversed and the last bit unset
// A dummy key therefore sorts immediately before every regular key of the
// same reversed hash value.
1371 sokey_t split_order_key_dummy(sokey_t order_key) const {
1372 return __TBB_ReverseBits(order_key) & ~(0x1);
// Data members. my_number_of_buckets and the segment pointers are atomic
// because they are read/CAS-updated concurrently during insert/resize.
1376 atomic<size_type> my_number_of_buckets; // Current table size
1377 solist_t my_solist; // List where all the elements are kept
1378 typename allocator_type::template rebind<raw_iterator>::other my_allocator; // Allocator object for segments
1379 float my_maximum_bucket_size; // Maximum size of the bucket
1380 atomic<raw_iterator*> my_buckets[pointers_per_table]; // The segment table
1383 #pragma warning(pop) // warning 4127 -- while (true) has a constant expression in it
// Multiplicative-hashing constant, selected per pointer width
// (presumably derived from the golden ratio, Knuth-style — not verifiable here).
1387 static const size_t hash_multiplier = sizeof(size_t)==4? 2654435769U : 11400714819323198485ULL;
1388 } // namespace internal
1390 //! Hasher functions
// Generic overload: multiplicative hash of any value convertible to size_t.
1391 template<typename T>
1392 inline size_t tbb_hasher( const T& t ) {
1393 return static_cast<size_t>( t ) * internal::hash_multiplier;
// Pointer overload: folds the high bits into the low ones to compensate for
// pointer alignment (low bits of aligned addresses are mostly zero).
1395 template<typename P>
1396 inline size_t tbb_hasher( P* ptr ) {
1397 size_t const h = reinterpret_cast<size_t>( ptr );
1398 return (h >> 3) ^ h;
// String overload: per-character multiply-and-xor accumulation.
// NOTE(review): the declaration/initialization of 'h' and the return
// statement are elided in this view; confirm against the full file.
1400 template<typename E, typename S, typename A>
1401 inline size_t tbb_hasher( const std::basic_string<E,S,A>& s ) {
1403 for( const E* c = s.c_str(); *c; ++c )
1404 h = static_cast<size_t>(*c) ^ (h * internal::hash_multiplier);
// Pair overload: XOR-combines the hashes of both components.
1407 template<typename F, typename S>
1408 inline size_t tbb_hasher( const std::pair<F,S>& p ) {
1409 return tbb_hasher(p.first) ^ tbb_hasher(p.second);
1411 } // namespace interface5
1412 using interface5::tbb_hasher;
1415 // Template class for hash compare
// Hash functor: delegates to tbb_hasher. The enclosing class definition is
// partially elided in this view.
1416 template<typename Key>
1422 size_t operator()(const Key& key) const
1424 return tbb_hasher(key);
1429 #endif// __TBB__concurrent_unordered_impl_H