2 Copyright 2005-2010 Intel Corporation. All Rights Reserved.
4 This file is part of Threading Building Blocks.
6 Threading Building Blocks is free software; you can redistribute it
7 and/or modify it under the terms of the GNU General Public License
8 version 2 as published by the Free Software Foundation.
10 Threading Building Blocks is distributed in the hope that it will be
11 useful, but WITHOUT ANY WARRANTY; without even the implied warranty
12 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with Threading Building Blocks; if not, write to the Free Software
17 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 As a special exception, you may use this file as part of a free software
20 library without restriction. Specifically, if other files instantiate
21 templates or use macros or inline functions from this file, or you compile
22 this file and link it with other files to produce an executable, this
23 file does not by itself cause the resulting executable to be covered by
24 the GNU General Public License. This exception does not however
25 invalidate any other reasons why the executable file might be covered by
26 the GNU General Public License.
29 /* Container implementations in this header are based on PPL implementations
30 provided by Microsoft. */
32 #ifndef __TBB_concurrent_unordered_internal_H
33 #define __TBB_concurrent_unordered_internal_H
35 #include "tbb_stddef.h"
37 #if !TBB_USE_EXCEPTIONS && _MSC_VER
38 // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
39 #pragma warning (push)
40 #pragma warning (disable: 4530)
44 #include <utility> // Need std::pair
46 #include <string> // For tbb_hasher
47 #include <cstring> // Need std::memset
49 #if !TBB_USE_EXCEPTIONS && _MSC_VER
53 #include "tbb_machine.h"
54 #include "tbb_exception.h"
55 #include "tbb_allocator.h"
58 namespace interface5 {
// Forward declarations for the split-ordered list and the common base of
// the concurrent unordered containers, both defined later in this header.
template <typename T, typename Allocator>
class split_ordered_list;
template <typename Traits>
class concurrent_unordered_base;
// Forward list iterators (without skipping dummy elements)
//
// Raw iterator over the underlying singly-linked list: it visits every node,
// including the dummy (bucket-marker) nodes. Solist supplies the node and
// value typedefs; Value is const-qualified for the const flavor.
template<class Solist, typename Value>
class flist_iterator : public std::iterator<std::forward_iterator_tag, Value>
{
    template <typename T, typename Allocator>
    friend class split_ordered_list;
    template <typename Traits>
    friend class concurrent_unordered_base;
    template<class M, typename V>
    friend class flist_iterator;

    typedef typename Solist::nodeptr_t nodeptr_t;
public:
    typedef typename Solist::value_type value_type;
    typedef typename Solist::difference_type difference_type;
    typedef typename Solist::pointer pointer;
    typedef typename Solist::reference reference;

    flist_iterator() : my_node_ptr(0) {}
    // Converting copy from the non-const iterator (also serves as copy ctor
    // for the non-const flavor itself).
    flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )
        : my_node_ptr(other.my_node_ptr) {}

    reference operator*() const { return my_node_ptr->my_element; }
    pointer operator->() const { return &**this; }

    flist_iterator& operator++() {
        my_node_ptr = my_node_ptr->my_next;
        return *this;
    }

    flist_iterator operator++(int) {
        flist_iterator tmp = *this;
        ++*this;
        return tmp;
    }

protected:
    // Containers construct raw iterators directly from node pointers.
    flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
    nodeptr_t get_node_ptr() const { return my_node_ptr; }

    nodeptr_t my_node_ptr;

    template<typename M, typename T, typename U>
    friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );
    template<typename M, typename T, typename U>
    friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );
};
115 template<typename Solist, typename T, typename U>
116 bool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {
117 return i.my_node_ptr == j.my_node_ptr;
119 template<typename Solist, typename T, typename U>
120 bool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {
121 return i.my_node_ptr != j.my_node_ptr;
124 // Split-order list iterators, needed to skip dummy elements
125 template<class Solist, typename Value>
126 class solist_iterator : public flist_iterator<Solist, Value>
128 typedef flist_iterator<Solist, Value> base_type;
129 typedef typename Solist::nodeptr_t nodeptr_t;
130 using base_type::get_node_ptr;
131 template <typename T, typename Allocator>
132 friend class split_ordered_list;
133 template<class M, typename V>
134 friend class solist_iterator;
135 template<typename M, typename T, typename U>
136 friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );
137 template<typename M, typename T, typename U>
138 friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
140 const Solist *my_list_ptr;
141 solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}
144 typedef typename Solist::value_type value_type;
145 typedef typename Solist::difference_type difference_type;
146 typedef typename Solist::pointer pointer;
147 typedef typename Solist::reference reference;
150 solist_iterator(const solist_iterator<Solist, typename Solist::value_type> &other )
151 : base_type(other), my_list_ptr(other.my_list_ptr) {}
153 reference operator*() const {
154 return this->base_type::operator*();
157 pointer operator->() const {
161 solist_iterator& operator++() {
162 do ++(*(base_type *)this);
163 while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
168 solist_iterator operator++(int) {
169 solist_iterator tmp = *this;
171 while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
177 template<typename Solist, typename T, typename U>
178 bool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {
179 return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;
181 template<typename Solist, typename T, typename U>
182 bool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {
183 return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
// Forward type and class definitions
// Order key type for the split-ordered list. The low bit distinguishes real
// elements (odd keys) from dummy bucket markers (even keys) — see node::is_dummy.
typedef size_t sokey_t;
189 // Forward list in which elements are sorted in a split-order
190 template <typename T, typename Allocator>
191 class split_ordered_list
194 typedef split_ordered_list<T, Allocator> self_type;
195 typedef typename Allocator::template rebind<T>::other allocator_type;
197 typedef node *nodeptr_t;
199 typedef typename allocator_type::size_type size_type;
200 typedef typename allocator_type::difference_type difference_type;
201 typedef typename allocator_type::pointer pointer;
202 typedef typename allocator_type::const_pointer const_pointer;
203 typedef typename allocator_type::reference reference;
204 typedef typename allocator_type::const_reference const_reference;
205 typedef typename allocator_type::value_type value_type;
207 typedef solist_iterator<self_type, const value_type> const_iterator;
208 typedef solist_iterator<self_type, value_type> iterator;
209 typedef flist_iterator<self_type, const value_type> raw_const_iterator;
210 typedef flist_iterator<self_type, value_type> raw_iterator;
212 // Node that holds the element in a split-ordered list
213 struct node : tbb::internal::no_assign
215 // Initialize the node with the given order key
216 void init(sokey_t order_key) {
217 my_order_key = order_key;
221 // Return the order key (needed for hashing)
222 sokey_t get_order_key() const { // TODO: remove
226 // Inserts the new element in the list in an atomic fashion
227 nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
229 // Try to change the next pointer on the current element to a new element, only if it still points to the cached next
230 nodeptr_t exchange_node = (nodeptr_t) __TBB_CompareAndSwapW((void *) &my_next, (uintptr_t)new_node, (uintptr_t)current_node);
232 if (exchange_node == current_node) // TODO: why this branch?
234 // Operation succeeded, return the new node
239 // Operation failed, return the "interfering" node
240 return exchange_node;
244 // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets
245 // in the hash table to quickly index into the right subsection of the split-ordered list.
246 bool is_dummy() const {
247 return (my_order_key & 0x1) == 0;
251 nodeptr_t my_next; // Next element in the list
252 value_type my_element; // Element storage
253 sokey_t my_order_key; // Order key for this element
256 // Allocate a new node with the given order key and value
257 nodeptr_t create_node(sokey_t order_key, const T &value) {
258 nodeptr_t pnode = my_node_allocator.allocate(1);
261 new(static_cast<void*>(&pnode->my_element)) T(value);
262 pnode->init(order_key);
264 my_node_allocator.deallocate(pnode, 1);
271 // Allocate a new node with the given order key; used to allocate dummy nodes
272 nodeptr_t create_node(sokey_t order_key) {
273 nodeptr_t pnode = my_node_allocator.allocate(1);
276 new(static_cast<void*>(&pnode->my_element)) T();
277 pnode->init(order_key);
279 my_node_allocator.deallocate(pnode, 1);
286 split_ordered_list(allocator_type a = allocator_type())
287 : my_node_allocator(a), my_element_count(0)
289 // Immediately allocate a dummy node with order key of 0. This node
290 // will always be the head of the list.
291 my_head = create_node(0);
294 ~split_ordered_list()
299 // Remove the head element which is not cleared by clear()
300 nodeptr_t pnode = my_head;
303 __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node");
308 // Common forward list functions
310 allocator_type get_allocator() const {
311 return (my_node_allocator);
316 nodeptr_t pnode = my_head;
318 __TBB_ASSERT(my_head != NULL, "Invalid head list node");
319 pnext = pnode->my_next;
320 pnode->my_next = NULL;
323 while (pnode != NULL)
325 pnext = pnode->my_next;
330 my_element_count = 0;
333 // Returns a first non-dummy element in the SOL
335 return first_real_iterator(raw_begin());
338 // Returns a first non-dummy element in the SOL
339 const_iterator begin() const {
340 return first_real_iterator(raw_begin());
344 return (iterator(0, this));
347 const_iterator end() const {
348 return (const_iterator(0, this));
351 const_iterator cbegin() const {
352 return (((const self_type *)this)->begin());
355 const_iterator cend() const {
356 return (((const self_type *)this)->end());
359 // Checks if the number of elements (non-dummy) is 0
361 return (my_element_count == 0);
364 // Returns the number of non-dummy elements in the list
365 size_type size() const {
366 return my_element_count;
369 // Returns the maximum size of the list, determined by the allocator
370 size_type max_size() const {
371 return my_node_allocator.max_size();
374 // Swaps 'this' list with the passed in one
375 void swap(self_type& other)
383 std::swap(my_element_count, other.my_element_count);
384 std::swap(my_head, other.my_head);
387 // Split-order list functions
389 // Returns a first element in the SOL, which is always a dummy
390 raw_iterator raw_begin() {
391 return raw_iterator(my_head);
394 // Returns a first element in the SOL, which is always a dummy
395 raw_const_iterator raw_begin() const {
396 return raw_const_iterator(my_head);
399 raw_iterator raw_end() {
400 return raw_iterator(0);
403 raw_const_iterator raw_end() const {
404 return raw_const_iterator(0);
407 static sokey_t get_order_key(const raw_const_iterator& it) {
408 return it.get_node_ptr()->get_order_key();
411 static sokey_t get_safe_order_key(const raw_const_iterator& it) {
412 if( !it.get_node_ptr() ) return sokey_t(~0U);
413 return it.get_node_ptr()->get_order_key();
416 // Returns a public iterator version of the internal iterator. Public iterator must not
417 // be a dummy private iterator.
418 iterator get_iterator(raw_iterator it) {
419 __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
420 return iterator(it.get_node_ptr(), this);
423 // Returns a public iterator version of the internal iterator. Public iterator must not
424 // be a dummy private iterator.
425 const_iterator get_iterator(raw_const_iterator it) const {
426 __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
427 return const_iterator(it.get_node_ptr(), this);
430 // Returns a non-const version of the raw_iterator
431 raw_iterator get_iterator(raw_const_iterator it) {
432 return raw_iterator(it.get_node_ptr());
435 // Returns a non-const version of the iterator
436 static iterator get_iterator(const_iterator it) {
437 return iterator(it.my_node_ptr, it.my_list_ptr);
440 // Returns a public iterator version of a first non-dummy internal iterator at or after
441 // the passed in internal iterator.
442 iterator first_real_iterator(raw_iterator it)
444 // Skip all dummy, internal only iterators
445 while (it != raw_end() && it.get_node_ptr()->is_dummy())
448 return iterator(it.get_node_ptr(), this);
451 // Returns a public iterator version of a first non-dummy internal iterator at or after
452 // the passed in internal iterator.
453 const_iterator first_real_iterator(raw_const_iterator it) const
455 // Skip all dummy, internal only iterators
456 while (it != raw_end() && it.get_node_ptr()->is_dummy())
459 return const_iterator(it.get_node_ptr(), this);
462 // Erase an element using the allocator
463 void destroy_node(nodeptr_t pnode) {
464 my_node_allocator.destroy(pnode);
465 my_node_allocator.deallocate(pnode, 1);
468 // Try to insert a new element in the list. If insert fails, return the node that
469 // was inserted instead.
470 nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
471 new_node->my_next = current_node;
472 return previous->atomic_set_next(new_node, current_node);
475 // Insert a new element between passed in iterators
476 std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, const value_type &value, sokey_t order_key, size_type *new_count)
478 nodeptr_t pnode = create_node(order_key, value);
479 nodeptr_t inserted_node = try_insert(it.get_node_ptr(), pnode, next.get_node_ptr());
481 if (inserted_node == pnode)
483 // If the insert succeeded, check that the order is correct and increment the element count
485 *new_count = __TBB_FetchAndAddW((uintptr_t*)&my_element_count, uintptr_t(1));
486 return std::pair<iterator, bool>(iterator(pnode, this), true);
490 // If the insert failed (element already there), then delete the new one
492 return std::pair<iterator, bool>(end(), false);
496 // Insert a new dummy element, starting search at a parent dummy element
497 raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
499 raw_iterator last = raw_end();
500 raw_iterator where = it;
502 __TBB_ASSERT(where != last, "Invalid head node");
506 // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
507 nodeptr_t dummy_node = create_node(order_key);
511 __TBB_ASSERT(it != last, "Invalid head list node");
513 // If the head iterator is at the end of the list, or past the point where this dummy
514 // node needs to be inserted, then try to insert it.
515 if (where == last || get_order_key(where) > order_key)
517 __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");
519 // Try to insert it in the right place
520 nodeptr_t inserted_node = try_insert(it.get_node_ptr(), dummy_node, where.get_node_ptr());
522 if (inserted_node == dummy_node)
524 // Insertion succeeded, check the list for order violations
526 return raw_iterator(dummy_node);
530 // Insertion failed: either dummy node was inserted by another thread, or
531 // a real element was inserted at exactly the same place as dummy node.
532 // Proceed with the search from the previous location where order key was
533 // known to be larger (note: this is legal only because there is no safe
534 // concurrent erase operation supported).
540 else if (get_order_key(where) == order_key)
542 // Another dummy node with the same value found, discard the new one.
543 destroy_node(dummy_node);
547 // Move the iterator forward
554 // This erase function can handle both real and dummy nodes
555 void erase_node(raw_iterator previous, raw_const_iterator& where)
557 nodeptr_t pnode = (where++).get_node_ptr();
558 nodeptr_t prevnode = previous.get_node_ptr();
559 __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
560 prevnode->my_next = pnode->my_next;
565 // Erase the element (previous node needs to be passed because this is a forward only list)
566 iterator erase_node(raw_iterator previous, const_iterator where)
568 raw_const_iterator it = where;
569 erase_node(previous, it);
572 return get_iterator(first_real_iterator(it));
575 // Move all elements from the passed in split-ordered list to this one
576 void move_all(self_type& source)
578 raw_const_iterator first = source.raw_begin();
579 raw_const_iterator last = source.raw_end();
584 nodeptr_t previous_node = my_head;
585 raw_const_iterator begin_iterator = first++;
587 // Move all elements one by one, including dummy ones
588 for (raw_const_iterator it = first; it != last;)
590 nodeptr_t pnode = it.get_node_ptr();
592 nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
593 previous_node = try_insert(previous_node, dummy_node, NULL);
594 __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
595 raw_const_iterator where = it++;
596 source.erase_node(get_iterator(begin_iterator), where);
604 // Check the list for order violations
608 for (raw_iterator it = raw_begin(); it != raw_end(); ++it)
610 raw_iterator next_iterator = it;
613 __TBB_ASSERT(next_iterator == end() || next_iterator.get_node_ptr()->get_order_key() >= it.get_node_ptr()->get_order_key(), "!!! List order inconsistency !!!");
618 typename allocator_type::template rebind<node>::other my_node_allocator; // allocator object for nodes
619 size_type my_element_count; // Total item count, not counting dummy nodes
620 nodeptr_t my_head; // pointer to head node
623 // Template class for hash compare
624 template<typename Key, typename Hasher, typename Key_equality>
630 hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {}
632 hash_compare(Hasher a_hasher, Key_equality a_keyeq) : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {}
634 size_t operator()(const Key& key) const {
635 return ((size_t)my_hash_object(key));
638 bool operator()(const Key& key1, const Key& key2) const {
639 return (!my_key_compare_object(key1, key2));
642 Hasher my_hash_object; // The hash object
643 Key_equality my_key_compare_object; // The equality comparator object
647 #pragma warning(push)
648 #pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it (for allow_multimapping)
651 template <typename Traits>
652 class concurrent_unordered_base : public Traits
656 typedef concurrent_unordered_base<Traits> self_type;
657 typedef typename Traits::value_type value_type;
658 typedef typename Traits::key_type key_type;
659 typedef typename Traits::hash_compare hash_compare;
660 typedef typename Traits::value_compare value_compare;
661 typedef typename Traits::allocator_type allocator_type;
662 typedef typename allocator_type::pointer pointer;
663 typedef typename allocator_type::const_pointer const_pointer;
664 typedef typename allocator_type::reference reference;
665 typedef typename allocator_type::const_reference const_reference;
666 typedef typename allocator_type::size_type size_type;
667 typedef typename allocator_type::difference_type difference_type;
668 typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
669 typedef typename solist_t::nodeptr_t nodeptr_t;
670 // Iterators that walk the entire split-order list, including dummy nodes
671 typedef typename solist_t::raw_iterator raw_iterator;
672 typedef typename solist_t::raw_const_iterator raw_const_iterator;
673 typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
674 typedef typename solist_t::const_iterator const_iterator;
675 typedef iterator local_iterator;
676 typedef const_iterator const_local_iterator;
677 using Traits::my_hash_compare;
678 using Traits::get_key;
679 using Traits::allow_multimapping;
682 typedef std::pair<iterator, iterator> pairii_t;
683 typedef std::pair<const_iterator, const_iterator> paircc_t;
685 static size_type const pointers_per_table = sizeof(size_type) * 8; // One bucket segment per bit
686 static const size_type initial_bucket_number = 8; // Initial number of buckets
687 static const size_type initial_bucket_load = 4; // Initial maximum number of elements per bucket
690 // Constructors/Destructors
691 concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
692 const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
693 : Traits(hc), my_number_of_buckets(n_of_buckets), my_solist(a),
694 my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
699 concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)
700 : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
702 internal_copy(right);
705 concurrent_unordered_base(const concurrent_unordered_base& right)
706 : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
709 internal_copy(right);
712 concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
714 internal_copy(right);
718 ~concurrent_unordered_base() {
719 // Delete all node segments
724 allocator_type get_allocator() const {
725 return my_solist.get_allocator();
728 // Size and capacity function
730 return my_solist.empty();
733 size_type size() const {
734 return my_solist.size();
737 size_type max_size() const {
738 return my_solist.max_size();
743 return my_solist.begin();
746 const_iterator begin() const {
747 return my_solist.begin();
751 return my_solist.end();
754 const_iterator end() const {
755 return my_solist.end();
758 const_iterator cbegin() const {
759 return my_solist.cbegin();
762 const_iterator cend() const {
763 return my_solist.cend();
766 // Parallel traversal support
767 class const_range_type : tbb::internal::no_assign {
768 const concurrent_unordered_base &my_table;
769 raw_const_iterator my_begin_node;
770 raw_const_iterator my_end_node;
771 mutable raw_const_iterator my_midpoint_node;
773 //! Type for size of a range
774 typedef typename concurrent_unordered_base::size_type size_type;
775 typedef typename concurrent_unordered_base::value_type value_type;
776 typedef typename concurrent_unordered_base::reference reference;
777 typedef typename concurrent_unordered_base::difference_type difference_type;
778 typedef typename concurrent_unordered_base::const_iterator iterator;
780 //! True if range is empty.
781 bool empty() const {return my_begin_node == my_end_node;}
783 //! True if range can be partitioned into two subranges.
784 bool is_divisible() const {
785 return my_midpoint_node != my_end_node;
788 const_range_type( const_range_type &r, split ) :
789 my_table(r.my_table), my_end_node(r.my_end_node)
791 r.my_end_node = my_begin_node = r.my_midpoint_node;
792 __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
793 __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
797 //! Init range with container and grainsize specified
798 const_range_type( const concurrent_unordered_base &a_table ) :
799 my_table(a_table), my_begin_node(a_table.my_solist.begin()),
800 my_end_node(a_table.my_solist.end())
804 iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); }
805 iterator end() const { return my_table.my_solist.get_iterator(my_end_node); }
806 //! The grain size for this range.
807 size_type grainsize() const { return 1; }
809 //! Set my_midpoint_node to point approximately half way between my_begin_node and my_end_node.
810 void set_midpoint() const {
811 if( my_begin_node == my_end_node ) // not divisible
812 my_midpoint_node = my_end_node;
814 sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);
815 sokey_t end_key = solist_t::get_safe_order_key(my_end_node);
816 size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
817 while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
818 my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket ));
819 if( my_midpoint_node == my_begin_node )
820 my_midpoint_node = my_end_node;
823 sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);
824 __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" );
825 __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" );
827 #endif // TBB_USE_ASSERT
832 class range_type : public const_range_type {
834 typedef typename concurrent_unordered_base::iterator iterator;
836 range_type( range_type &r, split ) : const_range_type( r, split() ) {}
837 //! Init range with container and grainsize specified
838 range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {}
840 iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); }
841 iterator end() const { return solist_t::get_iterator( const_range_type::end() ); }
845 return range_type( *this );
848 const_range_type range() const {
849 return const_range_type( *this );
853 std::pair<iterator, bool> insert(const value_type& value) {
854 return internal_insert(value);
857 iterator insert(const_iterator, const value_type& value) {
859 return insert(value).first;
862 template<class Iterator>
863 void insert(Iterator first, Iterator last) {
864 for (Iterator it = first; it != last; ++it)
868 iterator unsafe_erase(const_iterator where) {
869 return internal_erase(where);
872 iterator unsafe_erase(const_iterator first, const_iterator last) {
873 while (first != last)
874 unsafe_erase(first++);
875 return my_solist.get_iterator(first);
878 size_type unsafe_erase(const key_type& key) {
879 pairii_t where = equal_range(key);
880 size_type item_count = internal_distance(where.first, where.second);
881 unsafe_erase(where.first, where.second);
885 void swap(concurrent_unordered_base& right) {
886 if (this != &right) {
887 std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here
888 my_solist.swap(right.my_solist);
889 internal_swap_buckets(right);
890 std::swap(my_number_of_buckets, right.my_number_of_buckets);
891 std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);
905 iterator find(const key_type& key) {
906 return internal_find(key);
909 const_iterator find(const key_type& key) const {
910 return const_cast<self_type*>(this)->internal_find(key);
913 size_type count(const key_type& key) const {
914 paircc_t answer = equal_range(key);
915 size_type item_count = internal_distance(answer.first, answer.second);
919 std::pair<iterator, iterator> equal_range(const key_type& key) {
920 return internal_equal_range(key);
923 std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
924 return internal_equal_range(key);
927 // Bucket interface - for debugging
928 size_type unsafe_bucket_count() const {
929 return my_number_of_buckets;
932 size_type unsafe_max_bucket_count() const {
933 return segment_size(pointers_per_table-1);
936 size_type unsafe_bucket_size(size_type bucket) {
937 size_type item_count = 0;
938 if (is_initialized(bucket)) {
939 raw_iterator it = get_bucket(bucket);
941 for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)
947 size_type unsafe_bucket(const key_type& key) const {
948 sokey_t order_key = (sokey_t) my_hash_compare(key);
949 size_type bucket = order_key % my_number_of_buckets;
953 // If the bucket is initialized, return a first non-dummy element in it
954 local_iterator unsafe_begin(size_type bucket) {
955 if (!is_initialized(bucket))
958 raw_iterator it = get_bucket(bucket);
959 return my_solist.first_real_iterator(it);
962 // If the bucket is initialized, return a first non-dummy element in it
963 const_local_iterator unsafe_begin(size_type bucket) const
965 if (!is_initialized(bucket))
968 raw_const_iterator it = get_bucket(bucket);
969 return my_solist.first_real_iterator(it);
972 // @REVIEW: Takes O(n)
973 // Returns the iterator after the last non-dummy element in the bucket
974 local_iterator unsafe_end(size_type bucket)
976 if (!is_initialized(bucket))
979 raw_iterator it = get_bucket(bucket);
981 // Find the end of the bucket, denoted by the dummy element
983 while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
985 // Return the first real element past the end of the bucket
986 return my_solist.first_real_iterator(it);
989 // @REVIEW: Takes O(n)
990 // Returns the iterator after the last non-dummy element in the bucket
991 const_local_iterator unsafe_end(size_type bucket) const
993 if (!is_initialized(bucket))
996 raw_const_iterator it = get_bucket(bucket);
998 // Find the end of the bucket, denoted by the dummy element
1000 while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
1002 // Return the first real element past the end of the bucket
1003 return my_solist.first_real_iterator(it);
1006 const_local_iterator unsafe_cbegin(size_type bucket) const {
1007 return ((const self_type *) this)->begin();
1010 const_local_iterator unsafe_cend(size_type bucket) const {
1011 return ((const self_type *) this)->end();
1015 float load_factor() const {
1016 return (float) size() / (float) unsafe_bucket_count();
1019 float max_load_factor() const {
1020 return my_maximum_bucket_size;
1023 void max_load_factor(float newmax) {
1024 if (newmax != newmax || newmax < 0)
1025 tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
1026 my_maximum_bucket_size = newmax;
1029 // This function is a noop, because the underlying split-ordered list
1030 // is already sorted, so an increase in the bucket number will be
1031 // reflected next time this bucket is touched.
1032 void rehash(size_type buckets) {
1033 size_type current_buckets = my_number_of_buckets;
1035 if (current_buckets > buckets)
1037 else if ( (buckets & (buckets-1)) != 0 )
1038 tbb::internal::throw_exception(tbb::internal::eid_invalid_buckets_number);
1039 my_number_of_buckets = buckets;
1044 // Initialize the hash and keep the first bucket open
1045 void internal_init() {
1046 // Allocate an array of segment pointers
1047 memset(my_buckets, 0, pointers_per_table * sizeof(void *));
1049 // Insert the first element in the split-ordered list
1050 raw_iterator dummy_node = my_solist.raw_begin();
1051 set_bucket(0, dummy_node);
1054 void internal_clear() {
1055 for (size_type index = 0; index < pointers_per_table; ++index) {
1056 if (my_buckets[index] != NULL) {
1057 size_type sz = segment_size(index);
1058 for (size_type index2 = 0; index2 < sz; ++index2)
1059 my_allocator.destroy(&my_buckets[index][index2]);
1060 my_allocator.deallocate(my_buckets[index], sz);
1061 my_buckets[index] = 0;
1066 void internal_copy(const self_type& right) {
1069 my_maximum_bucket_size = right.my_maximum_bucket_size;
1070 my_number_of_buckets = right.my_number_of_buckets;
1073 insert(right.begin(), right.end());
1074 my_hash_compare = right.my_hash_compare;
1075 } __TBB_CATCH(...) {
1081 void internal_swap_buckets(concurrent_unordered_base& right)
1083 // Swap all node segments
1084 for (size_type index = 0; index < pointers_per_table; ++index)
1086 raw_iterator * iterator_pointer = my_buckets[index];
1087 my_buckets[index] = right.my_buckets[index];
1088 right.my_buckets[index] = iterator_pointer;
1093 size_type internal_distance(const_iterator first, const_iterator last) const
1097 for (const_iterator it = first; it != last; ++it)
1103 // Insert an element in the hash given its value
// Returns (iterator, true) on successful insertion; when
// allow_multimapping is off and an equal key already exists, returns
// (iterator to the existing element, false).
// NOTE(review): the search-loop framing and the retry/advance paths are
// on lines elided from this excerpt.
1104 std::pair<iterator, bool> internal_insert(const value_type& value)
// Hash the key and map it onto the current (power-of-two) bucket count.
1106 sokey_t order_key = (sokey_t) my_hash_compare(get_key(value));
1107 size_type bucket = order_key % my_number_of_buckets;
1109 // If bucket is empty, initialize it first
1110 if (!is_initialized(bucket))
1111 init_bucket(bucket);
1113 size_type new_count;
// Regular keys are the bit-reversed hash with the low bit set, so they
// sort after their bucket's dummy node in the split-ordered list.
1114 order_key = split_order_key_regular(order_key);
1115 raw_iterator it = get_bucket(bucket);
1116 raw_iterator last = my_solist.raw_end();
1117 raw_iterator where = it;
1119 __TBB_ASSERT(where != last, "Invalid head node");
1121 // First node is a dummy node
// Reached the first position whose order key exceeds ours: this is the
// sorted insertion point.
1126 if (where == last || solist_t::get_order_key(where) > order_key)
1128 // Try to insert it in the right place
1129 std::pair<iterator, bool> result = my_solist.try_insert(it, where, value, order_key, &new_count);
1133 // Insertion succeeded, adjust the table size, if needed
1134 adjust_table_size(new_count, my_number_of_buckets);
1139 // Insertion failed: either the same node was inserted by another thread, or
1140 // another element was inserted at exactly the same place as this node.
1141 // Proceed with the search from the previous location where order key was
1142 // known to be larger (note: this is legal only because there is no safe
1143 // concurrent erase operation supported).
// Duplicate detection: same split-order key AND keys compare equal.
1149 else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && my_hash_compare(get_key(*where), get_key(value)) == 0)
1151 // Element already in the list, return it
1152 return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
1155 // Move the iterator forward
1161 // Find the element in the split-ordered list
// Look up `key`: walk the bucket's sorted sub-list until the order key
// passes the target, comparing actual keys on order-key ties.
// NOTE(review): the not-found return statements are on lines elided
// from this excerpt.
1162 iterator internal_find(const key_type& key)
1164 sokey_t order_key = (sokey_t) my_hash_compare(key);
1165 size_type bucket = order_key % my_number_of_buckets;
1167 // If bucket is empty, initialize it first
1168 if (!is_initialized(bucket))
1169 init_bucket(bucket);
1171 order_key = split_order_key_regular(order_key);
1172 raw_iterator last = my_solist.raw_end();
// The list is sorted by split-order key, so scanning can stop as soon
// as the current key exceeds the target.
1174 for (raw_iterator it = get_bucket(bucket); it != last; ++it)
1176 if (solist_t::get_order_key(it) > order_key)
1178 // If the order key is smaller than the current order key, the element
1179 // is not in the hash.
1182 else if (solist_t::get_order_key(it) == order_key)
1184 // The fact that order keys match does not mean that the element is found.
1185 // Key function comparison has to be performed to check whether this is the
1186 // right element. If not, keep searching while order key is the same.
// my_hash_compare(key_a, key_b) returns 0 on equality here.
1187 if (!my_hash_compare(get_key(*it), key))
1188 return my_solist.get_iterator(it);
1195 // Erase an element from the list. This is not a concurrency safe function.
// Unlink the node referenced by `it`, returning the iterator past it.
// NOTE(review): the traversal-loop framing is on lines elided from this
// excerpt.
1196 iterator internal_erase(const_iterator it)
// Copy the key up front: it is needed to locate the bucket, and the
// referenced node is about to be destroyed.
1198 key_type key = get_key(*it);
1199 sokey_t order_key = (sokey_t) my_hash_compare(key);
1200 size_type bucket = order_key % my_number_of_buckets;
1202 // If bucket is empty, initialize it first
1203 if (!is_initialized(bucket))
1204 init_bucket(bucket);
1206 order_key = split_order_key_regular(order_key);
// Walk from the bucket's dummy head keeping a trailing `previous`,
// which erase_node needs in order to unlink the target.
1208 raw_iterator previous = get_bucket(bucket);
1209 raw_iterator last = my_solist.raw_end();
1210 raw_iterator where = previous;
1212 __TBB_ASSERT(where != last, "Invalid head node");
1214 // First node is a dummy node
1220 else if (my_solist.get_iterator(where) == it)
1221 return my_solist.erase_node(previous, it);
1223 // Move the iterator forward
1229 // Return the [begin, end) pair of iterators with the same key values.
1230 // This operation makes sense only if mapping is many-to-one.
1231 pairii_t internal_equal_range(const key_type& key)
1233 sokey_t order_key = (sokey_t) my_hash_compare(key);
1234 size_type bucket = order_key % my_number_of_buckets;
1236 // If bucket is empty, initialize it first
1237 if (!is_initialized(bucket))
1238 init_bucket(bucket);
1240 order_key = split_order_key_regular(order_key);
1241 raw_iterator end_it = my_solist.raw_end();
// Scan the sorted sub-list; equal keys are stored contiguously, so the
// range is [first match, first non-match).
1243 for (raw_iterator it = get_bucket(bucket); it != end_it; ++it)
1245 if (solist_t::get_order_key(it) > order_key)
1247 // There is no element with the given key
1248 return pairii_t(end(), end());
// Order keys match and the key comparison reports equality (0).
1250 else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))
1252 iterator first = my_solist.get_iterator(it);
1253 iterator last = first;
// Extend `last` across the run of equal keys (the increment of `last`
// is on a line elided from this excerpt).
1255 while( last != end() && !my_hash_compare(get_key(*last), key) )
1257 return pairii_t(first, last);
// No matching run found before the list ended.
1261 return pairii_t(end(), end());
1264 // Return the [begin, end) pair of const iterators with the same key values.
1265 // This operation makes sense only if mapping is many-to-one.
1266 paircc_t internal_equal_range(const key_type& key) const
1268 sokey_t order_key = (sokey_t) my_hash_compare(key);
1269 size_type bucket = order_key % my_number_of_buckets;
// Unlike the non-const overload, a const method cannot lazily create
// the bucket; an uninitialized bucket holds no elements, so the range
// is empty.
1272 if (!is_initialized(bucket))
1273 return paircc_t(end(), end());
1275 order_key = split_order_key_regular(order_key);
1276 raw_const_iterator end_it = my_solist.raw_end();
// Same contiguous-run scan as the non-const overload, over const
// iterators.
1278 for (raw_const_iterator it = get_bucket(bucket); it != end_it; ++it)
1280 if (solist_t::get_order_key(it) > order_key)
1282 // There is no element with the given key
1283 return paircc_t(end(), end());
1285 else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))
1287 const_iterator first = my_solist.get_iterator(it);
1288 const_iterator last = first;
// Extend `last` across the run of equal keys (the increment of `last`
// is on a line elided from this excerpt).
1290 while( last != end() && !my_hash_compare(get_key(*last), key ) )
1292 return paircc_t(first, last);
1296 return paircc_t(end(), end());
// Lazily create the dummy head node for `bucket`, recursively ensuring
// its ancestor chain (parent, grandparent, ...) exists first.
1300 void init_bucket(size_type bucket)
1302 // Bucket 0 has no parent. Initialize it and return.
// (The bucket == 0 fast path itself sits on lines elided from this
// excerpt.)
1308 size_type parent_bucket = get_parent(bucket);
1310 // All parent_bucket buckets have to be initialized before this bucket is
1311 if (!is_initialized(parent_bucket))
1312 init_bucket(parent_bucket);
1314 raw_iterator parent = get_bucket(parent_bucket);
1316 // Create a dummy first node in this bucket
// The dummy's order key is the bit-reversed bucket index with the low
// bit clear, so it sorts immediately before the bucket's regular keys.
1317 raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
1318 set_bucket(bucket, dummy_node);
// Double the bucket count when the load factor (elements per bucket)
// exceeds my_maximum_bucket_size. Safe to call concurrently.
1321 void adjust_table_size(size_type total_elements, size_type current_size)
1323 // Grow the table by a factor of 2 if possible and needed
1324 if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
1326 // Double the size of the hash only if size has not changed inbetween loads
// The CAS only succeeds for the thread that still observes
// `current_size`; a lost race means another thread already grew the
// table, so no retry is needed.
1327 __TBB_CompareAndSwapW((uintptr_t*)&my_number_of_buckets, 2 * current_size, current_size );
// Parent of a split-ordered bucket: the index with its most significant
// set bit cleared (e.g. parent(0b1101) == 0b0101), which is always a
// smaller index.
1331 size_type get_parent(size_type bucket) const
1333 // Unsets bucket's most significant turned-on bit
1334 size_type msb = __TBB_Log2((uintptr_t)bucket);
1335 return bucket & ~(size_type(1) << msb);
1339 // Dynamic sized array (segments)
1340 //! @return segment index of given index in the array
// floor(log2(index)); `index|1` keeps __TBB_Log2 well-defined for 0 and
// folds indices 0 and 1 into segment 0.
1341 static size_type segment_index_of( size_type index ) {
1342 return size_type( __TBB_Log2( index|1 ) );
1345 //! @return the first array index of given segment
// Segment 0 starts at index 0 (the & ~1 zeroes 1<<0); segment k >= 1
// starts at 2^k.
1346 static size_type segment_base( size_type k ) {
1347 return (size_type(1)<<k & ~size_type(1));
1350 //! @return segment size
// Segment 0 holds 2 entries (buckets 0 and 1); segment k holds 2^k.
1351 static size_type segment_size( size_type k ) {
1352 return k? size_type(1)<<k : 2;
// Fetch the dummy-head iterator of `bucket` from its segment. The
// segment must already be allocated (see the assert).
1355 raw_iterator get_bucket(size_type bucket) const {
// Convert the flat bucket index into (segment, offset-within-segment).
1356 size_type segment = segment_index_of(bucket);
1357 bucket -= segment_base(segment);
1358 __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
1359 return my_buckets[segment][bucket];
// Record `dummy_head` as the head of `bucket`, allocating the owning
// segment on first use. Segment publication is race-safe via CAS.
1362 void set_bucket(size_type bucket, raw_iterator dummy_head) {
1363 size_type segment = segment_index_of(bucket);
1364 bucket -= segment_base(segment);
1366 if (my_buckets[segment] == NULL) {
1367 size_type sz = segment_size(segment);
1368 raw_iterator * new_segment = my_allocator.allocate(sz);
// Zero-fill so unset entries read as "uninitialized" — is_initialized
// tests for a NULL node pointer inside the raw_iterator.
1369 std::memset(new_segment, 0, sz*sizeof(raw_iterator));
// Only one thread's CAS installs its segment; every loser frees the
// allocation it just made.
1371 if (__TBB_CompareAndSwapW((void *) &my_buckets[segment], (uintptr_t)new_segment, 0) != 0)
1372 my_allocator.deallocate(new_segment, sz);
1375 my_buckets[segment][bucket] = dummy_head;
// True iff `bucket` already has a dummy head node. A bucket is
// uninitialized when its whole segment is unallocated, or when its slot
// still holds the memset-zeroed (null node pointer) raw_iterator.
// (The `return false` for the unallocated-segment case is on a line
// elided from this excerpt.)
1378 bool is_initialized(size_type bucket) const {
1379 size_type segment = segment_index_of(bucket);
1380 bucket -= segment_base(segment);
1382 if (my_buckets[segment] == NULL)
1385 raw_iterator it = my_buckets[segment][bucket];
1386 return (it.get_node_ptr() != NULL);
1389 // Utilities for keys
1391 // A regular order key has its original hash value reversed and the last bit set
// Setting the low bit guarantees a regular key always sorts after the
// dummy key of its bucket (which has the low bit clear).
1392 sokey_t split_order_key_regular(sokey_t order_key) const {
1393 return __TBB_ReverseBits(order_key) | 0x1;
1396 // A dummy order key has its original hash value reversed and the last bit unset
// Dummy (bucket head) keys therefore sort immediately before all
// regular keys that hash into the same bucket.
1397 sokey_t split_order_key_dummy(sokey_t order_key) const {
1398 return __TBB_ReverseBits(order_key) & ~(0x1);
// Data members.
1402 size_type my_number_of_buckets; // Current table size
1403 solist_t my_solist; // List where all the elements are kept
1404 typename allocator_type::template rebind<raw_iterator>::other my_allocator; // Allocator object for segments
1405 float my_maximum_bucket_size; // Maximum size of the bucket
// Lazily allocated segments of bucket dummy-head iterators; slot k
// covers buckets [segment_base(k), segment_base(k) + segment_size(k)).
1406 raw_iterator *my_buckets[pointers_per_table]; // The segment table
1409 #pragma warning(pop) // warning 4127 -- while (true) has a constant expression in it
// Multiplicative (Fibonacci) hashing constant: 2^32/phi for 32-bit
// size_t, 2^64/phi for 64-bit, spreading low-entropy inputs across the
// full word.
1413 static const size_t hash_multiplier = sizeof(size_t)==4? 2654435769U : 11400714819323198485ULL;
1414 } // namespace internal
1416 //! Hasher functions
// Generic hasher for types convertible to size_t (integers, enums):
// cast, then spread bits with the Fibonacci multiplier.
1417 template<typename T>
1418 inline size_t tbb_hasher( const T& t ) {
1419 return static_cast<size_t>( t ) * internal::hash_multiplier;
// Pointer hasher: fold the high bits into the low ones (h >> 3) so that
// alignment-induced zero low bits do not cluster hash values.
1421 template<typename P>
1422 inline size_t tbb_hasher( P* ptr ) {
1423 size_t const h = reinterpret_cast<size_t>( ptr );
1424 return (h >> 3) ^ h;
// String hasher: iterated multiply-and-xor over the characters.
// Note: iterates c_str() until the first NUL, so characters after an
// embedded '\0' do not contribute to the hash. (The accumulator's
// declaration and the return are on lines elided from this excerpt.)
1426 template<typename E, typename S, typename A>
1427 inline size_t tbb_hasher( const std::basic_string<E,S,A>& s ) {
1429 for( const E* c = s.c_str(); *c; ++c )
1430 h = static_cast<size_t>(*c) ^ (h * internal::hash_multiplier);
// Pair hasher: XOR of the component hashes. XOR is symmetric, so
// (a, b) and (b, a) collide by construction.
1433 template<typename F, typename S>
1434 inline size_t tbb_hasher( const std::pair<F,S>& p ) {
1435 return tbb_hasher(p.first) ^ tbb_hasher(p.second);
1437 } // namespace interface5
1438 using interface5::tbb_hasher;
1440 #endif// __TBB_concurrent_unordered_internal_H