/*
    Copyright 2005-2011 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    As a special exception, you may use this file as part of a free software
    library without restriction.  Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License.  This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/
/* Container implementations in this header are based on PPL implementations
   provided by Microsoft. */
#ifndef __TBB_concurrent_unordered_internal_H
#define __TBB_concurrent_unordered_internal_H

#include "../tbb_stddef.h"

#if !TBB_USE_EXCEPTIONS && _MSC_VER
// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
#pragma warning (push)
#pragma warning (disable: 4530)
#endif

#include <utility>      // Need std::pair
#include <iterator>     // Need std::forward_iterator_tag, std::iterator
#include <string>       // For tbb_hasher
#include <cstring>      // Need std::memset

#if !TBB_USE_EXCEPTIONS && _MSC_VER
#pragma warning (pop)
#endif

#include "../atomic.h"
#include "../tbb_exception.h"
#include "../tbb_allocator.h"
58 namespace interface5 {
// Forward declarations needed by the iterator classes below, which befriend
// both containers.
62 template <typename T, typename Allocator>
63 class split_ordered_list;
64 template <typename Traits>
65 class concurrent_unordered_base;
// Forward list iterators (without skipping dummy elements)
//
// Thin wrapper over a raw node pointer; walks every node of the
// split-ordered list, dummy buckets included. The skipping variant is
// solist_iterator below.
template<class Solist, typename Value>
class flist_iterator : public std::iterator<std::forward_iterator_tag, Value>
{
    template <typename T, typename Allocator>
    friend class split_ordered_list;
    template <typename Traits>
    friend class concurrent_unordered_base;
    template<class M, typename V>
    friend class flist_iterator;

    typedef typename Solist::nodeptr_t nodeptr_t;
public:
    typedef typename Solist::value_type value_type;
    typedef typename Solist::difference_type difference_type;
    typedef typename Solist::pointer pointer;
    typedef typename Solist::reference reference;

    // Default-constructed iterator holds a null node pointer, i.e. it
    // compares equal to a raw end() iterator.
    flist_iterator() : my_node_ptr(0) {}
    // Conversion from the mutable-value iterator; doubles as the copy
    // constructor when Value == Solist::value_type.
    flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )
        : my_node_ptr(other.my_node_ptr) {}

    reference operator*() const { return my_node_ptr->my_element; }
    pointer operator->() const { return &**this; }

    // Pre-increment: advance to the next node, dummy nodes included.
    flist_iterator& operator++() {
        my_node_ptr = my_node_ptr->my_next;
        return *this;
    }

    // Post-increment.
    flist_iterator operator++(int) {
        flist_iterator tmp = *this;
        ++*this;
        return tmp;
    }

protected:
    // Construction from a raw node pointer is reserved for the containers
    // (friends) — user code can only obtain iterators from the container.
    flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
    nodeptr_t get_node_ptr() const { return my_node_ptr; }

    nodeptr_t my_node_ptr;

    template<typename M, typename T, typename U>
    friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );
    template<typename M, typename T, typename U>
    friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );
};
115 template<typename Solist, typename T, typename U>
116 bool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {
117 return i.my_node_ptr == j.my_node_ptr;
119 template<typename Solist, typename T, typename U>
120 bool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {
121 return i.my_node_ptr != j.my_node_ptr;
124 // Split-order list iterators, needed to skip dummy elements
125 template<class Solist, typename Value>
126 class solist_iterator : public flist_iterator<Solist, Value>
128 typedef flist_iterator<Solist, Value> base_type;
129 typedef typename Solist::nodeptr_t nodeptr_t;
130 using base_type::get_node_ptr;
131 template <typename T, typename Allocator>
132 friend class split_ordered_list;
133 template<class M, typename V>
134 friend class solist_iterator;
135 template<typename M, typename T, typename U>
136 friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );
137 template<typename M, typename T, typename U>
138 friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
140 const Solist *my_list_ptr;
141 solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}
144 typedef typename Solist::value_type value_type;
145 typedef typename Solist::difference_type difference_type;
146 typedef typename Solist::pointer pointer;
147 typedef typename Solist::reference reference;
150 solist_iterator(const solist_iterator<Solist, typename Solist::value_type> &other )
151 : base_type(other), my_list_ptr(other.my_list_ptr) {}
153 reference operator*() const {
154 return this->base_type::operator*();
157 pointer operator->() const {
161 solist_iterator& operator++() {
162 do ++(*(base_type *)this);
163 while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
168 solist_iterator operator++(int) {
169 solist_iterator tmp = *this;
171 while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
177 template<typename Solist, typename T, typename U>
178 bool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {
179 return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;
181 template<typename Solist, typename T, typename U>
182 bool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {
183 return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
186 // Forward type and class definitions
// Split-order key. By convention the least significant bit distinguishes
// dummy bucket nodes (LSB == 0) from regular element nodes (LSB == 1) —
// see node::is_dummy() below.
187 typedef size_t sokey_t;
189 // Forward list in which elements are sorted in a split-order
// NOTE(review): this block has lost lines in an extraction pass — the
// embedded original line numbers skip values, and braces, access
// specifiers, try/catch wrappers and some statements are missing. Existing
// tokens are preserved verbatim below; added comments flag the apparent
// gaps rather than guess at the missing code. Reconcile against the
// upstream TBB sources before compiling.
190 template <typename T, typename Allocator>
191 class split_ordered_list
194 typedef split_ordered_list<T, Allocator> self_type;
195 typedef typename Allocator::template rebind<T>::other allocator_type;
// NOTE(review): a forward declaration of `struct node` (used by the next
// typedef, defined further down) appears to be among the dropped lines.
197 typedef node *nodeptr_t;
199 typedef typename allocator_type::size_type size_type;
200 typedef typename allocator_type::difference_type difference_type;
201 typedef typename allocator_type::pointer pointer;
202 typedef typename allocator_type::const_pointer const_pointer;
203 typedef typename allocator_type::reference reference;
204 typedef typename allocator_type::const_reference const_reference;
205 typedef typename allocator_type::value_type value_type;
// Public iterators skip dummy nodes; raw iterators visit every node.
207 typedef solist_iterator<self_type, const value_type> const_iterator;
208 typedef solist_iterator<self_type, value_type> iterator;
209 typedef flist_iterator<self_type, const value_type> raw_const_iterator;
210 typedef flist_iterator<self_type, value_type> raw_iterator;
212 // Node that holds the element in a split-ordered list
213 struct node : tbb::internal::no_assign
215 // Initialize the node with the given order key
216 void init(sokey_t order_key) {
217 my_order_key = order_key;
// NOTE(review): the original init() presumably also reset my_next here
// (a line is missing) — confirm against upstream.
221 // Return the order key (needed for hashing)
222 sokey_t get_order_key() const { // TODO: remove
226 // Inserts the new element in the list in an atomic fashion
227 nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
229 // Try to change the next pointer on the current element to a new element, only if it still points to the cached next
230 nodeptr_t exchange_node = (nodeptr_t) __TBB_CompareAndSwapW((void *) &my_next, (uintptr_t)new_node, (uintptr_t)current_node);
232 if (exchange_node == current_node) // TODO: why this branch?
234 // Operation succeeded, return the new node
// NOTE(review): the success-path return statement is among the dropped
// lines; only the failure-path return survives below.
239 // Operation failed, return the "interfering" node
240 return exchange_node;
244 // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets
245 // in the hash table to quickly index into the right subsection of the split-ordered list.
246 bool is_dummy() const {
247 return (my_order_key & 0x1) == 0;
251 nodeptr_t my_next; // Next element in the list
252 value_type my_element; // Element storage
253 sokey_t my_order_key; // Order key for this element
256 // Allocate a new node with the given order key and value
257 nodeptr_t create_node(sokey_t order_key, const T &value) {
258 nodeptr_t pnode = my_node_allocator.allocate(1);
// NOTE(review): the try/catch (__TBB_TRY/__TBB_CATCH) wrapper around the
// placement-new below appears dropped — the deallocate on the line after
// init() is the exception-cleanup path, and the final `return pnode;` is
// also missing. Confirm against upstream.
261 new(static_cast<void*>(&pnode->my_element)) T(value);
262 pnode->init(order_key);
264 my_node_allocator.deallocate(pnode, 1);
271 // Allocate a new node with the given order key; used to allocate dummy nodes
272 nodeptr_t create_node(sokey_t order_key) {
273 nodeptr_t pnode = my_node_allocator.allocate(1);
// NOTE(review): same dropped try/catch + return as the overload above.
276 new(static_cast<void*>(&pnode->my_element)) T();
277 pnode->init(order_key);
279 my_node_allocator.deallocate(pnode, 1);
// Constructor: the list always starts with a permanent dummy head node
// whose order key is 0.
286 split_ordered_list(allocator_type a = allocator_type())
287 : my_node_allocator(a), my_element_count(0)
289 // Immediately allocate a dummy node with order key of 0. This node
290 // will always be the head of the list.
291 my_head = create_node(0);
294 ~split_ordered_list()
// NOTE(review): the call to clear() that precedes the head teardown
// appears to be among the dropped lines.
299 // Remove the head element which is not cleared by clear()
300 nodeptr_t pnode = my_head;
303 __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node");
// NOTE(review): head reset + destroy_node(pnode) dropped here.
308 // Common forward list functions
310 allocator_type get_allocator() const {
311 return (my_node_allocator);
// NOTE(review): the `void clear()` signature line is missing — the
// following fragment is the clear() body: detach the chain from the head,
// then walk it destroying every node.
316 nodeptr_t pnode = my_head;
318 __TBB_ASSERT(my_head != NULL, "Invalid head list node");
319 pnext = pnode->my_next;
320 pnode->my_next = NULL;
// Walk and destroy the detached chain (head itself is preserved).
323 while (pnode != NULL)
325 pnext = pnode->my_next;
330 my_element_count = 0;
333 // Returns a first non-dummy element in the SOL
// NOTE(review): the `iterator begin() {` signature line is missing here.
335 return first_real_iterator(raw_begin());
338 // Returns a first non-dummy element in the SOL
339 const_iterator begin() const {
340 return first_real_iterator(raw_begin());
// NOTE(review): the `iterator end() {` signature line is missing here.
// end() is represented by a null node pointer plus this list.
344 return (iterator(0, this));
347 const_iterator end() const {
348 return (const_iterator(0, this));
351 const_iterator cbegin() const {
352 return (((const self_type *)this)->begin());
355 const_iterator cend() const {
356 return (((const self_type *)this)->end());
359 // Checks if the number of elements (non-dummy) is 0
// NOTE(review): the `bool empty() const {` signature line is missing here.
361 return (my_element_count == 0);
364 // Returns the number of non-dummy elements in the list
365 size_type size() const {
366 return my_element_count;
369 // Returns the maximum size of the list, determined by the allocator
370 size_type max_size() const {
371 return my_node_allocator.max_size();
374 // Swaps 'this' list with the passed in one
375 void swap(self_type& other)
// NOTE(review): the self-swap guard preceding these statements appears
// dropped. Note that the allocators themselves are NOT swapped.
383 std::swap(my_element_count, other.my_element_count);
384 std::swap(my_head, other.my_head);
387 // Split-order list functions
389 // Returns a first element in the SOL, which is always a dummy
390 raw_iterator raw_begin() {
391 return raw_iterator(my_head);
394 // Returns a first element in the SOL, which is always a dummy
395 raw_const_iterator raw_begin() const {
396 return raw_const_iterator(my_head);
399 raw_iterator raw_end() {
400 return raw_iterator(0);
403 raw_const_iterator raw_end() const {
404 return raw_const_iterator(0);
407 static sokey_t get_order_key(const raw_const_iterator& it) {
408 return it.get_node_ptr()->get_order_key();
// Null-safe variant: a null (end) iterator maps to the maximum key so that
// range-splitting arithmetic stays well-defined.
411 static sokey_t get_safe_order_key(const raw_const_iterator& it) {
412 if( !it.get_node_ptr() ) return sokey_t(~0U);
413 return it.get_node_ptr()->get_order_key();
416 // Returns a public iterator version of the internal iterator. Public iterator must not
417 // be a dummy private iterator.
418 iterator get_iterator(raw_iterator it) {
419 __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
420 return iterator(it.get_node_ptr(), this);
423 // Returns a public iterator version of the internal iterator. Public iterator must not
424 // be a dummy private iterator.
425 const_iterator get_iterator(raw_const_iterator it) const {
426 __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
427 return const_iterator(it.get_node_ptr(), this);
430 // Returns a non-const version of the raw_iterator
431 raw_iterator get_iterator(raw_const_iterator it) {
432 return raw_iterator(it.get_node_ptr());
435 // Returns a non-const version of the iterator
436 static iterator get_iterator(const_iterator it) {
437 return iterator(it.my_node_ptr, it.my_list_ptr);
440 // Returns a public iterator version of a first non-dummy internal iterator at or after
441 // the passed in internal iterator.
442 iterator first_real_iterator(raw_iterator it)
444 // Skip all dummy, internal only iterators
445 while (it != raw_end() && it.get_node_ptr()->is_dummy())
// NOTE(review): the `++it;` loop body is among the dropped lines.
448 return iterator(it.get_node_ptr(), this);
451 // Returns a public iterator version of a first non-dummy internal iterator at or after
452 // the passed in internal iterator.
453 const_iterator first_real_iterator(raw_const_iterator it) const
455 // Skip all dummy, internal only iterators
456 while (it != raw_end() && it.get_node_ptr()->is_dummy())
459 return const_iterator(it.get_node_ptr(), this);
462 // Erase an element using the allocator
463 void destroy_node(nodeptr_t pnode) {
464 my_node_allocator.destroy(pnode);
465 my_node_allocator.deallocate(pnode, 1);
468 // Try to insert a new element in the list. If insert fails, return the node that
469 // was inserted instead.
470 nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
471 new_node->my_next = current_node;
472 return previous->atomic_set_next(new_node, current_node);
475 // Insert a new element between passed in iterators
476 std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, const value_type &value, sokey_t order_key, size_type *new_count)
478 nodeptr_t pnode = create_node(order_key, value);
479 nodeptr_t inserted_node = try_insert(it.get_node_ptr(), pnode, next.get_node_ptr());
481 if (inserted_node == pnode)
483 // If the insert succeeded, check that the order is correct and increment the element count
485 *new_count = __TBB_FetchAndAddW((uintptr_t*)&my_element_count, uintptr_t(1));
486 return std::pair<iterator, bool>(iterator(pnode, this), true);
490 // If the insert failed (element already there), then delete the new one
// NOTE(review): the destroy_node(pnode) call preceding this return appears
// to be among the dropped lines — without it the failure path leaks.
492 return std::pair<iterator, bool>(end(), false);
496 // Insert a new dummy element, starting search at a parent dummy element
497 raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
499 raw_iterator last = raw_end();
500 raw_iterator where = it;
502 __TBB_ASSERT(where != last, "Invalid head node");
// NOTE(review): the `++where;` advancing past the parent dummy appears
// dropped here.
506 // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
507 nodeptr_t dummy_node = create_node(order_key);
// NOTE(review): the enclosing `for (;;)` loop header is among the dropped
// lines — the logic below retries until the dummy is placed or found.
511 __TBB_ASSERT(it != last, "Invalid head list node");
513 // If the head iterator is at the end of the list, or past the point where this dummy
514 // node needs to be inserted, then try to insert it.
515 if (where == last || get_order_key(where) > order_key)
517 __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");
519 // Try to insert it in the right place
520 nodeptr_t inserted_node = try_insert(it.get_node_ptr(), dummy_node, where.get_node_ptr());
522 if (inserted_node == dummy_node)
524 // Insertion succeeded, check the list for order violations
526 return raw_iterator(dummy_node);
530 // Insertion failed: either dummy node was inserted by another thread, or
531 // a real element was inserted at exactly the same place as dummy node.
532 // Proceed with the search from the previous location where order key was
533 // known to be larger (note: this is legal only because there is no safe
534 // concurrent erase operation supported).
540 else if (get_order_key(where) == order_key)
542 // Another dummy node with the same value found, discard the new one.
543 destroy_node(dummy_node);
// NOTE(review): the `return where;` for this branch appears dropped.
547 // Move the iterator forward
554 // This erase function can handle both real and dummy nodes
555 void erase_node(raw_iterator previous, raw_const_iterator& where)
557 nodeptr_t pnode = (where++).get_node_ptr();
558 nodeptr_t prevnode = previous.get_node_ptr();
559 __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
560 prevnode->my_next = pnode->my_next;
// NOTE(review): the destroy_node(pnode) call freeing the unlinked node
// appears to be among the dropped lines.
565 // Erase the element (previous node needs to be passed because this is a forward only list)
566 iterator erase_node(raw_iterator previous, const_iterator where)
568 raw_const_iterator it = where;
569 erase_node(previous, it);
572 return get_iterator(first_real_iterator(it));
575 // Move all elements from the passed in split-ordered list to this one
576 void move_all(self_type& source)
578 raw_const_iterator first = source.raw_begin();
579 raw_const_iterator last = source.raw_end();
// NOTE(review): an early return for an empty source appears dropped here.
584 nodeptr_t previous_node = my_head;
585 raw_const_iterator begin_iterator = first++;
587 // Move all elements one by one, including dummy ones
588 for (raw_const_iterator it = first; it != last;)
590 nodeptr_t pnode = it.get_node_ptr();
// Clone the source node (dummy or real) and append it to this list;
// passing NULL as the expected next makes the CAS-append unconditional.
592 nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
593 previous_node = try_insert(previous_node, dummy_node, NULL);
594 __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
595 raw_const_iterator where = it++;
596 source.erase_node(get_iterator(begin_iterator), where);
604 // Check the list for order violations
// NOTE(review): the `void check_range()` signature (debug-only helper) is
// among the dropped lines; the loop below is its body.
608 for (raw_iterator it = raw_begin(); it != raw_end(); ++it)
610 raw_iterator next_iterator = it;
// NOTE(review): the `++next_iterator;` advancing to the successor appears
// dropped here.
613 __TBB_ASSERT(next_iterator == end() || next_iterator.get_node_ptr()->get_order_key() >= it.get_node_ptr()->get_order_key(), "!!! List order inconsistency !!!");
618 typename allocator_type::template rebind<node>::other my_node_allocator; // allocator object for nodes
619 size_type my_element_count; // Total item count, not counting dummy nodes
620 nodeptr_t my_head; // pointer to head node
// Template class for hash compare
//
// Bundles the user-supplied hash functor and key-equality functor into a
// single callable object, as consumed by concurrent_unordered_base:
//  - operator()(key) hashes;
//  - operator()(key1, key2) returns the NEGATION of key equality (callers
//    compare the result against 0 to test for equal keys).
template<typename Key, typename Hasher, typename Key_equality>
class hash_compare
{
public:
    hash_compare() {}

    hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {}

    hash_compare(Hasher a_hasher, Key_equality a_keyeq)
        : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {}

    // Hash a key
    size_t operator()(const Key& key) const {
        return ((size_t)my_hash_object(key));
    }

    // Returns false when the keys are equal, true otherwise (inverted
    // equality — see class comment).
    bool operator()(const Key& key1, const Key& key2) const {
        return (!my_key_compare_object(key1, key2));
    }

    Hasher my_hash_object;              // The hash object
    Key_equality my_key_compare_object; // The equality comparator object
};
647 #pragma warning(push)
648 #pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it (for allow_multimapping)
651 template <typename Traits>
652 class concurrent_unordered_base : public Traits
656 typedef concurrent_unordered_base<Traits> self_type;
657 typedef typename Traits::value_type value_type;
658 typedef typename Traits::key_type key_type;
659 typedef typename Traits::hash_compare hash_compare;
660 typedef typename Traits::value_compare value_compare;
661 typedef typename Traits::allocator_type allocator_type;
662 typedef typename allocator_type::pointer pointer;
663 typedef typename allocator_type::const_pointer const_pointer;
664 typedef typename allocator_type::reference reference;
665 typedef typename allocator_type::const_reference const_reference;
666 typedef typename allocator_type::size_type size_type;
667 typedef typename allocator_type::difference_type difference_type;
668 typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
669 typedef typename solist_t::nodeptr_t nodeptr_t;
670 // Iterators that walk the entire split-order list, including dummy nodes
671 typedef typename solist_t::raw_iterator raw_iterator;
672 typedef typename solist_t::raw_const_iterator raw_const_iterator;
673 typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
674 typedef typename solist_t::const_iterator const_iterator;
675 typedef iterator local_iterator;
676 typedef const_iterator const_local_iterator;
677 using Traits::my_hash_compare;
678 using Traits::get_key;
679 using Traits::allow_multimapping;
682 typedef std::pair<iterator, iterator> pairii_t;
683 typedef std::pair<const_iterator, const_iterator> paircc_t;
685 static size_type const pointers_per_table = sizeof(size_type) * 8; // One bucket segment per bit
686 static const size_type initial_bucket_number = 8; // Initial number of buckets
687 static const size_type initial_bucket_load = 4; // Initial maximum number of elements per bucket
690 // Constructors/Destructors
691 concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
692 const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
693 : Traits(hc), my_solist(a),
694 my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
696 if( n_of_buckets == 0) ++n_of_buckets;
697 my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2
701 concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)
702 : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
705 internal_copy(right);
708 concurrent_unordered_base(const concurrent_unordered_base& right)
709 : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
712 internal_copy(right);
715 concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
717 internal_copy(right);
721 ~concurrent_unordered_base() {
722 // Delete all node segments
727 allocator_type get_allocator() const {
728 return my_solist.get_allocator();
731 // Size and capacity function
733 return my_solist.empty();
736 size_type size() const {
737 return my_solist.size();
740 size_type max_size() const {
741 return my_solist.max_size();
746 return my_solist.begin();
749 const_iterator begin() const {
750 return my_solist.begin();
754 return my_solist.end();
757 const_iterator end() const {
758 return my_solist.end();
761 const_iterator cbegin() const {
762 return my_solist.cbegin();
765 const_iterator cend() const {
766 return my_solist.cend();
769 // Parallel traversal support
770 class const_range_type : tbb::internal::no_assign {
771 const concurrent_unordered_base &my_table;
772 raw_const_iterator my_begin_node;
773 raw_const_iterator my_end_node;
774 mutable raw_const_iterator my_midpoint_node;
776 //! Type for size of a range
777 typedef typename concurrent_unordered_base::size_type size_type;
778 typedef typename concurrent_unordered_base::value_type value_type;
779 typedef typename concurrent_unordered_base::reference reference;
780 typedef typename concurrent_unordered_base::difference_type difference_type;
781 typedef typename concurrent_unordered_base::const_iterator iterator;
783 //! True if range is empty.
784 bool empty() const {return my_begin_node == my_end_node;}
786 //! True if range can be partitioned into two subranges.
787 bool is_divisible() const {
788 return my_midpoint_node != my_end_node;
791 const_range_type( const_range_type &r, split ) :
792 my_table(r.my_table), my_end_node(r.my_end_node)
794 r.my_end_node = my_begin_node = r.my_midpoint_node;
795 __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
796 __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
800 //! Init range with container and grainsize specified
801 const_range_type( const concurrent_unordered_base &a_table ) :
802 my_table(a_table), my_begin_node(a_table.my_solist.begin()),
803 my_end_node(a_table.my_solist.end())
807 iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); }
808 iterator end() const { return my_table.my_solist.get_iterator(my_end_node); }
809 //! The grain size for this range.
810 size_type grainsize() const { return 1; }
812 //! Set my_midpoint_node to point approximately half way between my_begin_node and my_end_node.
813 void set_midpoint() const {
814 if( my_begin_node == my_end_node ) // not divisible
815 my_midpoint_node = my_end_node;
817 sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);
818 sokey_t end_key = solist_t::get_safe_order_key(my_end_node);
819 size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
820 while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
821 my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket ));
822 if( my_midpoint_node == my_begin_node )
823 my_midpoint_node = my_end_node;
826 sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);
827 __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" );
828 __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" );
830 #endif // TBB_USE_ASSERT
835 class range_type : public const_range_type {
837 typedef typename concurrent_unordered_base::iterator iterator;
839 range_type( range_type &r, split ) : const_range_type( r, split() ) {}
840 //! Init range with container and grainsize specified
841 range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {}
843 iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); }
844 iterator end() const { return solist_t::get_iterator( const_range_type::end() ); }
848 return range_type( *this );
851 const_range_type range() const {
852 return const_range_type( *this );
856 std::pair<iterator, bool> insert(const value_type& value) {
857 return internal_insert(value);
860 iterator insert(const_iterator, const value_type& value) {
862 return insert(value).first;
865 template<class Iterator>
866 void insert(Iterator first, Iterator last) {
867 for (Iterator it = first; it != last; ++it)
871 iterator unsafe_erase(const_iterator where) {
872 return internal_erase(where);
875 iterator unsafe_erase(const_iterator first, const_iterator last) {
876 while (first != last)
877 unsafe_erase(first++);
878 return my_solist.get_iterator(first);
881 size_type unsafe_erase(const key_type& key) {
882 pairii_t where = equal_range(key);
883 size_type item_count = internal_distance(where.first, where.second);
884 unsafe_erase(where.first, where.second);
888 void swap(concurrent_unordered_base& right) {
889 if (this != &right) {
890 std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here
891 my_solist.swap(right.my_solist);
892 internal_swap_buckets(right);
893 std::swap(my_number_of_buckets, right.my_number_of_buckets);
894 std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);
906 // Initialize bucket 0
907 __TBB_ASSERT(my_buckets[0] == NULL, NULL);
908 raw_iterator dummy_node = my_solist.raw_begin();
909 set_bucket(0, dummy_node);
913 iterator find(const key_type& key) {
914 return internal_find(key);
917 const_iterator find(const key_type& key) const {
918 return const_cast<self_type*>(this)->internal_find(key);
921 size_type count(const key_type& key) const {
922 if(allow_multimapping) {
923 paircc_t answer = equal_range(key);
924 size_type item_count = internal_distance(answer.first, answer.second);
927 return const_cast<self_type*>(this)->internal_find(key) == end()?0:1;
931 std::pair<iterator, iterator> equal_range(const key_type& key) {
932 return internal_equal_range(key);
935 std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
936 return const_cast<self_type*>(this)->internal_equal_range(key);
939 // Bucket interface - for debugging
940 size_type unsafe_bucket_count() const {
941 return my_number_of_buckets;
944 size_type unsafe_max_bucket_count() const {
945 return segment_size(pointers_per_table-1);
948 size_type unsafe_bucket_size(size_type bucket) {
949 size_type item_count = 0;
950 if (is_initialized(bucket)) {
951 raw_iterator it = get_bucket(bucket);
953 for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)
959 size_type unsafe_bucket(const key_type& key) const {
960 sokey_t order_key = (sokey_t) my_hash_compare(key);
961 size_type bucket = order_key % my_number_of_buckets;
965 // If the bucket is initialized, return a first non-dummy element in it
966 local_iterator unsafe_begin(size_type bucket) {
967 if (!is_initialized(bucket))
970 raw_iterator it = get_bucket(bucket);
971 return my_solist.first_real_iterator(it);
974 // If the bucket is initialized, return a first non-dummy element in it
975 const_local_iterator unsafe_begin(size_type bucket) const
977 if (!is_initialized(bucket))
980 raw_const_iterator it = get_bucket(bucket);
981 return my_solist.first_real_iterator(it);
984 // @REVIEW: Takes O(n)
985 // Returns the iterator after the last non-dummy element in the bucket
986 local_iterator unsafe_end(size_type bucket)
988 if (!is_initialized(bucket))
991 raw_iterator it = get_bucket(bucket);
993 // Find the end of the bucket, denoted by the dummy element
995 while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
997 // Return the first real element past the end of the bucket
998 return my_solist.first_real_iterator(it);
1001 // @REVIEW: Takes O(n)
1002 // Returns the iterator after the last non-dummy element in the bucket
1003 const_local_iterator unsafe_end(size_type bucket) const
1005 if (!is_initialized(bucket))
1008 raw_const_iterator it = get_bucket(bucket);
1010 // Find the end of the bucket, denoted by the dummy element
1012 while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
1014 // Return the first real element past the end of the bucket
1015 return my_solist.first_real_iterator(it);
1018 const_local_iterator unsafe_cbegin(size_type bucket) const {
1019 return ((const self_type *) this)->begin();
1022 const_local_iterator unsafe_cend(size_type bucket) const {
1023 return ((const self_type *) this)->end();
1027 float load_factor() const {
1028 return (float) size() / (float) unsafe_bucket_count();
1031 float max_load_factor() const {
1032 return my_maximum_bucket_size;
1035 void max_load_factor(float newmax) {
1036 if (newmax != newmax || newmax < 0)
1037 tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
1038 my_maximum_bucket_size = newmax;
1041 // This function is a noop, because the underlying split-ordered list
1042 // is already sorted, so an increase in the bucket number will be
1043 // reflected next time this bucket is touched.
1044 void rehash(size_type buckets) {
1045 size_type current_buckets = my_number_of_buckets;
1046 if (current_buckets >= buckets)
1048 my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2
1053 // Initialize the hash and keep the first bucket open
1054 void internal_init() {
1055 // Allocate an array of segment pointers
1056 memset(my_buckets, 0, pointers_per_table * sizeof(void *));
1058 // Initialize bucket 0
1059 raw_iterator dummy_node = my_solist.raw_begin();
1060 set_bucket(0, dummy_node);
1063 void internal_clear() {
1064 for (size_type index = 0; index < pointers_per_table; ++index) {
1065 if (my_buckets[index] != NULL) {
1066 size_type sz = segment_size(index);
1067 for (size_type index2 = 0; index2 < sz; ++index2)
1068 my_allocator.destroy(&my_buckets[index][index2]);
1069 my_allocator.deallocate(my_buckets[index], sz);
1070 my_buckets[index] = 0;
1075 void internal_copy(const self_type& right) {
1078 my_maximum_bucket_size = right.my_maximum_bucket_size;
1079 my_number_of_buckets = right.my_number_of_buckets;
1082 insert(right.begin(), right.end());
1083 my_hash_compare = right.my_hash_compare;
1084 } __TBB_CATCH(...) {
// Pointer-swaps every segment of the bucket table with `right`.
// O(pointers_per_table); no per-element work. Not concurrency-safe.
1090 void internal_swap_buckets(concurrent_unordered_base& right)
1092 // Swap all node segments
1093 for (size_type index = 0; index < pointers_per_table; ++index)
1095 raw_iterator * iterator_pointer = my_buckets[index];
1096 my_buckets[index] = right.my_buckets[index];
1097 right.my_buckets[index] = iterator_pointer;
// Counts elements in [first, last) by linear iteration — O(distance).
// (Counter declaration/increment/return are on elided lines in this view.)
1102 size_type internal_distance(const_iterator first, const_iterator last) const
1106 for (const_iterator it = first; it != last; ++it)
1112 // Insert an element in the hash given its value
// Returns (iterator-to-element, inserted?). Concurrency-safe against other
// inserts: hashes the key, lazily initializes the target bucket, then scans
// forward from the bucket's dummy node for the split-order insertion point.
1113 std::pair<iterator, bool> internal_insert(const value_type& value)
1115 sokey_t order_key = (sokey_t) my_hash_compare(get_key(value));
1116 size_type bucket = order_key % my_number_of_buckets;
1118 // If bucket is empty, initialize it first
1119 if (!is_initialized(bucket))
1120 init_bucket(bucket);
// new_count receives the post-insert element count from try_insert.
1122 size_type new_count;
// Reverse the hash bits and set the low bit: regular (non-dummy) order key.
1123 order_key = split_order_key_regular(order_key);
1124 raw_iterator it = get_bucket(bucket);
1125 raw_iterator last = my_solist.raw_end();
1126 raw_iterator where = it;
1128 __TBB_ASSERT(where != last, "Invalid head node");
1130 // First node is a dummy node
// Found the gap where our order key belongs (list is sorted by order key).
1135 if (where == last || solist_t::get_order_key(where) > order_key)
1137 // Try to insert it in the right place
1138 std::pair<iterator, bool> result = my_solist.try_insert(it, where, value, order_key, &new_count);
1142 // Insertion succeeded, adjust the table size, if needed
1143 adjust_table_size(new_count, my_number_of_buckets);
1148 // Insertion failed: either the same node was inserted by another thread, or
1149 // another element was inserted at exactly the same place as this node.
1150 // Proceed with the search from the previous location where order key was
1151 // known to be larger (note: this is legal only because there is no safe
1152 // concurrent erase operation supported).
// Duplicate key in unique-mapping mode: report the existing element.
1158 else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && my_hash_compare(get_key(*where), get_key(value)) == 0)
1160 // Element already in the list, return it
1161 return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
1164 // Move the iterator forward
1170 // Find the element in the split-ordered list
// Returns an iterator to the first element matching `key`, or end() if
// absent (the miss-return is on an elided line). Scans from the bucket's
// dummy node; the sorted order keys bound the search.
1171 iterator internal_find(const key_type& key)
1173 sokey_t order_key = (sokey_t) my_hash_compare(key);
1174 size_type bucket = order_key % my_number_of_buckets;
1176 // If bucket is empty, initialize it first
1177 if (!is_initialized(bucket))
1178 init_bucket(bucket);
1180 order_key = split_order_key_regular(order_key);
1181 raw_iterator last = my_solist.raw_end();
1183 for (raw_iterator it = get_bucket(bucket); it != last; ++it)
// Passed where the key would live — it is not present.
1185 if (solist_t::get_order_key(it) > order_key)
1187 // If the order key is smaller than the current order key, the element
1188 // is not in the hash.
1191 else if (solist_t::get_order_key(it) == order_key)
1193 // The fact that order keys match does not mean that the element is found.
1194 // Key function comparison has to be performed to check whether this is the
1195 // right element. If not, keep searching while order key is the same.
// my_hash_compare(a, b) == 0 means "keys equal" here.
1196 if (!my_hash_compare(get_key(*it), key))
1197 return my_solist.get_iterator(it);
1204 // Erase an element from the list. This is not a concurrency safe function.
// Unlinks the node referenced by `it` and returns the iterator following it.
// Walks from the bucket's dummy head tracking `previous`, since the
// singly-linked split-ordered list needs the predecessor to unlink.
1205 iterator internal_erase(const_iterator it)
1207 key_type key = get_key(*it);
1208 sokey_t order_key = (sokey_t) my_hash_compare(key);
1209 size_type bucket = order_key % my_number_of_buckets;
1211 // If bucket is empty, initialize it first
1212 if (!is_initialized(bucket))
1213 init_bucket(bucket);
1215 order_key = split_order_key_regular(order_key);
1217 raw_iterator previous = get_bucket(bucket);
1218 raw_iterator last = my_solist.raw_end();
1219 raw_iterator where = previous;
1221 __TBB_ASSERT(where != last, "Invalid head node");
1223 // First node is a dummy node
// Found the exact node the caller asked to erase.
1229 else if (my_solist.get_iterator(where) == it)
1230 return my_solist.erase_node(previous, it);
1232 // Move the iterator forward
1238 // Return the [begin, end) pair of iterators with the same key values.
1239 // This operation makes sense only if mapping is many-to-one.
// Locates the first match by order key + key comparison, then extends
// `last` forward while keys still match (only when allow_multimapping,
// so unique maps yield at most a single-element range).
1240 pairii_t internal_equal_range(const key_type& key)
1242 sokey_t order_key = (sokey_t) my_hash_compare(key);
1243 size_type bucket = order_key % my_number_of_buckets;
1245 // If bucket is empty, initialize it first
1246 if (!is_initialized(bucket))
1247 init_bucket(bucket);
1249 order_key = split_order_key_regular(order_key);
1250 raw_iterator end_it = my_solist.raw_end();
1252 for (raw_iterator it = get_bucket(bucket); it != end_it; ++it)
1254 if (solist_t::get_order_key(it) > order_key)
1256 // There is no element with the given key
1257 return pairii_t(end(), end());
// Order key and actual key both match: start of the equal range.
1259 else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))
1261 iterator first = my_solist.get_iterator(it);
1262 iterator last = first;
1263 do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) );
1264 return pairii_t(first, last);
// Walked the whole candidate run without a match.
1268 return pairii_t(end(), end());
// Lazily initializes `bucket` by recursively ensuring its parent bucket
// (bucket with the top set bit cleared) exists, then splicing a dummy node
// after the parent's dummy. Recursion depth is at most log2(bucket count).
1272 void init_bucket(size_type bucket)
1274 // Bucket 0 has no parent.
1275 __TBB_ASSERT( bucket != 0, "The first bucket must always be initialized");
1277 size_type parent_bucket = get_parent(bucket);
1279 // All parent_bucket buckets have to be initialized before this bucket is
1280 if (!is_initialized(parent_bucket))
1281 init_bucket(parent_bucket);
1283 raw_iterator parent = get_bucket(parent_bucket);
1285 // Create a dummy first node in this bucket
// Dummy order key has the low bit CLEAR, so it sorts before all regular
// keys that hash into this bucket.
1286 raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
1287 set_bucket(bucket, dummy_node);
// Doubles my_number_of_buckets when the observed load factor exceeds
// my_maximum_bucket_size. The CAS only succeeds if no other thread resized
// in the meantime, so concurrent calls double at most once per snapshot;
// a lost race is simply dropped (the winner already grew the table).
1290 void adjust_table_size(size_type total_elements, size_type current_size)
1292 // Grow the table by a factor of 2 if possible and needed
1293 if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
1295 // Double the size of the hash only if size has not changed inbetween loads
1296 __TBB_CompareAndSwapW((uintptr_t*)&my_number_of_buckets, uintptr_t(2u*current_size), uintptr_t(current_size) );
1297 //Simple "my_number_of_buckets.compare_and_swap( current_size<<1, current_size );" does not work for VC8
1298 //due to overzealous compiler warnings in /Wp64 mode
// Parent bucket in the split-ordered scheme: clear the most significant
// set bit (e.g. parent of 0b110 is 0b010). Precondition: bucket != 0.
1302 size_type get_parent(size_type bucket) const
1304 // Unsets bucket's most significant turned-on bit
1305 size_type msb = __TBB_Log2((uintptr_t)bucket);
1306 return bucket & ~(size_type(1) << msb);
1310 // Dynamic sized array (segments)
1311 //! @return segment index of given index in the array
// floor(log2(index)), with index|1 so that index 0 maps to segment 0
// (log2(0) is undefined). Indices 0-1 -> segment 0, 2-3 -> 1, 4-7 -> 2, ...
1312 static size_type segment_index_of( size_type index ) {
1313 return size_type( __TBB_Log2( uintptr_t(index|1) ) );
1316 //! @return the first array index of given segment
// 2^k for k >= 1; 0 for k == 0 (the `& ~1` zeroes the k==0 case, matching
// segment 0 holding indices 0 and 1).
1317 static size_type segment_base( size_type k ) {
1318 return (size_type(1)<<k & ~size_type(1));
1321 //! @return segment size
// Segment 0 holds 2 slots; segment k (k >= 1) holds 2^k slots. Together
// the segments tile the bucket index space contiguously.
1322 static size_type segment_size( size_type k ) {
1323 return k? size_type(1)<<k : 2;
// Returns the dummy-node iterator stored for `bucket`.
// Precondition: the bucket's segment is allocated (see is_initialized).
1326 raw_iterator get_bucket(size_type bucket) const {
1327 size_type segment = segment_index_of(bucket);
// Convert the global bucket index into an offset within its segment.
1328 bucket -= segment_base(segment);
1329 __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
1330 return my_buckets[segment][bucket];
// Publishes `dummy_head` as the dummy node for `bucket`, allocating the
// enclosing segment on first use. Safe against concurrent initializers:
// the segment pointer is installed with a CAS, and the loser frees its
// speculative allocation.
1333 void set_bucket(size_type bucket, raw_iterator dummy_head) {
1334 size_type segment = segment_index_of(bucket);
1335 bucket -= segment_base(segment);
1337 if (my_buckets[segment] == NULL) {
1338 size_type sz = segment_size(segment);
1339 raw_iterator * new_segment = my_allocator.allocate(sz);
// Zero-filled slots read back as "uninitialized" (null node pointer).
1340 std::memset(new_segment, 0, sz*sizeof(raw_iterator));
// Another thread may have installed a segment first; drop ours if so.
1342 if (__TBB_CompareAndSwapW((void *) &my_buckets[segment], (uintptr_t)new_segment, 0) != 0)
1343 my_allocator.deallocate(new_segment, sz);
1346 my_buckets[segment][bucket] = dummy_head;
// True iff `bucket` has a published dummy node: its segment must be
// allocated AND the slot must hold a non-null node pointer (memset-zeroed
// slots from set_bucket read as null).
1349 bool is_initialized(size_type bucket) const {
1350 size_type segment = segment_index_of(bucket);
1351 bucket -= segment_base(segment);
1353 if (my_buckets[segment] == NULL)
1356 raw_iterator it = my_buckets[segment][bucket];
1357 return (it.get_node_ptr() != NULL);
1360 // Utilities for keys
1362 // A regular order key has its original hash value reversed and the last bit set
// Bit reversal makes buckets after a table doubling interleave into the
// already-sorted list; the set low bit distinguishes real elements from
// bucket dummies (which clear it) and orders them after their dummy.
1363 sokey_t split_order_key_regular(sokey_t order_key) const {
1364 return __TBB_ReverseBits(order_key) | 0x1;
1367 // A dummy order key has its original hash value reversed and the last bit unset
// Cleared low bit sorts a bucket's dummy node before every regular element
// whose hash maps to that bucket (see split_order_key_regular).
1368 sokey_t split_order_key_dummy(sokey_t order_key) const {
1369 return __TBB_ReverseBits(order_key) & ~(0x1);
// Data members. my_number_of_buckets and the segment pointers are atomic
// because they are read/CAS'd concurrently (adjust_table_size, set_bucket);
// my_maximum_bucket_size is a plain float set by max_load_factor().
1373 atomic<size_type> my_number_of_buckets; // Current table size
1374 solist_t my_solist; // List where all the elements are kept
1375 typename allocator_type::template rebind<raw_iterator>::other my_allocator; // Allocator object for segments
1376 float my_maximum_bucket_size; // Maximum size of the bucket
1377 atomic<raw_iterator*> my_buckets[pointers_per_table]; // The segment table
1380 #pragma warning(pop) // warning 4127 -- while (true) has a constant expression in it
// Fibonacci/multiplicative-hashing constant: floor(2^32/phi) on 32-bit
// size_t, floor(2^64/phi) on 64-bit, where phi is the golden ratio.
1384 static const size_t hash_multiplier = sizeof(size_t)==4? 2654435769U : 11400714819323198485ULL;
1385 } // namespace internal
1387 //! Hasher functions
// Generic overload: multiplicative hash for any T convertible to size_t
// (integers, enums, chars); spreads nearby values across the key space.
1388 template<typename T>
1389 inline size_t tbb_hasher( const T& t ) {
1390 return static_cast<size_t>( t ) * internal::hash_multiplier;
// Pointer overload: fold the address's high bits into the low bits with a
// 3-bit shift-xor, since low bits of aligned pointers carry little entropy.
1392 template<typename P>
1393 inline size_t tbb_hasher( P* ptr ) {
1394 size_t const h = reinterpret_cast<size_t>( ptr );
1395 return (h >> 3) ^ h;
// String overload: per-character multiply-xor fold over the NUL-terminated
// c_str(). (Declaration/initialization of the accumulator `h` and the final
// return are on elided lines in this view — confirm `h` starts well-defined.)
1397 template<typename E, typename S, typename A>
1398 inline size_t tbb_hasher( const std::basic_string<E,S,A>& s ) {
1400 for( const E* c = s.c_str(); *c; ++c )
1401 h = static_cast<size_t>(*c) ^ (h * internal::hash_multiplier);
// Pair overload: XOR-combine the element hashes. Note XOR is symmetric, so
// (a,b) and (b,a) collide, and equal halves cancel to 0 — acceptable here
// but worth knowing for key design.
1404 template<typename F, typename S>
1405 inline size_t tbb_hasher( const std::pair<F,S>& p ) {
1406 return tbb_hasher(p.first) ^ tbb_hasher(p.second);
1408 } // namespace interface5
1409 using interface5::tbb_hasher;
1412 // Template class for hash compare
// Hash part of the combined hash/compare functor: delegates to the
// tbb_hasher overload set above. (Enclosing class declaration and the
// comparison operator are outside this view.)
1413 template<typename Key>
1419 size_t operator()(const Key& key) const
1421 return tbb_hasher(key);
1426 #endif// __TBB_concurrent_unordered_internal_H