_concurrent_unordered_internal.h

/*
    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.

    The source code contained or described herein and all documents related
    to the source code ("Material") are owned by Intel Corporation or its
    suppliers or licensors.  Title to the Material remains with Intel
    Corporation or its suppliers and licensors.  The Material is protected
    by worldwide copyright laws and treaty provisions.  No part of the
    Material may be used, copied, reproduced, modified, published, uploaded,
    posted, transmitted, distributed, or disclosed in any way without
    Intel's prior express written permission.

    No license under any patent, copyright, trade secret or other
    intellectual property right is granted to or conferred upon you by
    disclosure or delivery of the Materials, either expressly, by
    implication, inducement, estoppel or otherwise.  Any license under such
    intellectual property rights must be express and approved by Intel in
    writing.
*/

/* Container implementations in this header are based on PPL implementations
   provided by Microsoft. */

#ifndef __TBB_concurrent_unordered_internal_H
#define __TBB_concurrent_unordered_internal_H

#include "tbb_stddef.h"

#if !TBB_USE_EXCEPTIONS && _MSC_VER
    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
    #pragma warning (push)
    #pragma warning (disable: 4530)
#endif

#include <iterator>
#include <utility>      // Need std::pair
#include <functional>
#include <string>       // For tbb_hasher
#include <cstring>      // Need std::memset

#if !TBB_USE_EXCEPTIONS && _MSC_VER
    #pragma warning (pop)
#endif

#include "atomic.h"
#include "tbb_exception.h"
#include "tbb_allocator.h"

namespace tbb {
namespace interface5 {
namespace internal {

template <typename T, typename Allocator>
class split_ordered_list;
template <typename Traits>
class concurrent_unordered_base;

// Forward list iterators (without skipping dummy elements)
template<class Solist, typename Value>
class flist_iterator : public std::iterator<std::forward_iterator_tag, Value>
{
    template <typename T, typename Allocator>
    friend class split_ordered_list;
    template <typename Traits>
    friend class concurrent_unordered_base;
    template<class M, typename V>
    friend class flist_iterator;

    typedef typename Solist::nodeptr_t nodeptr_t;
public:
    typedef typename Solist::value_type value_type;
    typedef typename Solist::difference_type difference_type;
    typedef typename Solist::pointer pointer;
    typedef typename Solist::reference reference;

    flist_iterator() : my_node_ptr(0) {}
    flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )
        : my_node_ptr(other.my_node_ptr) {}

    reference operator*() const { return my_node_ptr->my_element; }
    pointer operator->() const { return &**this; }

    flist_iterator& operator++() {
        my_node_ptr = my_node_ptr->my_next;
        return *this;
    }

    flist_iterator operator++(int) {
        flist_iterator tmp = *this;
        ++*this;
        return tmp;
    }

protected:
    flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
    nodeptr_t get_node_ptr() const { return my_node_ptr; }

    nodeptr_t my_node_ptr;

    template<typename M, typename T, typename U>
    friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );
    template<typename M, typename T, typename U>
    friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );
};

template<typename Solist, typename T, typename U>
bool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {
    return i.my_node_ptr == j.my_node_ptr;
}
template<typename Solist, typename T, typename U>
bool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {
    return i.my_node_ptr != j.my_node_ptr;
}

// Split-order list iterators, needed to skip dummy elements
template<class Solist, typename Value>
class solist_iterator : public flist_iterator<Solist, Value>
{
    typedef flist_iterator<Solist, Value> base_type;
    typedef typename Solist::nodeptr_t nodeptr_t;
    using base_type::get_node_ptr;
    template <typename T, typename Allocator>
    friend class split_ordered_list;
    template<class M, typename V>
    friend class solist_iterator;
    template<typename M, typename T, typename U>
    friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );
    template<typename M, typename T, typename U>
    friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );

    const Solist *my_list_ptr;
    solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}

public:
    typedef typename Solist::value_type value_type;
    typedef typename Solist::difference_type difference_type;
    typedef typename Solist::pointer pointer;
    typedef typename Solist::reference reference;

    solist_iterator() {}
    solist_iterator(const solist_iterator<Solist, typename Solist::value_type> &other )
        : base_type(other), my_list_ptr(other.my_list_ptr) {}

    reference operator*() const {
        return this->base_type::operator*();
    }

    pointer operator->() const {
        return (&**this);
    }

    solist_iterator& operator++() {
        do ++(*(base_type *)this);
        while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());

        return (*this);
    }

    solist_iterator operator++(int) {
        solist_iterator tmp = *this;
        do ++*this;
        while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());

        return (tmp);
    }
};

template<typename Solist, typename T, typename U>
bool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {
    return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;
}
template<typename Solist, typename T, typename U>
bool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {
    return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
}

// Forward type and class definitions
typedef size_t sokey_t;

// Forward list in which elements are sorted in a split-order
template <typename T, typename Allocator>
class split_ordered_list
{
public:
    typedef split_ordered_list<T, Allocator> self_type;
    typedef typename Allocator::template rebind<T>::other allocator_type;
    struct node;
    typedef node *nodeptr_t;

    typedef typename allocator_type::size_type size_type;
    typedef typename allocator_type::difference_type difference_type;
    typedef typename allocator_type::pointer pointer;
    typedef typename allocator_type::const_pointer const_pointer;
    typedef typename allocator_type::reference reference;
    typedef typename allocator_type::const_reference const_reference;
    typedef typename allocator_type::value_type value_type;

    typedef solist_iterator<self_type, const value_type> const_iterator;
    typedef solist_iterator<self_type, value_type> iterator;
    typedef flist_iterator<self_type, const value_type> raw_const_iterator;
    typedef flist_iterator<self_type, value_type> raw_iterator;

    // Node that holds the element in a split-ordered list
    struct node : tbb::internal::no_assign
    {
        // Initialize the node with the given order key
        void init(sokey_t order_key) {
            my_order_key = order_key;
            my_next = NULL;
        }

        // Return the order key (needed for hashing)
        sokey_t get_order_key() const { // TODO: remove
            return my_order_key;
        }

        // Inserts the new element in the list in an atomic fashion
        nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
        {
            // Try to change the next pointer on the current element to a new element, only if it still points to the cached next
            nodeptr_t exchange_node = (nodeptr_t) __TBB_CompareAndSwapW((void *) &my_next, (uintptr_t)new_node, (uintptr_t)current_node);

            if (exchange_node == current_node)
            {
                // The CAS observed the expected value, so the link was updated:
                // report success by returning the newly linked node.
                return new_node;
            }
            else
            {
                // The CAS failed because another thread changed my_next first;
                // return the "interfering" node it observed so the caller can rescan.
                return exchange_node;
            }
        }

        // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets
        // in the hash table to quickly index into the right subsection of the split-ordered list.
        bool is_dummy() const {
            return (my_order_key & 0x1) == 0;
        }

        nodeptr_t  my_next;      // Next element in the list
        value_type my_element;   // Element storage
        sokey_t    my_order_key; // Order key for this element
    };
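
    // Worked example (illustrative note, not from the original source): with
    // 8-bit keys, an element whose hash is 0x06 (00000110b) gets the regular
    // order key 0x61 (01100001b) -- the hash bit-reversed with the low bit set --
    // while the dummy node heading bucket 6 gets 0x60 (01100000b). The dummy
    // therefore sorts immediately before all elements of its bucket, and
    // is_dummy() reduces to a test of the low bit. See split_order_key_regular()
    // and split_order_key_dummy() later in this header.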

    // Allocate a new node with the given order key and value
    nodeptr_t create_node(sokey_t order_key, const T &value) {
        nodeptr_t pnode = my_node_allocator.allocate(1);

        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T(value);
            pnode->init(order_key);
        } __TBB_CATCH(...) {
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }

    // Allocate a new node with the given order key; used to allocate dummy nodes
    nodeptr_t create_node(sokey_t order_key) {
        nodeptr_t pnode = my_node_allocator.allocate(1);

        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T();
            pnode->init(order_key);
        } __TBB_CATCH(...) {
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }

    split_ordered_list(allocator_type a = allocator_type())
        : my_node_allocator(a), my_element_count(0)
    {
        // Immediately allocate a dummy node with order key of 0. This node
        // will always be the head of the list.
        my_head = create_node(0);
    }

    ~split_ordered_list()
    {
        // Clear the list
        clear();

        // Remove the head element which is not cleared by clear()
        nodeptr_t pnode = my_head;
        my_head = NULL;

        __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node");

        destroy_node(pnode);
    }

    // Common forward list functions

    allocator_type get_allocator() const {
        return (my_node_allocator);
    }

    void clear() {
        nodeptr_t pnext;
        nodeptr_t pnode = my_head;

        __TBB_ASSERT(my_head != NULL, "Invalid head list node");
        pnext = pnode->my_next;
        pnode->my_next = NULL;
        pnode = pnext;

        while (pnode != NULL)
        {
            pnext = pnode->my_next;
            destroy_node(pnode);
            pnode = pnext;
        }

        my_element_count = 0;
    }

    // Returns the first non-dummy element in the SOL
    iterator begin() {
        return first_real_iterator(raw_begin());
    }

    // Returns the first non-dummy element in the SOL
    const_iterator begin() const {
        return first_real_iterator(raw_begin());
    }

    iterator end() {
        return (iterator(0, this));
    }

    const_iterator end() const {
        return (const_iterator(0, this));
    }

    const_iterator cbegin() const {
        return (((const self_type *)this)->begin());
    }

    const_iterator cend() const {
        return (((const self_type *)this)->end());
    }

    // Checks if the number of (non-dummy) elements is 0
    bool empty() const {
        return (my_element_count == 0);
    }

    // Returns the number of non-dummy elements in the list
    size_type size() const {
        return my_element_count;
    }

    // Returns the maximum size of the list, determined by the allocator
    size_type max_size() const {
        return my_node_allocator.max_size();
    }

    // Swaps 'this' list with the passed-in one
    void swap(self_type& other)
    {
        if (this == &other)
        {
            // Nothing to do
            return;
        }

        std::swap(my_element_count, other.my_element_count);
        std::swap(my_head, other.my_head);
    }

    // Split-order list functions

    // Returns the first element in the SOL, which is always a dummy
    raw_iterator raw_begin() {
        return raw_iterator(my_head);
    }

    // Returns the first element in the SOL, which is always a dummy
    raw_const_iterator raw_begin() const {
        return raw_const_iterator(my_head);
    }

    raw_iterator raw_end() {
        return raw_iterator(0);
    }

    raw_const_iterator raw_end() const {
        return raw_const_iterator(0);
    }

    static sokey_t get_order_key(const raw_const_iterator& it) {
        return it.get_node_ptr()->get_order_key();
    }

    static sokey_t get_safe_order_key(const raw_const_iterator& it) {
        if( !it.get_node_ptr() ) return ~sokey_t(0); // maximum order key for the end iterator
        return it.get_node_ptr()->get_order_key();
    }

    // Returns a public iterator version of the internal iterator. The passed-in
    // internal iterator must not point to a dummy node.
    iterator get_iterator(raw_iterator it) {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of the internal iterator. The passed-in
    // internal iterator must not point to a dummy node.
    const_iterator get_iterator(raw_const_iterator it) const {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return const_iterator(it.get_node_ptr(), this);
    }

    // Returns a non-const version of the raw_iterator
    raw_iterator get_iterator(raw_const_iterator it) {
        return raw_iterator(it.get_node_ptr());
    }

    // Returns a non-const version of the iterator
    static iterator get_iterator(const_iterator it) {
        return iterator(it.my_node_ptr, it.my_list_ptr);
    }

    // Returns a public iterator version of the first non-dummy internal iterator
    // at or after the passed-in internal iterator.
    iterator first_real_iterator(raw_iterator it)
    {
        // Skip all dummy, internal-only iterators
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            ++it;

        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of the first non-dummy internal iterator
    // at or after the passed-in internal iterator.
    const_iterator first_real_iterator(raw_const_iterator it) const
    {
        // Skip all dummy, internal-only iterators
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            ++it;

        return const_iterator(it.get_node_ptr(), this);
    }

    // Erase an element using the allocator
    void destroy_node(nodeptr_t pnode) {
        my_node_allocator.destroy(pnode);
        my_node_allocator.deallocate(pnode, 1);
    }

    // Try to insert a new element between previous and current_node. On failure,
    // return the node that currently occupies the position instead.
    nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
        new_node->my_next = current_node;
        return previous->atomic_set_next(new_node, current_node);
    }

    // Insert a new element between the passed-in iterators
    std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, const value_type &value, sokey_t order_key, size_type *new_count)
    {
        nodeptr_t pnode = create_node(order_key, value);
        nodeptr_t inserted_node = try_insert(it.get_node_ptr(), pnode, next.get_node_ptr());

        if (inserted_node == pnode)
        {
            // If the insert succeeded, check that the order is correct and increment the element count
            check_range();
            *new_count = __TBB_FetchAndAddW((uintptr_t*)&my_element_count, uintptr_t(1));
            return std::pair<iterator, bool>(iterator(pnode, this), true);
        }
        else
        {
            // If the insert failed (element already there), then delete the new one
            destroy_node(pnode);
            return std::pair<iterator, bool>(end(), false);
        }
    }

    // Insert a new dummy element, starting search at a parent dummy element
    raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
    {
        raw_iterator last = raw_end();
        raw_iterator where = it;

        __TBB_ASSERT(where != last, "Invalid head node");

        ++where;

        // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
        nodeptr_t dummy_node = create_node(order_key);

        for (;;)
        {
            __TBB_ASSERT(it != last, "Invalid head list node");

            // If the head iterator is at the end of the list, or past the point where this dummy
            // node needs to be inserted, then try to insert it.
            if (where == last || get_order_key(where) > order_key)
            {
                __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");

                // Try to insert it in the right place
                nodeptr_t inserted_node = try_insert(it.get_node_ptr(), dummy_node, where.get_node_ptr());

                if (inserted_node == dummy_node)
                {
                    // Insertion succeeded, check the list for order violations
                    check_range();
                    return raw_iterator(dummy_node);
                }
                else
                {
                    // Insertion failed: either dummy node was inserted by another thread, or
                    // a real element was inserted at exactly the same place as dummy node.
                    // Proceed with the search from the previous location where order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = it;
                    ++where;
                    continue;
                }
            }
            else if (get_order_key(where) == order_key)
            {
                // Another dummy node with the same value found, discard the new one.
                destroy_node(dummy_node);
                return where;
            }

            // Move the iterator forward
            it = where;
            ++where;
        }
    }

    // This erase function can handle both real and dummy nodes
    void erase_node(raw_iterator previous, raw_const_iterator& where)
    {
        nodeptr_t pnode = (where++).get_node_ptr();
        nodeptr_t prevnode = previous.get_node_ptr();
        __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
        prevnode->my_next = pnode->my_next;

        destroy_node(pnode);
    }

    // Erase the element (previous node needs to be passed because this is a forward-only list)
    iterator erase_node(raw_iterator previous, const_iterator where)
    {
        raw_const_iterator it = where;
        erase_node(previous, it);
        my_element_count--;

        return get_iterator(first_real_iterator(it));
    }

    // Move all elements from the passed-in split-ordered list to this one
    void move_all(self_type& source)
    {
        raw_const_iterator first = source.raw_begin();
        raw_const_iterator last = source.raw_end();

        if (first == last)
            return;

        nodeptr_t previous_node = my_head;
        raw_const_iterator begin_iterator = first++;

        // Move all elements one by one, including dummy ones
        for (raw_const_iterator it = first; it != last;)
        {
            nodeptr_t pnode = it.get_node_ptr();

            // Clone the source node (a dummy or a real element) and append the copy at the tail
            nodeptr_t new_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
            previous_node = try_insert(previous_node, new_node, NULL);
            __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
            raw_const_iterator where = it++;
            source.erase_node(get_iterator(begin_iterator), where);
        }
        check_range();
    }


private:

    // Check the list for order violations
    void check_range()
    {
#if TBB_USE_ASSERT
        for (raw_iterator it = raw_begin(); it != raw_end(); ++it)
        {
            raw_iterator next_iterator = it;
            ++next_iterator;

            __TBB_ASSERT(next_iterator == end() || next_iterator.get_node_ptr()->get_order_key() >= it.get_node_ptr()->get_order_key(), "!!! List order inconsistency !!!");
        }
#endif
    }

    typename allocator_type::template rebind<node>::other my_node_allocator;  // Allocator object for nodes
    size_type                                             my_element_count;   // Total item count, not counting dummy nodes
    nodeptr_t                                             my_head;            // Pointer to head node
};
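
// Illustrative usage sketch (an editorial addition, not part of the original
// header): it shows the raw insertion protocol the hash table layers on top of
// the list -- pick a position between two raw iterators, then call try_insert(),
// which fails if another thread links a node there first.
//
//     typedef split_ordered_list<int, tbb::tbb_allocator<int> > list_t;
//     list_t list;                                  // head dummy has order key 0
//     list_t::size_type count;
//     list_t::raw_iterator prev = list.raw_begin(); // the head dummy
//     list_t::raw_iterator next = list.raw_end();
//     std::pair<list_t::iterator, bool> result =
//         list.try_insert(prev, next, 42, /*regular (odd) order key*/ 0x5, &count);
//     // result.second is false if a concurrent insert won the CAS at 'prev'.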

// Template class for hash compare
template<typename Key, typename Hasher, typename Key_equality>
class hash_compare
{
public:
    hash_compare() {}

    hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {}

    hash_compare(Hasher a_hasher, Key_equality a_keyeq) : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {}

    // Hash a key
    size_t operator()(const Key& key) const {
        return ((size_t)my_hash_object(key));
    }

    // Note the negation: this returns true when the keys are NOT equal, so
    // callers test equality as !my_hash_compare(key1, key2).
    bool operator()(const Key& key1, const Key& key2) const {
        return (!my_key_compare_object(key1, key2));
    }

    Hasher       my_hash_object;        // The hash object
    Key_equality my_key_compare_object; // The equality comparator object
};

#if _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it (for allow_multimapping)
#endif

template <typename Traits>
class concurrent_unordered_base : public Traits
{
protected:
    // Type definitions
    typedef concurrent_unordered_base<Traits> self_type;
    typedef typename Traits::value_type value_type;
    typedef typename Traits::key_type key_type;
    typedef typename Traits::hash_compare hash_compare;
    typedef typename Traits::value_compare value_compare;
    typedef typename Traits::allocator_type allocator_type;
    typedef typename allocator_type::pointer pointer;
    typedef typename allocator_type::const_pointer const_pointer;
    typedef typename allocator_type::reference reference;
    typedef typename allocator_type::const_reference const_reference;
    typedef typename allocator_type::size_type size_type;
    typedef typename allocator_type::difference_type difference_type;
    typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
    typedef typename solist_t::nodeptr_t nodeptr_t;
    // Iterators that walk the entire split-order list, including dummy nodes
    typedef typename solist_t::raw_iterator raw_iterator;
    typedef typename solist_t::raw_const_iterator raw_const_iterator;
    typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
    typedef typename solist_t::const_iterator const_iterator;
    typedef iterator local_iterator;
    typedef const_iterator const_local_iterator;
    using Traits::my_hash_compare;
    using Traits::get_key;
    using Traits::allow_multimapping;

private:
    typedef std::pair<iterator, iterator> pairii_t;
    typedef std::pair<const_iterator, const_iterator> paircc_t;

    static size_type const pointers_per_table = sizeof(size_type) * 8;             // One bucket segment per bit
    static const size_type initial_bucket_number = 8;                              // Initial number of buckets
    static const size_type initial_bucket_load = 4;                                // Initial maximum number of elements per bucket

protected:
    // Constructors/Destructors
    concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
        const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
        : Traits(hc), my_solist(a),
          my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
    {
        __TBB_ASSERT( n_of_buckets, "number of buckets must be greater than 0");
        my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2
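        // e.g. n_of_buckets = 5: 5*2-1 = 9, __TBB_Log2(9) = 3, 1<<3 = 8;
        // a power of two such as 8 maps to itself: 8*2-1 = 15, Log2 = 3, 1<<3 = 8.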
        internal_init();
    }

    concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)
        : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
    {
        internal_init(); // Set up the bucket table before copying into it (matches the other copy constructor)
        internal_copy(right);
    }

    concurrent_unordered_base(const concurrent_unordered_base& right)
        : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
    {
        internal_init();
        internal_copy(right);
    }

    concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
        if (this != &right)
            internal_copy(right);
        return (*this);
    }

    ~concurrent_unordered_base() {
        // Delete all node segments
        internal_clear();
    }

public:
    allocator_type get_allocator() const {
        return my_solist.get_allocator();
    }

    // Size and capacity functions
    bool empty() const {
        return my_solist.empty();
    }

    size_type size() const {
        return my_solist.size();
    }

    size_type max_size() const {
        return my_solist.max_size();
    }

    // Iterators
    iterator begin() {
        return my_solist.begin();
    }

    const_iterator begin() const {
        return my_solist.begin();
    }

    iterator end() {
        return my_solist.end();
    }

    const_iterator end() const {
        return my_solist.end();
    }

    const_iterator cbegin() const {
        return my_solist.cbegin();
    }

    const_iterator cend() const {
        return my_solist.cend();
    }

    // Parallel traversal support
    class const_range_type : tbb::internal::no_assign {
        const concurrent_unordered_base &my_table;
        raw_const_iterator my_begin_node;
        raw_const_iterator my_end_node;
        mutable raw_const_iterator my_midpoint_node;
    public:
        // Type definitions
        typedef typename concurrent_unordered_base::size_type size_type;
        typedef typename concurrent_unordered_base::value_type value_type;
        typedef typename concurrent_unordered_base::reference reference;
        typedef typename concurrent_unordered_base::difference_type difference_type;
        typedef typename concurrent_unordered_base::const_iterator iterator;

        // True if the range is empty
        bool empty() const {return my_begin_node == my_end_node;}

        // True if the range can be divided into two non-empty subranges
        bool is_divisible() const {
            return my_midpoint_node != my_end_node;
        }
        // Split the range into two subranges (the splitting constructor)
        const_range_type( const_range_type &r, split ) :
            my_table(r.my_table), my_end_node(r.my_end_node)
        {
            r.my_end_node = my_begin_node = r.my_midpoint_node;
            __TBB_ASSERT( !empty(), "Splitting a range that is not divisible" );
            __TBB_ASSERT( !r.empty(), "Splitting a range that is not divisible" );
            set_midpoint();
            r.set_midpoint();
        }
        // Initialize the range over a whole container
        const_range_type( const concurrent_unordered_base &a_table ) :
            my_table(a_table), my_begin_node(a_table.my_solist.begin()),
            my_end_node(a_table.my_solist.end())
        {
            set_midpoint();
        }
        iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); }
        iterator end() const { return my_table.my_solist.get_iterator(my_end_node); }
        // The grain size for this range
        size_type grainsize() const { return 1; }

        // Set my_midpoint_node to point approximately halfway between my_begin_node and my_end_node
        void set_midpoint() const {
            if( my_begin_node == my_end_node ) // not divisible
                my_midpoint_node = my_end_node;
            else {
                sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);
                sokey_t end_key = solist_t::get_safe_order_key(my_end_node);
                size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
                while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
                my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket ));
                if( my_midpoint_node == my_begin_node )
                    my_midpoint_node = my_end_node;
#if TBB_USE_ASSERT
                else {
                    sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);
                    __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" );
                    __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" );
                }
#endif // TBB_USE_ASSERT
            }
        }
    };

    class range_type : public const_range_type {
    public:
        typedef typename concurrent_unordered_base::iterator iterator;
        // Split the range into two subranges (the splitting constructor)
        range_type( range_type &r, split ) : const_range_type( r, split() ) {}
        // Initialize the range over a whole container
        range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {}

        iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); }
        iterator end() const { return solist_t::get_iterator( const_range_type::end() ); }
    };

    range_type range() {
        return range_type( *this );
    }

    const_range_type range() const {
        return const_range_type( *this );
    }
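
    // Illustrative sketch (an editorial addition, not part of the original
    // header): these ranges plug into TBB's generic parallel algorithms, e.g.
    // with a derived container such as tbb::concurrent_unordered_map:
    //
    //     typedef tbb::concurrent_unordered_map<int, int> table_t;
    //     struct Body {
    //         void operator()( const table_t::const_range_type &r ) const {
    //             for ( table_t::const_range_type::iterator i = r.begin(); i != r.end(); ++i )
    //                 { /* process *i */ }
    //         }
    //     };
    //     // tbb::parallel_for( table.range(), Body() );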

    // Modifiers
    std::pair<iterator, bool> insert(const value_type& value) {
        return internal_insert(value);
    }

    iterator insert(const_iterator, const value_type& value) {
        // Ignore hint
        return insert(value).first;
    }

    template<class Iterator>
    void insert(Iterator first, Iterator last) {
        for (Iterator it = first; it != last; ++it)
            insert(*it);
    }

    iterator unsafe_erase(const_iterator where) {
        return internal_erase(where);
    }

    iterator unsafe_erase(const_iterator first, const_iterator last) {
        while (first != last)
            unsafe_erase(first++);
        return my_solist.get_iterator(first);
    }

    size_type unsafe_erase(const key_type& key) {
        pairii_t where = equal_range(key);
        size_type item_count = internal_distance(where.first, where.second);
        unsafe_erase(where.first, where.second);
        return item_count;
    }

    void swap(concurrent_unordered_base& right) {
        if (this != &right) {
            std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here
            my_solist.swap(right.my_solist);
            internal_swap_buckets(right);
            std::swap(my_number_of_buckets, right.my_number_of_buckets);
            std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);
        }
    }

    // Observers
    void clear() {
        // Clear list
        my_solist.clear();

        // Clear buckets
        internal_clear();
    }

    // Lookup
    iterator find(const key_type& key) {
        return internal_find(key);
    }

    const_iterator find(const key_type& key) const {
        return const_cast<self_type*>(this)->internal_find(key);
    }

    size_type count(const key_type& key) const {
        paircc_t answer = equal_range(key);
        size_type item_count = internal_distance(answer.first, answer.second);
        return item_count;
    }

    std::pair<iterator, iterator> equal_range(const key_type& key) {
        return internal_equal_range(key);
    }

    std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
        return const_cast<self_type*>(this)->internal_equal_range(key);
    }

    // Bucket interface - for debugging
    size_type unsafe_bucket_count() const {
        return my_number_of_buckets;
    }

    size_type unsafe_max_bucket_count() const {
        return segment_size(pointers_per_table-1);
    }

    size_type unsafe_bucket_size(size_type bucket) {
        size_type item_count = 0;
        if (is_initialized(bucket)) {
            raw_iterator it = get_bucket(bucket);
            ++it;
            for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)
                ++item_count;
        }
        return item_count;
    }

    size_type unsafe_bucket(const key_type& key) const {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;
        return bucket;
    }

    // If the bucket is initialized, return the first non-dummy element in it
    local_iterator unsafe_begin(size_type bucket) {
        if (!is_initialized(bucket))
            return end();

        raw_iterator it = get_bucket(bucket);
        return my_solist.first_real_iterator(it);
    }

    // If the bucket is initialized, return the first non-dummy element in it
    const_local_iterator unsafe_begin(size_type bucket) const
    {
        if (!is_initialized(bucket))
            return end();

        raw_const_iterator it = get_bucket(bucket);
        return my_solist.first_real_iterator(it);
    }

    // @REVIEW: Takes O(n)
    // Returns the iterator after the last non-dummy element in the bucket
    local_iterator unsafe_end(size_type bucket)
    {
        if (!is_initialized(bucket))
            return end();

        raw_iterator it = get_bucket(bucket);

        // Find the end of the bucket, denoted by the dummy element
        do ++it;
        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());

        // Return the first real element past the end of the bucket
        return my_solist.first_real_iterator(it);
    }

    // @REVIEW: Takes O(n)
    // Returns the iterator after the last non-dummy element in the bucket
    const_local_iterator unsafe_end(size_type bucket) const
    {
        if (!is_initialized(bucket))
            return end();

        raw_const_iterator it = get_bucket(bucket);

        // Find the end of the bucket, denoted by the dummy element
        do ++it;
        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());

        // Return the first real element past the end of the bucket
        return my_solist.first_real_iterator(it);
    }

    const_local_iterator unsafe_cbegin(size_type bucket) const {
        return ((const self_type *) this)->unsafe_begin(bucket);
    }

    const_local_iterator unsafe_cend(size_type bucket) const {
        return ((const self_type *) this)->unsafe_end(bucket);
    }

    // Hash policy
    float load_factor() const {
        return (float) size() / (float) unsafe_bucket_count();
    }

    float max_load_factor() const {
        return my_maximum_bucket_size;
    }

    void max_load_factor(float newmax) {
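        // Reject NaN (a NaN compares unequal to itself) and negative values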
        if (newmax != newmax || newmax < 0)
            tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
        my_maximum_bucket_size = newmax;
    }

    // Buckets are split lazily: this call only publishes a larger bucket count
    // (rounded up to a power of two). The underlying split-ordered list is
    // already sorted, so each new bucket gets its dummy node on first touch.
    void rehash(size_type buckets) {
        size_type current_buckets = my_number_of_buckets;
        if (current_buckets >= buckets)
            return;
        my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2
    }

private:

    // Initialize the hash table and keep the first bucket open
    void internal_init() {
        // Zero the array of segment pointers
        std::memset(my_buckets, 0, pointers_per_table * sizeof(void *));

        // Insert the first element in the split-ordered list
        raw_iterator dummy_node = my_solist.raw_begin();
        set_bucket(0, dummy_node);
    }

    void internal_clear() {
        for (size_type index = 0; index < pointers_per_table; ++index) {
            if (my_buckets[index] != NULL) {
                size_type sz = segment_size(index);
                for (size_type index2 = 0; index2 < sz; ++index2)
                    my_allocator.destroy(&my_buckets[index][index2]);
                my_allocator.deallocate(my_buckets[index], sz);
                my_buckets[index] = 0;
            }
        }
    }

    void internal_copy(const self_type& right) {
        clear();

        my_maximum_bucket_size = right.my_maximum_bucket_size;
        my_number_of_buckets = right.my_number_of_buckets;

        __TBB_TRY {
            insert(right.begin(), right.end());
            my_hash_compare = right.my_hash_compare;
        } __TBB_CATCH(...) {
            my_solist.clear();
            __TBB_RETHROW();
        }
    }

    void internal_swap_buckets(concurrent_unordered_base& right)
    {
        // Swap all node segments
        for (size_type index = 0; index < pointers_per_table; ++index)
        {
            raw_iterator * iterator_pointer = my_buckets[index];
            my_buckets[index] = right.my_buckets[index];
            right.my_buckets[index] = iterator_pointer;
        }
    }

    // Hash APIs
    size_type internal_distance(const_iterator first, const_iterator last) const
    {
        size_type num = 0;

        for (const_iterator it = first; it != last; ++it)
            ++num;

        return num;
    }

    // Insert an element in the hash table given its value
    std::pair<iterator, bool> internal_insert(const value_type& value)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(get_key(value));
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        size_type new_count;
        order_key = split_order_key_regular(order_key);
        raw_iterator it = get_bucket(bucket);
        raw_iterator last = my_solist.raw_end();
        raw_iterator where = it;

        __TBB_ASSERT(where != last, "Invalid head node");

        // First node is a dummy node
        ++where;

        for (;;)
        {
            if (where == last || solist_t::get_order_key(where) > order_key)
            {
                // Try to insert it in the right place
                std::pair<iterator, bool> result = my_solist.try_insert(it, where, value, order_key, &new_count);

                if (result.second)
                {
                    // Insertion succeeded, adjust the table size, if needed
                    adjust_table_size(new_count, my_number_of_buckets);
                    return result;
                }
                else
                {
                    // Insertion failed: either the same node was inserted by another thread, or
                    // another element was inserted at exactly the same place as this node.
                    // Proceed with the search from the previous location where order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = it;
                    ++where;
                    continue;
                }
            }
            else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && my_hash_compare(get_key(*where), get_key(value)) == 0)
            {
                // Element already in the list, return it
                return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
            }

            // Move the iterator forward
            it = where;
            ++where;
        }
    }

    // Find the element in the split-ordered list
    iterator internal_find(const key_type& key)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);
        raw_iterator last = my_solist.raw_end();

        for (raw_iterator it = get_bucket(bucket); it != last; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // We have passed the point where the sought order key would be;
                // since the list is sorted, the element is not in the hash table.
                return end();
            }
            else if (solist_t::get_order_key(it) == order_key)
            {
                // The fact that order keys match does not mean that the element is found.
                // Key function comparison has to be performed to check whether this is the
                // right element. If not, keep searching while order key is the same.
                if (!my_hash_compare(get_key(*it), key))
                    return my_solist.get_iterator(it);
            }
        }

        return end();
    }

    // Erase an element from the list. This is not a concurrency-safe function.
    iterator internal_erase(const_iterator it)
    {
        key_type key = get_key(*it);
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);

        raw_iterator previous = get_bucket(bucket);
        raw_iterator last = my_solist.raw_end();
        raw_iterator where = previous;

        __TBB_ASSERT(where != last, "Invalid head node");

        // First node is a dummy node
        ++where;

        for (;;) {
            if (where == last)
                return end();
            else if (my_solist.get_iterator(where) == it)
                return my_solist.erase_node(previous, it);

            // Move the iterator forward
            previous = where;
            ++where;
        }
    }

    // Return the [begin, end) pair of iterators with the same key values.
    // This operation is primarily useful when multiple equivalent keys are
    // allowed (i.e. for the multimap/multiset flavors).
    pairii_t internal_equal_range(const key_type& key)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);
        raw_iterator end_it = my_solist.raw_end();

        for (raw_iterator it = get_bucket(bucket); it != end_it; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // There is no element with the given key
                return pairii_t(end(), end());
            }
            else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))
            {
                iterator first = my_solist.get_iterator(it);
                iterator last = first;

                while( last != end() && !my_hash_compare(get_key(*last), key) )
                    ++last;
                return pairii_t(first, last);
            }
        }

        return pairii_t(end(), end());
    }

    // Bucket APIs
    void init_bucket(size_type bucket)
    {
        // Bucket 0 has no parent. Initialize it and return.
        if (bucket == 0) {
            internal_init();
            return;
        }

        size_type parent_bucket = get_parent(bucket);

        // The chain of parent buckets has to be initialized before this bucket is
        if (!is_initialized(parent_bucket))
            init_bucket(parent_bucket);

        raw_iterator parent = get_bucket(parent_bucket);

        // Create a dummy first node in this bucket
        raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
        set_bucket(bucket, dummy_node);
    }
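
    // Worked example (illustrative note, not from the original source):
    // initializing bucket 6 (110b) first requires its parent, bucket 2 (010b),
    // which in turn requires bucket 0; each step splices one dummy node into
    // the split-ordered list, so buckets materialize lazily on first use.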

    void adjust_table_size(size_type total_elements, size_type current_size)
    {
        // Grow the table by a factor of 2 if possible and needed
        if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
        {
            // Double the size of the hash table only if the size has not changed in between the loads
            __TBB_CompareAndSwapW((uintptr_t*)&my_number_of_buckets, 2 * current_size, current_size );
        }
    }

    size_type get_parent(size_type bucket) const
    {
        // Unsets bucket's most significant turned-on bit
        size_type msb = __TBB_Log2((uintptr_t)bucket);
        return bucket & ~(size_type(1) << msb);
    }
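
    // Worked example (illustrative note, not from the original source):
    // get_parent(6) clears the top set bit of 110b, giving 010b = bucket 2;
    // get_parent(5) turns 101b into 001b = bucket 1. Repeatedly taking the
    // parent always terminates at bucket 0.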

    // Dynamically sized array of bucket segments

    // Return the segment index containing the given bucket index
    static size_type segment_index_of( size_type index ) {
        return size_type( __TBB_Log2( index|1 ) );
    }

    // Return the first bucket index of the given segment
    static size_type segment_base( size_type k ) {
        return (size_type(1)<<k & ~size_type(1));
    }

    // Return the size of the given segment
    static size_type segment_size( size_type k ) {
        return k? size_type(1)<<k : 2;
    }
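
    // Worked example (illustrative note, not from the original source):
    // segment 0 holds buckets 0-1 (size 2), segment 1 holds buckets 2-3
    // (size 2), segment 2 holds buckets 4-7 (size 4), segment 3 holds
    // buckets 8-15 (size 8), and so on; e.g. bucket 6 lives in segment
    // segment_index_of(6) = Log2(7) = 2, at offset 6 - segment_base(2) = 2.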

    raw_iterator get_bucket(size_type bucket) const {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment);
        __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
        return my_buckets[segment][bucket];
    }

    void set_bucket(size_type bucket, raw_iterator dummy_head) {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment);

        if (my_buckets[segment] == NULL) {
            size_type sz = segment_size(segment);
            raw_iterator * new_segment = my_allocator.allocate(sz);
            std::memset(new_segment, 0, sz*sizeof(raw_iterator));

            if (__TBB_CompareAndSwapW((void *) &my_buckets[segment], (uintptr_t)new_segment, 0) != 0)
                my_allocator.deallocate(new_segment, sz);
        }

        my_buckets[segment][bucket] = dummy_head;
    }

    bool is_initialized(size_type bucket) const {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment);

        if (my_buckets[segment] == NULL)
            return false;

        raw_iterator it = my_buckets[segment][bucket];
        return (it.get_node_ptr() != NULL);
    }

    // Utilities for keys

    // A regular order key has its original hash value reversed and the last bit set
    sokey_t split_order_key_regular(sokey_t order_key) const {
        return __TBB_ReverseBits(order_key) | 0x1;
    }

    // A dummy order key has its original hash value reversed and the last bit unset
    sokey_t split_order_key_dummy(sokey_t order_key) const {
        return __TBB_ReverseBits(order_key) & ~(0x1);
    }
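
    // Worked example (illustrative note, not from the original source): with
    // 8-bit keys, a hash of 0x06 yields the regular key reverse(0x06)|1 = 0x61,
    // and bucket 6 gets the dummy key reverse(0x06)&~1 = 0x60. Because the
    // reversal puts the bucket-selecting low bits of the hash into the high
    // bits of the order key, each bucket's elements form one contiguous run
    // headed by its dummy node, and doubling the table splits a bucket without
    // reordering any existing nodes.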

    // Shared variables
    atomic<size_type>                                             my_number_of_buckets;       // Current table size
    solist_t                                                      my_solist;                  // List where all the elements are kept
    typename allocator_type::template rebind<raw_iterator>::other my_allocator;               // Allocator object for segments
    float                                                         my_maximum_bucket_size;     // Maximum size of the bucket
    atomic<raw_iterator*>                                         my_buckets[pointers_per_table]; // The segment table
};
#if _MSC_VER
#pragma warning(pop) // warning 4127 -- while (true) has a constant expression in it
#endif

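// The multiplier below is the golden-ratio constant of Fibonacci
// (multiplicative) hashing: floor(2^32/phi) for 32-bit size_t and
// floor(2^64/phi) for 64-bit (an editorial note, not from the original source).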
static const size_t hash_multiplier = sizeof(size_t)==4? 2654435769U : 11400714819323198485ULL;
} // namespace internal

// Hash functions for the default hash_compare of the interface5 containers
template<typename T>
inline size_t tbb_hasher( const T& t ) {
    return static_cast<size_t>( t ) * internal::hash_multiplier;
}
template<typename P>
inline size_t tbb_hasher( P* ptr ) {
    size_t const h = reinterpret_cast<size_t>( ptr );
    return (h >> 3) ^ h;
}
template<typename E, typename S, typename A>
inline size_t tbb_hasher( const std::basic_string<E,S,A>& s ) {
    size_t h = 0;
    for( const E* c = s.c_str(); *c; ++c )
        h = static_cast<size_t>(*c) ^ (h * internal::hash_multiplier);
    return h;
}
template<typename F, typename S>
inline size_t tbb_hasher( const std::pair<F,S>& p ) {
    return tbb_hasher(p.first) ^ tbb_hasher(p.second);
}
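
// Usage sketch (an editorial addition, not part of the original header):
//     size_t h1 = tbb::tbb_hasher( 42 );                      // integral
//     size_t h2 = tbb::tbb_hasher( std::string("abc") );      // string
//     size_t h3 = tbb::tbb_hasher( std::make_pair( 1, 2 ) );  // pair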
} // namespace interface5
using interface5::tbb_hasher;
} // namespace tbb
#endif // __TBB_concurrent_unordered_internal_H
