/*
    Copyright 2005-2014 Intel Corporation. All Rights Reserved.

    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
    you can redistribute it and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation. Threading Building Blocks is
    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
    See the GNU General Public License for more details. You should have received a copy of
    the GNU General Public License along with Threading Building Blocks; if not, write to the
    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    As a special exception, you may use this file as part of a free software library without
    restriction. Specifically, if other files instantiate templates or use macros or inline
    functions from this file, or you compile this file and link it with other files to produce
    an executable, this file does not by itself cause the resulting executable to be covered
    by the GNU General Public License. This exception does not however invalidate any other
    reasons why the executable file might be covered by the GNU General Public License.
*/

/* Container implementations in this header are based on PPL implementations
   provided by Microsoft. */

#ifndef __TBB__concurrent_unordered_impl_H
#define __TBB__concurrent_unordered_impl_H
#if !defined(__TBB_concurrent_unordered_map_H) && !defined(__TBB_concurrent_unordered_set_H) && !defined(__TBB_concurrent_hash_map_H)
#error Do not #include this internal file directly; use public TBB headers instead.
#endif

#include "../tbb_stddef.h"

#if !TBB_USE_EXCEPTIONS && _MSC_VER
// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
#pragma warning (push)
#pragma warning (disable: 4530)
#endif

#include <iterator>
#include <utility>    // Need std::pair
#include <functional> // Need std::equal_to (in ../concurrent_unordered_*.h)
#include <string>     // For tbb_hasher
#include <cstring>    // Need std::memset

#if !TBB_USE_EXCEPTIONS && _MSC_VER
#pragma warning (pop)
#endif

#include "../atomic.h"
#include "../tbb_exception.h"
#include "../tbb_allocator.h"

#if __TBB_INITIALIZER_LISTS_PRESENT
#include <initializer_list>
#endif

namespace tbb {
namespace interface5 {
//! @cond INTERNAL
namespace internal {

template <typename T, typename Allocator>
class split_ordered_list;
template <typename Traits>
class concurrent_unordered_base;

// Forward list iterators (without skipping dummy elements)
template<class Solist, typename Value>
class flist_iterator : public std::iterator<std::forward_iterator_tag, Value>
{
    template <typename T, typename Allocator>
    friend class split_ordered_list;
    template <typename Traits>
    friend class concurrent_unordered_base;
    template<class M, typename V>
    friend class flist_iterator;

    typedef typename Solist::nodeptr_t nodeptr_t;
public:
    typedef typename Solist::value_type value_type;
    typedef typename Solist::difference_type difference_type;
    typedef typename Solist::pointer pointer;
    typedef typename Solist::reference reference;

    flist_iterator() : my_node_ptr(0) {}
    flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )
        : my_node_ptr(other.my_node_ptr) {}

    reference operator*() const { return my_node_ptr->my_element; }
    pointer operator->() const { return &**this; }

    flist_iterator& operator++() {
        my_node_ptr = my_node_ptr->my_next;
        return *this;
    }

    flist_iterator operator++(int) {
        flist_iterator tmp = *this;
        ++*this;
        return tmp;
    }

protected:
    flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
    nodeptr_t get_node_ptr() const { return my_node_ptr; }

    nodeptr_t my_node_ptr;

    template<typename M, typename T, typename U>
    friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );
    template<typename M, typename T, typename U>
    friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );
};

template<typename Solist, typename T, typename U>
bool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {
    return i.my_node_ptr == j.my_node_ptr;
}
template<typename Solist, typename T, typename U>
bool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {
    return i.my_node_ptr != j.my_node_ptr;
}

// Split-order list iterators, needed to skip dummy elements
template<class Solist, typename Value>
class solist_iterator : public flist_iterator<Solist, Value>
{
    typedef flist_iterator<Solist, Value> base_type;
    typedef typename Solist::nodeptr_t nodeptr_t;
    using base_type::get_node_ptr;
    template <typename T, typename Allocator>
    friend class split_ordered_list;
    template<class M, typename V>
    friend class solist_iterator;
    template<typename M, typename T, typename U>
    friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );
    template<typename M, typename T, typename U>
    friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );

    const Solist *my_list_ptr;
    solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}

public:
    typedef typename Solist::value_type value_type;
    typedef typename Solist::difference_type difference_type;
    typedef typename Solist::pointer pointer;
    typedef typename Solist::reference reference;

    solist_iterator() {}
    solist_iterator(const solist_iterator<Solist, typename Solist::value_type> &other )
        : base_type(other), my_list_ptr(other.my_list_ptr) {}

    reference operator*() const {
        return this->base_type::operator*();
    }

    pointer operator->() const {
        return (&**this);
    }

    solist_iterator& operator++() {
        do ++(*(base_type *)this);
        while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());

        return (*this);
    }

    solist_iterator operator++(int) {
        solist_iterator tmp = *this;
        do ++*this;
        while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());

        return (tmp);
    }
};

template<typename Solist, typename T, typename U>
bool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {
    return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;
}
template<typename Solist, typename T, typename U>
bool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {
    return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
}

// Forward type and class definitions
typedef size_t sokey_t;
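
// Background for the list class below (a summary, not normative docs): a
// split-ordered list keeps every element of the hash table in one lock-free
// forward list, sorted by bit-reversed hash value (the sokey_t order key).
// Buckets do not own nodes; an initialized bucket merely caches an iterator
// to a "dummy" sentinel node spliced into the list where that bucket's run
// of elements begins. Doubling the bucket count therefore never moves a
// node: each old bucket logically splits in place, and new buckets receive
// their dummy nodes lazily on first touch. A worked bit-level example of the
// key encoding accompanies split_order_key_regular()/split_order_key_dummy()
// near the end of this header.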

// Forward list in which elements are sorted in a split-order
template <typename T, typename Allocator>
class split_ordered_list
{
public:
    typedef split_ordered_list<T, Allocator> self_type;
    typedef typename Allocator::template rebind<T>::other allocator_type;
    struct node;
    typedef node *nodeptr_t;

    typedef typename allocator_type::size_type size_type;
    typedef typename allocator_type::difference_type difference_type;
    typedef typename allocator_type::pointer pointer;
    typedef typename allocator_type::const_pointer const_pointer;
    typedef typename allocator_type::reference reference;
    typedef typename allocator_type::const_reference const_reference;
    typedef typename allocator_type::value_type value_type;

    typedef solist_iterator<self_type, const value_type> const_iterator;
    typedef solist_iterator<self_type, value_type> iterator;
    typedef flist_iterator<self_type, const value_type> raw_const_iterator;
    typedef flist_iterator<self_type, value_type> raw_iterator;

    // Node that holds the element in a split-ordered list
    struct node : tbb::internal::no_assign
    {
    private:
        // for compilers that try to generate default constructors even though they are not needed.
        node(); // VS 2008, 2010, 2012
    public:
        // Initialize the node with the given order key
        void init(sokey_t order_key) {
            my_order_key = order_key;
            my_next = NULL;
        }

        // Return the order key (needed for hashing)
        sokey_t get_order_key() const { // TODO: remove
            return my_order_key;
        }

        // Inserts the new element in the list in an atomic fashion
        nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
        {
            // Try to change the next pointer on the current element to a new element, only if it still points to the cached next
            nodeptr_t exchange_node = tbb::internal::as_atomic(my_next).compare_and_swap(new_node, current_node);

            if (exchange_node == current_node) // TODO: why this branch?
            {
                // Operation succeeded, return the new node
                return new_node;
            }
            else
            {
                // Operation failed, return the "interfering" node
                return exchange_node;
            }
        }

        // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets
        // in the hash table to quickly index into the right subsection of the split-ordered list.
        bool is_dummy() const {
            return (my_order_key & 0x1) == 0;
        }

        nodeptr_t my_next;     // Next element in the list
        value_type my_element; // Element storage
        sokey_t my_order_key;  // Order key for this element
    };

    // Allocate a new node with the given order key and value
    nodeptr_t create_node(sokey_t order_key, const T &value) {
        nodeptr_t pnode = my_node_allocator.allocate(1);

        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T(value);
            pnode->init(order_key);
        } __TBB_CATCH(...) {
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //TODO: try to combine both implementations using poor man's forward
    //TODO: use RAII scoped guard instead of explicit catch
    // Allocate a new node with the given order key and value
    nodeptr_t create_node(sokey_t order_key, T &&value) {
        nodeptr_t pnode = my_node_allocator.allocate(1);

        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T(std::move(value));
            pnode->init(order_key);
        } __TBB_CATCH(...) {
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

    // Allocate a new node with the given order key; used to allocate dummy nodes
    nodeptr_t create_node(sokey_t order_key) {
        nodeptr_t pnode = my_node_allocator.allocate(1);
        pnode->init(order_key);
        return (pnode);
    }

    split_ordered_list(allocator_type a = allocator_type())
        : my_node_allocator(a), my_element_count(0)
    {
        // Immediately allocate a dummy node with order key of 0. This node
        // will always be the head of the list.
        my_head = create_node(0);
    }

    ~split_ordered_list()
    {
        // Clear the list
        clear();

        // Remove the head element which is not cleared by clear()
        nodeptr_t pnode = my_head;
        my_head = NULL;

        __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node");

        destroy_node(pnode);
    }

    // Common forward list functions

    allocator_type get_allocator() const {
        return (my_node_allocator);
    }

    void clear() {
        nodeptr_t pnext;
        nodeptr_t pnode = my_head;

        __TBB_ASSERT(my_head != NULL, "Invalid head list node");
        pnext = pnode->my_next;
        pnode->my_next = NULL;
        pnode = pnext;

        while (pnode != NULL)
        {
            pnext = pnode->my_next;
            destroy_node(pnode);
            pnode = pnext;
        }

        my_element_count = 0;
    }

    // Returns the first non-dummy element in the SOL
    iterator begin() {
        return first_real_iterator(raw_begin());
    }

    // Returns the first non-dummy element in the SOL
    const_iterator begin() const {
        return first_real_iterator(raw_begin());
    }

    iterator end() {
        return (iterator(0, this));
    }

    const_iterator end() const {
        return (const_iterator(0, this));
    }

    const_iterator cbegin() const {
        return (((const self_type *)this)->begin());
    }

    const_iterator cend() const {
        return (((const self_type *)this)->end());
    }

    // Checks if the number of elements (non-dummy) is 0
    bool empty() const {
        return (my_element_count == 0);
    }

    // Returns the number of non-dummy elements in the list
    size_type size() const {
        return my_element_count;
    }

    // Returns the maximum size of the list, determined by the allocator
    size_type max_size() const {
        return my_node_allocator.max_size();
    }

    // Swaps 'this' list with the passed-in one
    void swap(self_type& other)
    {
        if (this == &other)
        {
            // Nothing to do
            return;
        }

        std::swap(my_element_count, other.my_element_count);
        std::swap(my_head, other.my_head);
    }

    // Split-order list functions

    // Returns the first element in the SOL, which is always a dummy
    raw_iterator raw_begin() {
        return raw_iterator(my_head);
    }

    // Returns the first element in the SOL, which is always a dummy
    raw_const_iterator raw_begin() const {
        return raw_const_iterator(my_head);
    }

    raw_iterator raw_end() {
        return raw_iterator(0);
    }

    raw_const_iterator raw_end() const {
        return raw_const_iterator(0);
    }

    static sokey_t get_order_key(const raw_const_iterator& it) {
        return it.get_node_ptr()->get_order_key();
    }

    static sokey_t get_safe_order_key(const raw_const_iterator& it) {
        if( !it.get_node_ptr() ) return ~sokey_t(0);
        return it.get_node_ptr()->get_order_key();
    }

    // Returns a public iterator version of the internal iterator. Public iterator must not
    // be a dummy private iterator.
    iterator get_iterator(raw_iterator it) {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of the internal iterator. Public iterator must not
    // be a dummy private iterator.
    const_iterator get_iterator(raw_const_iterator it) const {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return const_iterator(it.get_node_ptr(), this);
    }

    // Returns a non-const version of the raw_iterator
    raw_iterator get_iterator(raw_const_iterator it) {
        return raw_iterator(it.get_node_ptr());
    }

    // Returns a non-const version of the iterator
    static iterator get_iterator(const_iterator it) {
        return iterator(it.my_node_ptr, it.my_list_ptr);
    }

    // Returns a public iterator version of the first non-dummy internal iterator at or after
    // the passed-in internal iterator.
    iterator first_real_iterator(raw_iterator it)
    {
        // Skip all dummy, internal only iterators
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            ++it;

        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of the first non-dummy internal iterator at or after
    // the passed-in internal iterator.
    const_iterator first_real_iterator(raw_const_iterator it) const
    {
        // Skip all dummy, internal only iterators
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            ++it;

        return const_iterator(it.get_node_ptr(), this);
    }

    // Erase an element using the allocator
    void destroy_node(nodeptr_t pnode) {
        if (!pnode->is_dummy()) my_node_allocator.destroy(pnode);
        my_node_allocator.deallocate(pnode, 1);
    }

    // Try to insert a new element in the list. If insert fails, return the node that
    // was inserted instead.
    nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
        new_node->my_next = current_node;
        return previous->atomic_set_next(new_node, current_node);
    }

    // Insert a new element between passed-in iterators
    std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, const value_type &value, sokey_t order_key, size_type *new_count)
    {
        nodeptr_t pnode = create_node(order_key, value);
        nodeptr_t inserted_node = try_insert(it.get_node_ptr(), pnode, next.get_node_ptr());

        if (inserted_node == pnode)
        {
            // If the insert succeeded, check that the order is correct and increment the element count
            check_range();
            *new_count = __TBB_FetchAndAddW((uintptr_t*)&my_element_count, uintptr_t(1));
            return std::pair<iterator, bool>(iterator(pnode, this), true);
        }
        else
        {
            // If the insert failed (element already there), then delete the new one
            destroy_node(pnode);
            return std::pair<iterator, bool>(end(), false);
        }
    }

    // Insert a new dummy element, starting search at a parent dummy element
    raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
    {
        raw_iterator last = raw_end();
        raw_iterator where = it;

        __TBB_ASSERT(where != last, "Invalid head node");

        ++where;

        // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
        nodeptr_t dummy_node = create_node(order_key);

        for (;;)
        {
            __TBB_ASSERT(it != last, "Invalid head list node");

            // If the iterator is at the end of the list, or past the point where this dummy
            // node needs to be inserted, then try to insert it.
            if (where == last || get_order_key(where) > order_key)
            {
                __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");

                // Try to insert it in the right place
                nodeptr_t inserted_node = try_insert(it.get_node_ptr(), dummy_node, where.get_node_ptr());

                if (inserted_node == dummy_node)
                {
                    // Insertion succeeded, check the list for order violations
                    check_range();
                    return raw_iterator(dummy_node);
                }
                else
                {
                    // Insertion failed: either a dummy node was inserted by another thread, or
                    // a real element was inserted at exactly the same place as the dummy node.
                    // Proceed with the search from the previous location where the order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = it;
                    ++where;
                    continue;
                }
            }
            else if (get_order_key(where) == order_key)
            {
                // Another dummy node with the same value found, discard the new one.
                destroy_node(dummy_node);
                return where;
            }

            // Move the iterator forward
            it = where;
            ++where;
        }
    }

    // This erase function can handle both real and dummy nodes
    void erase_node(raw_iterator previous, raw_const_iterator& where)
    {
        nodeptr_t pnode = (where++).get_node_ptr();
        nodeptr_t prevnode = previous.get_node_ptr();
        __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
        prevnode->my_next = pnode->my_next;

        destroy_node(pnode);
    }

    // Erase the element (previous node needs to be passed because this is a forward-only list)
    iterator erase_node(raw_iterator previous, const_iterator where)
    {
        raw_const_iterator it = where;
        erase_node(previous, it);
        my_element_count--;

        return get_iterator(first_real_iterator(it));
    }

    // Move all elements from the passed-in split-ordered list to this one
    void move_all(self_type& source)
    {
        raw_const_iterator first = source.raw_begin();
        raw_const_iterator last = source.raw_end();

        if (first == last)
            return;

        nodeptr_t previous_node = my_head;
        raw_const_iterator begin_iterator = first++;

        // Move all elements one by one, including dummy ones
        for (raw_const_iterator it = first; it != last;)
        {
            nodeptr_t pnode = it.get_node_ptr();

            nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
            previous_node = try_insert(previous_node, dummy_node, NULL);
            __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
            raw_const_iterator where = it++;
            source.erase_node(get_iterator(begin_iterator), where);
        }
        check_range();
    }

private:
    // The move constructor and move assignment of concurrent_unordered_base need to set up
    // the private fields of split_ordered_list directly.
    template <typename Traits>
    friend class concurrent_unordered_base;

    // Check the list for order violations
    void check_range()
    {
#if TBB_USE_ASSERT
        for (raw_iterator it = raw_begin(); it != raw_end(); ++it)
        {
            raw_iterator next_iterator = it;
            ++next_iterator;

            __TBB_ASSERT(next_iterator == end() || next_iterator.get_node_ptr()->get_order_key() >= it.get_node_ptr()->get_order_key(), "!!! List order inconsistency !!!");
        }
#endif
    }

    typename allocator_type::template rebind<node>::other my_node_allocator; // allocator object for nodes
    size_type my_element_count;                                              // Total item count, not counting dummy nodes
    nodeptr_t my_head;                                                       // pointer to head node
};

// Template class for hash compare
template<typename Key, typename Hasher, typename Key_equality>
class hash_compare
{
public:
    typedef Hasher hasher;
    typedef Key_equality key_equal;

    hash_compare() {}

    hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {}

    hash_compare(Hasher a_hasher, Key_equality a_keyeq) : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {}

    size_t operator()(const Key& key) const {
        return ((size_t)my_hash_object(key));
    }

    bool operator()(const Key& key1, const Key& key2) const {
        return (!my_key_compare_object(key1, key2));
    }

    Hasher my_hash_object;              // The hash object
    Key_equality my_key_compare_object; // The equality comparator object
};
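
// Note on the two-argument operator() above: it returns the *negation* of
// the user-supplied equality predicate, i.e. true when the keys differ.
// Callers below rely on that inversion, e.g. internal_find tests
//     if (!my_hash_compare(get_key(*it), key))
// to detect a match. A minimal usage sketch (illustrative only; any hasher
// and equality predicate with the usual signatures work):
//
//     hash_compare<int, tbb::tbb_hash<int>, std::equal_to<int> > hc;
//     size_t h   = hc(42);    // one argument: hash of the key
//     bool   neq = hc(1, 2);  // two arguments: true, since 1 != 2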

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning(push)
#pragma warning(disable: 4127) // warning C4127: conditional expression is constant
#endif

template <typename Traits>
class concurrent_unordered_base : public Traits
{
protected:
    // Type definitions
    typedef concurrent_unordered_base<Traits> self_type;
    typedef typename Traits::value_type value_type;
    typedef typename Traits::key_type key_type;
    typedef typename Traits::hash_compare hash_compare;
    typedef typename Traits::value_compare value_compare;
    typedef typename Traits::allocator_type allocator_type;
    typedef typename hash_compare::hasher hasher;
    typedef typename hash_compare::key_equal key_equal;
    typedef typename allocator_type::pointer pointer;
    typedef typename allocator_type::const_pointer const_pointer;
    typedef typename allocator_type::reference reference;
    typedef typename allocator_type::const_reference const_reference;
    typedef typename allocator_type::size_type size_type;
    typedef typename allocator_type::difference_type difference_type;
    typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
    typedef typename solist_t::nodeptr_t nodeptr_t;
    // Iterators that walk the entire split-order list, including dummy nodes
    typedef typename solist_t::raw_iterator raw_iterator;
    typedef typename solist_t::raw_const_iterator raw_const_iterator;
    typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
    typedef typename solist_t::const_iterator const_iterator;
    typedef iterator local_iterator;
    typedef const_iterator const_local_iterator;
    using Traits::my_hash_compare;
    using Traits::get_key;
    using Traits::allow_multimapping;

    static const size_type initial_bucket_number = 8; // Initial number of buckets
private:
    typedef std::pair<iterator, iterator> pairii_t;
    typedef std::pair<const_iterator, const_iterator> paircc_t;

    static size_type const pointers_per_table = sizeof(size_type) * 8; // One bucket segment per bit
    static const size_type initial_bucket_load = 4;                    // Initial maximum number of elements per bucket

    struct call_internal_clear_on_exit {
        concurrent_unordered_base* my_instance;
        call_internal_clear_on_exit(concurrent_unordered_base* instance) : my_instance(instance) {}
        void dismiss() { my_instance = NULL; }
        ~call_internal_clear_on_exit() {
            if (my_instance) {
                my_instance->internal_clear();
            }
        }
    };
protected:
    // Constructors/Destructors
    concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
        const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
        : Traits(hc), my_solist(a),
          my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
    {
        if( n_of_buckets == 0) ++n_of_buckets;
        my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to a power of 2
        internal_init();
    }

    concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)
        : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
    {
        internal_init();
        internal_copy(right);
    }

    concurrent_unordered_base(const concurrent_unordered_base& right)
        : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
    {
        //FIXME: exception safety seems to be broken here
        internal_init();
        internal_copy(right);
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    concurrent_unordered_base(concurrent_unordered_base&& right)
        : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
    {
        internal_init();
        swap(right);
    }

    concurrent_unordered_base(concurrent_unordered_base&& right, const allocator_type& a)
        : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
    {
        call_internal_clear_on_exit clear_buckets_on_exception(this);

        internal_init();
        if (a == right.get_allocator()) {
            this->swap(right);
        } else {
            my_maximum_bucket_size = right.my_maximum_bucket_size;
            my_number_of_buckets = right.my_number_of_buckets;
            my_solist.my_element_count = right.my_solist.my_element_count;

            if (! right.my_solist.empty()) {
                nodeptr_t previous_node = my_solist.my_head;

                // Move all elements one by one, including dummy ones
                for (raw_const_iterator it = ++(right.my_solist.raw_begin()), last = right.my_solist.raw_end(); it != last; ++it)
                {
                    const nodeptr_t pnode = it.get_node_ptr();
                    nodeptr_t node;
                    if (pnode->is_dummy()) {
                        node = my_solist.create_node(pnode->get_order_key());
                        size_type bucket = __TBB_ReverseBits(pnode->get_order_key()) % my_number_of_buckets;
                        set_bucket(bucket, node);
                    } else {
                        node = my_solist.create_node(pnode->get_order_key(), std::move(pnode->my_element));
                    }

                    previous_node = my_solist.try_insert(previous_node, node, NULL);
                    __TBB_ASSERT(previous_node != NULL, "Insertion of node failed. Concurrent inserts in constructor?");
                }
                my_solist.check_range();
            }
        }

        clear_buckets_on_exception.dismiss();
    }
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

    concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
        if (this != &right)
            internal_copy(right);
        return (*this);
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    concurrent_unordered_base& operator=(concurrent_unordered_base&& other)
    {
        if(this != &other){
            typedef typename tbb::internal::allocator_traits<allocator_type>::propagate_on_container_move_assignment pocma_t;
            if(pocma_t::value || this->my_allocator == other.my_allocator) {
                concurrent_unordered_base trash (std::move(*this));
                swap(other);
                if (pocma_t::value) {
                    using std::swap;
                    //TODO: swapping allocators here may be a problem, replace with single direction moving
                    swap(this->my_solist.my_node_allocator, other.my_solist.my_node_allocator);
                    swap(this->my_allocator, other.my_allocator);
                }
            } else {
                concurrent_unordered_base moved_copy(std::move(other), this->my_allocator);
                this->swap(moved_copy);
            }
        }
        return *this;
    }
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

#if __TBB_INITIALIZER_LISTS_PRESENT
    //! assignment operator from initializer_list
    concurrent_unordered_base& operator=(std::initializer_list<value_type> il)
    {
        this->clear();
        this->insert(il.begin(), il.end());
        return (*this);
    }
#endif //__TBB_INITIALIZER_LISTS_PRESENT

    ~concurrent_unordered_base() {
        // Delete all node segments
        internal_clear();
    }

public:
    allocator_type get_allocator() const {
        return my_solist.get_allocator();
    }

    // Size and capacity functions
    bool empty() const {
        return my_solist.empty();
    }

    size_type size() const {
        return my_solist.size();
    }

    size_type max_size() const {
        return my_solist.max_size();
    }

    // Iterators
    iterator begin() {
        return my_solist.begin();
    }

    const_iterator begin() const {
        return my_solist.begin();
    }

    iterator end() {
        return my_solist.end();
    }

    const_iterator end() const {
        return my_solist.end();
    }

    const_iterator cbegin() const {
        return my_solist.cbegin();
    }

    const_iterator cend() const {
        return my_solist.cend();
    }

    // Parallel traversal support
    class const_range_type : tbb::internal::no_assign {
        const concurrent_unordered_base &my_table;
        raw_const_iterator my_begin_node;
        raw_const_iterator my_end_node;
        mutable raw_const_iterator my_midpoint_node;
    public:
        //! Type for size of a range
        typedef typename concurrent_unordered_base::size_type size_type;
        typedef typename concurrent_unordered_base::value_type value_type;
        typedef typename concurrent_unordered_base::reference reference;
        typedef typename concurrent_unordered_base::difference_type difference_type;
        typedef typename concurrent_unordered_base::const_iterator iterator;

        //! True if range is empty.
        bool empty() const {return my_begin_node == my_end_node;}

        //! True if range can be partitioned into two subranges.
        bool is_divisible() const {
            return my_midpoint_node != my_end_node;
        }
        //! Split range.
        const_range_type( const_range_type &r, split ) :
            my_table(r.my_table), my_end_node(r.my_end_node)
        {
            r.my_end_node = my_begin_node = r.my_midpoint_node;
            __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
            __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
            set_midpoint();
            r.set_midpoint();
        }
        //! Init range with container and grainsize specified
        const_range_type( const concurrent_unordered_base &a_table ) :
            my_table(a_table), my_begin_node(a_table.my_solist.begin()),
            my_end_node(a_table.my_solist.end())
        {
            set_midpoint();
        }
        iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); }
        iterator end() const { return my_table.my_solist.get_iterator(my_end_node); }
        //! The grain size for this range.
        size_type grainsize() const { return 1; }

        //! Set my_midpoint_node to point approximately half way between my_begin_node and my_end_node.
        void set_midpoint() const {
            if( my_begin_node == my_end_node ) // not divisible
                my_midpoint_node = my_end_node;
            else {
                sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);
                sokey_t end_key = solist_t::get_safe_order_key(my_end_node);
                size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
                while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
                if(__TBB_ReverseBits(mid_bucket) > begin_key) {
                    // found a dummy node between begin and end
                    my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket ));
                }
                else {
                    // didn't find a dummy node between begin and end
                    my_midpoint_node = my_end_node;
                }
#if TBB_USE_ASSERT
                {
                    sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);
                    __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" );
                    __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" );
                }
#endif // TBB_USE_ASSERT
            }
        }
    };

    class range_type : public const_range_type {
    public:
        typedef typename concurrent_unordered_base::iterator iterator;
        //! Split range.
        range_type( range_type &r, split ) : const_range_type( r, split() ) {}
        //! Init range with container and grainsize specified
        range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {}

        iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); }
        iterator end() const { return solist_t::get_iterator( const_range_type::end() ); }
    };

    range_type range() {
        return range_type( *this );
    }

    const_range_type range() const {
        return const_range_type( *this );
    }

    // Modifiers
    std::pair<iterator, bool> insert(const value_type& value) {
        return internal_insert(value);
    }

    iterator insert(const_iterator, const value_type& value) {
        // Ignore hint
        return insert(value).first;
    }

    template<class Iterator>
    void insert(Iterator first, Iterator last) {
        for (Iterator it = first; it != last; ++it)
            insert(*it);
    }

#if __TBB_INITIALIZER_LISTS_PRESENT
    //! Insert initializer list
    void insert(std::initializer_list<value_type> il) {
        insert(il.begin(), il.end());
    }
#endif

    iterator unsafe_erase(const_iterator where) {
        return internal_erase(where);
    }

    iterator unsafe_erase(const_iterator first, const_iterator last) {
        while (first != last)
            unsafe_erase(first++);
        return my_solist.get_iterator(first);
    }

    size_type unsafe_erase(const key_type& key) {
        pairii_t where = equal_range(key);
        size_type item_count = internal_distance(where.first, where.second);
        unsafe_erase(where.first, where.second);
        return item_count;
    }

    void swap(concurrent_unordered_base& right) {
        if (this != &right) {
            std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here
            my_solist.swap(right.my_solist);
            internal_swap_buckets(right);
            std::swap(my_number_of_buckets, right.my_number_of_buckets);
            std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);
        }
    }

    // Observers
    hasher hash_function() const {
        return my_hash_compare.my_hash_object;
    }

    key_equal key_eq() const {
        return my_hash_compare.my_key_compare_object;
    }

    void clear() {
        // Clear list
        my_solist.clear();

        // Clear buckets
        internal_clear();

        // Initialize bucket 0
        __TBB_ASSERT(my_buckets[0] == NULL, NULL);
        raw_iterator dummy_node = my_solist.raw_begin();
        set_bucket(0, dummy_node);
    }

    // Lookup
    iterator find(const key_type& key) {
        return internal_find(key);
    }

    const_iterator find(const key_type& key) const {
        return const_cast<self_type*>(this)->internal_find(key);
    }

    size_type count(const key_type& key) const {
        if(allow_multimapping) {
            paircc_t answer = equal_range(key);
            size_type item_count = internal_distance(answer.first, answer.second);
            return item_count;
        } else {
            return const_cast<self_type*>(this)->internal_find(key) == end() ? 0 : 1;
        }
    }

    std::pair<iterator, iterator> equal_range(const key_type& key) {
        return internal_equal_range(key);
    }

    std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
        return const_cast<self_type*>(this)->internal_equal_range(key);
    }

    // Bucket interface - for debugging
    size_type unsafe_bucket_count() const {
        return my_number_of_buckets;
    }

    size_type unsafe_max_bucket_count() const {
        return segment_size(pointers_per_table-1);
    }

    size_type unsafe_bucket_size(size_type bucket) {
        size_type item_count = 0;
        if (is_initialized(bucket)) {
            raw_iterator it = get_bucket(bucket);
            ++it;
            for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)
                ++item_count;
        }
        return item_count;
    }

    size_type unsafe_bucket(const key_type& key) const {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;
        return bucket;
    }

    // If the bucket is initialized, return the first non-dummy element in it
    local_iterator unsafe_begin(size_type bucket) {
        if (!is_initialized(bucket))
            return end();

        raw_iterator it = get_bucket(bucket);
        return my_solist.first_real_iterator(it);
    }

    // If the bucket is initialized, return the first non-dummy element in it
    const_local_iterator unsafe_begin(size_type bucket) const
    {
        if (!is_initialized(bucket))
            return end();

        raw_const_iterator it = get_bucket(bucket);
        return my_solist.first_real_iterator(it);
    }

    // @REVIEW: Takes O(n)
    // Returns the iterator after the last non-dummy element in the bucket
    local_iterator unsafe_end(size_type bucket)
    {
        if (!is_initialized(bucket))
            return end();

        raw_iterator it = get_bucket(bucket);

        // Find the end of the bucket, denoted by the dummy element
        do ++it;
        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());

        // Return the first real element past the end of the bucket
        return my_solist.first_real_iterator(it);
    }

    // @REVIEW: Takes O(n)
    // Returns the iterator after the last non-dummy element in the bucket
    const_local_iterator unsafe_end(size_type bucket) const
    {
        if (!is_initialized(bucket))
            return end();

        raw_const_iterator it = get_bucket(bucket);

        // Find the end of the bucket, denoted by the dummy element
        do ++it;
        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());

        // Return the first real element past the end of the bucket
        return my_solist.first_real_iterator(it);
    }

    const_local_iterator unsafe_cbegin(size_type bucket) const {
        return ((const self_type *) this)->unsafe_begin(bucket);
    }

    const_local_iterator unsafe_cend(size_type bucket) const {
        return ((const self_type *) this)->unsafe_end(bucket);
    }

    // Hash policy
    float load_factor() const {
        return (float) size() / (float) unsafe_bucket_count();
    }

    float max_load_factor() const {
        return my_maximum_bucket_size;
    }

    void max_load_factor(float newmax) {
        if (newmax != newmax || newmax < 0)
            tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
        my_maximum_bucket_size = newmax;
    }

    // No rehashing of elements is done here: the underlying split-ordered list
    // is already sorted, so the increased bucket number will be reflected
    // lazily, the next time each affected bucket is touched.
    void rehash(size_type buckets) {
        size_type current_buckets = my_number_of_buckets;
        if (current_buckets >= buckets)
            return;
        my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to a power of 2
    }
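
    // Worked example of the rounding above (illustrative): rehash(100)
    // computes 1 << __TBB_Log2(100*2-1) = 1 << __TBB_Log2(199) = 1 << 7,
    // i.e. 128 buckets. No node is moved; dummy nodes for the new buckets
    // materialize lazily in init_bucket() the first time each bucket is
    // looked up.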

private:

    // Initialize the hash and keep the first bucket open
    void internal_init() {
        // Allocate an array of segment pointers
        std::memset(my_buckets, 0, pointers_per_table * sizeof(void *));

        // Initialize bucket 0
        raw_iterator dummy_node = my_solist.raw_begin();
        set_bucket(0, dummy_node);
    }

    void internal_clear() {
        for (size_type index = 0; index < pointers_per_table; ++index) {
            if (my_buckets[index] != NULL) {
                size_type sz = segment_size(index);
                for (size_type index2 = 0; index2 < sz; ++index2)
                    my_allocator.destroy(&my_buckets[index][index2]);
                my_allocator.deallocate(my_buckets[index], sz);
                my_buckets[index] = 0;
            }
        }
    }

    void internal_copy(const self_type& right) {
        clear();

        my_maximum_bucket_size = right.my_maximum_bucket_size;
        my_number_of_buckets = right.my_number_of_buckets;

        __TBB_TRY {
            insert(right.begin(), right.end());
            my_hash_compare = right.my_hash_compare;
        } __TBB_CATCH(...) {
            my_solist.clear();
            __TBB_RETHROW();
        }
    }

    void internal_swap_buckets(concurrent_unordered_base& right)
    {
        // Swap all node segments
        for (size_type index = 0; index < pointers_per_table; ++index)
        {
            raw_iterator * iterator_pointer = my_buckets[index];
            my_buckets[index] = right.my_buckets[index];
            right.my_buckets[index] = iterator_pointer;
        }
    }

    //TODO: why not use std::distance?
    // Hash APIs
    size_type internal_distance(const_iterator first, const_iterator last) const
    {
        size_type num = 0;

        for (const_iterator it = first; it != last; ++it)
            ++num;

        return num;
    }

    // Insert an element in the hash given its value
    std::pair<iterator, bool> internal_insert(const value_type& value)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(get_key(value));
        size_type bucket = order_key % my_number_of_buckets;

        // If the bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        size_type new_count = 0;
        order_key = split_order_key_regular(order_key);
        raw_iterator it = get_bucket(bucket);
        raw_iterator last = my_solist.raw_end();
        raw_iterator where = it;

        __TBB_ASSERT(where != last, "Invalid head node");

        // First node is a dummy node
        ++where;

        for (;;)
        {
            if (where == last || solist_t::get_order_key(where) > order_key)
            {
                // Try to insert it in the right place
                std::pair<iterator, bool> result = my_solist.try_insert(it, where, value, order_key, &new_count);

                if (result.second)
                {
                    // Insertion succeeded, adjust the table size, if needed
                    adjust_table_size(new_count, my_number_of_buckets);
                    return result;
                }
                else
                {
                    // Insertion failed: either the same node was inserted by another thread, or
                    // another element was inserted at exactly the same place as this node.
                    // Proceed with the search from the previous location where the order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = it;
                    ++where;
                    continue;
                }
            }
            else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && my_hash_compare(get_key(*where), get_key(value)) == 0)
            {
                // Element already in the list, return it
                return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
            }

            // Move the iterator forward
            it = where;
            ++where;
        }
    }

    // Find the element in the split-ordered list
    iterator internal_find(const key_type& key)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If the bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);
        raw_iterator last = my_solist.raw_end();

        for (raw_iterator it = get_bucket(bucket); it != last; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // The searched-for order key is smaller than the current node's
                // order key, so the element is not in the hash.
                return end();
            }
            else if (solist_t::get_order_key(it) == order_key)
            {
                // The fact that order keys match does not mean that the element is found.
                // Key comparison has to be performed to check whether this is the
                // right element. If not, keep searching while the order keys are the same.
                if (!my_hash_compare(get_key(*it), key))
                    return my_solist.get_iterator(it);
            }
        }

        return end();
    }

    // Erase an element from the list. This is not a concurrency safe function.
    iterator internal_erase(const_iterator it)
    {
        key_type key = get_key(*it);
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If the bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);

        raw_iterator previous = get_bucket(bucket);
        raw_iterator last = my_solist.raw_end();
        raw_iterator where = previous;

        __TBB_ASSERT(where != last, "Invalid head node");

        // First node is a dummy node
        ++where;

        for (;;) {
            if (where == last)
                return end();
            else if (my_solist.get_iterator(where) == it)
                return my_solist.erase_node(previous, it);

            // Move the iterator forward
            previous = where;
            ++where;
        }
    }

    // Return the [begin, end) pair of iterators with the same key values.
    // This operation makes sense only if mapping is many-to-one.
    pairii_t internal_equal_range(const key_type& key)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If the bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);
        raw_iterator end_it = my_solist.raw_end();

        for (raw_iterator it = get_bucket(bucket); it != end_it; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // There is no element with the given key
                return pairii_t(end(), end());
            }
            else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))
            {
                iterator first = my_solist.get_iterator(it);
                iterator last = first;
                do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) );
                return pairii_t(first, last);
            }
        }

        return pairii_t(end(), end());
    }

    // Bucket APIs
    void init_bucket(size_type bucket)
    {
        // Bucket 0 has no parent.
        __TBB_ASSERT( bucket != 0, "The first bucket must always be initialized");

        size_type parent_bucket = get_parent(bucket);

        // All parent buckets have to be initialized before this bucket is
        if (!is_initialized(parent_bucket))
            init_bucket(parent_bucket);

        raw_iterator parent = get_bucket(parent_bucket);

        // Create a dummy first node in this bucket
        raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
        set_bucket(bucket, dummy_node);
    }
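
    // Illustrative trace of the recursion above: the first lookup touching
    // bucket 6 walks the parent chain get_parent(6) == 2, get_parent(2) == 0;
    // bucket 0 always exists, so bucket 2 is initialized first, and then the
    // dummy node for bucket 6 is spliced into the list somewhere after
    // bucket 2's dummy node.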

    void adjust_table_size(size_type total_elements, size_type current_size)
    {
        // Grow the table by a factor of 2 if possible and needed
        if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
        {
            // Double the size of the hash only if the size has not changed in between loads.
            my_number_of_buckets.compare_and_swap(2u*current_size, current_size);
            // The simpler "my_number_of_buckets.compare_and_swap( current_size<<1, current_size );"
            // does not work for VC8, due to overzealous compiler warnings in /Wp64 mode.
        }
    }
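
    // Worked example of the growth rule (illustrative): with the defaults of
    // 8 buckets and a maximum load factor of 4.0, the table publishes a
    // bucket count of 16 once the element count observed by an insert
    // exceeds 8 * 4.0 = 32. The compare_and_swap lets concurrent inserts
    // race benignly: at most one doubling per observed current_size wins.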

    size_type get_parent(size_type bucket) const
    {
        // Unsets bucket's most significant turned-on bit
        size_type msb = __TBB_Log2((uintptr_t)bucket);
        return bucket & ~(size_type(1) << msb);
    }
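
    // Example (illustrative): bucket 6 is 110b; its most significant set bit
    // is bit 2, so get_parent(6) == 110b & ~100b == 010b == 2. Clearing the
    // top set bit always yields the bucket this one split off from when the
    // table last doubled.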

    // Dynamic sized array (segments)
    //! @return segment index of given index in the array
    static size_type segment_index_of( size_type index ) {
        return size_type( __TBB_Log2( uintptr_t(index|1) ) );
    }

    //! @return the first array index of given segment
    static size_type segment_base( size_type k ) {
        return (size_type(1)<<k & ~size_type(1));
    }

    //! @return segment size
    static size_type segment_size( size_type k ) {
        return k ? size_type(1)<<k : 2;
    }
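
    // Resulting layout (illustrative): segment 0 holds buckets 0-1 (size 2),
    // segment 1 holds buckets 2-3 (size 2), and in general segment k > 0
    // holds buckets 2^k .. 2^(k+1)-1 (size 2^k). Doubling the bucket count
    // thus allocates exactly one new segment and never relocates existing
    // bucket slots, which is why get_bucket() needs no locking.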

    raw_iterator get_bucket(size_type bucket) const {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment);
        __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
        return my_buckets[segment][bucket];
    }

    void set_bucket(size_type bucket, raw_iterator dummy_head) {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment);

        if (my_buckets[segment] == NULL) {
            size_type sz = segment_size(segment);
            raw_iterator * new_segment = my_allocator.allocate(sz);
            std::memset(new_segment, 0, sz*sizeof(raw_iterator));

            if (my_buckets[segment].compare_and_swap( new_segment, NULL) != NULL)
                my_allocator.deallocate(new_segment, sz);
        }

        my_buckets[segment][bucket] = dummy_head;
    }

    bool is_initialized(size_type bucket) const {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment);

        if (my_buckets[segment] == NULL)
            return false;

        raw_iterator it = my_buckets[segment][bucket];
        return (it.get_node_ptr() != NULL);
    }

    // Utilities for keys

    // A regular order key has its original hash value reversed and the last bit set
    sokey_t split_order_key_regular(sokey_t order_key) const {
        return __TBB_ReverseBits(order_key) | 0x1;
    }

    // A dummy order key has its original hash value reversed and the last bit unset
    sokey_t split_order_key_dummy(sokey_t order_key) const {
        return __TBB_ReverseBits(order_key) & ~sokey_t(0x1);
    }
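
    // Worked example on a hypothetical 8-bit sokey_t (real keys are machine
    // words): hash 6 = 00000110b reverses to 01100000b; the regular key is
    // 01100001b (low bit set) and the dummy key for bucket 6 is 01100000b
    // (low bit clear). Reversal moves the bucket-selecting low bits to the
    // front, so all keys of one bucket form a contiguous run of the list
    // headed by that bucket's dummy node, and node::is_dummy() can tell the
    // two kinds apart by the low bit alone.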

    // Shared variables
    atomic<size_type> my_number_of_buckets;                                     // Current table size
    solist_t my_solist;                                                         // List where all the elements are kept
    typename allocator_type::template rebind<raw_iterator>::other my_allocator; // Allocator object for segments
    float my_maximum_bucket_size;                                               // Maximum size of the bucket
    atomic<raw_iterator*> my_buckets[pointers_per_table];                       // The segment table
};
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning(pop) // warning 4127 is back
#endif

//! Hash multiplier
static const size_t hash_multiplier = tbb::internal::select_size_t_constant<2654435769U, 11400714819323198485ULL>::value;
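// The two constants above are the 32- and 64-bit golden-ratio multipliers,
// floor(2^32 / phi) and floor(2^64 / phi), i.e. Knuth's multiplicative
// hashing: multiplying by them spreads low-entropy keys across the high bits
// of the word.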
} // namespace internal
//! @endcond
//! Hasher functions
template<typename T>
inline size_t tbb_hasher( const T& t ) {
    return static_cast<size_t>( t ) * internal::hash_multiplier;
}
template<typename P>
inline size_t tbb_hasher( P* ptr ) {
    size_t const h = reinterpret_cast<size_t>( ptr );
    return (h >> 3) ^ h;
}
template<typename E, typename S, typename A>
inline size_t tbb_hasher( const std::basic_string<E,S,A>& s ) {
    size_t h = 0;
    for( const E* c = s.c_str(); *c; ++c )
        h = static_cast<size_t>(*c) ^ (h * internal::hash_multiplier);
    return h;
}
template<typename F, typename S>
inline size_t tbb_hasher( const std::pair<F,S>& p ) {
    return tbb_hasher(p.first) ^ tbb_hasher(p.second);
}
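
// Usage sketch (illustrative; in practice these overloads are reached via a
// public TBB container header that includes this one): ordinary overload
// resolution picks the right hasher, e.g.
//     tbb::tbb_hasher( size_t(42) );           // integral: multiplicative hash
//     tbb::tbb_hasher( std::string("key") );   // string: iterated multiply-xor
//     tbb::tbb_hasher( std::make_pair(1, 2) ); // pair: xor of member hashes
// The generic overload requires static_cast<size_t>(t) to be valid, so it
// serves integral and enum types; pointers and strings use the dedicated
// overloads above.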
} // namespace interface5
using interface5::tbb_hasher;

// Template class for hash compare
template<typename Key>
class tbb_hash
{
public:
    tbb_hash() {}

    size_t operator()(const Key& key) const
    {
        return tbb_hasher(key);
    }
};

} // namespace tbb
#endif // __TBB__concurrent_unordered_impl_H