Unnamed: 0
int64
0
0
repo_id
stringlengths
5
186
file_path
stringlengths
15
223
content
stringlengths
1
32.8M
0
repos/unordered/include/boost
repos/unordered/include/boost/unordered/unordered_set.hpp
// Copyright (C) 2003-2004 Jeremy B. Maitin-Shepard. // Copyright (C) 2005-2011 Daniel James. // Copyright (C) 2022-2023 Christian Mazakas // Copyright (C) 2024 Joaquin M Lopez Munoz. // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // See http://www.boost.org/libs/unordered for documentation #ifndef BOOST_UNORDERED_UNORDERED_SET_HPP_INCLUDED #define BOOST_UNORDERED_UNORDERED_SET_HPP_INCLUDED #include <boost/config.hpp> #if defined(BOOST_HAS_PRAGMA_ONCE) #pragma once #endif #include <boost/unordered/detail/serialize_fca_container.hpp> #include <boost/unordered/detail/set.hpp> #include <boost/unordered/detail/type_traits.hpp> #include <boost/container_hash/hash.hpp> #include <initializer_list> #if defined(BOOST_MSVC) #pragma warning(push) // conditional expression is constant #pragma warning(disable : 4127) #if BOOST_MSVC >= 1400 // the inline specifier cannot be used when a friend declaration refers to a // specialization of a function template #pragma warning(disable : 4396) #endif #endif namespace boost { namespace unordered { template <class T, class H, class P, class A> class unordered_set { template <typename, typename, typename, typename> friend class unordered_multiset; public: typedef T key_type; typedef T value_type; typedef H hasher; typedef P key_equal; typedef A allocator_type; private: typedef boost::unordered::detail::set<A, T, H, P> types; typedef typename types::value_allocator_traits value_allocator_traits; typedef typename types::table table; public: typedef typename value_allocator_traits::pointer pointer; typedef typename value_allocator_traits::const_pointer const_pointer; typedef value_type& reference; typedef value_type const& const_reference; typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; typedef typename table::c_iterator iterator; typedef typename table::c_iterator const_iterator; typedef typename 
table::cl_iterator local_iterator; typedef typename table::cl_iterator const_local_iterator; typedef typename types::node_type node_type; typedef typename types::insert_return_type insert_return_type; private: table table_; public: // constructors unordered_set(); explicit unordered_set(size_type, const hasher& = hasher(), const key_equal& = key_equal(), const allocator_type& = allocator_type()); template <class InputIt> unordered_set(InputIt, InputIt, size_type = boost::unordered::detail::default_bucket_count, const hasher& = hasher(), const key_equal& = key_equal(), const allocator_type& = allocator_type()); unordered_set(unordered_set const&); unordered_set(unordered_set&& other) noexcept(table::nothrow_move_constructible) : table_(other.table_, boost::unordered::detail::move_tag()) { // The move is done in table_ } explicit unordered_set(allocator_type const&); unordered_set(unordered_set const&, allocator_type const&); unordered_set(unordered_set&&, allocator_type const&); unordered_set(std::initializer_list<value_type>, size_type = boost::unordered::detail::default_bucket_count, const hasher& = hasher(), const key_equal& l = key_equal(), const allocator_type& = allocator_type()); explicit unordered_set(size_type, const allocator_type&); explicit unordered_set(size_type, const hasher&, const allocator_type&); template <class InputIterator> unordered_set(InputIterator, InputIterator, const allocator_type&); template <class InputIt> unordered_set(InputIt, InputIt, size_type, const allocator_type&); template <class InputIt> unordered_set( InputIt, InputIt, size_type, const hasher&, const allocator_type&); unordered_set(std::initializer_list<value_type>, const allocator_type&); unordered_set( std::initializer_list<value_type>, size_type, const allocator_type&); unordered_set(std::initializer_list<value_type>, size_type, const hasher&, const allocator_type&); // Destructor ~unordered_set() noexcept; // Assign unordered_set& operator=(unordered_set const& x) { 
table_.assign(x.table_, std::true_type()); return *this; } unordered_set& operator=(unordered_set&& x) noexcept(value_allocator_traits::is_always_equal::value&& std::is_nothrow_move_assignable<H>::value&& std::is_nothrow_move_assignable<P>::value) { table_.move_assign(x.table_, std::true_type()); return *this; } unordered_set& operator=(std::initializer_list<value_type>); allocator_type get_allocator() const noexcept { return allocator_type(table_.node_alloc()); } // iterators iterator begin() noexcept { return iterator(table_.begin()); } const_iterator begin() const noexcept { return const_iterator(table_.begin()); } iterator end() noexcept { return iterator(); } const_iterator end() const noexcept { return const_iterator(); } const_iterator cbegin() const noexcept { return const_iterator(table_.begin()); } const_iterator cend() const noexcept { return const_iterator(); } // size and capacity BOOST_ATTRIBUTE_NODISCARD bool empty() const noexcept { return table_.size_ == 0; } size_type size() const noexcept { return table_.size_; } size_type max_size() const noexcept; // emplace template <class... Args> std::pair<iterator, bool> emplace(Args&&... args) { return table_.emplace_unique( table::extractor::extract(std::forward<Args>(args)...), std::forward<Args>(args)...); } template <class... Args> iterator emplace_hint(const_iterator hint, Args&&... 
args) { return table_.emplace_hint_unique(hint, table::extractor::extract(std::forward<Args>(args)...), std::forward<Args>(args)...); } std::pair<iterator, bool> insert(value_type const& x) { return this->emplace(x); } std::pair<iterator, bool> insert(value_type&& x) { return this->emplace(std::move(x)); } template <class Key> typename boost::enable_if_c< detail::transparent_non_iterable<Key, unordered_set>::value, std::pair<iterator, bool> >::type insert(Key&& k) { return table_.try_emplace_unique(std::forward<Key>(k)); } iterator insert(const_iterator hint, value_type const& x) { return this->emplace_hint(hint, x); } iterator insert(const_iterator hint, value_type&& x) { return this->emplace_hint(hint, std::move(x)); } template <class Key> typename boost::enable_if_c< detail::transparent_non_iterable<Key, unordered_set>::value, iterator>::type insert(const_iterator hint, Key&& k) { return table_.try_emplace_hint_unique(hint, std::forward<Key>(k)); } template <class InputIt> void insert(InputIt, InputIt); void insert(std::initializer_list<value_type>); // extract node_type extract(const_iterator position) { return node_type( table_.extract_by_iterator_unique(position), allocator_type(table_.node_alloc())); } node_type extract(const key_type& k) { return node_type( table_.extract_by_key_impl(k), allocator_type(table_.node_alloc())); } template <class Key> typename boost::enable_if_c< detail::transparent_non_iterable<Key, unordered_set>::value, node_type>::type extract(const Key& k) { return node_type( table_.extract_by_key_impl(k), allocator_type(table_.node_alloc())); } insert_return_type insert(node_type&& np) { insert_return_type result; table_.move_insert_node_type_unique(np, result); return result; } iterator insert(const_iterator hint, node_type&& np) { return table_.move_insert_node_type_with_hint_unique(hint, np); } iterator erase(const_iterator); size_type erase(const key_type&); iterator erase(const_iterator, const_iterator); template <class Key> typename 
boost::enable_if_c< detail::transparent_non_iterable<Key, unordered_set>::value, size_type>::type erase(Key&& k) { return table_.erase_key_unique_impl(std::forward<Key>(k)); } BOOST_UNORDERED_DEPRECATED("Use erase instead") void quick_erase(const_iterator it) { erase(it); } BOOST_UNORDERED_DEPRECATED("Use erase instead") void erase_return_void(const_iterator it) { erase(it); } void swap(unordered_set&) noexcept(value_allocator_traits::is_always_equal::value&& boost::unordered::detail::is_nothrow_swappable<H>::value&& boost::unordered::detail::is_nothrow_swappable<P>::value); void clear() noexcept { table_.clear_impl(); } template <typename H2, typename P2> void merge(boost::unordered_set<T, H2, P2, A>& source); template <typename H2, typename P2> void merge(boost::unordered_set<T, H2, P2, A>&& source); template <typename H2, typename P2> void merge(boost::unordered_multiset<T, H2, P2, A>& source); template <typename H2, typename P2> void merge(boost::unordered_multiset<T, H2, P2, A>&& source); // observers hasher hash_function() const; key_equal key_eq() const; // lookup const_iterator find(const key_type&) const; template <class Key> typename boost::enable_if_c<detail::are_transparent<Key, H, P>::value, const_iterator>::type find(const Key& k) const { return const_iterator(table_.find(k)); } template <class CompatibleKey, class CompatibleHash, class CompatiblePredicate> const_iterator find(CompatibleKey const&, CompatibleHash const&, CompatiblePredicate const&) const; bool contains(key_type const& k) const { return table_.find(k) != this->end(); } template <class Key> typename boost::enable_if_c<detail::are_transparent<Key, H, P>::value, bool>::type contains(const Key& k) const { return table_.find(k) != this->end(); } size_type count(const key_type&) const; template <class Key> typename boost::enable_if_c<detail::are_transparent<Key, H, P>::value, size_type>::type count(const Key& k) const { return table_.find(k) != this->end() ? 
1 : 0; } std::pair<const_iterator, const_iterator> equal_range( const key_type&) const; template <class Key> typename boost::enable_if_c<detail::are_transparent<Key, H, P>::value, std::pair<const_iterator, const_iterator> >::type equal_range(Key const& k) const { iterator n = table_.find(k); iterator m = n; if (m != this->end()) { ++m; } return std::make_pair(const_iterator(n), const_iterator(m)); } // bucket interface size_type bucket_count() const noexcept { return table_.bucket_count(); } size_type max_bucket_count() const noexcept { return table_.max_bucket_count(); } size_type bucket_size(size_type) const; size_type bucket(const key_type& k) const { return table_.hash_to_bucket(table_.hash(k)); } template <class Key> typename boost::enable_if_c<detail::are_transparent<Key, H, P>::value, size_type>::type bucket(Key&& k) const { return table_.hash_to_bucket(table_.hash(std::forward<Key>(k))); } local_iterator begin(size_type n) { return local_iterator(table_.begin(n)); } const_local_iterator begin(size_type n) const { return const_local_iterator(table_.begin(n)); } local_iterator end(size_type) { return local_iterator(); } const_local_iterator end(size_type) const { return const_local_iterator(); } const_local_iterator cbegin(size_type n) const { return const_local_iterator(table_.begin(n)); } const_local_iterator cend(size_type) const { return const_local_iterator(); } // hash policy float load_factor() const noexcept; float max_load_factor() const noexcept { return table_.mlf_; } void max_load_factor(float) noexcept; void rehash(size_type); void reserve(size_type); #if !BOOST_WORKAROUND(BOOST_BORLANDC, < 0x0582) friend bool operator== <T, H, P, A>(unordered_set const&, unordered_set const&); friend bool operator!= <T, H, P, A>(unordered_set const&, unordered_set const&); #endif }; // class template unordered_set template <class Archive, class K, class H, class P, class A> void serialize( Archive& ar, unordered_set<K, H, P, A>& c, unsigned int version) { 
detail::serialize_fca_container(ar, c, version); } #if BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES template <class InputIterator, class Hash = boost::hash<typename std::iterator_traits<InputIterator>::value_type>, class Pred = std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, class Allocator = std::allocator< typename std::iterator_traits<InputIterator>::value_type>, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_pred_v<Pred> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_set(InputIterator, InputIterator, std::size_t = boost::unordered::detail::default_bucket_count, Hash = Hash(), Pred = Pred(), Allocator = Allocator()) -> unordered_set<typename std::iterator_traits<InputIterator>::value_type, Hash, Pred, Allocator>; template <class T, class Hash = boost::hash<T>, class Pred = std::equal_to<T>, class Allocator = std::allocator<T>, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_pred_v<Pred> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_set(std::initializer_list<T>, std::size_t = boost::unordered::detail::default_bucket_count, Hash = Hash(), Pred = Pred(), Allocator = Allocator()) -> unordered_set<T, Hash, Pred, Allocator>; template <class InputIterator, class Allocator, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_set(InputIterator, InputIterator, std::size_t, Allocator) -> unordered_set<typename std::iterator_traits<InputIterator>::value_type, boost::hash<typename std::iterator_traits<InputIterator>::value_type>, std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, Allocator>; template <class InputIterator, class Hash, class Allocator, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = 
std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_set(InputIterator, InputIterator, std::size_t, Hash, Allocator) -> unordered_set<typename std::iterator_traits<InputIterator>::value_type, Hash, std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, Allocator>; template <class T, class Allocator, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_set(std::initializer_list<T>, std::size_t, Allocator) -> unordered_set<T, boost::hash<T>, std::equal_to<T>, Allocator>; template <class T, class Hash, class Allocator, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_set(std::initializer_list<T>, std::size_t, Hash, Allocator) -> unordered_set<T, Hash, std::equal_to<T>, Allocator>; template <class InputIterator, class Allocator, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_set(InputIterator, InputIterator, Allocator) -> unordered_set<typename std::iterator_traits<InputIterator>::value_type, boost::hash<typename std::iterator_traits<InputIterator>::value_type>, std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, Allocator>; template <class T, class Allocator, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_set(std::initializer_list<T>, Allocator) -> unordered_set<T, boost::hash<T>, std::equal_to<T>, Allocator>; #endif template <class T, class H, class P, class A> class unordered_multiset { template <typename, typename, typename, typename> friend class unordered_set; public: typedef T key_type; typedef T value_type; typedef H hasher; typedef P key_equal; typedef A allocator_type; private: typedef boost::unordered::detail::set<A, T, H, P> types; typedef typename types::value_allocator_traits value_allocator_traits; typedef typename 
types::table table; public: typedef typename value_allocator_traits::pointer pointer; typedef typename value_allocator_traits::const_pointer const_pointer; typedef value_type& reference; typedef value_type const& const_reference; typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; typedef typename table::c_iterator iterator; typedef typename table::c_iterator const_iterator; typedef typename table::cl_iterator local_iterator; typedef typename table::cl_iterator const_local_iterator; typedef typename types::node_type node_type; private: table table_; public: // constructors unordered_multiset(); explicit unordered_multiset(size_type, const hasher& = hasher(), const key_equal& = key_equal(), const allocator_type& = allocator_type()); template <class InputIt> unordered_multiset(InputIt, InputIt, size_type = boost::unordered::detail::default_bucket_count, const hasher& = hasher(), const key_equal& = key_equal(), const allocator_type& = allocator_type()); unordered_multiset(unordered_multiset const&); unordered_multiset(unordered_multiset&& other) noexcept(table::nothrow_move_constructible) : table_(other.table_, boost::unordered::detail::move_tag()) { // The move is done in table_ } explicit unordered_multiset(allocator_type const&); unordered_multiset(unordered_multiset const&, allocator_type const&); unordered_multiset(unordered_multiset&&, allocator_type const&); unordered_multiset(std::initializer_list<value_type>, size_type = boost::unordered::detail::default_bucket_count, const hasher& = hasher(), const key_equal& l = key_equal(), const allocator_type& = allocator_type()); explicit unordered_multiset(size_type, const allocator_type&); explicit unordered_multiset( size_type, const hasher&, const allocator_type&); template <class InputIterator> unordered_multiset(InputIterator, InputIterator, const allocator_type&); template <class InputIt> unordered_multiset(InputIt, InputIt, size_type, const allocator_type&); template <class InputIt> 
unordered_multiset( InputIt, InputIt, size_type, const hasher&, const allocator_type&); unordered_multiset( std::initializer_list<value_type>, const allocator_type&); unordered_multiset( std::initializer_list<value_type>, size_type, const allocator_type&); unordered_multiset(std::initializer_list<value_type>, size_type, const hasher&, const allocator_type&); // Destructor ~unordered_multiset() noexcept; // Assign unordered_multiset& operator=(unordered_multiset const& x) { table_.assign(x.table_, std::false_type()); return *this; } unordered_multiset& operator=(unordered_multiset&& x) noexcept(value_allocator_traits::is_always_equal::value&& std::is_nothrow_move_assignable<H>::value&& std::is_nothrow_move_assignable<P>::value) { table_.move_assign(x.table_, std::false_type()); return *this; } unordered_multiset& operator=(std::initializer_list<value_type>); allocator_type get_allocator() const noexcept { return allocator_type(table_.node_alloc()); } // iterators iterator begin() noexcept { return iterator(table_.begin()); } const_iterator begin() const noexcept { return const_iterator(table_.begin()); } iterator end() noexcept { return iterator(); } const_iterator end() const noexcept { return const_iterator(); } const_iterator cbegin() const noexcept { return const_iterator(table_.begin()); } const_iterator cend() const noexcept { return const_iterator(); } // size and capacity BOOST_ATTRIBUTE_NODISCARD bool empty() const noexcept { return table_.size_ == 0; } size_type size() const noexcept { return table_.size_; } size_type max_size() const noexcept; // emplace template <class... Args> iterator emplace(Args&&... args) { return iterator(table_.emplace_equiv( boost::unordered::detail::func::construct_node_from_args( table_.node_alloc(), std::forward<Args>(args)...))); } template <class... Args> iterator emplace_hint(const_iterator hint, Args&&... 
args) { return iterator(table_.emplace_hint_equiv( hint, boost::unordered::detail::func::construct_node_from_args( table_.node_alloc(), std::forward<Args>(args)...))); } iterator insert(value_type const& x) { return this->emplace(x); } iterator insert(value_type&& x) { return this->emplace(std::move(x)); } iterator insert(const_iterator hint, value_type const& x) { return this->emplace_hint(hint, x); } iterator insert(const_iterator hint, value_type&& x) { return this->emplace_hint(hint, std::move(x)); } template <class InputIt> void insert(InputIt, InputIt); void insert(std::initializer_list<value_type>); // extract node_type extract(const_iterator position) { return node_type( table_.extract_by_iterator_equiv(position), table_.node_alloc()); } node_type extract(const key_type& k) { return node_type(table_.extract_by_key_impl(k), table_.node_alloc()); } template <class Key> typename boost::enable_if_c< detail::transparent_non_iterable<Key, unordered_multiset>::value, node_type>::type extract(const Key& k) { return node_type(table_.extract_by_key_impl(k), table_.node_alloc()); } iterator insert(node_type&& np) { return table_.move_insert_node_type_equiv(np); } iterator insert(const_iterator hint, node_type&& np) { return table_.move_insert_node_type_with_hint_equiv(hint, np); } iterator erase(const_iterator); size_type erase(const key_type&); template <class Key> typename boost::enable_if_c< detail::transparent_non_iterable<Key, unordered_multiset>::value, size_type>::type erase(const Key& k) { return table_.erase_key_equiv_impl(k); } iterator erase(const_iterator, const_iterator); BOOST_UNORDERED_DEPRECATED("Use erase instead") void quick_erase(const_iterator it) { erase(it); } BOOST_UNORDERED_DEPRECATED("Use erase instead") void erase_return_void(const_iterator it) { erase(it); } void swap(unordered_multiset&) noexcept(value_allocator_traits::is_always_equal::value&& boost::unordered::detail::is_nothrow_swappable<H>::value&& 
boost::unordered::detail::is_nothrow_swappable<P>::value); void clear() noexcept { table_.clear_impl(); } template <typename H2, typename P2> void merge(boost::unordered_multiset<T, H2, P2, A>& source); template <typename H2, typename P2> void merge(boost::unordered_multiset<T, H2, P2, A>&& source); template <typename H2, typename P2> void merge(boost::unordered_set<T, H2, P2, A>& source); template <typename H2, typename P2> void merge(boost::unordered_set<T, H2, P2, A>&& source); // observers hasher hash_function() const; key_equal key_eq() const; // lookup const_iterator find(const key_type&) const; template <class Key> typename boost::enable_if_c<detail::are_transparent<Key, H, P>::value, const_iterator>::type find(const Key& k) const { return table_.find(k); } template <class CompatibleKey, class CompatibleHash, class CompatiblePredicate> const_iterator find(CompatibleKey const&, CompatibleHash const&, CompatiblePredicate const&) const; bool contains(const key_type& k) const { return table_.find(k) != this->end(); } template <class Key> typename boost::enable_if_c<detail::are_transparent<Key, H, P>::value, bool>::type contains(const Key& k) const { return table_.find(k) != this->end(); } size_type count(const key_type&) const; template <class Key> typename boost::enable_if_c<detail::are_transparent<Key, H, P>::value, size_type>::type count(const Key& k) const { return table_.group_count(k); } std::pair<const_iterator, const_iterator> equal_range( const key_type&) const; template <class Key> typename boost::enable_if_c<detail::are_transparent<Key, H, P>::value, std::pair<const_iterator, const_iterator> >::type equal_range(const Key& k) const { iterator first = table_.find(k); iterator last = table_.next_group(k, first); return std::make_pair(const_iterator(first), const_iterator(last)); } // bucket interface size_type bucket_count() const noexcept { return table_.bucket_count(); } size_type max_bucket_count() const noexcept { return table_.max_bucket_count(); } 
size_type bucket_size(size_type) const; size_type bucket(const key_type& k) const { return table_.hash_to_bucket(table_.hash(k)); } template <class Key> typename boost::enable_if_c<detail::are_transparent<Key, H, P>::value, size_type>::type bucket(Key&& k) const { return table_.hash_to_bucket(table_.hash(std::forward<Key>(k))); } local_iterator begin(size_type n) { return local_iterator(table_.begin(n)); } const_local_iterator begin(size_type n) const { return const_local_iterator(table_.begin(n)); } local_iterator end(size_type) { return local_iterator(); } const_local_iterator end(size_type) const { return const_local_iterator(); } const_local_iterator cbegin(size_type n) const { return const_local_iterator(table_.begin(n)); } const_local_iterator cend(size_type) const { return const_local_iterator(); } // hash policy float load_factor() const noexcept; float max_load_factor() const noexcept { return table_.mlf_; } void max_load_factor(float) noexcept; void rehash(size_type); void reserve(size_type); #if !BOOST_WORKAROUND(BOOST_BORLANDC, < 0x0582) friend bool operator== <T, H, P, A>(unordered_multiset const&, unordered_multiset const&); friend bool operator!= <T, H, P, A>(unordered_multiset const&, unordered_multiset const&); #endif }; // class template unordered_multiset template <class Archive, class K, class H, class P, class A> void serialize( Archive& ar, unordered_multiset<K, H, P, A>& c, unsigned int version) { detail::serialize_fca_container(ar, c, version); } #if BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES template <class InputIterator, class Hash = boost::hash<typename std::iterator_traits<InputIterator>::value_type>, class Pred = std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, class Allocator = std::allocator< typename std::iterator_traits<InputIterator>::value_type>, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = 
std::enable_if_t<detail::is_pred_v<Pred> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_multiset(InputIterator, InputIterator, std::size_t = boost::unordered::detail::default_bucket_count, Hash = Hash(), Pred = Pred(), Allocator = Allocator()) -> unordered_multiset< typename std::iterator_traits<InputIterator>::value_type, Hash, Pred, Allocator>; template <class T, class Hash = boost::hash<T>, class Pred = std::equal_to<T>, class Allocator = std::allocator<T>, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_pred_v<Pred> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_multiset(std::initializer_list<T>, std::size_t = boost::unordered::detail::default_bucket_count, Hash = Hash(), Pred = Pred(), Allocator = Allocator()) -> unordered_multiset<T, Hash, Pred, Allocator>; template <class InputIterator, class Allocator, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_multiset(InputIterator, InputIterator, std::size_t, Allocator) -> unordered_multiset< typename std::iterator_traits<InputIterator>::value_type, boost::hash<typename std::iterator_traits<InputIterator>::value_type>, std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, Allocator>; template <class InputIterator, class Hash, class Allocator, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_multiset( InputIterator, InputIterator, std::size_t, Hash, Allocator) -> unordered_multiset< typename std::iterator_traits<InputIterator>::value_type, Hash, std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, Allocator>; template <class T, class Allocator, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > 
unordered_multiset(std::initializer_list<T>, std::size_t, Allocator) -> unordered_multiset<T, boost::hash<T>, std::equal_to<T>, Allocator>; template <class T, class Hash, class Allocator, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_multiset(std::initializer_list<T>, std::size_t, Hash, Allocator) -> unordered_multiset<T, Hash, std::equal_to<T>, Allocator>; template <class InputIterator, class Allocator, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_multiset(InputIterator, InputIterator, Allocator) -> unordered_multiset< typename std::iterator_traits<InputIterator>::value_type, boost::hash<typename std::iterator_traits<InputIterator>::value_type>, std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, Allocator>; template <class T, class Allocator, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_multiset(std::initializer_list<T>, Allocator) -> unordered_multiset<T, boost::hash<T>, std::equal_to<T>, Allocator>; #endif //////////////////////////////////////////////////////////////////////////// template <class T, class H, class P, class A> unordered_set<T, H, P, A>::unordered_set() { } template <class T, class H, class P, class A> unordered_set<T, H, P, A>::unordered_set(size_type n, const hasher& hf, const key_equal& eql, const allocator_type& a) : table_(n, hf, eql, a) { } template <class T, class H, class P, class A> template <class InputIt> unordered_set<T, H, P, A>::unordered_set(InputIt f, InputIt l, size_type n, const hasher& hf, const key_equal& eql, const allocator_type& a) : table_(boost::unordered::detail::initial_size(f, l, n), hf, eql, a) { this->insert(f, l); } template <class T, class H, class P, class A> unordered_set<T, H, P, A>::unordered_set(unordered_set const& other) : table_(other.table_, unordered_set::value_allocator_traits:: 
select_on_container_copy_construction(other.get_allocator())) { if (other.size()) { table_.copy_buckets(other.table_, std::true_type()); } } template <class T, class H, class P, class A> unordered_set<T, H, P, A>::unordered_set(allocator_type const& a) : table_(boost::unordered::detail::default_bucket_count, hasher(), key_equal(), a) { } template <class T, class H, class P, class A> unordered_set<T, H, P, A>::unordered_set( unordered_set const& other, allocator_type const& a) : table_(other.table_, a) { if (other.table_.size_) { table_.copy_buckets(other.table_, std::true_type()); } } template <class T, class H, class P, class A> unordered_set<T, H, P, A>::unordered_set( unordered_set&& other, allocator_type const& a) : table_(other.table_, a, boost::unordered::detail::move_tag()) { table_.move_construct_buckets(other.table_); } template <class T, class H, class P, class A> unordered_set<T, H, P, A>::unordered_set( std::initializer_list<value_type> list, size_type n, const hasher& hf, const key_equal& eql, const allocator_type& a) : table_( boost::unordered::detail::initial_size(list.begin(), list.end(), n), hf, eql, a) { this->insert(list.begin(), list.end()); } template <class T, class H, class P, class A> unordered_set<T, H, P, A>::unordered_set( size_type n, const allocator_type& a) : table_(n, hasher(), key_equal(), a) { } template <class T, class H, class P, class A> unordered_set<T, H, P, A>::unordered_set( size_type n, const hasher& hf, const allocator_type& a) : table_(n, hf, key_equal(), a) { } template <class T, class H, class P, class A> template <class InputIterator> unordered_set<T, H, P, A>::unordered_set( InputIterator f, InputIterator l, const allocator_type& a) : table_(boost::unordered::detail::initial_size( f, l, detail::default_bucket_count), hasher(), key_equal(), a) { this->insert(f, l); } template <class T, class H, class P, class A> template <class InputIt> unordered_set<T, H, P, A>::unordered_set( InputIt f, InputIt l, size_type n, const 
allocator_type& a) : table_(boost::unordered::detail::initial_size(f, l, n), hasher(), key_equal(), a) { this->insert(f, l); } template <class T, class H, class P, class A> template <class InputIt> unordered_set<T, H, P, A>::unordered_set(InputIt f, InputIt l, size_type n, const hasher& hf, const allocator_type& a) : table_( boost::unordered::detail::initial_size(f, l, n), hf, key_equal(), a) { this->insert(f, l); } template <class T, class H, class P, class A> unordered_set<T, H, P, A>::unordered_set( std::initializer_list<value_type> list, const allocator_type& a) : table_(boost::unordered::detail::initial_size( list.begin(), list.end(), detail::default_bucket_count), hasher(), key_equal(), a) { this->insert(list.begin(), list.end()); } template <class T, class H, class P, class A> unordered_set<T, H, P, A>::unordered_set( std::initializer_list<value_type> list, size_type n, const allocator_type& a) : table_( boost::unordered::detail::initial_size(list.begin(), list.end(), n), hasher(), key_equal(), a) { this->insert(list.begin(), list.end()); } template <class T, class H, class P, class A> unordered_set<T, H, P, A>::unordered_set( std::initializer_list<value_type> list, size_type n, const hasher& hf, const allocator_type& a) : table_( boost::unordered::detail::initial_size(list.begin(), list.end(), n), hf, key_equal(), a) { this->insert(list.begin(), list.end()); } template <class T, class H, class P, class A> unordered_set<T, H, P, A>::~unordered_set() noexcept { } template <class T, class H, class P, class A> unordered_set<T, H, P, A>& unordered_set<T, H, P, A>::operator=( std::initializer_list<value_type> list) { this->clear(); this->insert(list.begin(), list.end()); return *this; } // size and capacity template <class T, class H, class P, class A> std::size_t unordered_set<T, H, P, A>::max_size() const noexcept { using namespace std; // size < mlf_ * count return boost::unordered::detail::double_to_size( ceil(static_cast<double>(table_.mlf_) * 
static_cast<double>(table_.max_bucket_count()))) - 1; } // modifiers template <class T, class H, class P, class A> template <class InputIt> void unordered_set<T, H, P, A>::insert(InputIt first, InputIt last) { if (first != last) { table_.insert_range_unique( table::extractor::extract(*first), first, last); } } template <class T, class H, class P, class A> void unordered_set<T, H, P, A>::insert( std::initializer_list<value_type> list) { this->insert(list.begin(), list.end()); } template <class T, class H, class P, class A> typename unordered_set<T, H, P, A>::iterator unordered_set<T, H, P, A>::erase(const_iterator position) { return table_.erase_node(position); } template <class T, class H, class P, class A> typename unordered_set<T, H, P, A>::size_type unordered_set<T, H, P, A>::erase(const key_type& k) { return table_.erase_key_unique_impl(k); } template <class T, class H, class P, class A> typename unordered_set<T, H, P, A>::iterator unordered_set<T, H, P, A>::erase(const_iterator first, const_iterator last) { return table_.erase_nodes_range(first, last); } template <class T, class H, class P, class A> void unordered_set<T, H, P, A>::swap(unordered_set& other) noexcept(value_allocator_traits::is_always_equal::value&& boost::unordered::detail::is_nothrow_swappable<H>::value&& boost::unordered::detail::is_nothrow_swappable<P>::value) { table_.swap(other.table_); } // observers template <class T, class H, class P, class A> typename unordered_set<T, H, P, A>::hasher unordered_set<T, H, P, A>::hash_function() const { return table_.hash_function(); } template <class T, class H, class P, class A> typename unordered_set<T, H, P, A>::key_equal unordered_set<T, H, P, A>::key_eq() const { return table_.key_eq(); } template <class T, class H, class P, class A> template <typename H2, typename P2> void unordered_set<T, H, P, A>::merge( boost::unordered_set<T, H2, P2, A>& source) { table_.merge_unique(source.table_); } template <class T, class H, class P, class A> template 
<typename H2, typename P2> void unordered_set<T, H, P, A>::merge( boost::unordered_set<T, H2, P2, A>&& source) { table_.merge_unique(source.table_); } template <class T, class H, class P, class A> template <typename H2, typename P2> void unordered_set<T, H, P, A>::merge( boost::unordered_multiset<T, H2, P2, A>& source) { table_.merge_unique(source.table_); } template <class T, class H, class P, class A> template <typename H2, typename P2> void unordered_set<T, H, P, A>::merge( boost::unordered_multiset<T, H2, P2, A>&& source) { table_.merge_unique(source.table_); } // lookup template <class T, class H, class P, class A> typename unordered_set<T, H, P, A>::const_iterator unordered_set<T, H, P, A>::find(const key_type& k) const { return const_iterator(table_.find(k)); } template <class T, class H, class P, class A> template <class CompatibleKey, class CompatibleHash, class CompatiblePredicate> typename unordered_set<T, H, P, A>::const_iterator unordered_set<T, H, P, A>::find(CompatibleKey const& k, CompatibleHash const& hash, CompatiblePredicate const& eq) const { return table_.transparent_find(k, hash, eq); } template <class T, class H, class P, class A> typename unordered_set<T, H, P, A>::size_type unordered_set<T, H, P, A>::count(const key_type& k) const { return table_.find_node(k) ? 
1 : 0; } template <class T, class H, class P, class A> std::pair<typename unordered_set<T, H, P, A>::const_iterator, typename unordered_set<T, H, P, A>::const_iterator> unordered_set<T, H, P, A>::equal_range(const key_type& k) const { iterator first = table_.find(k); iterator second = first; if (second != this->end()) { ++second; } return std::make_pair(first, second); } template <class T, class H, class P, class A> typename unordered_set<T, H, P, A>::size_type unordered_set<T, H, P, A>::bucket_size(size_type n) const { return table_.bucket_size(n); } // hash policy template <class T, class H, class P, class A> float unordered_set<T, H, P, A>::load_factor() const noexcept { if (table_.size_ == 0) { return 0.0f; } BOOST_ASSERT(table_.bucket_count() != 0); return static_cast<float>(table_.size_) / static_cast<float>(table_.bucket_count()); } template <class T, class H, class P, class A> void unordered_set<T, H, P, A>::max_load_factor(float m) noexcept { table_.max_load_factor(m); } template <class T, class H, class P, class A> void unordered_set<T, H, P, A>::rehash(size_type n) { table_.rehash(n); } template <class T, class H, class P, class A> void unordered_set<T, H, P, A>::reserve(size_type n) { table_.reserve(n); } template <class T, class H, class P, class A> inline bool operator==( unordered_set<T, H, P, A> const& m1, unordered_set<T, H, P, A> const& m2) { #if BOOST_WORKAROUND(BOOST_CODEGEARC, BOOST_TESTED_AT(0x0613)) struct dummy { unordered_set<T, H, P, A> x; }; #endif return m1.table_.equals_unique(m2.table_); } template <class T, class H, class P, class A> inline bool operator!=( unordered_set<T, H, P, A> const& m1, unordered_set<T, H, P, A> const& m2) { #if BOOST_WORKAROUND(BOOST_CODEGEARC, BOOST_TESTED_AT(0x0613)) struct dummy { unordered_set<T, H, P, A> x; }; #endif return !m1.table_.equals_unique(m2.table_); } template <class T, class H, class P, class A> inline void swap(unordered_set<T, H, P, A>& m1, unordered_set<T, H, P, A>& m2) 
noexcept(noexcept(m1.swap(m2))) { #if BOOST_WORKAROUND(BOOST_CODEGEARC, BOOST_TESTED_AT(0x0613)) struct dummy { unordered_set<T, H, P, A> x; }; #endif m1.swap(m2); } template <class K, class H, class P, class A, class Predicate> typename unordered_set<K, H, P, A>::size_type erase_if( unordered_set<K, H, P, A>& c, Predicate pred) { return detail::erase_if(c, pred); } //////////////////////////////////////////////////////////////////////////// template <class T, class H, class P, class A> unordered_multiset<T, H, P, A>::unordered_multiset() { } template <class T, class H, class P, class A> unordered_multiset<T, H, P, A>::unordered_multiset(size_type n, const hasher& hf, const key_equal& eql, const allocator_type& a) : table_(n, hf, eql, a) { } template <class T, class H, class P, class A> template <class InputIt> unordered_multiset<T, H, P, A>::unordered_multiset(InputIt f, InputIt l, size_type n, const hasher& hf, const key_equal& eql, const allocator_type& a) : table_(boost::unordered::detail::initial_size(f, l, n), hf, eql, a) { this->insert(f, l); } template <class T, class H, class P, class A> unordered_multiset<T, H, P, A>::unordered_multiset( unordered_multiset const& other) : table_(other.table_, unordered_multiset::value_allocator_traits:: select_on_container_copy_construction(other.get_allocator())) { if (other.table_.size_) { table_.copy_buckets(other.table_, std::false_type()); } } template <class T, class H, class P, class A> unordered_multiset<T, H, P, A>::unordered_multiset(allocator_type const& a) : table_(boost::unordered::detail::default_bucket_count, hasher(), key_equal(), a) { } template <class T, class H, class P, class A> unordered_multiset<T, H, P, A>::unordered_multiset( unordered_multiset const& other, allocator_type const& a) : table_(other.table_, a) { if (other.table_.size_) { table_.copy_buckets(other.table_, std::false_type()); } } template <class T, class H, class P, class A> unordered_multiset<T, H, P, A>::unordered_multiset( 
unordered_multiset&& other, allocator_type const& a) : table_(other.table_, a, boost::unordered::detail::move_tag()) { table_.move_construct_buckets(other.table_); } template <class T, class H, class P, class A> unordered_multiset<T, H, P, A>::unordered_multiset( std::initializer_list<value_type> list, size_type n, const hasher& hf, const key_equal& eql, const allocator_type& a) : table_( boost::unordered::detail::initial_size(list.begin(), list.end(), n), hf, eql, a) { this->insert(list.begin(), list.end()); } template <class T, class H, class P, class A> unordered_multiset<T, H, P, A>::unordered_multiset( size_type n, const allocator_type& a) : table_(n, hasher(), key_equal(), a) { } template <class T, class H, class P, class A> unordered_multiset<T, H, P, A>::unordered_multiset( size_type n, const hasher& hf, const allocator_type& a) : table_(n, hf, key_equal(), a) { } template <class T, class H, class P, class A> template <class InputIterator> unordered_multiset<T, H, P, A>::unordered_multiset( InputIterator f, InputIterator l, const allocator_type& a) : table_(boost::unordered::detail::initial_size( f, l, detail::default_bucket_count), hasher(), key_equal(), a) { this->insert(f, l); } template <class T, class H, class P, class A> template <class InputIt> unordered_multiset<T, H, P, A>::unordered_multiset( InputIt f, InputIt l, size_type n, const allocator_type& a) : table_(boost::unordered::detail::initial_size(f, l, n), hasher(), key_equal(), a) { this->insert(f, l); } template <class T, class H, class P, class A> template <class InputIt> unordered_multiset<T, H, P, A>::unordered_multiset(InputIt f, InputIt l, size_type n, const hasher& hf, const allocator_type& a) : table_( boost::unordered::detail::initial_size(f, l, n), hf, key_equal(), a) { this->insert(f, l); } template <class T, class H, class P, class A> unordered_multiset<T, H, P, A>::unordered_multiset( std::initializer_list<value_type> list, const allocator_type& a) : 
table_(boost::unordered::detail::initial_size( list.begin(), list.end(), detail::default_bucket_count), hasher(), key_equal(), a) { this->insert(list.begin(), list.end()); } template <class T, class H, class P, class A> unordered_multiset<T, H, P, A>::unordered_multiset( std::initializer_list<value_type> list, size_type n, const allocator_type& a) : table_( boost::unordered::detail::initial_size(list.begin(), list.end(), n), hasher(), key_equal(), a) { this->insert(list.begin(), list.end()); } template <class T, class H, class P, class A> unordered_multiset<T, H, P, A>::unordered_multiset( std::initializer_list<value_type> list, size_type n, const hasher& hf, const allocator_type& a) : table_( boost::unordered::detail::initial_size(list.begin(), list.end(), n), hf, key_equal(), a) { this->insert(list.begin(), list.end()); } template <class T, class H, class P, class A> unordered_multiset<T, H, P, A>::~unordered_multiset() noexcept { } template <class T, class H, class P, class A> unordered_multiset<T, H, P, A>& unordered_multiset<T, H, P, A>::operator=( std::initializer_list<value_type> list) { this->clear(); this->insert(list.begin(), list.end()); return *this; } // size and capacity template <class T, class H, class P, class A> std::size_t unordered_multiset<T, H, P, A>::max_size() const noexcept { using namespace std; // size < mlf_ * count return boost::unordered::detail::double_to_size( ceil(static_cast<double>(table_.mlf_) * static_cast<double>(table_.max_bucket_count()))) - 1; } // modifiers template <class T, class H, class P, class A> template <class InputIt> void unordered_multiset<T, H, P, A>::insert(InputIt first, InputIt last) { table_.insert_range_equiv(first, last); } template <class T, class H, class P, class A> void unordered_multiset<T, H, P, A>::insert( std::initializer_list<value_type> list) { this->insert(list.begin(), list.end()); } template <class T, class H, class P, class A> typename unordered_multiset<T, H, P, A>::iterator 
unordered_multiset<T, H, P, A>::erase(const_iterator position) { BOOST_ASSERT(position != this->end()); return table_.erase_node(position); } template <class T, class H, class P, class A> typename unordered_multiset<T, H, P, A>::size_type unordered_multiset<T, H, P, A>::erase(const key_type& k) { return table_.erase_key_equiv(k); } template <class T, class H, class P, class A> typename unordered_multiset<T, H, P, A>::iterator unordered_multiset<T, H, P, A>::erase( const_iterator first, const_iterator last) { return table_.erase_nodes_range(first, last); } template <class T, class H, class P, class A> void unordered_multiset<T, H, P, A>::swap(unordered_multiset& other) noexcept(value_allocator_traits::is_always_equal::value&& boost::unordered::detail::is_nothrow_swappable<H>::value&& boost::unordered::detail::is_nothrow_swappable<P>::value) { table_.swap(other.table_); } // observers template <class T, class H, class P, class A> typename unordered_multiset<T, H, P, A>::hasher unordered_multiset<T, H, P, A>::hash_function() const { return table_.hash_function(); } template <class T, class H, class P, class A> typename unordered_multiset<T, H, P, A>::key_equal unordered_multiset<T, H, P, A>::key_eq() const { return table_.key_eq(); } template <class T, class H, class P, class A> template <typename H2, typename P2> void unordered_multiset<T, H, P, A>::merge( boost::unordered_multiset<T, H2, P2, A>& source) { while (!source.empty()) { insert(source.extract(source.begin())); } } template <class T, class H, class P, class A> template <typename H2, typename P2> void unordered_multiset<T, H, P, A>::merge( boost::unordered_multiset<T, H2, P2, A>&& source) { while (!source.empty()) { insert(source.extract(source.begin())); } } template <class T, class H, class P, class A> template <typename H2, typename P2> void unordered_multiset<T, H, P, A>::merge( boost::unordered_set<T, H2, P2, A>& source) { while (!source.empty()) { insert(source.extract(source.begin())); } } template 
<class T, class H, class P, class A> template <typename H2, typename P2> void unordered_multiset<T, H, P, A>::merge( boost::unordered_set<T, H2, P2, A>&& source) { while (!source.empty()) { insert(source.extract(source.begin())); } } // lookup template <class T, class H, class P, class A> typename unordered_multiset<T, H, P, A>::const_iterator unordered_multiset<T, H, P, A>::find(const key_type& k) const { return const_iterator(table_.find(k)); } template <class T, class H, class P, class A> template <class CompatibleKey, class CompatibleHash, class CompatiblePredicate> typename unordered_multiset<T, H, P, A>::const_iterator unordered_multiset<T, H, P, A>::find(CompatibleKey const& k, CompatibleHash const& hash, CompatiblePredicate const& eq) const { return table_.transparent_find(k, hash, eq); } template <class T, class H, class P, class A> typename unordered_multiset<T, H, P, A>::size_type unordered_multiset<T, H, P, A>::count(const key_type& k) const { return table_.group_count(k); } template <class T, class H, class P, class A> std::pair<typename unordered_multiset<T, H, P, A>::const_iterator, typename unordered_multiset<T, H, P, A>::const_iterator> unordered_multiset<T, H, P, A>::equal_range(const key_type& k) const { iterator n = table_.find(k); return std::make_pair(const_iterator(n), const_iterator(n == end() ? 
n : table_.next_group(k, n))); } template <class T, class H, class P, class A> typename unordered_multiset<T, H, P, A>::size_type unordered_multiset<T, H, P, A>::bucket_size(size_type n) const { return table_.bucket_size(n); } // hash policy template <class T, class H, class P, class A> float unordered_multiset<T, H, P, A>::load_factor() const noexcept { if (table_.size_ == 0) { return 0.0f; } BOOST_ASSERT(table_.bucket_count() != 0); return static_cast<float>(table_.size_) / static_cast<float>(table_.bucket_count()); } template <class T, class H, class P, class A> void unordered_multiset<T, H, P, A>::max_load_factor(float m) noexcept { table_.max_load_factor(m); } template <class T, class H, class P, class A> void unordered_multiset<T, H, P, A>::rehash(size_type n) { table_.rehash(n); } template <class T, class H, class P, class A> void unordered_multiset<T, H, P, A>::reserve(size_type n) { table_.reserve(n); } template <class T, class H, class P, class A> inline bool operator==(unordered_multiset<T, H, P, A> const& m1, unordered_multiset<T, H, P, A> const& m2) { #if BOOST_WORKAROUND(BOOST_CODEGEARC, BOOST_TESTED_AT(0x0613)) struct dummy { unordered_multiset<T, H, P, A> x; }; #endif return m1.table_.equals_equiv(m2.table_); } template <class T, class H, class P, class A> inline bool operator!=(unordered_multiset<T, H, P, A> const& m1, unordered_multiset<T, H, P, A> const& m2) { #if BOOST_WORKAROUND(BOOST_CODEGEARC, BOOST_TESTED_AT(0x0613)) struct dummy { unordered_multiset<T, H, P, A> x; }; #endif return !m1.table_.equals_equiv(m2.table_); } template <class T, class H, class P, class A> inline void swap(unordered_multiset<T, H, P, A>& m1, unordered_multiset<T, H, P, A>& m2) noexcept(noexcept(m1.swap(m2))) { #if BOOST_WORKAROUND(BOOST_CODEGEARC, BOOST_TESTED_AT(0x0613)) struct dummy { unordered_multiset<T, H, P, A> x; }; #endif m1.swap(m2); } template <class K, class H, class P, class A, class Predicate> typename unordered_multiset<K, H, P, A>::size_type erase_if( 
unordered_multiset<K, H, P, A>& c, Predicate pred) { return detail::erase_if(c, pred); } template <typename N, typename T, typename A> class node_handle_set { template <typename Types> friend struct ::boost::unordered::detail::table; template <class T2, class H2, class P2, class A2> friend class unordered_set; template <class T2, class H2, class P2, class A2> friend class unordered_multiset; typedef typename boost::unordered::detail::rebind_wrap<A, T>::type value_allocator; typedef boost::unordered::detail::allocator_traits<value_allocator> value_allocator_traits; typedef N node; typedef typename boost::unordered::detail::rebind_wrap<A, node>::type node_allocator; typedef boost::unordered::detail::allocator_traits<node_allocator> node_allocator_traits; typedef typename node_allocator_traits::pointer node_pointer; public: typedef T value_type; typedef A allocator_type; private: node_pointer ptr_; bool has_alloc_; boost::unordered::detail::optional<value_allocator> alloc_; node_handle_set(node_pointer ptr, allocator_type const& a) : ptr_(ptr), alloc_(a) { } public: constexpr node_handle_set() noexcept : ptr_(), has_alloc_(false) {} node_handle_set(node_handle_set const&) = delete; node_handle_set& operator=(node_handle_set const&) = delete; ~node_handle_set() { if (ptr_) { node_allocator node_alloc(*alloc_); boost::unordered::detail::node_tmp<node_allocator> tmp( ptr_, node_alloc); } } node_handle_set(node_handle_set&& n) noexcept : ptr_(n.ptr_), alloc_(std::move(n.alloc_)) { n.ptr_ = node_pointer(); } node_handle_set& operator=(node_handle_set&& n) { BOOST_ASSERT(!alloc_.has_value() || value_allocator_traits:: propagate_on_container_move_assignment::value || (n.alloc_.has_value() && alloc_ == n.alloc_)); if (ptr_) { node_allocator node_alloc(*alloc_); boost::unordered::detail::node_tmp<node_allocator> tmp( ptr_, node_alloc); ptr_ = node_pointer(); } if (!alloc_.has_value() || value_allocator_traits::propagate_on_container_move_assignment:: value) { alloc_ = 
std::move(n.alloc_); } ptr_ = n.ptr_; n.ptr_ = node_pointer(); return *this; } value_type& value() const { return ptr_->value(); } allocator_type get_allocator() const { return *alloc_; } explicit operator bool() const noexcept { return !this->operator!(); } bool operator!() const noexcept { return ptr_ ? 0 : 1; } BOOST_ATTRIBUTE_NODISCARD bool empty() const noexcept { return ptr_ ? 0 : 1; } void swap(node_handle_set& n) noexcept(value_allocator_traits::propagate_on_container_swap::value || value_allocator_traits::is_always_equal::value) { BOOST_ASSERT( !alloc_.has_value() || !n.alloc_.has_value() || value_allocator_traits::propagate_on_container_swap::value || alloc_ == n.alloc_); if (value_allocator_traits::propagate_on_container_swap::value || !alloc_.has_value() || !n.alloc_.has_value()) { boost::core::invoke_swap(alloc_, n.alloc_); } boost::core::invoke_swap(ptr_, n.ptr_); } }; template <typename N, typename T, typename A> void swap(node_handle_set<N, T, A>& x, node_handle_set<N, T, A>& y) noexcept(noexcept(x.swap(y))) { x.swap(y); } template <class Iter, class NodeType> struct insert_return_type_set { public: Iter position; bool inserted; NodeType node; insert_return_type_set() : position(), inserted(false), node() {} insert_return_type_set(insert_return_type_set const&) = delete; insert_return_type_set& operator=(insert_return_type_set const&) = delete; insert_return_type_set(insert_return_type_set&& x) noexcept : position(x.position), inserted(x.inserted), node(std::move(x.node)) { } insert_return_type_set& operator=(insert_return_type_set&& x) { inserted = x.inserted; position = x.position; node = std::move(x.node); return *this; } }; template <class Iter, class NodeType> void swap(insert_return_type_set<Iter, NodeType>& x, insert_return_type_set<Iter, NodeType>& y) { boost::core::invoke_swap(x.node, y.node); boost::core::invoke_swap(x.inserted, y.inserted); boost::core::invoke_swap(x.position, y.position); } } // namespace unordered namespace 
serialization { template <class K, class H, class P, class A> struct version<boost::unordered_set<K, H, P, A> > { BOOST_STATIC_CONSTANT(int, value = 1); }; template <class K, class H, class P, class A> struct version<boost::unordered_multiset<K, H, P, A> > { BOOST_STATIC_CONSTANT(int, value = 1); }; } // namespace serialization } // namespace boost #if defined(BOOST_MSVC) #pragma warning(pop) #endif #endif // BOOST_UNORDERED_UNORDERED_SET_HPP_INCLUDED
0
repos/unordered/include/boost
repos/unordered/include/boost/unordered/unordered_map_fwd.hpp
// Copyright (C) 2008-2011 Daniel James. // Copyright (C) 2022-2023 Christian Mazakas // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_MAP_FWD_HPP_INCLUDED #define BOOST_UNORDERED_MAP_FWD_HPP_INCLUDED #include <boost/config.hpp> #if defined(BOOST_HAS_PRAGMA_ONCE) #pragma once #endif #include <boost/container_hash/hash_fwd.hpp> #include <functional> #include <memory> namespace boost { namespace unordered { template <class K, class T, class H = boost::hash<K>, class P = std::equal_to<K>, class A = std::allocator<std::pair<const K, T> > > class unordered_map; template <class K, class T, class H, class P, class A> inline bool operator==( unordered_map<K, T, H, P, A> const&, unordered_map<K, T, H, P, A> const&); template <class K, class T, class H, class P, class A> inline bool operator!=( unordered_map<K, T, H, P, A> const&, unordered_map<K, T, H, P, A> const&); template <class K, class T, class H, class P, class A> inline void swap(unordered_map<K, T, H, P, A>& m1, unordered_map<K, T, H, P, A>& m2) noexcept(noexcept(m1.swap(m2))); template <class K, class T, class H, class P, class A, class Predicate> typename unordered_map<K, T, H, P, A>::size_type erase_if( unordered_map<K, T, H, P, A>& c, Predicate pred); template <class K, class T, class H = boost::hash<K>, class P = std::equal_to<K>, class A = std::allocator<std::pair<const K, T> > > class unordered_multimap; template <class K, class T, class H, class P, class A> inline bool operator==(unordered_multimap<K, T, H, P, A> const&, unordered_multimap<K, T, H, P, A> const&); template <class K, class T, class H, class P, class A> inline bool operator!=(unordered_multimap<K, T, H, P, A> const&, unordered_multimap<K, T, H, P, A> const&); template <class K, class T, class H, class P, class A> inline void swap(unordered_multimap<K, T, H, P, A>& m1, unordered_multimap<K, T, H, P, A>& m2) 
noexcept(noexcept(m1.swap(m2))); template <class K, class T, class H, class P, class A, class Predicate> typename unordered_multimap<K, T, H, P, A>::size_type erase_if( unordered_multimap<K, T, H, P, A>& c, Predicate pred); template <class N, class K, class T, class A> class node_handle_map; template <class Iter, class NodeType> struct insert_return_type_map; } // namespace unordered using boost::unordered::unordered_map; using boost::unordered::unordered_multimap; } // namespace boost #endif
0
repos/unordered/include/boost
repos/unordered/include/boost/unordered/unordered_node_map_fwd.hpp
// Copyright (C) 2022 Christian Mazakas // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_NODE_MAP_FWD_HPP_INCLUDED #define BOOST_UNORDERED_NODE_MAP_FWD_HPP_INCLUDED #include <boost/config.hpp> #if defined(BOOST_HAS_PRAGMA_ONCE) #pragma once #endif #include <boost/container_hash/hash_fwd.hpp> #include <functional> #include <memory> namespace boost { namespace unordered { template <class Key, class T, class Hash = boost::hash<Key>, class KeyEqual = std::equal_to<Key>, class Allocator = std::allocator<std::pair<const Key, T> > > class unordered_node_map; template <class Key, class T, class Hash, class KeyEqual, class Allocator> bool operator==( unordered_node_map<Key, T, Hash, KeyEqual, Allocator> const& lhs, unordered_node_map<Key, T, Hash, KeyEqual, Allocator> const& rhs); template <class Key, class T, class Hash, class KeyEqual, class Allocator> bool operator!=( unordered_node_map<Key, T, Hash, KeyEqual, Allocator> const& lhs, unordered_node_map<Key, T, Hash, KeyEqual, Allocator> const& rhs); template <class Key, class T, class Hash, class KeyEqual, class Allocator> void swap(unordered_node_map<Key, T, Hash, KeyEqual, Allocator>& lhs, unordered_node_map<Key, T, Hash, KeyEqual, Allocator>& rhs) noexcept(noexcept(lhs.swap(rhs))); } // namespace unordered using boost::unordered::unordered_node_map; } // namespace boost #endif
0
repos/unordered/include/boost
repos/unordered/include/boost/unordered/concurrent_flat_map_fwd.hpp
/* Fast open-addressing concurrent hashmap. * * Copyright 2023 Christian Mazakas. * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) * * See https://www.boost.org/libs/unordered for library home page. */ #ifndef BOOST_UNORDERED_CONCURRENT_FLAT_MAP_FWD_HPP #define BOOST_UNORDERED_CONCURRENT_FLAT_MAP_FWD_HPP #include <boost/container_hash/hash_fwd.hpp> #include <functional> #include <memory> namespace boost { namespace unordered { template <class Key, class T, class Hash = boost::hash<Key>, class Pred = std::equal_to<Key>, class Allocator = std::allocator<std::pair<Key const, T> > > class concurrent_flat_map; template <class Key, class T, class Hash, class KeyEqual, class Allocator> bool operator==( concurrent_flat_map<Key, T, Hash, KeyEqual, Allocator> const& lhs, concurrent_flat_map<Key, T, Hash, KeyEqual, Allocator> const& rhs); template <class Key, class T, class Hash, class KeyEqual, class Allocator> bool operator!=( concurrent_flat_map<Key, T, Hash, KeyEqual, Allocator> const& lhs, concurrent_flat_map<Key, T, Hash, KeyEqual, Allocator> const& rhs); template <class Key, class T, class Hash, class Pred, class Alloc> void swap(concurrent_flat_map<Key, T, Hash, Pred, Alloc>& x, concurrent_flat_map<Key, T, Hash, Pred, Alloc>& y) noexcept(noexcept(x.swap(y))); template <class K, class T, class H, class P, class A, class Predicate> typename concurrent_flat_map<K, T, H, P, A>::size_type erase_if( concurrent_flat_map<K, T, H, P, A>& c, Predicate pred); } // namespace unordered using boost::unordered::concurrent_flat_map; } // namespace boost #endif // BOOST_UNORDERED_CONCURRENT_FLAT_MAP_HPP
0
repos/unordered/include/boost
repos/unordered/include/boost/unordered/unordered_flat_map_fwd.hpp
// Copyright (C) 2022 Christian Mazakas // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_FLAT_MAP_FWD_HPP_INCLUDED #define BOOST_UNORDERED_FLAT_MAP_FWD_HPP_INCLUDED #include <boost/config.hpp> #if defined(BOOST_HAS_PRAGMA_ONCE) #pragma once #endif #include <boost/container_hash/hash_fwd.hpp> #include <functional> #include <memory> namespace boost { namespace unordered { template <class Key, class T, class Hash = boost::hash<Key>, class KeyEqual = std::equal_to<Key>, class Allocator = std::allocator<std::pair<const Key, T> > > class unordered_flat_map; template <class Key, class T, class Hash, class KeyEqual, class Allocator> bool operator==( unordered_flat_map<Key, T, Hash, KeyEqual, Allocator> const& lhs, unordered_flat_map<Key, T, Hash, KeyEqual, Allocator> const& rhs); template <class Key, class T, class Hash, class KeyEqual, class Allocator> bool operator!=( unordered_flat_map<Key, T, Hash, KeyEqual, Allocator> const& lhs, unordered_flat_map<Key, T, Hash, KeyEqual, Allocator> const& rhs); template <class Key, class T, class Hash, class KeyEqual, class Allocator> void swap(unordered_flat_map<Key, T, Hash, KeyEqual, Allocator>& lhs, unordered_flat_map<Key, T, Hash, KeyEqual, Allocator>& rhs) noexcept(noexcept(lhs.swap(rhs))); } // namespace unordered using boost::unordered::unordered_flat_map; } // namespace boost #endif
0
repos/unordered/include/boost
repos/unordered/include/boost/unordered/unordered_node_set.hpp
// Copyright (C) 2022-2023 Christian Mazakas // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_UNORDERED_NODE_SET_HPP_INCLUDED #define BOOST_UNORDERED_UNORDERED_NODE_SET_HPP_INCLUDED #include <boost/config.hpp> #if defined(BOOST_HAS_PRAGMA_ONCE) #pragma once #endif #include <boost/unordered/detail/foa/element_type.hpp> #include <boost/unordered/detail/foa/node_handle.hpp> #include <boost/unordered/detail/foa/node_set_types.hpp> #include <boost/unordered/detail/foa/table.hpp> #include <boost/unordered/detail/serialize_container.hpp> #include <boost/unordered/detail/type_traits.hpp> #include <boost/unordered/unordered_node_set_fwd.hpp> #include <boost/core/allocator_access.hpp> #include <boost/container_hash/hash.hpp> #include <boost/throw_exception.hpp> #include <initializer_list> #include <iterator> #include <type_traits> #include <utility> namespace boost { namespace unordered { #if defined(BOOST_MSVC) #pragma warning(push) #pragma warning(disable : 4714) /* marked as __forceinline not inlined */ #endif namespace detail { template <class TypePolicy, class Allocator> struct node_set_handle : public detail::foa::node_handle_base<TypePolicy, Allocator> { private: using base_type = detail::foa::node_handle_base<TypePolicy, Allocator>; using typename base_type::type_policy; template <class Key, class Hash, class Pred, class Alloc> friend class boost::unordered::unordered_node_set; public: using value_type = typename TypePolicy::value_type; constexpr node_set_handle() noexcept = default; node_set_handle(node_set_handle&& nh) noexcept = default; node_set_handle& operator=(node_set_handle&&) noexcept = default; value_type& value() const { BOOST_ASSERT(!this->empty()); return const_cast<value_type&>(this->data()); } }; } // namespace detail template <class Key, class Hash, class KeyEqual, class Allocator> class unordered_node_set { using set_types 
= detail::foa::node_set_types<Key, typename boost::allocator_void_pointer<Allocator>::type>; using table_type = detail::foa::table<set_types, Hash, KeyEqual, typename boost::allocator_rebind<Allocator, typename set_types::value_type>::type>; table_type table_; template <class K, class H, class KE, class A> bool friend operator==(unordered_node_set<K, H, KE, A> const& lhs, unordered_node_set<K, H, KE, A> const& rhs); template <class K, class H, class KE, class A, class Pred> typename unordered_node_set<K, H, KE, A>::size_type friend erase_if( unordered_node_set<K, H, KE, A>& set, Pred pred); public: using key_type = Key; using value_type = typename set_types::value_type; using init_type = typename set_types::init_type; using size_type = std::size_t; using difference_type = std::ptrdiff_t; using hasher = Hash; using key_equal = KeyEqual; using allocator_type = Allocator; using reference = value_type&; using const_reference = value_type const&; using pointer = typename boost::allocator_pointer<allocator_type>::type; using const_pointer = typename boost::allocator_const_pointer<allocator_type>::type; using iterator = typename table_type::iterator; using const_iterator = typename table_type::const_iterator; using node_type = detail::node_set_handle<set_types, typename boost::allocator_rebind<Allocator, typename set_types::value_type>::type>; using insert_return_type = detail::foa::insert_return_type<iterator, node_type>; unordered_node_set() : unordered_node_set(0) {} explicit unordered_node_set(size_type n, hasher const& h = hasher(), key_equal const& pred = key_equal(), allocator_type const& a = allocator_type()) : table_(n, h, pred, a) { } unordered_node_set(size_type n, allocator_type const& a) : unordered_node_set(n, hasher(), key_equal(), a) { } unordered_node_set(size_type n, hasher const& h, allocator_type const& a) : unordered_node_set(n, h, key_equal(), a) { } template <class InputIterator> unordered_node_set( InputIterator f, InputIterator l, allocator_type 
const& a) : unordered_node_set(f, l, size_type(0), hasher(), key_equal(), a) { } explicit unordered_node_set(allocator_type const& a) : unordered_node_set(0, a) { } template <class Iterator> unordered_node_set(Iterator first, Iterator last, size_type n = 0, hasher const& h = hasher(), key_equal const& pred = key_equal(), allocator_type const& a = allocator_type()) : unordered_node_set(n, h, pred, a) { this->insert(first, last); } template <class InputIt> unordered_node_set( InputIt first, InputIt last, size_type n, allocator_type const& a) : unordered_node_set(first, last, n, hasher(), key_equal(), a) { } template <class Iterator> unordered_node_set(Iterator first, Iterator last, size_type n, hasher const& h, allocator_type const& a) : unordered_node_set(first, last, n, h, key_equal(), a) { } unordered_node_set(unordered_node_set const& other) : table_(other.table_) { } unordered_node_set( unordered_node_set const& other, allocator_type const& a) : table_(other.table_, a) { } unordered_node_set(unordered_node_set&& other) noexcept(std::is_nothrow_move_constructible<table_type>::value) : table_(std::move(other.table_)) { } unordered_node_set(unordered_node_set&& other, allocator_type const& al) : table_(std::move(other.table_), al) { } unordered_node_set(std::initializer_list<value_type> ilist, size_type n = 0, hasher const& h = hasher(), key_equal const& pred = key_equal(), allocator_type const& a = allocator_type()) : unordered_node_set(ilist.begin(), ilist.end(), n, h, pred, a) { } unordered_node_set( std::initializer_list<value_type> il, allocator_type const& a) : unordered_node_set(il, size_type(0), hasher(), key_equal(), a) { } unordered_node_set(std::initializer_list<value_type> init, size_type n, allocator_type const& a) : unordered_node_set(init, n, hasher(), key_equal(), a) { } unordered_node_set(std::initializer_list<value_type> init, size_type n, hasher const& h, allocator_type const& a) : unordered_node_set(init, n, h, key_equal(), a) { } 
~unordered_node_set() = default; unordered_node_set& operator=(unordered_node_set const& other) { table_ = other.table_; return *this; } unordered_node_set& operator=(unordered_node_set&& other) noexcept( noexcept(std::declval<table_type&>() = std::declval<table_type&&>())) { table_ = std::move(other.table_); return *this; } allocator_type get_allocator() const noexcept { return table_.get_allocator(); } /// Iterators /// iterator begin() noexcept { return table_.begin(); } const_iterator begin() const noexcept { return table_.begin(); } const_iterator cbegin() const noexcept { return table_.cbegin(); } iterator end() noexcept { return table_.end(); } const_iterator end() const noexcept { return table_.end(); } const_iterator cend() const noexcept { return table_.cend(); } /// Capacity /// BOOST_ATTRIBUTE_NODISCARD bool empty() const noexcept { return table_.empty(); } size_type size() const noexcept { return table_.size(); } size_type max_size() const noexcept { return table_.max_size(); } /// Modifiers /// void clear() noexcept { table_.clear(); } BOOST_FORCEINLINE std::pair<iterator, bool> insert( value_type const& value) { return table_.insert(value); } BOOST_FORCEINLINE std::pair<iterator, bool> insert(value_type&& value) { return table_.insert(std::move(value)); } template <class K> BOOST_FORCEINLINE typename std::enable_if< detail::transparent_non_iterable<K, unordered_node_set>::value, std::pair<iterator, bool> >::type insert(K&& k) { return table_.try_emplace(std::forward<K>(k)); } BOOST_FORCEINLINE iterator insert(const_iterator, value_type const& value) { return table_.insert(value).first; } BOOST_FORCEINLINE iterator insert(const_iterator, value_type&& value) { return table_.insert(std::move(value)).first; } template <class K> BOOST_FORCEINLINE typename std::enable_if< detail::transparent_non_iterable<K, unordered_node_set>::value, iterator>::type insert(const_iterator, K&& k) { return table_.try_emplace(std::forward<K>(k)).first; } template <class 
InputIterator> void insert(InputIterator first, InputIterator last) { for (auto pos = first; pos != last; ++pos) { table_.emplace(*pos); } } void insert(std::initializer_list<value_type> ilist) { this->insert(ilist.begin(), ilist.end()); } insert_return_type insert(node_type&& nh) { if (nh.empty()) { return {end(), false, node_type{}}; } BOOST_ASSERT(get_allocator() == nh.get_allocator()); auto itp = table_.insert(std::move(nh.element())); if (itp.second) { nh.reset(); return {itp.first, true, node_type{}}; } else { return {itp.first, false, std::move(nh)}; } } iterator insert(const_iterator, node_type&& nh) { if (nh.empty()) { return end(); } BOOST_ASSERT(get_allocator() == nh.get_allocator()); auto itp = table_.insert(std::move(nh.element())); if (itp.second) { nh.reset(); return itp.first; } else { return itp.first; } } template <class... Args> BOOST_FORCEINLINE std::pair<iterator, bool> emplace(Args&&... args) { return table_.emplace(std::forward<Args>(args)...); } template <class... Args> BOOST_FORCEINLINE iterator emplace_hint(const_iterator, Args&&... 
args) { return table_.emplace(std::forward<Args>(args)...).first; } BOOST_FORCEINLINE typename table_type::erase_return_type erase( const_iterator pos) { return table_.erase(pos); } iterator erase(const_iterator first, const_iterator last) { while (first != last) { this->erase(first++); } return iterator{detail::foa::const_iterator_cast_tag{}, last}; } BOOST_FORCEINLINE size_type erase(key_type const& key) { return table_.erase(key); } template <class K> BOOST_FORCEINLINE typename std::enable_if< detail::transparent_non_iterable<K, unordered_node_set>::value, size_type>::type erase(K const& key) { return table_.erase(key); } void swap(unordered_node_set& rhs) noexcept( noexcept(std::declval<table_type&>().swap(std::declval<table_type&>()))) { table_.swap(rhs.table_); } node_type extract(const_iterator pos) { BOOST_ASSERT(pos != end()); node_type nh; auto elem = table_.extract(pos); nh.emplace(std::move(elem), get_allocator()); return nh; } node_type extract(key_type const& key) { auto pos = find(key); return pos != end() ? extract(pos) : node_type(); } template <class K> typename std::enable_if< boost::unordered::detail::transparent_non_iterable<K, unordered_node_set>::value, node_type>::type extract(K const& key) { auto pos = find(key); return pos != end() ? extract(pos) : node_type(); } template <class H2, class P2> void merge(unordered_node_set<key_type, H2, P2, allocator_type>& source) { BOOST_ASSERT(get_allocator() == source.get_allocator()); table_.merge(source.table_); } template <class H2, class P2> void merge(unordered_node_set<key_type, H2, P2, allocator_type>&& source) { BOOST_ASSERT(get_allocator() == source.get_allocator()); table_.merge(std::move(source.table_)); } /// Lookup /// BOOST_FORCEINLINE size_type count(key_type const& key) const { auto pos = table_.find(key); return pos != table_.end() ? 
1 : 0; } template <class K> BOOST_FORCEINLINE typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value, size_type>::type count(K const& key) const { auto pos = table_.find(key); return pos != table_.end() ? 1 : 0; } BOOST_FORCEINLINE iterator find(key_type const& key) { return table_.find(key); } BOOST_FORCEINLINE const_iterator find(key_type const& key) const { return table_.find(key); } template <class K> BOOST_FORCEINLINE typename std::enable_if< boost::unordered::detail::are_transparent<K, hasher, key_equal>::value, iterator>::type find(K const& key) { return table_.find(key); } template <class K> BOOST_FORCEINLINE typename std::enable_if< boost::unordered::detail::are_transparent<K, hasher, key_equal>::value, const_iterator>::type find(K const& key) const { return table_.find(key); } BOOST_FORCEINLINE bool contains(key_type const& key) const { return this->find(key) != this->end(); } template <class K> BOOST_FORCEINLINE typename std::enable_if< boost::unordered::detail::are_transparent<K, hasher, key_equal>::value, bool>::type contains(K const& key) const { return this->find(key) != this->end(); } std::pair<iterator, iterator> equal_range(key_type const& key) { auto pos = table_.find(key); if (pos == table_.end()) { return {pos, pos}; } auto next = pos; ++next; return {pos, next}; } std::pair<const_iterator, const_iterator> equal_range( key_type const& key) const { auto pos = table_.find(key); if (pos == table_.end()) { return {pos, pos}; } auto next = pos; ++next; return {pos, next}; } template <class K> typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value, std::pair<iterator, iterator> >::type equal_range(K const& key) { auto pos = table_.find(key); if (pos == table_.end()) { return {pos, pos}; } auto next = pos; ++next; return {pos, next}; } template <class K> typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value, std::pair<const_iterator, const_iterator> >::type equal_range(K const& key) 
const { auto pos = table_.find(key); if (pos == table_.end()) { return {pos, pos}; } auto next = pos; ++next; return {pos, next}; } /// Hash Policy /// size_type bucket_count() const noexcept { return table_.capacity(); } float load_factor() const noexcept { return table_.load_factor(); } float max_load_factor() const noexcept { return table_.max_load_factor(); } void max_load_factor(float) {} size_type max_load() const noexcept { return table_.max_load(); } void rehash(size_type n) { table_.rehash(n); } void reserve(size_type n) { table_.reserve(n); } /// Observers /// hasher hash_function() const { return table_.hash_function(); } key_equal key_eq() const { return table_.key_eq(); } }; template <class Key, class Hash, class KeyEqual, class Allocator> bool operator==( unordered_node_set<Key, Hash, KeyEqual, Allocator> const& lhs, unordered_node_set<Key, Hash, KeyEqual, Allocator> const& rhs) { return lhs.table_ == rhs.table_; } template <class Key, class Hash, class KeyEqual, class Allocator> bool operator!=( unordered_node_set<Key, Hash, KeyEqual, Allocator> const& lhs, unordered_node_set<Key, Hash, KeyEqual, Allocator> const& rhs) { return !(lhs == rhs); } template <class Key, class Hash, class KeyEqual, class Allocator> void swap(unordered_node_set<Key, Hash, KeyEqual, Allocator>& lhs, unordered_node_set<Key, Hash, KeyEqual, Allocator>& rhs) noexcept(noexcept(lhs.swap(rhs))) { lhs.swap(rhs); } template <class Key, class Hash, class KeyEqual, class Allocator, class Pred> typename unordered_node_set<Key, Hash, KeyEqual, Allocator>::size_type erase_if(unordered_node_set<Key, Hash, KeyEqual, Allocator>& set, Pred pred) { return erase_if(set.table_, pred); } template <class Archive, class Key, class Hash, class KeyEqual, class Allocator> void serialize(Archive& ar, unordered_node_set<Key, Hash, KeyEqual, Allocator>& set, unsigned int version) { detail::serialize_container(ar, set, version); } #if defined(BOOST_MSVC) #pragma warning(pop) /* C4714 */ #endif #if 
BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES template <class InputIterator, class Hash = boost::hash<typename std::iterator_traits<InputIterator>::value_type>, class Pred = std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, class Allocator = std::allocator< typename std::iterator_traits<InputIterator>::value_type>, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_pred_v<Pred> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_node_set(InputIterator, InputIterator, std::size_t = boost::unordered::detail::foa::default_bucket_count, Hash = Hash(), Pred = Pred(), Allocator = Allocator()) -> unordered_node_set< typename std::iterator_traits<InputIterator>::value_type, Hash, Pred, Allocator>; template <class T, class Hash = boost::hash<T>, class Pred = std::equal_to<T>, class Allocator = std::allocator<T>, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_pred_v<Pred> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_node_set(std::initializer_list<T>, std::size_t = boost::unordered::detail::foa::default_bucket_count, Hash = Hash(), Pred = Pred(), Allocator = Allocator()) -> unordered_node_set<T, Hash, Pred, Allocator>; template <class InputIterator, class Allocator, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_node_set(InputIterator, InputIterator, std::size_t, Allocator) -> unordered_node_set< typename std::iterator_traits<InputIterator>::value_type, boost::hash<typename std::iterator_traits<InputIterator>::value_type>, std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, Allocator>; template <class InputIterator, class Hash, class Allocator, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = 
std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_node_set( InputIterator, InputIterator, std::size_t, Hash, Allocator) -> unordered_node_set< typename std::iterator_traits<InputIterator>::value_type, Hash, std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, Allocator>; template <class T, class Allocator, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_node_set(std::initializer_list<T>, std::size_t, Allocator) -> unordered_node_set<T, boost::hash<T>, std::equal_to<T>, Allocator>; template <class T, class Hash, class Allocator, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_node_set(std::initializer_list<T>, std::size_t, Hash, Allocator) -> unordered_node_set<T, Hash, std::equal_to<T>, Allocator>; template <class InputIterator, class Allocator, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_node_set(InputIterator, InputIterator, Allocator) -> unordered_node_set< typename std::iterator_traits<InputIterator>::value_type, boost::hash<typename std::iterator_traits<InputIterator>::value_type>, std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, Allocator>; template <class T, class Allocator, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_node_set(std::initializer_list<T>, Allocator) -> unordered_node_set<T, boost::hash<T>, std::equal_to<T>, Allocator>; #endif } // namespace unordered } // namespace boost #endif
0
repos/unordered/include/boost
repos/unordered/include/boost/unordered/unordered_flat_map.hpp
// Copyright (C) 2022-2023 Christian Mazakas // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_UNORDERED_FLAT_MAP_HPP_INCLUDED #define BOOST_UNORDERED_UNORDERED_FLAT_MAP_HPP_INCLUDED #include <boost/config.hpp> #if defined(BOOST_HAS_PRAGMA_ONCE) #pragma once #endif #include <boost/unordered/concurrent_flat_map_fwd.hpp> #include <boost/unordered/detail/foa/flat_map_types.hpp> #include <boost/unordered/detail/foa/table.hpp> #include <boost/unordered/detail/serialize_container.hpp> #include <boost/unordered/detail/throw_exception.hpp> #include <boost/unordered/detail/type_traits.hpp> #include <boost/unordered/unordered_flat_map_fwd.hpp> #include <boost/core/allocator_access.hpp> #include <boost/container_hash/hash.hpp> #include <initializer_list> #include <iterator> #include <stdexcept> #include <type_traits> #include <utility> namespace boost { namespace unordered { #if defined(BOOST_MSVC) #pragma warning(push) #pragma warning(disable : 4714) /* marked as __forceinline not inlined */ #endif template <class Key, class T, class Hash, class KeyEqual, class Allocator> class unordered_flat_map { template <class Key2, class T2, class Hash2, class Pred2, class Allocator2> friend class concurrent_flat_map; using map_types = detail::foa::flat_map_types<Key, T>; using table_type = detail::foa::table<map_types, Hash, KeyEqual, typename boost::allocator_rebind<Allocator, typename map_types::value_type>::type>; table_type table_; template <class K, class V, class H, class KE, class A> bool friend operator==(unordered_flat_map<K, V, H, KE, A> const& lhs, unordered_flat_map<K, V, H, KE, A> const& rhs); template <class K, class V, class H, class KE, class A, class Pred> typename unordered_flat_map<K, V, H, KE, A>::size_type friend erase_if( unordered_flat_map<K, V, H, KE, A>& set, Pred pred); public: using key_type = Key; using mapped_type = T; using 
value_type = typename map_types::value_type; using init_type = typename map_types::init_type; using size_type = std::size_t; using difference_type = std::ptrdiff_t; using hasher = typename boost::unordered::detail::type_identity<Hash>::type; using key_equal = typename boost::unordered::detail::type_identity<KeyEqual>::type; using allocator_type = typename boost::unordered::detail::type_identity<Allocator>::type; using reference = value_type&; using const_reference = value_type const&; using pointer = typename boost::allocator_pointer<allocator_type>::type; using const_pointer = typename boost::allocator_const_pointer<allocator_type>::type; using iterator = typename table_type::iterator; using const_iterator = typename table_type::const_iterator; unordered_flat_map() : unordered_flat_map(0) {} explicit unordered_flat_map(size_type n, hasher const& h = hasher(), key_equal const& pred = key_equal(), allocator_type const& a = allocator_type()) : table_(n, h, pred, a) { } unordered_flat_map(size_type n, allocator_type const& a) : unordered_flat_map(n, hasher(), key_equal(), a) { } unordered_flat_map(size_type n, hasher const& h, allocator_type const& a) : unordered_flat_map(n, h, key_equal(), a) { } template <class InputIterator> unordered_flat_map( InputIterator f, InputIterator l, allocator_type const& a) : unordered_flat_map(f, l, size_type(0), hasher(), key_equal(), a) { } explicit unordered_flat_map(allocator_type const& a) : unordered_flat_map(0, a) { } template <class Iterator> unordered_flat_map(Iterator first, Iterator last, size_type n = 0, hasher const& h = hasher(), key_equal const& pred = key_equal(), allocator_type const& a = allocator_type()) : unordered_flat_map(n, h, pred, a) { this->insert(first, last); } template <class Iterator> unordered_flat_map( Iterator first, Iterator last, size_type n, allocator_type const& a) : unordered_flat_map(first, last, n, hasher(), key_equal(), a) { } template <class Iterator> unordered_flat_map(Iterator first, Iterator 
last, size_type n, hasher const& h, allocator_type const& a) : unordered_flat_map(first, last, n, h, key_equal(), a) { } unordered_flat_map(unordered_flat_map const& other) : table_(other.table_) { } unordered_flat_map( unordered_flat_map const& other, allocator_type const& a) : table_(other.table_, a) { } unordered_flat_map(unordered_flat_map&& other) noexcept(std::is_nothrow_move_constructible<table_type>::value) : table_(std::move(other.table_)) { } unordered_flat_map(unordered_flat_map&& other, allocator_type const& al) : table_(std::move(other.table_), al) { } unordered_flat_map(std::initializer_list<value_type> ilist, size_type n = 0, hasher const& h = hasher(), key_equal const& pred = key_equal(), allocator_type const& a = allocator_type()) : unordered_flat_map(ilist.begin(), ilist.end(), n, h, pred, a) { } unordered_flat_map( std::initializer_list<value_type> il, allocator_type const& a) : unordered_flat_map(il, size_type(0), hasher(), key_equal(), a) { } unordered_flat_map(std::initializer_list<value_type> init, size_type n, allocator_type const& a) : unordered_flat_map(init, n, hasher(), key_equal(), a) { } unordered_flat_map(std::initializer_list<value_type> init, size_type n, hasher const& h, allocator_type const& a) : unordered_flat_map(init, n, h, key_equal(), a) { } unordered_flat_map( concurrent_flat_map<Key, T, Hash, KeyEqual, Allocator>&& other) : table_(std::move(other.table_)) { } ~unordered_flat_map() = default; unordered_flat_map& operator=(unordered_flat_map const& other) { table_ = other.table_; return *this; } unordered_flat_map& operator=(unordered_flat_map&& other) noexcept( noexcept(std::declval<table_type&>() = std::declval<table_type&&>())) { table_ = std::move(other.table_); return *this; } allocator_type get_allocator() const noexcept { return table_.get_allocator(); } /// Iterators /// iterator begin() noexcept { return table_.begin(); } const_iterator begin() const noexcept { return table_.begin(); } const_iterator cbegin() const 
noexcept { return table_.cbegin(); } iterator end() noexcept { return table_.end(); } const_iterator end() const noexcept { return table_.end(); } const_iterator cend() const noexcept { return table_.cend(); } /// Capacity /// BOOST_ATTRIBUTE_NODISCARD bool empty() const noexcept { return table_.empty(); } size_type size() const noexcept { return table_.size(); } size_type max_size() const noexcept { return table_.max_size(); } /// Modifiers /// void clear() noexcept { table_.clear(); } template <class Ty> BOOST_FORCEINLINE auto insert(Ty&& value) -> decltype(table_.insert(std::forward<Ty>(value))) { return table_.insert(std::forward<Ty>(value)); } BOOST_FORCEINLINE std::pair<iterator, bool> insert(init_type&& value) { return table_.insert(std::move(value)); } template <class Ty> BOOST_FORCEINLINE auto insert(const_iterator, Ty&& value) -> decltype(table_.insert(std::forward<Ty>(value)).first) { return table_.insert(std::forward<Ty>(value)).first; } BOOST_FORCEINLINE iterator insert(const_iterator, init_type&& value) { return table_.insert(std::move(value)).first; } template <class InputIterator> BOOST_FORCEINLINE void insert(InputIterator first, InputIterator last) { for (auto pos = first; pos != last; ++pos) { table_.emplace(*pos); } } void insert(std::initializer_list<value_type> ilist) { this->insert(ilist.begin(), ilist.end()); } template <class M> std::pair<iterator, bool> insert_or_assign(key_type const& key, M&& obj) { auto ibp = table_.try_emplace(key, std::forward<M>(obj)); if (ibp.second) { return ibp; } ibp.first->second = std::forward<M>(obj); return ibp; } template <class M> std::pair<iterator, bool> insert_or_assign(key_type&& key, M&& obj) { auto ibp = table_.try_emplace(std::move(key), std::forward<M>(obj)); if (ibp.second) { return ibp; } ibp.first->second = std::forward<M>(obj); return ibp; } template <class K, class M> typename std::enable_if< boost::unordered::detail::are_transparent<K, hasher, key_equal>::value, std::pair<iterator, bool> 
>::type insert_or_assign(K&& k, M&& obj) { auto ibp = table_.try_emplace(std::forward<K>(k), std::forward<M>(obj)); if (ibp.second) { return ibp; } ibp.first->second = std::forward<M>(obj); return ibp; } template <class M> iterator insert_or_assign(const_iterator, key_type const& key, M&& obj) { return this->insert_or_assign(key, std::forward<M>(obj)).first; } template <class M> iterator insert_or_assign(const_iterator, key_type&& key, M&& obj) { return this->insert_or_assign(std::move(key), std::forward<M>(obj)) .first; } template <class K, class M> typename std::enable_if< boost::unordered::detail::are_transparent<K, hasher, key_equal>::value, iterator>::type insert_or_assign(const_iterator, K&& k, M&& obj) { return this->insert_or_assign(std::forward<K>(k), std::forward<M>(obj)) .first; } template <class... Args> BOOST_FORCEINLINE std::pair<iterator, bool> emplace(Args&&... args) { return table_.emplace(std::forward<Args>(args)...); } template <class... Args> BOOST_FORCEINLINE iterator emplace_hint(const_iterator, Args&&... args) { return table_.emplace(std::forward<Args>(args)...).first; } template <class... Args> BOOST_FORCEINLINE std::pair<iterator, bool> try_emplace( key_type const& key, Args&&... args) { return table_.try_emplace(key, std::forward<Args>(args)...); } template <class... Args> BOOST_FORCEINLINE std::pair<iterator, bool> try_emplace( key_type&& key, Args&&... args) { return table_.try_emplace(std::move(key), std::forward<Args>(args)...); } template <class K, class... Args> BOOST_FORCEINLINE typename std::enable_if< boost::unordered::detail::transparent_non_iterable<K, unordered_flat_map>::value, std::pair<iterator, bool> >::type try_emplace(K&& key, Args&&... args) { return table_.try_emplace( std::forward<K>(key), std::forward<Args>(args)...); } template <class... Args> BOOST_FORCEINLINE iterator try_emplace( const_iterator, key_type const& key, Args&&... 
args) { return table_.try_emplace(key, std::forward<Args>(args)...).first; } template <class... Args> BOOST_FORCEINLINE iterator try_emplace( const_iterator, key_type&& key, Args&&... args) { return table_.try_emplace(std::move(key), std::forward<Args>(args)...) .first; } template <class K, class... Args> BOOST_FORCEINLINE typename std::enable_if< boost::unordered::detail::transparent_non_iterable<K, unordered_flat_map>::value, iterator>::type try_emplace(const_iterator, K&& key, Args&&... args) { return table_ .try_emplace(std::forward<K>(key), std::forward<Args>(args)...) .first; } BOOST_FORCEINLINE typename table_type::erase_return_type erase( iterator pos) { return table_.erase(pos); } BOOST_FORCEINLINE typename table_type::erase_return_type erase( const_iterator pos) { return table_.erase(pos); } iterator erase(const_iterator first, const_iterator last) { while (first != last) { this->erase(first++); } return iterator{detail::foa::const_iterator_cast_tag{}, last}; } BOOST_FORCEINLINE size_type erase(key_type const& key) { return table_.erase(key); } template <class K> BOOST_FORCEINLINE typename std::enable_if< detail::transparent_non_iterable<K, unordered_flat_map>::value, size_type>::type erase(K const& key) { return table_.erase(key); } void swap(unordered_flat_map& rhs) noexcept( noexcept(std::declval<table_type&>().swap(std::declval<table_type&>()))) { table_.swap(rhs.table_); } template <class H2, class P2> void merge( unordered_flat_map<key_type, mapped_type, H2, P2, allocator_type>& source) { table_.merge(source.table_); } template <class H2, class P2> void merge( unordered_flat_map<key_type, mapped_type, H2, P2, allocator_type>&& source) { table_.merge(std::move(source.table_)); } /// Lookup /// mapped_type& at(key_type const& key) { auto pos = table_.find(key); if (pos != table_.end()) { return pos->second; } // TODO: someday refactor this to conditionally serialize the key and // include it in the error message // 
boost::unordered::detail::throw_out_of_range( "key was not found in unordered_flat_map"); } mapped_type const& at(key_type const& key) const { auto pos = table_.find(key); if (pos != table_.end()) { return pos->second; } boost::unordered::detail::throw_out_of_range( "key was not found in unordered_flat_map"); } template <class K> typename std::enable_if< boost::unordered::detail::are_transparent<K, hasher, key_equal>::value, mapped_type&>::type at(K&& key) { auto pos = table_.find(std::forward<K>(key)); if (pos != table_.end()) { return pos->second; } boost::unordered::detail::throw_out_of_range( "key was not found in unordered_flat_map"); } template <class K> typename std::enable_if< boost::unordered::detail::are_transparent<K, hasher, key_equal>::value, mapped_type const&>::type at(K&& key) const { auto pos = table_.find(std::forward<K>(key)); if (pos != table_.end()) { return pos->second; } boost::unordered::detail::throw_out_of_range( "key was not found in unordered_flat_map"); } BOOST_FORCEINLINE mapped_type& operator[](key_type const& key) { return table_.try_emplace(key).first->second; } BOOST_FORCEINLINE mapped_type& operator[](key_type&& key) { return table_.try_emplace(std::move(key)).first->second; } template <class K> typename std::enable_if< boost::unordered::detail::are_transparent<K, hasher, key_equal>::value, mapped_type&>::type operator[](K&& key) { return table_.try_emplace(std::forward<K>(key)).first->second; } BOOST_FORCEINLINE size_type count(key_type const& key) const { auto pos = table_.find(key); return pos != table_.end() ? 1 : 0; } template <class K> BOOST_FORCEINLINE typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value, size_type>::type count(K const& key) const { auto pos = table_.find(key); return pos != table_.end() ? 
1 : 0; } BOOST_FORCEINLINE iterator find(key_type const& key) { return table_.find(key); } BOOST_FORCEINLINE const_iterator find(key_type const& key) const { return table_.find(key); } template <class K> BOOST_FORCEINLINE typename std::enable_if< boost::unordered::detail::are_transparent<K, hasher, key_equal>::value, iterator>::type find(K const& key) { return table_.find(key); } template <class K> BOOST_FORCEINLINE typename std::enable_if< boost::unordered::detail::are_transparent<K, hasher, key_equal>::value, const_iterator>::type find(K const& key) const { return table_.find(key); } BOOST_FORCEINLINE bool contains(key_type const& key) const { return this->find(key) != this->end(); } template <class K> BOOST_FORCEINLINE typename std::enable_if< boost::unordered::detail::are_transparent<K, hasher, key_equal>::value, bool>::type contains(K const& key) const { return this->find(key) != this->end(); } std::pair<iterator, iterator> equal_range(key_type const& key) { auto pos = table_.find(key); if (pos == table_.end()) { return {pos, pos}; } auto next = pos; ++next; return {pos, next}; } std::pair<const_iterator, const_iterator> equal_range( key_type const& key) const { auto pos = table_.find(key); if (pos == table_.end()) { return {pos, pos}; } auto next = pos; ++next; return {pos, next}; } template <class K> typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value, std::pair<iterator, iterator> >::type equal_range(K const& key) { auto pos = table_.find(key); if (pos == table_.end()) { return {pos, pos}; } auto next = pos; ++next; return {pos, next}; } template <class K> typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value, std::pair<const_iterator, const_iterator> >::type equal_range(K const& key) const { auto pos = table_.find(key); if (pos == table_.end()) { return {pos, pos}; } auto next = pos; ++next; return {pos, next}; } /// Hash Policy /// size_type bucket_count() const noexcept { return table_.capacity(); } float 
load_factor() const noexcept { return table_.load_factor(); } float max_load_factor() const noexcept { return table_.max_load_factor(); } void max_load_factor(float) {} size_type max_load() const noexcept { return table_.max_load(); } void rehash(size_type n) { table_.rehash(n); } void reserve(size_type n) { table_.reserve(n); } /// Observers /// hasher hash_function() const { return table_.hash_function(); } key_equal key_eq() const { return table_.key_eq(); } }; template <class Key, class T, class Hash, class KeyEqual, class Allocator> bool operator==( unordered_flat_map<Key, T, Hash, KeyEqual, Allocator> const& lhs, unordered_flat_map<Key, T, Hash, KeyEqual, Allocator> const& rhs) { return lhs.table_ == rhs.table_; } template <class Key, class T, class Hash, class KeyEqual, class Allocator> bool operator!=( unordered_flat_map<Key, T, Hash, KeyEqual, Allocator> const& lhs, unordered_flat_map<Key, T, Hash, KeyEqual, Allocator> const& rhs) { return !(lhs == rhs); } template <class Key, class T, class Hash, class KeyEqual, class Allocator> void swap(unordered_flat_map<Key, T, Hash, KeyEqual, Allocator>& lhs, unordered_flat_map<Key, T, Hash, KeyEqual, Allocator>& rhs) noexcept(noexcept(lhs.swap(rhs))) { lhs.swap(rhs); } template <class Key, class T, class Hash, class KeyEqual, class Allocator, class Pred> typename unordered_flat_map<Key, T, Hash, KeyEqual, Allocator>::size_type erase_if( unordered_flat_map<Key, T, Hash, KeyEqual, Allocator>& map, Pred pred) { return erase_if(map.table_, pred); } template <class Archive, class Key, class T, class Hash, class KeyEqual, class Allocator> void serialize(Archive& ar, unordered_flat_map<Key, T, Hash, KeyEqual, Allocator>& map, unsigned int version) { detail::serialize_container(ar, map, version); } #if defined(BOOST_MSVC) #pragma warning(pop) /* C4714 */ #endif #if BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES template <class InputIterator, class Hash = boost::hash<boost::unordered::detail::iter_key_t<InputIterator> >, class 
Pred = std::equal_to<boost::unordered::detail::iter_key_t<InputIterator> >, class Allocator = std::allocator< boost::unordered::detail::iter_to_alloc_t<InputIterator> >, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_pred_v<Pred> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_flat_map(InputIterator, InputIterator, std::size_t = boost::unordered::detail::foa::default_bucket_count, Hash = Hash(), Pred = Pred(), Allocator = Allocator()) -> unordered_flat_map<boost::unordered::detail::iter_key_t<InputIterator>, boost::unordered::detail::iter_val_t<InputIterator>, Hash, Pred, Allocator>; template <class Key, class T, class Hash = boost::hash<std::remove_const_t<Key> >, class Pred = std::equal_to<std::remove_const_t<Key> >, class Allocator = std::allocator<std::pair<const Key, T> >, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_pred_v<Pred> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_flat_map(std::initializer_list<std::pair<Key, T> >, std::size_t = boost::unordered::detail::foa::default_bucket_count, Hash = Hash(), Pred = Pred(), Allocator = Allocator()) -> unordered_flat_map<std::remove_const_t<Key>, T, Hash, Pred, Allocator>; template <class InputIterator, class Allocator, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_flat_map(InputIterator, InputIterator, std::size_t, Allocator) -> unordered_flat_map<boost::unordered::detail::iter_key_t<InputIterator>, boost::unordered::detail::iter_val_t<InputIterator>, boost::hash<boost::unordered::detail::iter_key_t<InputIterator> >, std::equal_to<boost::unordered::detail::iter_key_t<InputIterator> >, Allocator>; template <class InputIterator, class Allocator, class = 
std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_flat_map(InputIterator, InputIterator, Allocator) -> unordered_flat_map<boost::unordered::detail::iter_key_t<InputIterator>, boost::unordered::detail::iter_val_t<InputIterator>, boost::hash<boost::unordered::detail::iter_key_t<InputIterator> >, std::equal_to<boost::unordered::detail::iter_key_t<InputIterator> >, Allocator>; template <class InputIterator, class Hash, class Allocator, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_flat_map( InputIterator, InputIterator, std::size_t, Hash, Allocator) -> unordered_flat_map<boost::unordered::detail::iter_key_t<InputIterator>, boost::unordered::detail::iter_val_t<InputIterator>, Hash, std::equal_to<boost::unordered::detail::iter_key_t<InputIterator> >, Allocator>; template <class Key, class T, class Allocator, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_flat_map(std::initializer_list<std::pair<Key, T> >, std::size_t, Allocator) -> unordered_flat_map<std::remove_const_t<Key>, T, boost::hash<std::remove_const_t<Key> >, std::equal_to<std::remove_const_t<Key> >, Allocator>; template <class Key, class T, class Allocator, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_flat_map(std::initializer_list<std::pair<Key, T> >, Allocator) -> unordered_flat_map<std::remove_const_t<Key>, T, boost::hash<std::remove_const_t<Key> >, std::equal_to<std::remove_const_t<Key> >, Allocator>; template <class Key, class T, class Hash, class Allocator, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_flat_map(std::initializer_list<std::pair<Key, T> >, std::size_t, Hash, Allocator) -> unordered_flat_map<std::remove_const_t<Key>, T, Hash, 
std::equal_to<std::remove_const_t<Key> >, Allocator>; #endif } // namespace unordered } // namespace boost #endif
0
repos/unordered/include/boost
repos/unordered/include/boost/unordered/concurrent_flat_set.hpp
/* Fast open-addressing concurrent hashset. * * Copyright 2023 Christian Mazakas. * Copyright 2023 Joaquin M Lopez Munoz. * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) * * See https://www.boost.org/libs/unordered for library home page. */ #ifndef BOOST_UNORDERED_CONCURRENT_FLAT_SET_HPP #define BOOST_UNORDERED_CONCURRENT_FLAT_SET_HPP #include <boost/unordered/concurrent_flat_set_fwd.hpp> #include <boost/unordered/detail/concurrent_static_asserts.hpp> #include <boost/unordered/detail/foa/concurrent_table.hpp> #include <boost/unordered/detail/foa/flat_set_types.hpp> #include <boost/unordered/detail/type_traits.hpp> #include <boost/unordered/unordered_flat_set_fwd.hpp> #include <boost/container_hash/hash.hpp> #include <boost/core/allocator_access.hpp> #include <boost/core/serialization.hpp> #include <utility> namespace boost { namespace unordered { template <class Key, class Hash, class Pred, class Allocator> class concurrent_flat_set { private: template <class Key2, class Hash2, class Pred2, class Allocator2> friend class concurrent_flat_set; template <class Key2, class Hash2, class Pred2, class Allocator2> friend class unordered_flat_set; using type_policy = detail::foa::flat_set_types<Key>; using table_type = detail::foa::concurrent_table<type_policy, Hash, Pred, Allocator>; table_type table_; template <class K, class H, class KE, class A> bool friend operator==(concurrent_flat_set<K, H, KE, A> const& lhs, concurrent_flat_set<K, H, KE, A> const& rhs); template <class K, class H, class KE, class A, class Predicate> friend typename concurrent_flat_set<K, H, KE, A>::size_type erase_if( concurrent_flat_set<K, H, KE, A>& set, Predicate pred); template<class Archive, class K, class H, class KE, class A> friend void serialize( Archive& ar, concurrent_flat_set<K, H, KE, A>& c, unsigned int version); public: using key_type = Key; using value_type = typename 
type_policy::value_type; using init_type = typename type_policy::init_type; using size_type = std::size_t; using difference_type = std::ptrdiff_t; using hasher = typename boost::unordered::detail::type_identity<Hash>::type; using key_equal = typename boost::unordered::detail::type_identity<Pred>::type; using allocator_type = typename boost::unordered::detail::type_identity<Allocator>::type; using reference = value_type&; using const_reference = value_type const&; using pointer = typename boost::allocator_pointer<allocator_type>::type; using const_pointer = typename boost::allocator_const_pointer<allocator_type>::type; static constexpr size_type bulk_visit_size = table_type::bulk_visit_size; concurrent_flat_set() : concurrent_flat_set(detail::foa::default_bucket_count) { } explicit concurrent_flat_set(size_type n, const hasher& hf = hasher(), const key_equal& eql = key_equal(), const allocator_type& a = allocator_type()) : table_(n, hf, eql, a) { } template <class InputIterator> concurrent_flat_set(InputIterator f, InputIterator l, size_type n = detail::foa::default_bucket_count, const hasher& hf = hasher(), const key_equal& eql = key_equal(), const allocator_type& a = allocator_type()) : table_(n, hf, eql, a) { this->insert(f, l); } concurrent_flat_set(concurrent_flat_set const& rhs) : table_(rhs.table_, boost::allocator_select_on_container_copy_construction( rhs.get_allocator())) { } concurrent_flat_set(concurrent_flat_set&& rhs) : table_(std::move(rhs.table_)) { } template <class InputIterator> concurrent_flat_set( InputIterator f, InputIterator l, allocator_type const& a) : concurrent_flat_set(f, l, 0, hasher(), key_equal(), a) { } explicit concurrent_flat_set(allocator_type const& a) : table_(detail::foa::default_bucket_count, hasher(), key_equal(), a) { } concurrent_flat_set( concurrent_flat_set const& rhs, allocator_type const& a) : table_(rhs.table_, a) { } concurrent_flat_set(concurrent_flat_set&& rhs, allocator_type const& a) : 
table_(std::move(rhs.table_), a) { } concurrent_flat_set(std::initializer_list<value_type> il, size_type n = detail::foa::default_bucket_count, const hasher& hf = hasher(), const key_equal& eql = key_equal(), const allocator_type& a = allocator_type()) : concurrent_flat_set(n, hf, eql, a) { this->insert(il.begin(), il.end()); } concurrent_flat_set(size_type n, const allocator_type& a) : concurrent_flat_set(n, hasher(), key_equal(), a) { } concurrent_flat_set( size_type n, const hasher& hf, const allocator_type& a) : concurrent_flat_set(n, hf, key_equal(), a) { } template <typename InputIterator> concurrent_flat_set( InputIterator f, InputIterator l, size_type n, const allocator_type& a) : concurrent_flat_set(f, l, n, hasher(), key_equal(), a) { } template <typename InputIterator> concurrent_flat_set(InputIterator f, InputIterator l, size_type n, const hasher& hf, const allocator_type& a) : concurrent_flat_set(f, l, n, hf, key_equal(), a) { } concurrent_flat_set( std::initializer_list<value_type> il, const allocator_type& a) : concurrent_flat_set( il, detail::foa::default_bucket_count, hasher(), key_equal(), a) { } concurrent_flat_set(std::initializer_list<value_type> il, size_type n, const allocator_type& a) : concurrent_flat_set(il, n, hasher(), key_equal(), a) { } concurrent_flat_set(std::initializer_list<value_type> il, size_type n, const hasher& hf, const allocator_type& a) : concurrent_flat_set(il, n, hf, key_equal(), a) { } concurrent_flat_set( unordered_flat_set<Key, Hash, Pred, Allocator>&& other) : table_(std::move(other.table_)) { } ~concurrent_flat_set() = default; concurrent_flat_set& operator=(concurrent_flat_set const& rhs) { table_ = rhs.table_; return *this; } concurrent_flat_set& operator=(concurrent_flat_set&& rhs) noexcept(boost::allocator_is_always_equal<Allocator>::type::value || boost::allocator_propagate_on_container_move_assignment< Allocator>::type::value) { table_ = std::move(rhs.table_); return *this; } concurrent_flat_set& 
operator=(std::initializer_list<value_type> ilist) { table_ = ilist; return *this; } /// Capacity /// size_type size() const noexcept { return table_.size(); } size_type max_size() const noexcept { return table_.max_size(); } BOOST_ATTRIBUTE_NODISCARD bool empty() const noexcept { return size() == 0; } template <class F> BOOST_FORCEINLINE size_type visit(key_type const& k, F f) const { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) return table_.visit(k, f); } template <class F> BOOST_FORCEINLINE size_type cvisit(key_type const& k, F f) const { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) return table_.visit(k, f); } template <class K, class F> BOOST_FORCEINLINE typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value, size_type>::type visit(K&& k, F f) const { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) return table_.visit(std::forward<K>(k), f); } template <class K, class F> BOOST_FORCEINLINE typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value, size_type>::type cvisit(K&& k, F f) const { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) return table_.visit(std::forward<K>(k), f); } template<class FwdIterator, class F> BOOST_FORCEINLINE size_t visit(FwdIterator first, FwdIterator last, F f) const { BOOST_UNORDERED_STATIC_ASSERT_BULK_VISIT_ITERATOR(FwdIterator) BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) return table_.visit(first, last, f); } template<class FwdIterator, class F> BOOST_FORCEINLINE size_t cvisit(FwdIterator first, FwdIterator last, F f) const { BOOST_UNORDERED_STATIC_ASSERT_BULK_VISIT_ITERATOR(FwdIterator) BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) return table_.visit(first, last, f); } template <class F> size_type visit_all(F f) const { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) return table_.visit_all(f); } template <class F> size_type cvisit_all(F f) const { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) return table_.cvisit_all(f); } #if 
defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) template <class ExecPolicy, class F> typename std::enable_if<detail::is_execution_policy<ExecPolicy>::value, void>::type visit_all(ExecPolicy&& p, F f) const { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy) table_.visit_all(p, f); } template <class ExecPolicy, class F> typename std::enable_if<detail::is_execution_policy<ExecPolicy>::value, void>::type cvisit_all(ExecPolicy&& p, F f) const { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy) table_.cvisit_all(p, f); } #endif template <class F> bool visit_while(F f) const { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) return table_.visit_while(f); } template <class F> bool cvisit_while(F f) const { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) return table_.cvisit_while(f); } #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) template <class ExecPolicy, class F> typename std::enable_if<detail::is_execution_policy<ExecPolicy>::value, bool>::type visit_while(ExecPolicy&& p, F f) const { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy) return table_.visit_while(p, f); } template <class ExecPolicy, class F> typename std::enable_if<detail::is_execution_policy<ExecPolicy>::value, bool>::type cvisit_while(ExecPolicy&& p, F f) const { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy) return table_.cvisit_while(p, f); } #endif /// Modifiers /// BOOST_FORCEINLINE bool insert(value_type const& obj) { return table_.insert(obj); } BOOST_FORCEINLINE bool insert(value_type&& obj) { return table_.insert(std::move(obj)); } template <class K> BOOST_FORCEINLINE typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value, bool >::type insert(K&& k) { return table_.try_emplace(std::forward<K>(k)); } template <class InputIterator> void insert(InputIterator 
begin, InputIterator end) { for (auto pos = begin; pos != end; ++pos) { table_.emplace(*pos); } } void insert(std::initializer_list<value_type> ilist) { this->insert(ilist.begin(), ilist.end()); } template <class F> BOOST_FORCEINLINE bool insert_or_visit(value_type const& obj, F f) { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) return table_.insert_or_cvisit(obj, f); } template <class F> BOOST_FORCEINLINE bool insert_or_visit(value_type&& obj, F f) { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) return table_.insert_or_cvisit(std::move(obj), f); } template <class K, class F> BOOST_FORCEINLINE typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value, bool >::type insert_or_visit(K&& k, F f) { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) return table_.try_emplace_or_cvisit(std::forward<K>(k), f); } template <class InputIterator, class F> void insert_or_visit(InputIterator first, InputIterator last, F f) { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) for (; first != last; ++first) { table_.emplace_or_cvisit(*first, f); } } template <class F> void insert_or_visit(std::initializer_list<value_type> ilist, F f) { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) this->insert_or_cvisit(ilist.begin(), ilist.end(), f); } template <class F> BOOST_FORCEINLINE bool insert_or_cvisit(value_type const& obj, F f) { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) return table_.insert_or_cvisit(obj, f); } template <class F> BOOST_FORCEINLINE bool insert_or_cvisit(value_type&& obj, F f) { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) return table_.insert_or_cvisit(std::move(obj), f); } template <class K, class F> BOOST_FORCEINLINE typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value, bool >::type insert_or_cvisit(K&& k, F f) { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) return table_.try_emplace_or_cvisit(std::forward<K>(k), f); } template <class InputIterator, class F> void insert_or_cvisit(InputIterator first, 
InputIterator last, F f) { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) for (; first != last; ++first) { table_.emplace_or_cvisit(*first, f); } } template <class F> void insert_or_cvisit(std::initializer_list<value_type> ilist, F f) { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) this->insert_or_cvisit(ilist.begin(), ilist.end(), f); } template <class... Args> BOOST_FORCEINLINE bool emplace(Args&&... args) { return table_.emplace(std::forward<Args>(args)...); } template <class Arg, class... Args> BOOST_FORCEINLINE bool emplace_or_visit(Arg&& arg, Args&&... args) { BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_CONST_INVOCABLE(Arg, Args...) return table_.emplace_or_cvisit( std::forward<Arg>(arg), std::forward<Args>(args)...); } template <class Arg, class... Args> BOOST_FORCEINLINE bool emplace_or_cvisit(Arg&& arg, Args&&... args) { BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_CONST_INVOCABLE(Arg, Args...) return table_.emplace_or_cvisit( std::forward<Arg>(arg), std::forward<Args>(args)...); } BOOST_FORCEINLINE size_type erase(key_type const& k) { return table_.erase(k); } template <class K> BOOST_FORCEINLINE typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value, size_type>::type erase(K&& k) { return table_.erase(std::forward<K>(k)); } template <class F> BOOST_FORCEINLINE size_type erase_if(key_type const& k, F f) { return table_.erase_if(k, f); } template <class K, class F> BOOST_FORCEINLINE typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value && !detail::is_execution_policy<K>::value, size_type>::type erase_if(K&& k, F f) { return table_.erase_if(std::forward<K>(k), f); } #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) template <class ExecPolicy, class F> typename std::enable_if<detail::is_execution_policy<ExecPolicy>::value, void>::type erase_if(ExecPolicy&& p, F f) { BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy) table_.erase_if(p, f); } #endif template <class F> size_type erase_if(F f) { return 
table_.erase_if(f); } void swap(concurrent_flat_set& other) noexcept( boost::allocator_is_always_equal<Allocator>::type::value || boost::allocator_propagate_on_container_swap<Allocator>::type::value) { return table_.swap(other.table_); } void clear() noexcept { table_.clear(); } template <typename H2, typename P2> size_type merge(concurrent_flat_set<Key, H2, P2, Allocator>& x) { BOOST_ASSERT(get_allocator() == x.get_allocator()); return table_.merge(x.table_); } template <typename H2, typename P2> size_type merge(concurrent_flat_set<Key, H2, P2, Allocator>&& x) { return merge(x); } BOOST_FORCEINLINE size_type count(key_type const& k) const { return table_.count(k); } template <class K> BOOST_FORCEINLINE typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value, size_type>::type count(K const& k) { return table_.count(k); } BOOST_FORCEINLINE bool contains(key_type const& k) const { return table_.contains(k); } template <class K> BOOST_FORCEINLINE typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value, bool>::type contains(K const& k) const { return table_.contains(k); } /// Hash Policy /// size_type bucket_count() const noexcept { return table_.capacity(); } float load_factor() const noexcept { return table_.load_factor(); } float max_load_factor() const noexcept { return table_.max_load_factor(); } void max_load_factor(float) {} size_type max_load() const noexcept { return table_.max_load(); } void rehash(size_type n) { table_.rehash(n); } void reserve(size_type n) { table_.reserve(n); } /// Observers /// allocator_type get_allocator() const noexcept { return table_.get_allocator(); } hasher hash_function() const { return table_.hash_function(); } key_equal key_eq() const { return table_.key_eq(); } }; template <class Key, class Hash, class KeyEqual, class Allocator> bool operator==( concurrent_flat_set<Key, Hash, KeyEqual, Allocator> const& lhs, concurrent_flat_set<Key, Hash, KeyEqual, Allocator> const& rhs) { return 
lhs.table_ == rhs.table_; } template <class Key, class Hash, class KeyEqual, class Allocator> bool operator!=( concurrent_flat_set<Key, Hash, KeyEqual, Allocator> const& lhs, concurrent_flat_set<Key, Hash, KeyEqual, Allocator> const& rhs) { return !(lhs == rhs); } template <class Key, class Hash, class Pred, class Alloc> void swap(concurrent_flat_set<Key, Hash, Pred, Alloc>& x, concurrent_flat_set<Key, Hash, Pred, Alloc>& y) noexcept(noexcept(x.swap(y))) { x.swap(y); } template <class K, class H, class P, class A, class Predicate> typename concurrent_flat_set<K, H, P, A>::size_type erase_if( concurrent_flat_set<K, H, P, A>& c, Predicate pred) { return c.table_.erase_if(pred); } template<class Archive, class K, class H, class KE, class A> void serialize( Archive& ar, concurrent_flat_set<K, H, KE, A>& c, unsigned int) { ar & core::make_nvp("table",c.table_); } #if BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES template <class InputIterator, class Hash = boost::hash<typename std::iterator_traits<InputIterator>::value_type>, class Pred = std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, class Allocator = std::allocator< typename std::iterator_traits<InputIterator>::value_type>, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_pred_v<Pred> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > concurrent_flat_set(InputIterator, InputIterator, std::size_t = boost::unordered::detail::foa::default_bucket_count, Hash = Hash(), Pred = Pred(), Allocator = Allocator()) -> concurrent_flat_set< typename std::iterator_traits<InputIterator>::value_type, Hash, Pred, Allocator>; template <class T, class Hash = boost::hash<T>, class Pred = std::equal_to<T>, class Allocator = std::allocator<T>, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_pred_v<Pred> >, class = 
std::enable_if_t<detail::is_allocator_v<Allocator> > > concurrent_flat_set(std::initializer_list<T>, std::size_t = boost::unordered::detail::foa::default_bucket_count, Hash = Hash(), Pred = Pred(), Allocator = Allocator()) -> concurrent_flat_set< T, Hash, Pred, Allocator>; template <class InputIterator, class Allocator, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > concurrent_flat_set(InputIterator, InputIterator, std::size_t, Allocator) -> concurrent_flat_set< typename std::iterator_traits<InputIterator>::value_type, boost::hash<typename std::iterator_traits<InputIterator>::value_type>, std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, Allocator>; template <class InputIterator, class Allocator, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > concurrent_flat_set(InputIterator, InputIterator, Allocator) -> concurrent_flat_set< typename std::iterator_traits<InputIterator>::value_type, boost::hash<typename std::iterator_traits<InputIterator>::value_type>, std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, Allocator>; template <class InputIterator, class Hash, class Allocator, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > concurrent_flat_set( InputIterator, InputIterator, std::size_t, Hash, Allocator) -> concurrent_flat_set< typename std::iterator_traits<InputIterator>::value_type, Hash, std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, Allocator>; template <class T, class Allocator, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > concurrent_flat_set(std::initializer_list<T>, std::size_t, Allocator) -> concurrent_flat_set<T, boost::hash<T>,std::equal_to<T>, Allocator>; template 
<class T, class Allocator, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > concurrent_flat_set(std::initializer_list<T >, Allocator) -> concurrent_flat_set<T, boost::hash<T>, std::equal_to<T>, Allocator>; template <class T, class Hash, class Allocator, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > concurrent_flat_set(std::initializer_list<T >, std::size_t,Hash, Allocator) -> concurrent_flat_set<T, Hash, std::equal_to<T>, Allocator>; #endif } // namespace unordered } // namespace boost #endif // BOOST_UNORDERED_CONCURRENT_FLAT_SET_HPP
0
repos/unordered/include/boost
repos/unordered/include/boost/unordered/unordered_node_set_fwd.hpp
// Copyright (C) 2023 Christian Mazakas // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_NODE_SET_FWD_HPP_INCLUDED #define BOOST_UNORDERED_NODE_SET_FWD_HPP_INCLUDED #include <boost/config.hpp> #if defined(BOOST_HAS_PRAGMA_ONCE) #pragma once #endif #include <boost/container_hash/hash_fwd.hpp> #include <functional> #include <memory> namespace boost { namespace unordered { template <class Key, class Hash = boost::hash<Key>, class KeyEqual = std::equal_to<Key>, class Allocator = std::allocator<Key> > class unordered_node_set; template <class Key, class Hash, class KeyEqual, class Allocator> bool operator==( unordered_node_set<Key, Hash, KeyEqual, Allocator> const& lhs, unordered_node_set<Key, Hash, KeyEqual, Allocator> const& rhs); template <class Key, class Hash, class KeyEqual, class Allocator> bool operator!=( unordered_node_set<Key, Hash, KeyEqual, Allocator> const& lhs, unordered_node_set<Key, Hash, KeyEqual, Allocator> const& rhs); template <class Key, class Hash, class KeyEqual, class Allocator> void swap(unordered_node_set<Key, Hash, KeyEqual, Allocator>& lhs, unordered_node_set<Key, Hash, KeyEqual, Allocator>& rhs) noexcept(noexcept(lhs.swap(rhs))); } // namespace unordered using boost::unordered::unordered_node_set; } // namespace boost #endif
0
repos/unordered/include/boost
repos/unordered/include/boost/unordered/unordered_node_map.hpp
// Copyright (C) 2022-2023 Christian Mazakas
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// unordered_node_map: hash map built on the open-addressing FOA table with
// node-based element storage (supports extract()/node-handle insert()).
// The container is a thin facade: almost every member forwards to table_.

#ifndef BOOST_UNORDERED_UNORDERED_NODE_MAP_HPP_INCLUDED
#define BOOST_UNORDERED_UNORDERED_NODE_MAP_HPP_INCLUDED

#include <boost/config.hpp>
#if defined(BOOST_HAS_PRAGMA_ONCE)
#pragma once
#endif

#include <boost/unordered/detail/foa/element_type.hpp>
#include <boost/unordered/detail/foa/node_handle.hpp>
#include <boost/unordered/detail/foa/node_map_types.hpp>
#include <boost/unordered/detail/foa/table.hpp>
#include <boost/unordered/detail/serialize_container.hpp>
#include <boost/unordered/detail/throw_exception.hpp>
#include <boost/unordered/detail/type_traits.hpp>
#include <boost/unordered/unordered_node_map_fwd.hpp>

#include <boost/core/allocator_access.hpp>
#include <boost/container_hash/hash.hpp>

#include <initializer_list>
#include <iterator>
#include <stdexcept>
#include <type_traits>
#include <utility>

namespace boost {
  namespace unordered {

#if defined(BOOST_MSVC)
#pragma warning(push)
#pragma warning(disable : 4714) /* marked as __forceinline not inlined */
#endif

    namespace detail {
      // Node handle produced by unordered_node_map::extract() and consumed
      // by the node-handle insert() overloads.  Grants mutable access to the
      // stored key and mapped value while the element is detached from any
      // table.
      template <class TypePolicy, class Allocator>
      struct node_map_handle
          : public detail::foa::node_handle_base<TypePolicy, Allocator>
      {
      private:
        using base_type = detail::foa::node_handle_base<TypePolicy, Allocator>;

        using typename base_type::type_policy;

        // The owning map needs access to the protected base machinery
        // (emplace/reset) when moving elements in and out of handles.
        template <class Key, class T, class Hash, class Pred, class Alloc>
        friend class boost::unordered::unordered_node_map;

      public:
        using key_type = typename TypePolicy::key_type;
        using mapped_type = typename TypePolicy::mapped_type;

        constexpr node_map_handle() noexcept = default;
        node_map_handle(node_map_handle&& nh) noexcept = default;
        node_map_handle& operator=(node_map_handle&&) noexcept = default;

        // Precondition: !empty().  The const_cast exposes the key mutably;
        // the element is detached, so no table invariant can be broken.
        key_type& key() const
        {
          BOOST_ASSERT(!this->empty());
          return const_cast<key_type&>(this->data().first);
        }

        // Precondition: !empty().
        mapped_type& mapped() const
        {
          BOOST_ASSERT(!this->empty());
          return const_cast<mapped_type&>(this->data().second);
        }
      };
    } // namespace detail

    template <class Key, class T, class Hash, class KeyEqual, class Allocator>
    class unordered_node_map
    {
      using map_types = detail::foa::node_map_types<Key, T,
        typename boost::allocator_void_pointer<Allocator>::type>;

      using table_type = detail::foa::table<map_types, Hash, KeyEqual,
        typename boost::allocator_rebind<Allocator,
          std::pair<Key const, T> >::type>;

      // The open-addressing table that implements all container behavior.
      table_type table_;

      template <class K, class V, class H, class KE, class A>
      bool friend operator==(unordered_node_map<K, V, H, KE, A> const& lhs,
        unordered_node_map<K, V, H, KE, A> const& rhs);

      template <class K, class V, class H, class KE, class A, class Pred>
      typename unordered_node_map<K, V, H, KE, A>::size_type friend erase_if(
        unordered_node_map<K, V, H, KE, A>& set, Pred pred);

    public:
      using key_type = Key;
      using mapped_type = T;
      using value_type = typename map_types::value_type;
      using init_type = typename map_types::init_type;
      using size_type = std::size_t;
      using difference_type = std::ptrdiff_t;
      // type_identity wrappers take the template arguments verbatim (no
      // deduction against them in member signatures).
      using hasher =
        typename boost::unordered::detail::type_identity<Hash>::type;
      using key_equal =
        typename boost::unordered::detail::type_identity<KeyEqual>::type;
      using allocator_type =
        typename boost::unordered::detail::type_identity<Allocator>::type;
      using reference = value_type&;
      using const_reference = value_type const&;
      using pointer = typename boost::allocator_pointer<allocator_type>::type;
      using const_pointer =
        typename boost::allocator_const_pointer<allocator_type>::type;
      using iterator = typename table_type::iterator;
      using const_iterator = typename table_type::const_iterator;
      using node_type = detail::node_map_handle<map_types,
        typename boost::allocator_rebind<Allocator,
          typename map_types::value_type>::type>;
      using insert_return_type =
        detail::foa::insert_return_type<iterator, node_type>;

      /// Constructors: every overload delegates towards the
      /// (n, hasher, key_equal, allocator) form.
      ///

      unordered_node_map() : unordered_node_map(0) {}

      explicit unordered_node_map(size_type n, hasher const& h = hasher(),
        key_equal const& pred = key_equal(),
        allocator_type const& a = allocator_type())
          : table_(n, h, pred, a)
      {
      }

      unordered_node_map(size_type n, allocator_type const& a)
          : unordered_node_map(n, hasher(), key_equal(), a)
      {
      }

      unordered_node_map(size_type n, hasher const& h, allocator_type const& a)
          : unordered_node_map(n, h, key_equal(), a)
      {
      }

      template <class InputIterator>
      unordered_node_map(
        InputIterator f, InputIterator l, allocator_type const& a)
          : unordered_node_map(f, l, size_type(0), hasher(), key_equal(), a)
      {
      }

      explicit unordered_node_map(allocator_type const& a)
          : unordered_node_map(0, a)
      {
      }

      // Range constructor: sizes the table first, then copies the range in.
      template <class Iterator>
      unordered_node_map(Iterator first, Iterator last, size_type n = 0,
        hasher const& h = hasher(), key_equal const& pred = key_equal(),
        allocator_type const& a = allocator_type())
          : unordered_node_map(n, h, pred, a)
      {
        this->insert(first, last);
      }

      template <class Iterator>
      unordered_node_map(
        Iterator first, Iterator last, size_type n, allocator_type const& a)
          : unordered_node_map(first, last, n, hasher(), key_equal(), a)
      {
      }

      template <class Iterator>
      unordered_node_map(Iterator first, Iterator last, size_type n,
        hasher const& h, allocator_type const& a)
          : unordered_node_map(first, last, n, h, key_equal(), a)
      {
      }

      unordered_node_map(unordered_node_map const& other) : table_(other.table_)
      {
      }

      unordered_node_map(
        unordered_node_map const& other, allocator_type const& a)
          : table_(other.table_, a)
      {
      }

      // Move construction is noexcept exactly when the underlying table's
      // move constructor is.
      unordered_node_map(unordered_node_map&& other)
        noexcept(std::is_nothrow_move_constructible<table_type>::value)
          : table_(std::move(other.table_))
      {
      }

      unordered_node_map(unordered_node_map&& other, allocator_type const& al)
          : table_(std::move(other.table_), al)
      {
      }

      unordered_node_map(std::initializer_list<value_type> ilist,
        size_type n = 0, hasher const& h = hasher(),
        key_equal const& pred = key_equal(),
        allocator_type const& a = allocator_type())
          : unordered_node_map(ilist.begin(), ilist.end(), n, h, pred, a)
      {
      }

      unordered_node_map(
        std::initializer_list<value_type> il, allocator_type const& a)
          : unordered_node_map(il, size_type(0), hasher(), key_equal(), a)
      {
      }

      unordered_node_map(std::initializer_list<value_type> init, size_type n,
        allocator_type const& a)
          : unordered_node_map(init, n, hasher(), key_equal(), a)
      {
      }

      unordered_node_map(std::initializer_list<value_type> init, size_type n,
        hasher const& h, allocator_type const& a)
          : unordered_node_map(init, n, h, key_equal(), a)
      {
      }

      ~unordered_node_map() = default;

      unordered_node_map& operator=(unordered_node_map const& other)
      {
        table_ = other.table_;
        return *this;
      }

      unordered_node_map& operator=(unordered_node_map&& other) noexcept(
        noexcept(std::declval<table_type&>() = std::declval<table_type&&>()))
      {
        table_ = std::move(other.table_);
        return *this;
      }

      allocator_type get_allocator() const noexcept
      {
        return table_.get_allocator();
      }

      /// Iterators
      ///

      iterator begin() noexcept { return table_.begin(); }
      const_iterator begin() const noexcept { return table_.begin(); }
      const_iterator cbegin() const noexcept { return table_.cbegin(); }

      iterator end() noexcept { return table_.end(); }
      const_iterator end() const noexcept { return table_.end(); }
      const_iterator cend() const noexcept { return table_.cend(); }

      /// Capacity
      ///

      BOOST_ATTRIBUTE_NODISCARD bool empty() const noexcept
      {
        return table_.empty();
      }

      size_type size() const noexcept { return table_.size(); }

      size_type max_size() const noexcept { return table_.max_size(); }

      /// Modifiers
      ///

      void clear() noexcept { table_.clear(); }

      // Perfect-forwarding insert; the trailing decltype SFINAEs the overload
      // away for argument types the table cannot insert.
      template <class Ty>
      BOOST_FORCEINLINE auto insert(Ty&& value)
        -> decltype(table_.insert(std::forward<Ty>(value)))
      {
        return table_.insert(std::forward<Ty>(value));
      }

      BOOST_FORCEINLINE std::pair<iterator, bool> insert(init_type&& value)
      {
        return table_.insert(std::move(value));
      }

      // Hinted insert; the hint iterator is accepted for interface
      // compatibility and ignored.
      template <class Ty>
      BOOST_FORCEINLINE auto insert(const_iterator, Ty&& value)
        -> decltype(table_.insert(std::forward<Ty>(value)).first)
      {
        return table_.insert(std::forward<Ty>(value)).first;
      }
BOOST_FORCEINLINE iterator insert(const_iterator, init_type&& value)
      {
        return table_.insert(std::move(value)).first;
      }

      template <class InputIterator>
      BOOST_FORCEINLINE void insert(InputIterator first, InputIterator last)
      {
        for (auto pos = first; pos != last; ++pos) {
          table_.emplace(*pos);
        }
      }

      void insert(std::initializer_list<value_type> ilist)
      {
        this->insert(ilist.begin(), ilist.end());
      }

      // Node-handle insert.  On success the handle is emptied; on failure
      // (key already present) the element stays in the returned handle.
      insert_return_type insert(node_type&& nh)
      {
        if (nh.empty()) {
          return {end(), false, node_type{}};
        }

        // Handles may only move between containers with equal allocators.
        BOOST_ASSERT(get_allocator() == nh.get_allocator());

        auto itp = table_.insert(std::move(nh.element()));
        if (itp.second) {
          nh.reset();
          return {itp.first, true, node_type{}};
        } else {
          return {itp.first, false, std::move(nh)};
        }
      }

      // Hinted node-handle insert; hint ignored.  Either way the iterator to
      // the (inserted or blocking) element is returned.
      iterator insert(const_iterator, node_type&& nh)
      {
        if (nh.empty()) {
          return end();
        }

        BOOST_ASSERT(get_allocator() == nh.get_allocator());

        auto itp = table_.insert(std::move(nh.element()));
        if (itp.second) {
          nh.reset();
          return itp.first;
        } else {
          return itp.first;
        }
      }

      // insert_or_assign: try_emplace leaves obj untouched when the key is
      // already present, so forwarding obj again on the assign path is safe.
      template <class M>
      std::pair<iterator, bool> insert_or_assign(key_type const& key, M&& obj)
      {
        auto ibp = table_.try_emplace(key, std::forward<M>(obj));
        if (ibp.second) {
          return ibp;
        }
        ibp.first->second = std::forward<M>(obj);
        return ibp;
      }

      template <class M>
      std::pair<iterator, bool> insert_or_assign(key_type&& key, M&& obj)
      {
        auto ibp = table_.try_emplace(std::move(key), std::forward<M>(obj));
        if (ibp.second) {
          return ibp;
        }
        ibp.first->second = std::forward<M>(obj);
        return ibp;
      }

      // Transparent-key overload, enabled only when both hasher and key_equal
      // advertise transparency.
      template <class K, class M>
      typename std::enable_if<
        boost::unordered::detail::are_transparent<K, hasher,
          key_equal>::value,
        std::pair<iterator, bool> >::type
      insert_or_assign(K&& k, M&& obj)
      {
        auto ibp = table_.try_emplace(std::forward<K>(k), std::forward<M>(obj));
        if (ibp.second) {
          return ibp;
        }
        ibp.first->second = std::forward<M>(obj);
        return ibp;
      }

      template <class M>
      iterator insert_or_assign(const_iterator, key_type const& key, M&& obj)
      {
        return this->insert_or_assign(key, std::forward<M>(obj)).first;
      }

      template <class M>
      iterator insert_or_assign(const_iterator, key_type&& key, M&& obj)
      {
        return this->insert_or_assign(std::move(key), std::forward<M>(obj))
          .first;
      }

      template <class K, class M>
      typename std::enable_if<
        boost::unordered::detail::are_transparent<K, hasher,
          key_equal>::value,
        iterator>::type
      insert_or_assign(const_iterator, K&& k, M&& obj)
      {
        return this->insert_or_assign(std::forward<K>(k), std::forward<M>(obj))
          .first;
      }

      template <class... Args>
      BOOST_FORCEINLINE std::pair<iterator, bool> emplace(Args&&... args)
      {
        return table_.emplace(std::forward<Args>(args)...);
      }

      // Hint is ignored; emplaces and returns the iterator part only.
      template <class... Args>
      BOOST_FORCEINLINE iterator emplace_hint(const_iterator, Args&&... args)
      {
        return table_.emplace(std::forward<Args>(args)...).first;
      }

      template <class... Args>
      BOOST_FORCEINLINE std::pair<iterator, bool> try_emplace(
        key_type const& key, Args&&... args)
      {
        return table_.try_emplace(key, std::forward<Args>(args)...);
      }

      template <class... Args>
      BOOST_FORCEINLINE std::pair<iterator, bool> try_emplace(
        key_type&& key, Args&&... args)
      {
        return table_.try_emplace(std::move(key), std::forward<Args>(args)...);
      }

      // Transparent try_emplace; transparent_non_iterable also rejects
      // iterator arguments so this cannot hijack the hinted overloads.
      template <class K, class... Args>
      BOOST_FORCEINLINE typename std::enable_if<
        boost::unordered::detail::transparent_non_iterable<K,
          unordered_node_map>::value,
        std::pair<iterator, bool> >::type
      try_emplace(K&& key, Args&&... args)
      {
        return table_.try_emplace(
          std::forward<K>(key), std::forward<Args>(args)...);
      }

      template <class... Args>
      BOOST_FORCEINLINE iterator try_emplace(
        const_iterator, key_type const& key, Args&&... args)
      {
        return table_.try_emplace(key, std::forward<Args>(args)...).first;
      }

      template <class... Args>
      BOOST_FORCEINLINE iterator try_emplace(
        const_iterator, key_type&& key, Args&&... args)
      {
        return table_.try_emplace(std::move(key), std::forward<Args>(args)...)
          .first;
      }

      template <class K, class... Args>
      BOOST_FORCEINLINE typename std::enable_if<
        boost::unordered::detail::transparent_non_iterable<K,
          unordered_node_map>::value,
        iterator>::type
      try_emplace(const_iterator, K&& key, Args&&... args)
      {
        return table_
          .try_emplace(std::forward<K>(key), std::forward<Args>(args)...)
          .first;
      }

      BOOST_FORCEINLINE typename table_type::erase_return_type erase(
        iterator pos)
      {
        return table_.erase(pos);
      }

      BOOST_FORCEINLINE typename table_type::erase_return_type erase(
        const_iterator pos)
      {
        return table_.erase(pos);
      }

      // Range erase: post-increment keeps a valid iterator while the current
      // element is removed.
      iterator erase(const_iterator first, const_iterator last)
      {
        while (first != last) {
          this->erase(first++);
        }
        return iterator{detail::foa::const_iterator_cast_tag{}, last};
      }

      BOOST_FORCEINLINE size_type erase(key_type const& key)
      {
        return table_.erase(key);
      }

      template <class K>
      BOOST_FORCEINLINE typename std::enable_if<
        detail::transparent_non_iterable<K, unordered_node_map>::value,
        size_type>::type
      erase(K const& key)
      {
        return table_.erase(key);
      }

      void swap(unordered_node_map& rhs) noexcept(
        noexcept(std::declval<table_type&>().swap(std::declval<table_type&>())))
      {
        table_.swap(rhs.table_);
      }

      // Detaches the element at pos into a node handle.
      // Precondition: pos is dereferenceable (pos != end()).
      node_type extract(const_iterator pos)
      {
        BOOST_ASSERT(pos != end());
        node_type nh;
        auto elem = table_.extract(pos);
        nh.emplace(std::move(elem), get_allocator());
        return nh;
      }

      // Key-based extract; returns an empty handle if the key is absent.
      node_type extract(key_type const& key)
      {
        auto pos = find(key);
        return pos != end() ? extract(pos) : node_type();
      }

      template <class K>
      typename std::enable_if<
        boost::unordered::detail::transparent_non_iterable<K,
          unordered_node_map>::value,
        node_type>::type
      extract(K const& key)
      {
        auto pos = find(key);
        return pos != end() ? extract(pos) : node_type();
      }

      // merge: moves every element of source whose key is not already here.
      // Precondition (asserted): equal allocators.
      template <class H2, class P2>
      void merge(
        unordered_node_map<key_type, mapped_type, H2, P2, allocator_type>&
          source)
      {
        BOOST_ASSERT(get_allocator() == source.get_allocator());
        table_.merge(source.table_);
      }

      template <class H2, class P2>
      void merge(
        unordered_node_map<key_type, mapped_type, H2, P2, allocator_type>&&
          source)
      {
        BOOST_ASSERT(get_allocator() == source.get_allocator());
        table_.merge(std::move(source.table_));
      }

      /// Lookup
      ///

      // at(): throws out_of_range (via detail helper) when the key is absent.
      mapped_type& at(key_type const& key)
      {
        auto pos = table_.find(key);
        if (pos != table_.end()) {
          return pos->second;
        }
        // TODO: someday refactor this to conditionally serialize the key and
        // include it in the error message
        //
        boost::unordered::detail::throw_out_of_range(
          "key was not found in unordered_node_map");
      }

      mapped_type const& at(key_type const& key) const
      {
        auto pos = table_.find(key);
        if (pos != table_.end()) {
          return pos->second;
        }
        boost::unordered::detail::throw_out_of_range(
          "key was not found in unordered_node_map");
      }

      template <class K>
      typename std::enable_if<
        boost::unordered::detail::are_transparent<K, hasher,
          key_equal>::value,
        mapped_type&>::type
      at(K&& key)
      {
        auto pos = table_.find(std::forward<K>(key));
        if (pos != table_.end()) {
          return pos->second;
        }
        boost::unordered::detail::throw_out_of_range(
          "key was not found in unordered_node_map");
      }

      template <class K>
      typename std::enable_if<
        boost::unordered::detail::are_transparent<K, hasher,
          key_equal>::value,
        mapped_type const&>::type
      at(K&& key) const
      {
        auto pos = table_.find(std::forward<K>(key));
        if (pos != table_.end()) {
          return pos->second;
        }
        boost::unordered::detail::throw_out_of_range(
          "key was not found in unordered_node_map");
      }

      // operator[]: default-constructs the mapped value when key is absent
      // (try_emplace with no mapped arguments).
      BOOST_FORCEINLINE mapped_type& operator[](key_type const& key)
      {
        return table_.try_emplace(key).first->second;
      }

      BOOST_FORCEINLINE mapped_type& operator[](key_type&& key)
      {
        return table_.try_emplace(std::move(key)).first->second;
      }

      template <class K>
      typename std::enable_if<
        boost::unordered::detail::are_transparent<K, hasher,
          key_equal>::value,
        mapped_type&>::type
      operator[](K&& key)
      {
        return table_.try_emplace(std::forward<K>(key)).first->second;
      }

      // count is 0 or 1: keys are unique in this container.
      BOOST_FORCEINLINE size_type count(key_type const& key) const
      {
        auto pos = table_.find(key);
        return pos != table_.end() ? 1 : 0;
      }

      template <class K>
      BOOST_FORCEINLINE typename std::enable_if<
        detail::are_transparent<K, hasher, key_equal>::value,
        size_type>::type
      count(K const& key) const
      {
        auto pos = table_.find(key);
        return pos != table_.end() ? 1 : 0;
      }

      BOOST_FORCEINLINE iterator find(key_type const& key)
      {
        return table_.find(key);
      }

      BOOST_FORCEINLINE const_iterator find(key_type const& key) const
      {
        return table_.find(key);
      }

      template <class K>
      BOOST_FORCEINLINE typename std::enable_if<
        boost::unordered::detail::are_transparent<K, hasher,
          key_equal>::value,
        iterator>::type
      find(K const& key)
      {
        return table_.find(key);
      }

      template <class K>
      BOOST_FORCEINLINE typename std::enable_if<
        boost::unordered::detail::are_transparent<K, hasher,
          key_equal>::value,
        const_iterator>::type
      find(K const& key) const
      {
        return table_.find(key);
      }

      BOOST_FORCEINLINE bool contains(key_type const& key) const
      {
        return this->find(key) != this->end();
      }

      template <class K>
      BOOST_FORCEINLINE typename std::enable_if<
        boost::unordered::detail::are_transparent<K, hasher,
          key_equal>::value,
        bool>::type
      contains(K const& key) const
      {
        return this->find(key) != this->end();
      }

      // equal_range: since keys are unique the range holds at most one
      // element, hence the single increment.
      std::pair<iterator, iterator> equal_range(key_type const& key)
      {
        auto pos = table_.find(key);
        if (pos == table_.end()) {
          return {pos, pos};
        }

        auto next = pos;
        ++next;
        return {pos, next};
      }

      std::pair<const_iterator, const_iterator> equal_range(
        key_type const& key) const
      {
        auto pos = table_.find(key);
        if (pos == table_.end()) {
          return {pos, pos};
        }

        auto next = pos;
        ++next;
        return {pos, next};
      }

      template <class K>
      typename std::enable_if<
        detail::are_transparent<K, hasher, key_equal>::value,
        std::pair<iterator, iterator> >::type
      equal_range(K const& key)
      {
        auto pos = table_.find(key);
        if (pos == table_.end()) {
          return {pos, pos};
        }

        auto next = pos;
        ++next;
        return {pos, next};
      }

      template <class K>
      typename std::enable_if<
        detail::are_transparent<K, hasher, key_equal>::value,
        std::pair<const_iterator, const_iterator> >::type
      equal_range(K const& key) const
      {
        auto pos = table_.find(key);
        if (pos == table_.end()) {
          return {pos, pos};
        }

        auto next = pos;
        ++next;
        return {pos, next};
      }

      /// Hash Policy
      ///

      // Open addressing has no separate buckets; capacity stands in for the
      // classic bucket count.
      size_type bucket_count() const noexcept { return table_.capacity(); }

      float load_factor() const noexcept { return table_.load_factor(); }

      float max_load_factor() const noexcept
      {
        return table_.max_load_factor();
      }

      // Intentionally a no-op: kept only for interface compatibility; the
      // table controls its own maximum load factor.
      void max_load_factor(float) {}

      size_type max_load() const noexcept { return table_.max_load(); }

      void rehash(size_type n) { table_.rehash(n); }

      void reserve(size_type n) { table_.reserve(n); }

      /// Observers
      ///

      hasher hash_function() const { return table_.hash_function(); }

      key_equal key_eq() const { return table_.key_eq(); }
    };

    // Equality delegates to the table (friend access to table_).
    template <class Key, class T, class Hash, class KeyEqual, class Allocator>
    bool operator==(
      unordered_node_map<Key, T, Hash, KeyEqual, Allocator> const& lhs,
      unordered_node_map<Key, T, Hash, KeyEqual, Allocator> const& rhs)
    {
      return lhs.table_ == rhs.table_;
    }

    template <class Key, class T, class Hash, class KeyEqual, class Allocator>
    bool operator!=(
      unordered_node_map<Key, T, Hash, KeyEqual, Allocator> const& lhs,
      unordered_node_map<Key, T, Hash, KeyEqual, Allocator> const& rhs)
    {
      return !(lhs == rhs);
    }

    template <class Key, class T, class Hash, class KeyEqual, class Allocator>
    void swap(unordered_node_map<Key, T, Hash, KeyEqual, Allocator>& lhs,
      unordered_node_map<Key, T, Hash, KeyEqual, Allocator>& rhs)
      noexcept(noexcept(lhs.swap(rhs)))
    {
      lhs.swap(rhs);
    }

    // Erases all elements satisfying pred; returns the number erased.
    template <class Key, class T, class Hash, class KeyEqual, class Allocator,
      class Pred>
    typename unordered_node_map<Key, T, Hash, KeyEqual, Allocator>::size_type
    erase_if(
      unordered_node_map<Key, T, Hash, KeyEqual, Allocator>& map, Pred pred)
    {
      return erase_if(map.table_, pred);
    }

    // Boost.Serialization hook.
    template <class Archive, class Key, class T, class Hash, class KeyEqual,
      class Allocator>
    void serialize(Archive& ar,
      unordered_node_map<Key, T, Hash, KeyEqual, Allocator>& map,
      unsigned int version)
    {
      detail::serialize_container(ar, map, version);
    }

#if defined(BOOST_MSVC)
#pragma warning(pop) /* C4714 */
#endif

#if BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES

    // CTAD deduction guides mirroring std::unordered_map's; the unnamed
    // enable_if_t parameters keep each guide from matching the wrong
    // argument pattern.

    template <class InputIterator,
      class Hash =
        boost::hash<boost::unordered::detail::iter_key_t<InputIterator> >,
      class Pred =
        std::equal_to<boost::unordered::detail::iter_key_t<InputIterator> >,
      class Allocator = std::allocator<
        boost::unordered::detail::iter_to_alloc_t<InputIterator> >,
      class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >,
      class = std::enable_if_t<detail::is_hash_v<Hash> >,
      class = std::enable_if_t<detail::is_pred_v<Pred> >,
      class = std::enable_if_t<detail::is_allocator_v<Allocator> > >
    unordered_node_map(InputIterator, InputIterator,
      std::size_t = boost::unordered::detail::foa::default_bucket_count,
      Hash = Hash(), Pred = Pred(), Allocator = Allocator())
      -> unordered_node_map<
        boost::unordered::detail::iter_key_t<InputIterator>,
        boost::unordered::detail::iter_val_t<InputIterator>, Hash, Pred,
        Allocator>;

    template <class Key, class T,
      class Hash = boost::hash<std::remove_const_t<Key> >,
      class Pred = std::equal_to<std::remove_const_t<Key> >,
      class Allocator = std::allocator<std::pair<const Key, T> >,
      class = std::enable_if_t<detail::is_hash_v<Hash> >,
      class = std::enable_if_t<detail::is_pred_v<Pred> >,
      class = std::enable_if_t<detail::is_allocator_v<Allocator> > >
    unordered_node_map(std::initializer_list<std::pair<Key, T> >,
      std::size_t = boost::unordered::detail::foa::default_bucket_count,
      Hash = Hash(), Pred = Pred(), Allocator = Allocator())
      -> unordered_node_map<std::remove_const_t<Key>, T, Hash, Pred,
        Allocator>;

    template <class InputIterator, class Allocator,
      class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >,
      class = std::enable_if_t<detail::is_allocator_v<Allocator> > >
    unordered_node_map(InputIterator, InputIterator, std::size_t, Allocator)
      -> unordered_node_map<
        boost::unordered::detail::iter_key_t<InputIterator>,
        boost::unordered::detail::iter_val_t<InputIterator>,
        boost::hash<boost::unordered::detail::iter_key_t<InputIterator> >,
        std::equal_to<boost::unordered::detail::iter_key_t<InputIterator> >,
        Allocator>;

    template <class InputIterator, class Allocator,
      class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >,
      class = std::enable_if_t<detail::is_allocator_v<Allocator> > >
    unordered_node_map(InputIterator, InputIterator, Allocator)
      -> unordered_node_map<
        boost::unordered::detail::iter_key_t<InputIterator>,
        boost::unordered::detail::iter_val_t<InputIterator>,
        boost::hash<boost::unordered::detail::iter_key_t<InputIterator> >,
        std::equal_to<boost::unordered::detail::iter_key_t<InputIterator> >,
        Allocator>;

    template <class InputIterator, class Hash, class Allocator,
      class = std::enable_if_t<detail::is_hash_v<Hash> >,
      class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >,
      class = std::enable_if_t<detail::is_allocator_v<Allocator> > >
    unordered_node_map(
      InputIterator, InputIterator, std::size_t, Hash, Allocator)
      -> unordered_node_map<
        boost::unordered::detail::iter_key_t<InputIterator>,
        boost::unordered::detail::iter_val_t<InputIterator>, Hash,
        std::equal_to<boost::unordered::detail::iter_key_t<InputIterator> >,
        Allocator>;

    template <class Key, class T, class Allocator,
      class = std::enable_if_t<detail::is_allocator_v<Allocator> > >
    unordered_node_map(std::initializer_list<std::pair<Key, T> >, std::size_t,
      Allocator)
      -> unordered_node_map<std::remove_const_t<Key>, T,
        boost::hash<std::remove_const_t<Key> >,
        std::equal_to<std::remove_const_t<Key> >, Allocator>;

    template <class Key, class T, class Allocator,
      class = std::enable_if_t<detail::is_allocator_v<Allocator> > >
    unordered_node_map(std::initializer_list<std::pair<Key, T> >, Allocator)
      -> unordered_node_map<std::remove_const_t<Key>, T,
        boost::hash<std::remove_const_t<Key> >,
        std::equal_to<std::remove_const_t<Key> >, Allocator>;

    template <class Key, class T, class Hash, class Allocator,
      class = std::enable_if_t<detail::is_hash_v<Hash> >,
      class = std::enable_if_t<detail::is_allocator_v<Allocator> > >
    unordered_node_map(std::initializer_list<std::pair<Key, T> >, std::size_t,
      Hash, Allocator)
      -> unordered_node_map<std::remove_const_t<Key>, T, Hash,
        std::equal_to<std::remove_const_t<Key> >, Allocator>;
#endif

  } // namespace unordered
} // namespace boost

#endif
0
repos/unordered/include/boost
repos/unordered/include/boost/unordered/unordered_flat_set_fwd.hpp
// Copyright (C) 2022 Christian Mazakas // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_FLAT_SET_FWD_HPP_INCLUDED #define BOOST_UNORDERED_FLAT_SET_FWD_HPP_INCLUDED #include <boost/config.hpp> #if defined(BOOST_HAS_PRAGMA_ONCE) #pragma once #endif #include <boost/container_hash/hash_fwd.hpp> #include <functional> #include <memory> namespace boost { namespace unordered { template <class Key, class Hash = boost::hash<Key>, class KeyEqual = std::equal_to<Key>, class Allocator = std::allocator<Key> > class unordered_flat_set; template <class Key, class Hash, class KeyEqual, class Allocator> bool operator==( unordered_flat_set<Key, Hash, KeyEqual, Allocator> const& lhs, unordered_flat_set<Key, Hash, KeyEqual, Allocator> const& rhs); template <class Key, class Hash, class KeyEqual, class Allocator> bool operator!=( unordered_flat_set<Key, Hash, KeyEqual, Allocator> const& lhs, unordered_flat_set<Key, Hash, KeyEqual, Allocator> const& rhs); template <class Key, class Hash, class KeyEqual, class Allocator> void swap(unordered_flat_set<Key, Hash, KeyEqual, Allocator>& lhs, unordered_flat_set<Key, Hash, KeyEqual, Allocator>& rhs) noexcept(noexcept(lhs.swap(rhs))); } // namespace unordered using boost::unordered::unordered_flat_set; } // namespace boost #endif
0
repos/unordered/include/boost
repos/unordered/include/boost/unordered/unordered_flat_set.hpp
// Copyright (C) 2022-2023 Christian Mazakas // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_UNORDERED_FLAT_SET_HPP_INCLUDED #define BOOST_UNORDERED_UNORDERED_FLAT_SET_HPP_INCLUDED #include <boost/config.hpp> #if defined(BOOST_HAS_PRAGMA_ONCE) #pragma once #endif #include <boost/unordered/concurrent_flat_set_fwd.hpp> #include <boost/unordered/detail/foa/flat_set_types.hpp> #include <boost/unordered/detail/foa/table.hpp> #include <boost/unordered/detail/serialize_container.hpp> #include <boost/unordered/detail/type_traits.hpp> #include <boost/unordered/unordered_flat_set_fwd.hpp> #include <boost/core/allocator_access.hpp> #include <boost/container_hash/hash.hpp> #include <initializer_list> #include <iterator> #include <type_traits> #include <utility> namespace boost { namespace unordered { #if defined(BOOST_MSVC) #pragma warning(push) #pragma warning(disable : 4714) /* marked as __forceinline not inlined */ #endif template <class Key, class Hash, class KeyEqual, class Allocator> class unordered_flat_set { template <class Key2, class Hash2, class KeyEqual2, class Allocator2> friend class concurrent_flat_set; using set_types = detail::foa::flat_set_types<Key>; using table_type = detail::foa::table<set_types, Hash, KeyEqual, typename boost::allocator_rebind<Allocator, typename set_types::value_type>::type>; table_type table_; template <class K, class H, class KE, class A> bool friend operator==(unordered_flat_set<K, H, KE, A> const& lhs, unordered_flat_set<K, H, KE, A> const& rhs); template <class K, class H, class KE, class A, class Pred> typename unordered_flat_set<K, H, KE, A>::size_type friend erase_if( unordered_flat_set<K, H, KE, A>& set, Pred pred); public: using key_type = Key; using value_type = typename set_types::value_type; using init_type = typename set_types::init_type; using size_type = std::size_t; using difference_type = 
std::ptrdiff_t; using hasher = Hash; using key_equal = KeyEqual; using allocator_type = Allocator; using reference = value_type&; using const_reference = value_type const&; using pointer = typename boost::allocator_pointer<allocator_type>::type; using const_pointer = typename boost::allocator_const_pointer<allocator_type>::type; using iterator = typename table_type::iterator; using const_iterator = typename table_type::const_iterator; unordered_flat_set() : unordered_flat_set(0) {} explicit unordered_flat_set(size_type n, hasher const& h = hasher(), key_equal const& pred = key_equal(), allocator_type const& a = allocator_type()) : table_(n, h, pred, a) { } unordered_flat_set(size_type n, allocator_type const& a) : unordered_flat_set(n, hasher(), key_equal(), a) { } unordered_flat_set(size_type n, hasher const& h, allocator_type const& a) : unordered_flat_set(n, h, key_equal(), a) { } template <class InputIterator> unordered_flat_set( InputIterator f, InputIterator l, allocator_type const& a) : unordered_flat_set(f, l, size_type(0), hasher(), key_equal(), a) { } explicit unordered_flat_set(allocator_type const& a) : unordered_flat_set(0, a) { } template <class Iterator> unordered_flat_set(Iterator first, Iterator last, size_type n = 0, hasher const& h = hasher(), key_equal const& pred = key_equal(), allocator_type const& a = allocator_type()) : unordered_flat_set(n, h, pred, a) { this->insert(first, last); } template <class InputIt> unordered_flat_set( InputIt first, InputIt last, size_type n, allocator_type const& a) : unordered_flat_set(first, last, n, hasher(), key_equal(), a) { } template <class Iterator> unordered_flat_set(Iterator first, Iterator last, size_type n, hasher const& h, allocator_type const& a) : unordered_flat_set(first, last, n, h, key_equal(), a) { } unordered_flat_set(unordered_flat_set const& other) : table_(other.table_) { } unordered_flat_set( unordered_flat_set const& other, allocator_type const& a) : table_(other.table_, a) { } 
unordered_flat_set(unordered_flat_set&& other) noexcept(std::is_nothrow_move_constructible<table_type>::value) : table_(std::move(other.table_)) { } unordered_flat_set(unordered_flat_set&& other, allocator_type const& al) : table_(std::move(other.table_), al) { } unordered_flat_set(std::initializer_list<value_type> ilist, size_type n = 0, hasher const& h = hasher(), key_equal const& pred = key_equal(), allocator_type const& a = allocator_type()) : unordered_flat_set(ilist.begin(), ilist.end(), n, h, pred, a) { } unordered_flat_set( std::initializer_list<value_type> il, allocator_type const& a) : unordered_flat_set(il, size_type(0), hasher(), key_equal(), a) { } unordered_flat_set(std::initializer_list<value_type> init, size_type n, allocator_type const& a) : unordered_flat_set(init, n, hasher(), key_equal(), a) { } unordered_flat_set(std::initializer_list<value_type> init, size_type n, hasher const& h, allocator_type const& a) : unordered_flat_set(init, n, h, key_equal(), a) { } unordered_flat_set( concurrent_flat_set<Key, Hash, KeyEqual, Allocator>&& other) : table_(std::move(other.table_)) { } ~unordered_flat_set() = default; unordered_flat_set& operator=(unordered_flat_set const& other) { table_ = other.table_; return *this; } unordered_flat_set& operator=(unordered_flat_set&& other) noexcept( noexcept(std::declval<table_type&>() = std::declval<table_type&&>())) { table_ = std::move(other.table_); return *this; } allocator_type get_allocator() const noexcept { return table_.get_allocator(); } /// Iterators /// iterator begin() noexcept { return table_.begin(); } const_iterator begin() const noexcept { return table_.begin(); } const_iterator cbegin() const noexcept { return table_.cbegin(); } iterator end() noexcept { return table_.end(); } const_iterator end() const noexcept { return table_.end(); } const_iterator cend() const noexcept { return table_.cend(); } /// Capacity /// BOOST_ATTRIBUTE_NODISCARD bool empty() const noexcept { return table_.empty(); } 
size_type size() const noexcept { return table_.size(); } size_type max_size() const noexcept { return table_.max_size(); } /// Modifiers /// void clear() noexcept { table_.clear(); } BOOST_FORCEINLINE std::pair<iterator, bool> insert( value_type const& value) { return table_.insert(value); } BOOST_FORCEINLINE std::pair<iterator, bool> insert(value_type&& value) { return table_.insert(std::move(value)); } template <class K> BOOST_FORCEINLINE typename std::enable_if< detail::transparent_non_iterable<K, unordered_flat_set>::value, std::pair<iterator, bool> >::type insert(K&& k) { return table_.try_emplace(std::forward<K>(k)); } BOOST_FORCEINLINE iterator insert(const_iterator, value_type const& value) { return table_.insert(value).first; } BOOST_FORCEINLINE iterator insert(const_iterator, value_type&& value) { return table_.insert(std::move(value)).first; } template <class K> BOOST_FORCEINLINE typename std::enable_if< detail::transparent_non_iterable<K, unordered_flat_set>::value, iterator>::type insert(const_iterator, K&& k) { return table_.try_emplace(std::forward<K>(k)).first; } template <class InputIterator> void insert(InputIterator first, InputIterator last) { for (auto pos = first; pos != last; ++pos) { table_.emplace(*pos); } } void insert(std::initializer_list<value_type> ilist) { this->insert(ilist.begin(), ilist.end()); } template <class... Args> BOOST_FORCEINLINE std::pair<iterator, bool> emplace(Args&&... args) { return table_.emplace(std::forward<Args>(args)...); } template <class... Args> BOOST_FORCEINLINE iterator emplace_hint(const_iterator, Args&&... 
args) { return table_.emplace(std::forward<Args>(args)...).first; } BOOST_FORCEINLINE typename table_type::erase_return_type erase( const_iterator pos) { return table_.erase(pos); } iterator erase(const_iterator first, const_iterator last) { while (first != last) { this->erase(first++); } return iterator{detail::foa::const_iterator_cast_tag{}, last}; } BOOST_FORCEINLINE size_type erase(key_type const& key) { return table_.erase(key); } template <class K> BOOST_FORCEINLINE typename std::enable_if< detail::transparent_non_iterable<K, unordered_flat_set>::value, size_type>::type erase(K const& key) { return table_.erase(key); } void swap(unordered_flat_set& rhs) noexcept( noexcept(std::declval<table_type&>().swap(std::declval<table_type&>()))) { table_.swap(rhs.table_); } template <class H2, class P2> void merge(unordered_flat_set<key_type, H2, P2, allocator_type>& source) { table_.merge(source.table_); } template <class H2, class P2> void merge(unordered_flat_set<key_type, H2, P2, allocator_type>&& source) { table_.merge(std::move(source.table_)); } /// Lookup /// BOOST_FORCEINLINE size_type count(key_type const& key) const { auto pos = table_.find(key); return pos != table_.end() ? 1 : 0; } template <class K> BOOST_FORCEINLINE typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value, size_type>::type count(K const& key) const { auto pos = table_.find(key); return pos != table_.end() ? 
1 : 0; } BOOST_FORCEINLINE iterator find(key_type const& key) { return table_.find(key); } BOOST_FORCEINLINE const_iterator find(key_type const& key) const { return table_.find(key); } template <class K> BOOST_FORCEINLINE typename std::enable_if< boost::unordered::detail::are_transparent<K, hasher, key_equal>::value, iterator>::type find(K const& key) { return table_.find(key); } template <class K> BOOST_FORCEINLINE typename std::enable_if< boost::unordered::detail::are_transparent<K, hasher, key_equal>::value, const_iterator>::type find(K const& key) const { return table_.find(key); } BOOST_FORCEINLINE bool contains(key_type const& key) const { return this->find(key) != this->end(); } template <class K> BOOST_FORCEINLINE typename std::enable_if< boost::unordered::detail::are_transparent<K, hasher, key_equal>::value, bool>::type contains(K const& key) const { return this->find(key) != this->end(); } std::pair<iterator, iterator> equal_range(key_type const& key) { auto pos = table_.find(key); if (pos == table_.end()) { return {pos, pos}; } auto next = pos; ++next; return {pos, next}; } std::pair<const_iterator, const_iterator> equal_range( key_type const& key) const { auto pos = table_.find(key); if (pos == table_.end()) { return {pos, pos}; } auto next = pos; ++next; return {pos, next}; } template <class K> typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value, std::pair<iterator, iterator> >::type equal_range(K const& key) { auto pos = table_.find(key); if (pos == table_.end()) { return {pos, pos}; } auto next = pos; ++next; return {pos, next}; } template <class K> typename std::enable_if< detail::are_transparent<K, hasher, key_equal>::value, std::pair<const_iterator, const_iterator> >::type equal_range(K const& key) const { auto pos = table_.find(key); if (pos == table_.end()) { return {pos, pos}; } auto next = pos; ++next; return {pos, next}; } /// Hash Policy /// size_type bucket_count() const noexcept { return table_.capacity(); } float 
load_factor() const noexcept { return table_.load_factor(); } float max_load_factor() const noexcept { return table_.max_load_factor(); } void max_load_factor(float) {} size_type max_load() const noexcept { return table_.max_load(); } void rehash(size_type n) { table_.rehash(n); } void reserve(size_type n) { table_.reserve(n); } /// Observers /// hasher hash_function() const { return table_.hash_function(); } key_equal key_eq() const { return table_.key_eq(); } }; template <class Key, class Hash, class KeyEqual, class Allocator> bool operator==( unordered_flat_set<Key, Hash, KeyEqual, Allocator> const& lhs, unordered_flat_set<Key, Hash, KeyEqual, Allocator> const& rhs) { return lhs.table_ == rhs.table_; } template <class Key, class Hash, class KeyEqual, class Allocator> bool operator!=( unordered_flat_set<Key, Hash, KeyEqual, Allocator> const& lhs, unordered_flat_set<Key, Hash, KeyEqual, Allocator> const& rhs) { return !(lhs == rhs); } template <class Key, class Hash, class KeyEqual, class Allocator> void swap(unordered_flat_set<Key, Hash, KeyEqual, Allocator>& lhs, unordered_flat_set<Key, Hash, KeyEqual, Allocator>& rhs) noexcept(noexcept(lhs.swap(rhs))) { lhs.swap(rhs); } template <class Key, class Hash, class KeyEqual, class Allocator, class Pred> typename unordered_flat_set<Key, Hash, KeyEqual, Allocator>::size_type erase_if(unordered_flat_set<Key, Hash, KeyEqual, Allocator>& set, Pred pred) { return erase_if(set.table_, pred); } template <class Archive, class Key, class Hash, class KeyEqual, class Allocator> void serialize(Archive& ar, unordered_flat_set<Key, Hash, KeyEqual, Allocator>& set, unsigned int version) { detail::serialize_container(ar, set, version); } #if defined(BOOST_MSVC) #pragma warning(pop) /* C4714 */ #endif #if BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES template <class InputIterator, class Hash = boost::hash<typename std::iterator_traits<InputIterator>::value_type>, class Pred = std::equal_to<typename 
std::iterator_traits<InputIterator>::value_type>, class Allocator = std::allocator< typename std::iterator_traits<InputIterator>::value_type>, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_pred_v<Pred> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_flat_set(InputIterator, InputIterator, std::size_t = boost::unordered::detail::foa::default_bucket_count, Hash = Hash(), Pred = Pred(), Allocator = Allocator()) -> unordered_flat_set< typename std::iterator_traits<InputIterator>::value_type, Hash, Pred, Allocator>; template <class T, class Hash = boost::hash<T>, class Pred = std::equal_to<T>, class Allocator = std::allocator<T>, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_pred_v<Pred> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_flat_set(std::initializer_list<T>, std::size_t = boost::unordered::detail::foa::default_bucket_count, Hash = Hash(), Pred = Pred(), Allocator = Allocator()) -> unordered_flat_set<T, Hash, Pred, Allocator>; template <class InputIterator, class Allocator, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_flat_set(InputIterator, InputIterator, std::size_t, Allocator) -> unordered_flat_set< typename std::iterator_traits<InputIterator>::value_type, boost::hash<typename std::iterator_traits<InputIterator>::value_type>, std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, Allocator>; template <class InputIterator, class Hash, class Allocator, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_flat_set( InputIterator, InputIterator, std::size_t, Hash, Allocator) -> unordered_flat_set< 
typename std::iterator_traits<InputIterator>::value_type, Hash, std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, Allocator>; template <class T, class Allocator, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_flat_set(std::initializer_list<T>, std::size_t, Allocator) -> unordered_flat_set<T, boost::hash<T>, std::equal_to<T>, Allocator>; template <class T, class Hash, class Allocator, class = std::enable_if_t<detail::is_hash_v<Hash> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_flat_set(std::initializer_list<T>, std::size_t, Hash, Allocator) -> unordered_flat_set<T, Hash, std::equal_to<T>, Allocator>; template <class InputIterator, class Allocator, class = std::enable_if_t<detail::is_input_iterator_v<InputIterator> >, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_flat_set(InputIterator, InputIterator, Allocator) -> unordered_flat_set< typename std::iterator_traits<InputIterator>::value_type, boost::hash<typename std::iterator_traits<InputIterator>::value_type>, std::equal_to<typename std::iterator_traits<InputIterator>::value_type>, Allocator>; template <class T, class Allocator, class = std::enable_if_t<detail::is_allocator_v<Allocator> > > unordered_flat_set(std::initializer_list<T>, Allocator) -> unordered_flat_set<T, boost::hash<T>, std::equal_to<T>, Allocator>; #endif } // namespace unordered } // namespace boost #endif
0
repos/unordered/include/boost
repos/unordered/include/boost/unordered/concurrent_flat_set_fwd.hpp
/* Fast open-addressing concurrent hashset. * * Copyright 2023 Christian Mazakas. * Copyright 2023 Joaquin M Lopez Munoz. * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) * * See https://www.boost.org/libs/unordered for library home page. */ #ifndef BOOST_UNORDERED_CONCURRENT_FLAT_SET_FWD_HPP #define BOOST_UNORDERED_CONCURRENT_FLAT_SET_FWD_HPP #include <boost/container_hash/hash_fwd.hpp> #include <functional> #include <memory> namespace boost { namespace unordered { template <class Key, class Hash = boost::hash<Key>, class Pred = std::equal_to<Key>, class Allocator = std::allocator<Key> > class concurrent_flat_set; template <class Key, class Hash, class KeyEqual, class Allocator> bool operator==( concurrent_flat_set<Key, Hash, KeyEqual, Allocator> const& lhs, concurrent_flat_set<Key, Hash, KeyEqual, Allocator> const& rhs); template <class Key, class Hash, class KeyEqual, class Allocator> bool operator!=( concurrent_flat_set<Key, Hash, KeyEqual, Allocator> const& lhs, concurrent_flat_set<Key, Hash, KeyEqual, Allocator> const& rhs); template <class Key, class Hash, class Pred, class Alloc> void swap(concurrent_flat_set<Key, Hash, Pred, Alloc>& x, concurrent_flat_set<Key, Hash, Pred, Alloc>& y) noexcept(noexcept(x.swap(y))); template <class K, class H, class P, class A, class Predicate> typename concurrent_flat_set<K, H, P, A>::size_type erase_if( concurrent_flat_set<K, H, P, A>& c, Predicate pred); } // namespace unordered using boost::unordered::concurrent_flat_set; } // namespace boost #endif // BOOST_UNORDERED_CONCURRENT_FLAT_SET_FWD_HPP
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/serialize_tracked_address.hpp
/* Copyright 2023 Joaquin M Lopez Munoz. * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) * * See https://www.boost.org/libs/unordered for library home page. */ #ifndef BOOST_UNORDERED_DETAIL_SERIALIZE_TRACKED_ADDRESS_HPP #define BOOST_UNORDERED_DETAIL_SERIALIZE_TRACKED_ADDRESS_HPP #include <boost/unordered/detail/bad_archive_exception.hpp> #include <boost/core/pointer_traits.hpp> #include <boost/core/serialization.hpp> #include <boost/throw_exception.hpp> #include <type_traits> namespace boost{ namespace unordered{ namespace detail{ /* Tracked address serialization to support iterator serialization as described * in serialize_container.hpp. The underlying technique is to reinterpret_cast * T pointers to serialization_tracker<T> pointers, which, when dereferenced * and serialized, do not emit any serialization payload to the * archive, but activate object tracking on the relevant addresses for later * use with serialize_tracked_address(). */ template<typename T> struct serialization_tracker { /* An attempt to construct a serialization_tracker means a stray address * in the archive, that is, one without a previously tracked address. 
*/ serialization_tracker(){throw_exception(bad_archive_exception());} template<typename Archive> void serialize(Archive&,unsigned int){} /* no data emitted */ }; template<typename Archive,typename Ptr> void track_address(Archive& ar,Ptr p) { typedef typename boost::pointer_traits<Ptr> ptr_traits; typedef typename std::remove_const< typename ptr_traits::element_type>::type element_type; if(p){ ar&core::make_nvp( "address", *reinterpret_cast<serialization_tracker<element_type>*>( const_cast<element_type*>( boost::to_address(p)))); } } template<typename Archive,typename Ptr> void serialize_tracked_address(Archive& ar,Ptr& p,std::true_type /* save */) { typedef typename boost::pointer_traits<Ptr> ptr_traits; typedef typename std::remove_const< typename ptr_traits::element_type>::type element_type; typedef serialization_tracker<element_type> tracker; tracker* pt= const_cast<tracker*>( reinterpret_cast<const tracker*>( const_cast<const element_type*>( boost::to_address(p)))); ar<<core::make_nvp("pointer",pt); } template<typename Archive,typename Ptr> void serialize_tracked_address(Archive& ar,Ptr& p,std::false_type /* load */) { typedef typename boost::pointer_traits<Ptr> ptr_traits; typedef typename std::remove_const< typename ptr_traits::element_type>::type element_type; typedef serialization_tracker<element_type> tracker; tracker* pt; ar>>core::make_nvp("pointer",pt); element_type* pn=const_cast<element_type*>( reinterpret_cast<const element_type*>( const_cast<const tracker*>(pt))); p=pn?ptr_traits::pointer_to(*pn):0; } template<typename Archive,typename Ptr> void serialize_tracked_address(Archive& ar,Ptr& p) { serialize_tracked_address( ar,p, std::integral_constant<bool,Archive::is_saving::value>()); } } /* namespace detail */ } /* namespace unordered */ } /* namespace boost */ #endif
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/implementation.hpp
// Copyright (C) 2003-2004 Jeremy B. Maitin-Shepard. // Copyright (C) 2005-2016 Daniel James // Copyright (C) 2022-2024 Joaquin M Lopez Munoz. // Copyright (C) 2022-2023 Christian Mazakas // Copyright (C) 2024 Braden Ganetsky // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_DETAIL_IMPLEMENTATION_HPP #define BOOST_UNORDERED_DETAIL_IMPLEMENTATION_HPP #include <boost/config.hpp> #if defined(BOOST_HAS_PRAGMA_ONCE) #pragma once #endif #include <boost/unordered/detail/allocator_constructed.hpp> #include <boost/unordered/detail/fca.hpp> #include <boost/unordered/detail/opt_storage.hpp> #include <boost/unordered/detail/serialize_tracked_address.hpp> #include <boost/unordered/detail/static_assert.hpp> #include <boost/unordered/detail/type_traits.hpp> #include <boost/assert.hpp> #include <boost/core/allocator_traits.hpp> #include <boost/core/bit.hpp> #include <boost/core/invoke_swap.hpp> #include <boost/core/no_exceptions_support.hpp> #include <boost/core/pointer_traits.hpp> #include <boost/core/serialization.hpp> #include <boost/mp11/algorithm.hpp> #include <boost/mp11/list.hpp> #include <boost/throw_exception.hpp> #include <algorithm> #include <cmath> #include <iterator> #include <limits> #include <stdexcept> #include <type_traits> #include <utility> #include <tuple> // std::forward_as_tuple namespace boost { namespace tuples { struct null_type; } } // namespace boost // BOOST_UNORDERED_SUPPRESS_DEPRECATED // // Define to stop deprecation attributes #if defined(BOOST_UNORDERED_SUPPRESS_DEPRECATED) #define BOOST_UNORDERED_DEPRECATED(msg) #endif // BOOST_UNORDERED_DEPRECATED // // Wrapper around various depreaction attributes. 
#if defined(__has_cpp_attribute) && \ (!defined(__cplusplus) || __cplusplus >= 201402) #if __has_cpp_attribute(deprecated) && !defined(BOOST_UNORDERED_DEPRECATED) #define BOOST_UNORDERED_DEPRECATED(msg) [[deprecated(msg)]] #endif #endif #if !defined(BOOST_UNORDERED_DEPRECATED) #if defined(__GNUC__) && __GNUC__ >= 4 #define BOOST_UNORDERED_DEPRECATED(msg) __attribute__((deprecated)) #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define BOOST_UNORDERED_DEPRECATED(msg) __declspec(deprecated(msg)) #elif defined(_MSC_VER) && _MSC_VER >= 1310 #define BOOST_UNORDERED_DEPRECATED(msg) __declspec(deprecated) #else #define BOOST_UNORDERED_DEPRECATED(msg) #endif #endif namespace boost { namespace unordered { using std::piecewise_construct; using std::piecewise_construct_t; namespace detail { template <typename Types> struct table; static const float minimum_max_load_factor = 1e-3f; static const std::size_t default_bucket_count = 0; struct move_tag { }; struct empty_emplace { }; struct no_key { no_key() {} template <class T> no_key(T const&) {} }; struct converting_key { }; namespace func { template <class T> inline void ignore_unused_variable_warning(T const&) { } } // namespace func ////////////////////////////////////////////////////////////////////////// // iterator SFINAE template <typename I> struct is_forward : std::is_base_of<std::forward_iterator_tag, typename std::iterator_traits<I>::iterator_category> { }; template <typename I, typename ReturnType> struct enable_if_forward : std::enable_if<boost::unordered::detail::is_forward<I>::value, ReturnType> { }; template <typename I, typename ReturnType> struct disable_if_forward : std::enable_if<!boost::unordered::detail::is_forward<I>::value, ReturnType> { }; } // namespace detail } // namespace unordered } // namespace boost namespace boost { namespace unordered { namespace detail { ////////////////////////////////////////////////////////////////////////// // insert_size/initial_size template <class I> inline typename 
boost::unordered::detail::enable_if_forward<I, std::size_t>::type insert_size(I i, I j) { return static_cast<std::size_t>(std::distance(i, j)); } template <class I> inline typename boost::unordered::detail::disable_if_forward<I, std::size_t>::type insert_size(I, I) { return 1; } template <class I> inline std::size_t initial_size(I i, I j, std::size_t num_buckets = boost::unordered::detail::default_bucket_count) { return (std::max)( boost::unordered::detail::insert_size(i, j), num_buckets); } ////////////////////////////////////////////////////////////////////////// // compressed template <typename T, int Index> struct compressed_base : boost::empty_value<T> { compressed_base(T const& x) : empty_value<T>(boost::empty_init_t(), x) { } compressed_base(T& x, move_tag) : empty_value<T>(boost::empty_init_t(), std::move(x)) { } T& get() { return empty_value<T>::get(); } T const& get() const { return empty_value<T>::get(); } }; template <typename T, int Index> struct generate_base : boost::unordered::detail::compressed_base<T, Index> { typedef compressed_base<T, Index> type; generate_base() : type() {} }; template <typename T1, typename T2> struct compressed : private boost::unordered::detail::generate_base<T1, 1>::type, private boost::unordered::detail::generate_base<T2, 2>::type { typedef typename generate_base<T1, 1>::type base1; typedef typename generate_base<T2, 2>::type base2; typedef T1 first_type; typedef T2 second_type; first_type& first() { return static_cast<base1*>(this)->get(); } first_type const& first() const { return static_cast<base1 const*>(this)->get(); } second_type& second() { return static_cast<base2*>(this)->get(); } second_type const& second() const { return static_cast<base2 const*>(this)->get(); } template <typename First, typename Second> compressed(First const& x1, Second const& x2) : base1(x1), base2(x2) { } compressed(compressed const& x) : base1(x.first()), base2(x.second()) {} compressed(compressed& x, move_tag m) : base1(x.first(), m), 
base2(x.second(), m) { } void assign(compressed const& x) { first() = x.first(); second() = x.second(); } void move_assign(compressed& x) { first() = std::move(x.first()); second() = std::move(x.second()); } void swap(compressed& x) { boost::core::invoke_swap(first(), x.first()); boost::core::invoke_swap(second(), x.second()); } private: // Prevent assignment just to make use of assign or // move_assign explicit. compressed& operator=(compressed const&); }; ////////////////////////////////////////////////////////////////////////// // pair_traits // // Used to get the types from a pair without instantiating it. template <typename Pair> struct pair_traits { typedef typename Pair::first_type first_type; typedef typename Pair::second_type second_type; }; template <typename T1, typename T2> struct pair_traits<std::pair<T1, T2> > { typedef T1 first_type; typedef T2 second_type; }; #if defined(BOOST_MSVC) #pragma warning(push) #pragma warning(disable : 4512) // assignment operator could not be generated. #pragma warning(disable : 4345) // behavior change: an object of POD type // constructed with an initializer of the form () // will be default-initialized. 
#endif ////////////////////////////////////////////////////////////////////////// // Bits and pieces for implementing traits template <typename T> typename std::add_lvalue_reference<T>::type make(); struct choice2 { typedef char (&type)[2]; }; struct choice1 : choice2 { typedef char (&type)[1]; }; choice1 choose(); typedef choice1::type yes_type; typedef choice2::type no_type; struct private_type { private_type const& operator,(int) const; }; template <typename T> no_type is_private_type(T const&); yes_type is_private_type(private_type const&); struct convert_from_anything { template <typename T> convert_from_anything(T const&); }; } // namespace detail } // namespace unordered } // namespace boost //////////////////////////////////////////////////////////////////////////////// // // Some utilities for implementing allocator_traits, but useful elsewhere so // they're always defined. namespace boost { namespace unordered { namespace detail { //////////////////////////////////////////////////////////////////////////// // Explicitly call a destructor #if defined(BOOST_MSVC) #pragma warning(push) #pragma warning(disable : 4100) // unreferenced formal parameter #endif namespace func { template <class T> inline void destroy(T* x) { x->~T(); } } // namespace func #if defined(BOOST_MSVC) #pragma warning(pop) #endif ////////////////////////////////////////////////////////////////////////// // value_base // // Space used to store values. 
template <typename ValueType> struct value_base
{
  typedef ValueType value_type;

  // Raw, suitably aligned storage for one value_type; the contained
  // object is constructed and destroyed explicitly by the owner.
  opt_storage<value_type> data_;

  value_base() : data_() {}

  void* address() { return this; }

  value_type& value() { return *(ValueType*)this; }

  value_type const& value() const { return *(ValueType const*)this; }

  value_type* value_ptr() { return (ValueType*)this; }

  value_type const* value_ptr() const { return (ValueType const*)this; }

private:
  value_base& operator=(value_base const&);
};

//////////////////////////////////////////////////////////////////////////
// optional
// TODO: Use std::optional when available.

template <typename T> class optional
{
  boost::unordered::detail::value_base<T> value_;
  bool has_value_;

  // Destroy the contained value if present, leaving *this empty.
  void destroy()
  {
    if (has_value_) {
      boost::unordered::detail::func::destroy(value_.value_ptr());
      has_value_ = false;
    }
  }

  // Move-construct our value from x's, then destroy x's value.
  // Precondition: *this is empty and x is engaged.
  void move(optional<T>& x)
  {
    BOOST_ASSERT(!has_value_ && x.has_value_);
    new (value_.value_ptr()) T(std::move(x.value_.value()));
    boost::unordered::detail::func::destroy(x.value_.value_ptr());
    has_value_ = true;
    x.has_value_ = false;
  }

public:
  optional() noexcept : has_value_(false) {}

  optional(optional const&) = delete;
  optional& operator=(optional const&) = delete;

  optional(optional<T>&& x) : has_value_(false)
  {
    if (x.has_value_) {
      move(x);
    }
  }

  explicit optional(T const& x) : has_value_(true)
  {
    new (value_.value_ptr()) T(x);
  }

  optional& operator=(optional<T>&& x)
  {
    destroy();
    if (x.has_value_) {
      move(x);
    }
    return *this;
  }

  ~optional() { destroy(); }

  bool has_value() const { return has_value_; }

  T& operator*() { return value_.value(); }
  T const& operator*() const { return value_.value(); }
  T* operator->() { return value_.value_ptr(); }
  T const* operator->() const { return value_.value_ptr(); }

  // Equal when both are empty, or both are engaged with equal values.
  bool operator==(optional<T> const& x) const
  {
    return has_value_ ? x.has_value_ && value_.value() == x.value_.value()
                      : !x.has_value_;
  }

  bool operator!=(optional<T> const& x) const { return !((*this) == x); }

  // Swap contents; handles the engaged/empty mixed case by moving.
  void swap(optional<T>& x)
  {
    if (has_value_ != x.has_value_) {
      if (has_value_) {
        x.move(*this);
      } else {
        move(x);
      }
    } else if (has_value_) {
      boost::core::invoke_swap(value_.value(), x.value_.value());
    }
  }

  friend void swap(optional<T>& x, optional<T>& y) { x.swap(y); }
};
} // namespace detail
} // namespace unordered
} // namespace boost

////////////////////////////////////////////////////////////////////////////////
//
// Allocator traits
//

namespace boost {
  namespace unordered {
    namespace detail {
      // Thin aliases over Boost.Core's allocator access utilities.
      template <typename Alloc>
      struct allocator_traits : boost::allocator_traits<Alloc>
      {
      };

      template <typename Alloc, typename T>
      struct rebind_wrap : boost::allocator_rebind<Alloc, T>
      {
      };
    } // namespace detail
  } // namespace unordered
} // namespace boost

namespace boost {
  namespace unordered {
    namespace detail {
      namespace func {
        ////////////////////////////////////////////////////////////////////////
        // Trait to check for piecewise construction.

        // value is true when A0 is std::piecewise_construct_t, detected via
        // overload ranking without instantiating anything.
        template <typename A0> struct use_piecewise
        {
          static choice1::type test(choice1, std::piecewise_construct_t);
          static choice2::type test(choice2, ...);

          enum
          {
            value = sizeof(choice1::type) ==
                    sizeof(test(choose(), boost::unordered::detail::make<A0>()))
          };
        };

        ////////////////////////////////////////////////////////////////////////
        // Construct from variadic parameters

        // Forward args to allocator-aware construction at address.
        template <typename Alloc, typename T, typename... Args>
        inline void construct_from_args(Alloc& alloc, T* address, Args&&... args)
        {
          boost::allocator_construct(
            alloc, address, std::forward<Args>(args)...);
        }

        // For backwards compatibility, implement a special case for
        // piecewise_construct with boost::tuple

        // value is true when A0 is a std::tuple (as opposed to boost::tuple).
        template <typename A0> struct detect_std_tuple
        {
          template <class...
Args>
          static choice1::type test(choice1, std::tuple<Args...> const&);
          static choice2::type test(choice2, ...);

          enum
          {
            value = sizeof(choice1::type) ==
                    sizeof(test(choose(), boost::unordered::detail::make<A0>()))
          };
        };

        // Special case for piecewise_construct

        // Build a std::tuple of lvalue references to the Is-th elements of
        // the given tuple; Args is the element list with null_type already
        // removed.
        template <template <class...> class Tuple, class... Args,
          std::size_t... Is, class... TupleArgs>
        std::tuple<typename std::add_lvalue_reference<Args>::type...>
        to_std_tuple_impl(boost::mp11::mp_list<Args...>,
          Tuple<TupleArgs...>& tuple, boost::mp11::index_sequence<Is...>)
        {
          (void)tuple; // unused when the index pack is empty
          using std::get;
          return std::tuple<typename std::add_lvalue_reference<Args>::type...>(
            get<Is>(tuple)...);
        }

        template <class T>
        using add_lvalue_reference_t =
          typename std::add_lvalue_reference<T>::type;

        // Convert a boost::tuple into a std::tuple of references, dropping
        // boost::tuples::null_type padding elements.
        template <template <class...> class Tuple, class... Args>
        boost::mp11::mp_transform<add_lvalue_reference_t,
          boost::mp11::mp_remove<std::tuple<Args...>,
            boost::tuples::null_type> >
        to_std_tuple(Tuple<Args...>& tuple)
        {
          using list = boost::mp11::mp_remove<boost::mp11::mp_list<Args...>,
            boost::tuples::null_type>;
          using list_size = boost::mp11::mp_size<list>;
          using index_seq = boost::mp11::make_index_sequence<list_size::value>;
          return to_std_tuple_impl(list{}, tuple, index_seq{});
        }

        // Piecewise pair construction where neither tuple argument is a
        // std::tuple (i.e. boost::tuple was passed): translate both to
        // std::tuple first.
        template <typename Alloc, typename A, typename B, typename A0,
          typename A1, typename A2>
        inline typename std::enable_if<use_piecewise<A0>::value &&
                                         !detect_std_tuple<A1>::value &&
                                         !detect_std_tuple<A2>::value,
          void>::type
        construct_from_args(
          Alloc& alloc, std::pair<A, B>* address, A0&&, A1&& a1, A2&& a2)
        {
          boost::allocator_construct(alloc, address, std::piecewise_construct,
            to_std_tuple(a1), to_std_tuple(a2));
        }
      } // namespace func
    } // namespace detail
  } // namespace unordered
} // namespace boost

namespace boost {
  namespace unordered {
    namespace detail {
      ///////////////////////////////////////////////////////////////////
      //
      // Node construction

      // RAII helper that allocates and default-constructs one node;
      // deallocates it on scope exit unless release() is called.
      template <typename NodeAlloc> struct node_constructor
      {
        typedef NodeAlloc node_allocator;
        typedef
boost::unordered::detail::allocator_traits<NodeAlloc>
          node_allocator_traits;
        typedef typename node_allocator_traits::value_type node;
        typedef typename node_allocator_traits::pointer node_pointer;
        typedef typename node::value_type value_type;

        node_allocator& alloc_;
        node_pointer node_; // owned until release(); null when empty

        node_constructor(node_allocator& n) : alloc_(n), node_() {}

        ~node_constructor();

        // Allocate and default-construct the node.
        // Precondition: no node is currently owned.
        void create_node();

        // no throw
        // Transfer ownership of the node to the caller.
        node_pointer release()
        {
          BOOST_ASSERT(node_);
          node_pointer p = node_;
          node_ = node_pointer();
          return p;
        }

      private:
        node_constructor(node_constructor const&);
        node_constructor& operator=(node_constructor const&);
      };

      // Destroy and deallocate the node if it was never released.
      template <typename Alloc> node_constructor<Alloc>::~node_constructor()
      {
        if (node_) {
          boost::unordered::detail::func::destroy(boost::to_address(node_));
          node_allocator_traits::deallocate(alloc_, node_, 1);
        }
      }

      template <typename Alloc> void node_constructor<Alloc>::create_node()
      {
        BOOST_ASSERT(!node_);
        node_ = node_allocator_traits::allocate(alloc_, 1);
        new ((void*)boost::to_address(node_)) node();
      }

      // RAII owner of a node whose value is already constructed: destroys
      // the value and deallocates the node unless release() is called.
      template <typename NodeAlloc> struct node_tmp
      {
        typedef typename boost::allocator_value_type<NodeAlloc>::type node;
        typedef typename boost::allocator_pointer<NodeAlloc>::type
          node_pointer;
        typedef typename node::value_type value_type;
        typedef typename boost::allocator_rebind<NodeAlloc, value_type>::type
          value_allocator;

        NodeAlloc& alloc_;
        node_pointer node_;

        explicit node_tmp(node_pointer n, NodeAlloc& a) : alloc_(a), node_(n)
        {
        }

        ~node_tmp();

        // no throw
        node_pointer release()
        {
          node_pointer p = node_;
          node_ = node_pointer();
          return p;
        }
      };

      template <typename Alloc> node_tmp<Alloc>::~node_tmp()
      {
        if (node_) {
          value_allocator val_alloc(alloc_);
          boost::allocator_destroy(val_alloc, node_->value_ptr());
          boost::allocator_deallocate(alloc_, node_, 1);
        }
      }
    } // namespace detail
  } // namespace unordered
} // namespace boost

namespace boost {
  namespace unordered {
    namespace detail {
      namespace func {
        // Some nicer construct_node functions, might try to
        // improve implementation later.

        // Allocate a node and construct its value from args; exception
        // safe via the node_constructor guard.
        template <typename Alloc, typename... Args>
        inline typename boost::allocator_pointer<Alloc>::type
        construct_node_from_args(Alloc& alloc, Args&&... args)
        {
          typedef typename boost::allocator_value_type<Alloc>::type node;
          typedef typename node::value_type value_type;
          typedef typename boost::allocator_rebind<Alloc, value_type>::type
            value_allocator;

          value_allocator val_alloc(alloc);

          node_constructor<Alloc> a(alloc);
          a.create_node();
          construct_from_args(
            val_alloc, a.node_->value_ptr(), std::forward<Args>(args)...);
          return a.release();
        }

        // Allocate a node and construct its value from a single argument.
        template <typename Alloc, typename U>
        inline typename boost::allocator_pointer<Alloc>::type construct_node(
          Alloc& alloc, U&& x)
        {
          node_constructor<Alloc> a(alloc);
          a.create_node();

          typedef typename boost::allocator_value_type<Alloc>::type node;
          typedef typename node::value_type value_type;
          typedef typename boost::allocator_rebind<Alloc, value_type>::type
            value_allocator;

          value_allocator val_alloc(alloc);

          boost::allocator_construct(
            val_alloc, a.node_->value_ptr(), std::forward<U>(x));
          return a.release();
        }

        // Map node: piecewise-construct {key, default-constructed mapped}.
        template <typename Alloc, typename Key>
        inline typename boost::allocator_pointer<Alloc>::type
        construct_node_pair(Alloc& alloc, Key&& k)
        {
          node_constructor<Alloc> a(alloc);
          a.create_node();

          typedef typename boost::allocator_value_type<Alloc>::type node;
          typedef typename node::value_type value_type;
          typedef typename boost::allocator_rebind<Alloc, value_type>::type
            value_allocator;

          value_allocator val_alloc(alloc);

          boost::allocator_construct(val_alloc, a.node_->value_ptr(),
            std::piecewise_construct,
            std::forward_as_tuple(std::forward<Key>(k)),
            std::forward_as_tuple());
          return a.release();
        }

        // Map node: piecewise-construct {key, mapped} from forwarded parts.
        template <typename Alloc, typename Key, typename Mapped>
        inline typename boost::allocator_pointer<Alloc>::type
        construct_node_pair(Alloc& alloc, Key&& k, Mapped&& m)
        {
          node_constructor<Alloc> a(alloc);
          a.create_node();

          typedef typename boost::allocator_value_type<Alloc>::type node;
          typedef typename node::value_type value_type;
typedef typename boost::allocator_rebind<Alloc, value_type>::type
            value_allocator;

          value_allocator val_alloc(alloc);

          boost::allocator_construct(val_alloc, a.node_->value_ptr(),
            std::piecewise_construct,
            std::forward_as_tuple(std::forward<Key>(k)),
            std::forward_as_tuple(std::forward<Mapped>(m)));
          return a.release();
        }

        // Map node: piecewise-construct {key, mapped(args...)}.
        template <typename Alloc, typename Key, typename... Args>
        inline typename boost::allocator_pointer<Alloc>::type
        construct_node_pair_from_args(Alloc& alloc, Key&& k, Args&&... args)
        {
          node_constructor<Alloc> a(alloc);
          a.create_node();

          typedef typename boost::allocator_value_type<Alloc>::type node;
          typedef typename node::value_type value_type;
          typedef typename boost::allocator_rebind<Alloc, value_type>::type
            value_allocator;

          value_allocator val_alloc(alloc);

          boost::allocator_construct(val_alloc, a.node_->value_ptr(),
            std::piecewise_construct,
            std::forward_as_tuple(std::forward<Key>(k)),
            std::forward_as_tuple(std::forward<Args>(args)...));
          return a.release();
        }

        // Dispatch on the container's value_type; the first (dummy pointer)
        // parameter exists only for overload selection. Sets construct the
        // value directly from the key...
        template <typename T, typename Alloc, typename Key>
        inline typename boost::allocator_pointer<Alloc>::type
        construct_node_from_key(T*, Alloc& alloc, Key&& k)
        {
          return construct_node(alloc, std::forward<Key>(k));
        }

        // ...while maps construct a {key, default mapped} pair.
        template <typename T, typename V, typename Alloc, typename Key>
        inline typename boost::allocator_pointer<Alloc>::type
        construct_node_from_key(std::pair<T const, V>*, Alloc& alloc, Key&& k)
        {
          return construct_node_pair(alloc, std::forward<Key>(k));
        }
      } // namespace func
    } // namespace detail
  } // namespace unordered
} // namespace boost

#if defined(BOOST_MSVC)
#pragma warning(pop)
#endif

namespace boost {
  namespace unordered {
    namespace detail {
      //////////////////////////////////////////////////////////////////////////
      // Functions
      //
      // This double buffers the storage for the hash function and key equality
      // predicate in order to have exception safe copy/swap. To do so,
      // use 'construct_spare' to construct in the spare space, and then when
      // ready to use 'switch_functions' to switch to the new functions.
      // If an exception is thrown between these two calls, use
      // 'cleanup_spare_functions' to destroy the unused constructed functions.

#if defined(_GLIBCXX_HAVE_BUILTIN_LAUNDER)
      // gcc-12 warns when accessing the `current_functions` of our `functions`
      // class below with `-Wmaybe-unitialized`. By laundering the pointer, we
      // silence the warning and assure the compiler that a valid object exists
      // in that region of storage. This warning is also generated in C++03
      // which does not have `std::launder`. The compiler builtin is always
      // available, regardless of the C++ standard used when compiling.
      template <class T> T* launder(T* p) noexcept
      {
        return __builtin_launder(p);
      }
#else
      template <class T> T* launder(T* p) noexcept { return p; }
#endif

      // Two slots of (hash, equality) pairs: the low bit of current_ selects
      // the active slot, bit 1 records that the spare slot is constructed.
      template <class H, class P> class functions
      {
      public:
        static const bool nothrow_move_assignable =
          std::is_nothrow_move_assignable<H>::value &&
          std::is_nothrow_move_assignable<P>::value;
        static const bool nothrow_move_constructible =
          std::is_nothrow_move_constructible<H>::value &&
          std::is_nothrow_move_constructible<P>::value;
        static const bool nothrow_swappable =
          boost::unordered::detail::is_nothrow_swappable<H>::value &&
          boost::unordered::detail::is_nothrow_swappable<P>::value;

      private:
        functions& operator=(functions const&);

        typedef compressed<H, P> function_pair;

        unsigned char current_; // 0/1 - Currently active functions
                                // +2 - Both constructed
        opt_storage<function_pair> funcs_[2];

      public:
        functions(H const& hf, P const& eq) : current_(0)
        {
          construct_functions(current_, hf, eq);
        }

        functions(functions const& bf) : current_(0)
        {
          construct_functions(current_, bf.current_functions());
        }

        functions(functions& bf, boost::unordered::detail::move_tag)
            : current_(0)
        {
          construct_functions(current_, bf.current_functions(),
            std::integral_constant<bool, nothrow_move_constructible>());
        }
~functions()
        {
          BOOST_ASSERT(!(current_ & 2)); // spare slot must not be live here
          destroy_functions(current_);
        }

        H const& hash_function() const { return current_functions().first(); }
        P const& key_eq() const { return current_functions().second(); }

        function_pair const& current_functions() const
        {
          return *::boost::unordered::detail::launder(
            static_cast<function_pair const*>(
              static_cast<void const*>(funcs_[current_ & 1].address())));
        }

        function_pair& current_functions()
        {
          return *::boost::unordered::detail::launder(
            static_cast<function_pair*>(
              static_cast<void*>(funcs_[current_ & 1].address())));
        }

        // Copy-construct f into the inactive slot and mark it live.
        void construct_spare_functions(function_pair const& f)
        {
          BOOST_ASSERT(!(current_ & 2));
          construct_functions(current_ ^ 1, f);
          current_ |= 2;
        }

        // Destroy the spare slot if constructed (exception cleanup path).
        void cleanup_spare_functions()
        {
          if (current_ & 2) {
            current_ = static_cast<unsigned char>(current_ & 1);
            destroy_functions(current_ ^ 1);
          }
        }

        // Make the spare slot active and destroy the previously active one.
        void switch_functions()
        {
          BOOST_ASSERT(current_ & 2);
          destroy_functions(static_cast<unsigned char>(current_ & 1));
          current_ ^= 3; // flip active bit, clear the spare bit
        }

      private:
        void construct_functions(unsigned char which, H const& hf,
          P const& eq)
        {
          BOOST_ASSERT(!(which & 2));
          new ((void*)&funcs_[which]) function_pair(hf, eq);
        }

        void construct_functions(
          unsigned char which, function_pair const& f, std::false_type = {})
        {
          BOOST_ASSERT(!(which & 2));
          new ((void*)&funcs_[which]) function_pair(f);
        }

        void construct_functions(
          unsigned char which, function_pair& f, std::true_type)
        {
          BOOST_ASSERT(!(which & 2));
          new ((void*)&funcs_[which])
            function_pair(f, boost::unordered::detail::move_tag());
        }

        void destroy_functions(unsigned char which)
        {
          BOOST_ASSERT(!(which & 2));
          boost::unordered::detail::func::destroy(
            (function_pair*)(&funcs_[which]));
        }
      };

#if defined(BOOST_MSVC)
#pragma warning(push)
#pragma warning(disable : 4127) // conditional expression is constant
#endif

      //////////////////////////////////////////////////////////////////////////
      // convert double to std::size_t
      // (clamps to the maximum representable std::size_t)
      inline std::size_t double_to_size(double f)
      {
        return f >=
                   static_cast<double>(
                     (std::numeric_limits<std::size_t>::max)())
                 ? (std::numeric_limits<std::size_t>::max)()
                 : static_cast<std::size_t>(f);
      }

      //////////////////////////////////////////////////////////////////////////
      // iterator definitions

      namespace iterator_detail {
        template <class Node, class Bucket> class c_iterator;

        // Mutable forward iterator: walks a bucket's node chain, then hops
        // to the next non-empty bucket.
        template <class Node, class Bucket> class iterator
        {
        public:
          typedef typename Node::value_type value_type;
          typedef value_type element_type;
          typedef value_type* pointer;
          typedef value_type& reference;
          typedef std::ptrdiff_t difference_type;
          typedef std::forward_iterator_tag iterator_category;

          iterator() : p(), itb() {}

          reference operator*() const noexcept { return dereference(); }
          pointer operator->() const noexcept
          {
            pointer x = std::addressof(p->value());
            return x;
          }

          iterator& operator++() noexcept
          {
            increment();
            return *this;
          }

          iterator operator++(int) noexcept
          {
            iterator old = *this;
            increment();
            return old;
          }

          bool operator==(iterator const& other) const noexcept
          {
            return equal(other);
          }

          bool operator!=(iterator const& other) const noexcept
          {
            return !equal(other);
          }

          bool operator==(
            boost::unordered::detail::iterator_detail::c_iterator<Node,
              Bucket> const& other) const noexcept
          {
            return equal(other);
          }

          bool operator!=(
            boost::unordered::detail::iterator_detail::c_iterator<Node,
              Bucket> const& other) const noexcept
          {
            return !equal(other);
          }

        private:
          typedef typename Node::node_pointer node_pointer;
          typedef grouped_bucket_iterator<Bucket> bucket_iterator;

          node_pointer p;      // current node (null means end)
          bucket_iterator itb; // bucket containing p

          template <class Types>
          friend struct boost::unordered::detail::table;
          template <class N, class B> friend class c_iterator;

          iterator(node_pointer p_, bucket_iterator itb_) : p(p_), itb(itb_)
          {
          }

          value_type& dereference() const noexcept { return p->value(); }

          bool equal(const iterator& x) const noexcept { return (p == x.p); }

          bool equal(
            const boost::unordered::detail::iterator_detail::c_iterator<Node,
              Bucket>& x) const noexcept
          {
            return (p == x.p);
          }

          // Advance to the next node, moving to the next bucket when the
          // current chain is exhausted.
          void increment() noexcept
          {
            p =
p->next;
            if (!p) {
              p = (++itb)->next;
            }
          }

          // Register the iterator's node and bucket with the serialization
          // archive so shared addresses are tracked consistently.
          template <typename Archive>
          friend void serialization_track(Archive& ar, const iterator& x)
          {
            if (x.p) {
              track_address(ar, x.p);
              serialization_track(ar, x.itb);
            }
          }

          friend class boost::serialization::access;

          template <typename Archive>
          void serialize(Archive& ar, unsigned int)
          {
            if (!p)
              itb = bucket_iterator();
            serialize_tracked_address(ar, p);
            ar& core::make_nvp("bucket_iterator", itb);
          }
        };

        // Const forward iterator; implicitly constructible from the mutable
        // iterator.
        template <class Node, class Bucket> class c_iterator
        {
        public:
          typedef typename Node::value_type value_type;
          typedef value_type const element_type;
          typedef value_type const* pointer;
          typedef value_type const& reference;
          typedef std::ptrdiff_t difference_type;
          typedef std::forward_iterator_tag iterator_category;

          c_iterator() : p(), itb() {}
          c_iterator(iterator<Node, Bucket> it) : p(it.p), itb(it.itb) {}

          reference operator*() const noexcept { return dereference(); }
          pointer operator->() const noexcept
          {
            pointer x = std::addressof(p->value());
            return x;
          }

          c_iterator& operator++() noexcept
          {
            increment();
            return *this;
          }

          c_iterator operator++(int) noexcept
          {
            c_iterator old = *this;
            increment();
            return old;
          }

          bool operator==(c_iterator const& other) const noexcept
          {
            return equal(other);
          }

          bool operator!=(c_iterator const& other) const noexcept
          {
            return !equal(other);
          }

          bool operator==(
            boost::unordered::detail::iterator_detail::iterator<Node, Bucket>
              const& other) const noexcept
          {
            return equal(other);
          }

          bool operator!=(
            boost::unordered::detail::iterator_detail::iterator<Node, Bucket>
              const& other) const noexcept
          {
            return !equal(other);
          }

        private:
          typedef typename Node::node_pointer node_pointer;
          typedef grouped_bucket_iterator<Bucket> bucket_iterator;

          node_pointer p;
          bucket_iterator itb;

          template <class Types>
          friend struct boost::unordered::detail::table;
          template <class, class> friend class iterator;

          c_iterator(node_pointer p_, bucket_iterator itb_)
              : p(p_), itb(itb_)
          {
          }

          value_type const& dereference() const noexcept
          {
            return p->value();
}

          bool equal(const c_iterator& x) const noexcept
          {
            return (p == x.p);
          }

          // Advance to the next node, moving to the next bucket when the
          // current chain is exhausted.
          void increment() noexcept
          {
            p = p->next;
            if (!p) {
              p = (++itb)->next;
            }
          }

          template <typename Archive>
          friend void serialization_track(Archive& ar, const c_iterator& x)
          {
            if (x.p) {
              track_address(ar, x.p);
              serialization_track(ar, x.itb);
            }
          }

          friend class boost::serialization::access;

          template <typename Archive>
          void serialize(Archive& ar, unsigned int)
          {
            if (!p)
              itb = bucket_iterator();
            serialize_tracked_address(ar, p);
            ar& core::make_nvp("bucket_iterator", itb);
          }
        };
      } // namespace iterator_detail

      //////////////////////////////////////////////////////////////////////////
      // table structure used by the containers

      // Hash-table engine shared by the unordered containers; inherits the
      // double-buffered hasher/predicate storage from functions<>.
      template <typename Types>
      struct table
          : boost::unordered::detail::functions<typename Types::hasher,
              typename Types::key_equal>
      {
      private:
        table(table const&);
        table& operator=(table const&);

      public:
        typedef typename Types::hasher hasher;
        typedef typename Types::key_equal key_equal;
        typedef typename Types::const_key_type const_key_type;
        typedef typename Types::extractor extractor;
        typedef typename Types::value_type value_type;
        typedef typename Types::table table_impl;

        typedef boost::unordered::detail::functions<typename Types::hasher,
          typename Types::key_equal>
          functions;

        typedef typename Types::value_allocator value_allocator;
        typedef typename boost::allocator_void_pointer<value_allocator>::type
          void_pointer;

        typedef node<value_type, void_pointer> node_type;

        typedef boost::unordered::detail::grouped_bucket_array<
          bucket<node_type, void_pointer>, value_allocator, prime_fmod_size<> >
          bucket_array_type;

        typedef
          typename bucket_array_type::node_allocator_type node_allocator_type;
        typedef typename boost::allocator_pointer<node_allocator_type>::type
          node_pointer;

        typedef boost::unordered::detail::node_constructor<node_allocator_type>
          node_constructor;
        typedef boost::unordered::detail::node_tmp<node_allocator_type>
          node_tmp;

        typedef typename bucket_array_type::bucket_type bucket_type;

        typedef
typename bucket_array_type::iterator bucket_iterator;
        typedef typename bucket_array_type::local_iterator l_iterator;
        typedef
          typename bucket_array_type::const_local_iterator cl_iterator;

        typedef std::size_t size_type;

        typedef iterator_detail::iterator<node_type, bucket_type> iterator;
        typedef
          iterator_detail::c_iterator<node_type, bucket_type> c_iterator;

        typedef std::pair<iterator, bool> emplace_return;

        ////////////////////////////////////////////////////////////////////////
        // Members

        std::size_t size_;     // number of stored elements
        float mlf_;            // max load factor
        std::size_t max_load_; // size threshold derived from mlf_ and buckets
        bucket_array_type buckets_;

      public:
        ////////////////////////////////////////////////////////////////////////
        // Data access

        size_type bucket_count() const { return buckets_.bucket_count(); }

        // Advance past the group of elements equivalent to k starting at n
        // (supports the non-unique containers).
        template <class Key>
        iterator next_group(Key const& k, c_iterator n) const
        {
          c_iterator last = this->end();
          while (n != last && this->key_eq()(k, extractor::extract(*n))) {
            ++n;
          }
          return iterator(n.p, n.itb);
        }

        // Count elements equivalent to k; stops scanning once the matching
        // run in the bucket chain ends (matches are adjacent).
        template <class Key> std::size_t group_count(Key const& k) const
        {
          if (size_ == 0) {
            return 0;
          }
          std::size_t c = 0;
          std::size_t const key_hash = this->hash(k);
          bucket_iterator itb = buckets_.at(buckets_.position(key_hash));

          bool found = false;

          for (node_pointer pos = itb->next; pos; pos = pos->next) {
            if (this->key_eq()(k, this->get_key(pos))) {
              ++c;
              found = true;
            } else if (found) {
              break;
            }
          }
          return c;
        }

        node_allocator_type const& node_alloc() const
        {
          return buckets_.get_node_allocator();
        }

        node_allocator_type& node_alloc()
        {
          return buckets_.get_node_allocator();
        }

        // Largest bucket count the size policy can represent given what the
        // allocator can provide.
        std::size_t max_bucket_count() const
        {
          typedef typename bucket_array_type::size_policy size_policy;
          return size_policy::size(size_policy::size_index(
            boost::allocator_max_size(this->node_alloc())));
        }

        iterator begin() const
        {
          if (size_ == 0) {
            return end();
          }

          bucket_iterator itb = buckets_.begin();
          return iterator(itb->next, itb);
        }

        iterator end() const { return iterator(); }

        // Local iterator over a single bucket.
        l_iterator begin(std::size_t bucket_index) const
        {
          return buckets_.begin(bucket_index);
        }
std::size_t hash_to_bucket(std::size_t hash_value) const
        {
          return buckets_.position(hash_value);
        }

        // Count the elements in one bucket by walking its node chain.
        std::size_t bucket_size(std::size_t index) const
        {
          std::size_t count = 0;
          if (size_ > 0) {
            bucket_iterator itb = buckets_.at(index);
            node_pointer n = itb->next;
            while (n) {
              ++count;
              n = n->next;
            }
          }
          return count;
        }

        ////////////////////////////////////////////////////////////////////////
        // Load methods

        void recalculate_max_load()
        {
          // From 6.3.1/13:
          // Only resize when size >= mlf_ * count
          std::size_t const bc = buckets_.bucket_count();

          // it's important we do the `bc == 0` check here because the `mlf_`
          // can be specified to be infinity. The operation `n * INF` is `INF`
          // for all `n > 0` but NaN for `n == 0`.
          //
          max_load_ = bc == 0 ? 0
                              : boost::unordered::detail::double_to_size(
                                  static_cast<double>(mlf_) *
                                  static_cast<double>(bc));
        }

        void max_load_factor(float z)
        {
          BOOST_ASSERT(z > 0);
          mlf_ = (std::max)(z, minimum_max_load_factor);
          recalculate_max_load();
        }

        ////////////////////////////////////////////////////////////////////////
        // Constructors

        table()
            : functions(hasher(), key_equal()), size_(0), mlf_(1.0f),
              max_load_(0)
        {
        }

        table(std::size_t num_buckets, hasher const& hf, key_equal const& eq,
          value_allocator const& a)
            : functions(hf, eq), size_(0), mlf_(1.0f), max_load_(0),
              buckets_(num_buckets, a)
        {
          recalculate_max_load();
        }

        // Copy layout and functions but not the elements; the caller copies
        // the elements afterwards.
        table(table const& x, value_allocator const& a)
            : functions(x), size_(0), mlf_(x.mlf_), max_load_(0),
              buckets_(x.size_, a)
        {
          recalculate_max_load();
        }

        // Move construction: steal the bucket array and element count.
        table(table& x, boost::unordered::detail::move_tag m)
            : functions(x, m), size_(x.size_), mlf_(x.mlf_),
              max_load_(x.max_load_), buckets_(std::move(x.buckets_))
        {
          x.size_ = 0;
          x.max_load_ = 0;
        }

        // Move with a (possibly different) allocator: sets up empty buckets;
        // the caller moves the elements over.
        table(table& x, value_allocator const& a,
          boost::unordered::detail::move_tag m)
            : functions(x, m), size_(0), mlf_(x.mlf_), max_load_(0),
              buckets_(x.bucket_count(), a)
        {
          recalculate_max_load();
        }

        ////////////////////////////////////////////////////////////////////////
        // Swap and Move

        void
swap_allocators(table& other, std::false_type)
        {
          boost::unordered::detail::func::ignore_unused_variable_warning(
            other);

          // According to 23.2.1.8, if propagate_on_container_swap is
          // false the behaviour is undefined unless the allocators
          // are equal.
          BOOST_ASSERT(node_alloc() == other.node_alloc());
        }

        // Not nothrow swappable: double-buffer both tables' functions so
        // the swap has the strong exception guarantee.
        void swap(table& x, std::false_type)
        {
          if (this == &x) {
            return;
          }

          this->construct_spare_functions(x.current_functions());
          BOOST_TRY
          {
            x.construct_spare_functions(this->current_functions());
          }
          BOOST_CATCH(...)
          {
            this->cleanup_spare_functions();
            BOOST_RETHROW
          }
          BOOST_CATCH_END
          this->switch_functions();
          x.switch_functions();

          buckets_.swap(x.buckets_);
          boost::core::invoke_swap(size_, x.size_);
          std::swap(mlf_, x.mlf_);
          std::swap(max_load_, x.max_load_);
        }

        // Nothrow swappable
        void swap(table& x, std::true_type)
        {
          buckets_.swap(x.buckets_);
          boost::core::invoke_swap(size_, x.size_);
          std::swap(mlf_, x.mlf_);
          std::swap(max_load_, x.max_load_);
          this->current_functions().swap(x.current_functions());
        }

        // Only swaps the allocators if propagate_on_container_swap.
        // If not propagate_on_container_swap and allocators aren't
        // equal, behaviour is undefined.
        void swap(table& x)
        {
          BOOST_ASSERT(boost::allocator_propagate_on_container_swap<
                         node_allocator_type>::type::value ||
                       node_alloc() == x.node_alloc());
          swap(
            x, std::integral_constant<bool, functions::nothrow_swappable>());
        }

        // Only call with nodes allocated with the correct allocator, or
        // one that is equal to it. (Can't assert because other's
        // allocators might have already been moved).
        void move_buckets_from(table& other)
        {
          buckets_ = std::move(other.buckets_);
          size_ = other.size_;
          max_load_ = other.max_load_;

          other.size_ = 0;
          other.max_load_ = 0;
        }

        // For use in the constructor when allocators might be different.
// Move src's elements into *this. Equal allocators let us steal the
// whole bucket array; otherwise each value is move-constructed into a
// new node allocated with our allocator.
void move_construct_buckets(table& src)
{
  if (this->node_alloc() == src.node_alloc()) {
    move_buckets_from(src);
    return;
  }

  if (src.size_ == 0) {
    return;
  }

  BOOST_ASSERT(buckets_.bucket_count() == src.buckets_.bucket_count());

  this->reserve(src.size_);
  for (iterator pos = src.begin(); pos != src.end(); ++pos) {
    // node_tmp owns the new node and destroys it if anything below
    // throws before release().
    node_tmp b(detail::func::construct_node(
                 this->node_alloc(), std::move(pos.p->value())),
      this->node_alloc());

    const_key_type& k = this->get_key(b.node_);
    std::size_t key_hash = this->hash(k);

    bucket_iterator itb = buckets_.at(buckets_.position(key_hash));
    buckets_.insert_node(itb, b.release());
    ++size_;
  }
}

////////////////////////////////////////////////////////////////////////
// Delete/destruct

~table() { delete_buckets(); }

// Destroy the stored value, destroy the node, and return its memory to
// the node allocator.
void delete_node(node_pointer p)
{
  node_allocator_type alloc = this->node_alloc();
  value_allocator val_alloc(alloc);

  boost::allocator_destroy(val_alloc, p->value_ptr());
  boost::unordered::detail::func::destroy(boost::to_address(p));
  boost::allocator_deallocate(alloc, p, 1);
}

// Delete every node, then release the bucket array itself.
void delete_buckets()
{
  iterator pos = begin(), last = this->end();
  for (; pos != last;) {
    node_pointer p = pos.p;
    bucket_iterator itb = pos.itb;
    ++pos;
    buckets_.extract_node(itb, p);
    delete_node(p);
    --size_;
  }

  buckets_.clear();
}

////////////////////////////////////////////////////////////////////////
// Clear

void clear_impl();

////////////////////////////////////////////////////////////////////////
// Assignment

// Copy assignment; dispatches on propagate_on_container_copy_assignment.
template <typename UniqueType>
void assign(table const& x, UniqueType is_unique)
{
  typedef typename boost::allocator_propagate_on_container_copy_assignment<
    node_allocator_type>::type pocca;

  if (this != &x) {
    assign(x, is_unique, std::integral_constant<bool, pocca::value>());
  }
}

// Copy assignment, allocator kept.
template <typename UniqueType>
void assign(table const& x, UniqueType is_unique, std::false_type)
{
  // Strong exception safety.
  this->construct_spare_functions(x.current_functions());
  BOOST_TRY
  {
    mlf_ = x.mlf_;
    recalculate_max_load();

    this->reserve_for_insert(x.size_);
    this->clear_impl();
  }
  BOOST_CATCH(...)
  {
    this->cleanup_spare_functions();
    BOOST_RETHROW
  }
  BOOST_CATCH_END
  this->switch_functions();
  copy_buckets(x, is_unique);
}

// Copy assignment, allocator propagated.
template <typename UniqueType>
void assign(table const& x, UniqueType is_unique, std::true_type)
{
  if (node_alloc() == x.node_alloc()) {
    buckets_.reset_allocator(x.node_alloc());
    assign(x, is_unique, std::false_type());
  } else {
    bucket_array_type new_buckets(x.size_, x.node_alloc());
    this->construct_spare_functions(x.current_functions());
    this->switch_functions();

    // Delete everything with current allocators before assigning
    // the new ones.
    delete_buckets();
    buckets_.reset_allocator(x.node_alloc());
    buckets_ = std::move(new_buckets);

    // Copy over other data, all no throw.
    mlf_ = x.mlf_;
    reserve(x.size_);

    // Finally copy the elements.
    if (x.size_) {
      copy_buckets(x, is_unique);
    }
  }
}

// Move assignment; dispatches on propagate_on_container_move_assignment.
template <typename UniqueType>
void move_assign(table& x, UniqueType is_unique)
{
  if (this != &x) {
    move_assign(x, is_unique,
      std::integral_constant<bool,
        boost::allocator_propagate_on_container_move_assignment<
          node_allocator_type>::type::value>());
  }
}

// Propagate allocator
template <typename UniqueType>
void move_assign(table& x, UniqueType, std::true_type)
{
  if (!functions::nothrow_move_assignable) {
    this->construct_spare_functions(x.current_functions());
    this->switch_functions();
  } else {
    this->current_functions().move_assign(x.current_functions());
  }
  delete_buckets();

  buckets_.reset_allocator(x.buckets_.get_node_allocator());

  mlf_ = x.mlf_;
  move_buckets_from(x);
}

// Don't propagate allocator
template <typename UniqueType>
void move_assign(table& x, UniqueType is_unique, std::false_type)
{
  if (node_alloc() == x.node_alloc()) {
    move_assign_equal_alloc(x);
  } else {
    move_assign_realloc(x, is_unique);
  }
}

// Equal allocators: nodes can be adopted wholesale.
void move_assign_equal_alloc(table& x)
{
  if (!functions::nothrow_move_assignable) {
    this->construct_spare_functions(x.current_functions());
    this->switch_functions();
  } else {
    this->current_functions().move_assign(x.current_functions());
  }
  delete_buckets();
  mlf_ = x.mlf_;
  move_buckets_from(x);
}

// Unequal allocators: values are moved element-by-element into nodes
// allocated with our own allocator.
template <typename UniqueType>
void move_assign_realloc(table& x, UniqueType is_unique)
{
  this->construct_spare_functions(x.current_functions());
  BOOST_TRY
  {
    mlf_ = x.mlf_;
    recalculate_max_load();
    if (x.size_ > 0) {
      this->reserve_for_insert(x.size_);
    }
    this->clear_impl();
  }
  BOOST_CATCH(...)
  {
    this->cleanup_spare_functions();
    BOOST_RETHROW
  }
  BOOST_CATCH_END
  this->switch_functions();
  move_assign_buckets(x, is_unique);
}

// Accessors

const_key_type& get_key(node_pointer n) const
{
  return extractor::extract(n->value());
}

template <class Key> std::size_t hash(Key const& k) const
{
  return this->hash_function()(k);
}

// Find Node

// Linear scan of bucket `itb` for a node whose key compares equal to
// `x`; returns a null pointer when absent.
template <class Key>
node_pointer find_node_impl(Key const& x, bucket_iterator itb) const
{
  node_pointer p = node_pointer();
  if (itb != buckets_.end()) {
    key_equal const& pred = this->key_eq();
    p = itb->next;
    for (; p; p = p->next) {
      if (pred(x, extractor::extract(p->value()))) {
        break;
      }
    }
  }
  return p;
}

template <class Key> node_pointer find_node(Key const& k) const
{
  std::size_t const key_hash = this->hash(k);
  return find_node_impl(k, buckets_.at(buckets_.position(key_hash)));
}

node_pointer find_node(const_key_type& k, bucket_iterator itb) const
{
  return find_node_impl(k, itb);
}

template <class Key> iterator find(Key const& k) const
{
  return this->transparent_find(k, this->hash_function(), this->key_eq());
}

// Lookup with caller-supplied hash and equality (heterogeneous lookup).
template <class Key, class Hash, class Pred>
inline iterator transparent_find(
  Key const& k, Hash const& h, Pred const& pred) const
{
  if (size_ > 0) {
    std::size_t const key_hash = h(k);
    bucket_iterator itb = buckets_.at(buckets_.position(key_hash));
    for (node_pointer p = itb->next; p; p = p->next) {
      if (BOOST_LIKELY(pred(k, extractor::extract(p->value())))) {
        return iterator(p, itb);
      }
    }
  }

  return this->end();
}

// Return the address of the link that points at the first matching
// node, so the caller can unlink in O(1); null when not found.
template <class Key>
node_pointer* find_prev(Key const& key, bucket_iterator itb)
{
  if (size_ > 0) {
    key_equal pred = this->key_eq();
    for (node_pointer* pp = std::addressof(itb->next); *pp;
         pp = std::addressof((*pp)->next)) {
      if (pred(key, extractor::extract((*pp)->value()))) {
        return pp;
      }
    }
  }
  typedef node_pointer* node_pointer_pointer;
  return node_pointer_pointer();
}

// Extract and erase

// Unlink (but do not delete) the node matching `k`; null when absent.
template <class Key> node_pointer extract_by_key_impl(Key const& k)
{
  iterator it = this->find(k);
  if (it == this->end()) {
    return node_pointer();
  }

  buckets_.extract_node(it.itb, it.p);
  --size_;

  return it.p;
}

// Reserve and rehash

// Re-home node `p` into `new_buckets` during a rehash (node is reused,
// not reallocated).
void transfer_node(
  node_pointer p, bucket_type&, bucket_array_type& new_buckets)
{
  const_key_type& key = extractor::extract(p->value());
  std::size_t const h = this->hash(key);
  bucket_iterator itnewb = new_buckets.at(new_buckets.position(h));
  new_buckets.insert_node(itnewb, p);
}

// Smallest bucket count keeping load factor <= mlf for num_elements.
static std::size_t min_buckets(std::size_t num_elements, float mlf)
{
  std::size_t num_buckets = static_cast<std::size_t>(
    std::ceil(static_cast<float>(num_elements) / mlf));

  if (num_buckets == 0 && num_elements > 0) { // mlf == inf
    num_buckets = 1;
  }

  return num_buckets;
}

void rehash(std::size_t);
void reserve(std::size_t);
void reserve_for_insert(std::size_t);
void rehash_impl(std::size_t);

////////////////////////////////////////////////////////////////////////
// Unique keys

// equals

// Element-wise equality for unique-key containers: same size and every
// element of *this found (by key) with an equal value in other.
bool equals_unique(table const& other) const
{
  if (this->size_ != other.size_)
    return false;

  c_iterator pos = this->begin();
  c_iterator last = this->end();

  while (pos != last) {
    node_pointer p = pos.p;
    node_pointer p2 = other.find_node(this->get_key(p));
    if (!p2 || !(p->value() == p2->value())) {
      return false;
    }
    ++pos;
  }

  return true;
}

// Emplace/Insert

template <typename... Args>
iterator emplace_hint_unique(
  c_iterator hint, const_key_type& k, Args&&... args)
{
  if (hint.p && this->key_eq()(k, this->get_key(hint.p))) {
    return iterator(hint.p, hint.itb);
  } else {
    return emplace_unique(k, std::forward<Args>(args)...).first;
  }
}

template <typename... Args>
emplace_return emplace_unique(const_key_type& k, Args&&... args)
{
  std::size_t key_hash = this->hash(k);
  bucket_iterator itb = buckets_.at(buckets_.position(key_hash));

  node_pointer pos = this->find_node_impl(k, itb);

  if (pos) {
    return emplace_return(iterator(pos, itb), false);
  } else {
    // Construct the node before any rehash so a throwing value
    // constructor leaves the table unchanged.
    node_tmp b(boost::unordered::detail::func::construct_node_from_args(
                 this->node_alloc(), std::forward<Args>(args)...),
      this->node_alloc());

    if (size_ + 1 > max_load_) {
      reserve(size_ + 1);
      // rehash invalidated the bucket iterator; re-locate the bucket
      itb = buckets_.at(buckets_.position(key_hash));
    }

    node_pointer p = b.release();

    buckets_.insert_node(itb, p);
    ++size_;
    return emplace_return(iterator(p, itb), true);
  }
}

// No key available up front: build the node first, then extract its
// key for the duplicate check.
template <typename... Args>
iterator emplace_hint_unique(c_iterator hint, no_key, Args&&... args)
{
  node_tmp b(boost::unordered::detail::func::construct_node_from_args(
               this->node_alloc(), std::forward<Args>(args)...),
    this->node_alloc());

  const_key_type& k = this->get_key(b.node_);

  if (hint.p && this->key_eq()(k, this->get_key(hint.p))) {
    return iterator(hint.p, hint.itb);
  }

  std::size_t const key_hash = this->hash(k);
  bucket_iterator itb = buckets_.at(buckets_.position(key_hash));

  node_pointer p = this->find_node_impl(k, itb);
  if (p) {
    return iterator(p, itb);
  }

  if (size_ + 1 > max_load_) {
    this->reserve(size_ + 1);
    itb = buckets_.at(buckets_.position(key_hash));
  }

  p = b.release();
  buckets_.insert_node(itb, p);
  ++size_;
  return iterator(p, itb);
}

template <typename... Args>
emplace_return emplace_unique(no_key, Args&&... args)
{
  // Build the node first; its key is only known afterwards.
  node_tmp b(boost::unordered::detail::func::construct_node_from_args(
               this->node_alloc(), std::forward<Args>(args)...),
    this->node_alloc());

  const_key_type& k = this->get_key(b.node_);
  std::size_t key_hash = this->hash(k);

  bucket_iterator itb = buckets_.at(buckets_.position(key_hash));
  node_pointer pos = this->find_node_impl(k, itb);

  if (pos) {
    return emplace_return(iterator(pos, itb), false);
  } else {
    if (size_ + 1 > max_load_) {
      reserve(size_ + 1);
      itb = buckets_.at(buckets_.position(key_hash));
    }

    node_pointer p = b.release();
    buckets_.insert_node(itb, p);
    ++size_;
    return emplace_return(iterator(p, itb), true);
  }
}

// Argument is convertible to key_type: materialise the key once and
// reuse it for both the lookup and the node construction.
template <typename K, typename V>
emplace_return emplace_unique(converting_key, K&& k, V&& v)
{
  using alloc_cted =
    allocator_constructed<node_allocator_type, typename Types::key_type>;
  alloc_cted key(this->node_alloc(), std::forward<K>(k));
  return emplace_unique(
    key.value(), std::move(key.value()), std::forward<V>(v));
}

template <typename Key> emplace_return try_emplace_unique(Key&& k)
{
  std::size_t key_hash = this->hash(k);
  bucket_iterator itb = buckets_.at(buckets_.position(key_hash));

  node_pointer pos = this->find_node_impl(k, itb);

  if (pos) {
    return emplace_return(iterator(pos, itb), false);
  } else {
    node_allocator_type alloc = node_alloc();

    // `dispatch` only selects the construct_node_from_key overload.
    value_type* dispatch = BOOST_NULLPTR;

    node_tmp tmp(detail::func::construct_node_from_key(
                   dispatch, alloc, std::forward<Key>(k)),
      alloc);

    if (size_ + 1 > max_load_) {
      reserve(size_ + 1);
      itb = buckets_.at(buckets_.position(key_hash));
    }

    node_pointer p = tmp.release();
    buckets_.insert_node(itb, p);

    ++size_;
    return emplace_return(iterator(p, itb), true);
  }
}

template <typename Key>
iterator try_emplace_hint_unique(c_iterator hint, Key&& k)
{
  if (hint.p && this->key_eq()(extractor::extract(*hint), k)) {
    return iterator(hint.p, hint.itb);
  } else {
    return try_emplace_unique(k).first;
  }
}

template <typename Key, typename... Args>
emplace_return try_emplace_unique(Key&& k, Args&&... args)
{
  std::size_t key_hash = this->hash(k);
  bucket_iterator itb = buckets_.at(buckets_.position(key_hash));

  node_pointer pos = this->find_node_impl(k, itb);

  if (pos) {
    // Key already present: args are not consumed.
    return emplace_return(iterator(pos, itb), false);
  }

  node_tmp b(
    boost::unordered::detail::func::construct_node_pair_from_args(
      this->node_alloc(), k, std::forward<Args>(args)...),
    this->node_alloc());

  if (size_ + 1 > max_load_) {
    reserve(size_ + 1);
    itb = buckets_.at(buckets_.position(key_hash));
  }

  pos = b.release();

  buckets_.insert_node(itb, pos);
  ++size_;
  return emplace_return(iterator(pos, itb), true);
}

template <typename Key, typename... Args>
iterator try_emplace_hint_unique(c_iterator hint, Key&& k, Args&&... args)
{
  if (hint.p && this->key_eq()(hint->first, k)) {
    return iterator(hint.p, hint.itb);
  } else {
    return try_emplace_unique(k, std::forward<Args>(args)...).first;
  }
}

// Overwrite the mapped value when the key exists, otherwise insert a
// new pair (second member of the return indicates insertion).
template <typename Key, typename M>
emplace_return insert_or_assign_unique(Key&& k, M&& obj)
{
  std::size_t key_hash = this->hash(k);
  bucket_iterator itb = buckets_.at(buckets_.position(key_hash));

  node_pointer p = this->find_node_impl(k, itb);
  if (p) {
    p->value().second = std::forward<M>(obj);
    return emplace_return(iterator(p, itb), false);
  }

  node_tmp b(
    boost::unordered::detail::func::construct_node_pair(
      this->node_alloc(), std::forward<Key>(k), std::forward<M>(obj)),
    node_alloc());

  if (size_ + 1 > max_load_) {
    reserve(size_ + 1);
    itb = buckets_.at(buckets_.position(key_hash));
  }

  p = b.release();
  buckets_.insert_node(itb, p);
  ++size_;
  return emplace_return(iterator(p, itb), true);
}

// Insert an extracted node handle (unique keys). On failure the handle
// keeps ownership of the node and is returned in result.node.
template <typename NodeType, typename InsertReturnType>
void move_insert_node_type_unique(NodeType& np, InsertReturnType& result)
{
  if (!np) {
    result.position = this->end();
    result.inserted = false;
    return;
  }

  const_key_type& k = this->get_key(np.ptr_);
  std::size_t const key_hash = this->hash(k);
  bucket_iterator itb = buckets_.at(buckets_.position(key_hash));
  node_pointer p = this->find_node_impl(k, itb);

  if (p) {
    iterator pos(p, itb);
    result.node = std::move(np);
    result.position = pos;
    result.inserted = false;
    return;
  }

  this->reserve_for_insert(size_ + 1);

  p = np.ptr_;
  itb = buckets_.at(buckets_.position(key_hash));

  buckets_.insert_node(itb, p);
  np.ptr_ = node_pointer();
  ++size_;

  result.position = iterator(p, itb);
  result.inserted = true;
}

template <typename NodeType>
iterator move_insert_node_type_with_hint_unique(
  c_iterator hint, NodeType& np)
{
  if (!np) {
    return this->end();
  }

  const_key_type& k = this->get_key(np.ptr_);
  if (hint.p && this->key_eq()(k, this->get_key(hint.p))) {
    return iterator(hint.p, hint.itb);
  }

  std::size_t const key_hash = this->hash(k);
  bucket_iterator itb = buckets_.at(buckets_.position(key_hash));
  node_pointer p = this->find_node_impl(k, itb);
  if (p) {
    return iterator(p, itb);
  }

  p = np.ptr_;

  if (size_ + 1 > max_load_) {
    this->reserve(size_ + 1);
    itb = buckets_.at(buckets_.position(key_hash));
  }

  buckets_.insert_node(itb, p);
  ++size_;
  np.ptr_ = node_pointer();
  return iterator(p, itb);
}

// Splice nodes from `other` whose keys are not already present here;
// requires equal allocators (nodes are relinked, never copied).
template <typename Types2>
void merge_unique(boost::unordered::detail::table<Types2>& other)
{
  typedef boost::unordered::detail::table<Types2> other_table;
  BOOST_UNORDERED_STATIC_ASSERT(
    (std::is_same<node_type, typename other_table::node_type>::value));
  BOOST_ASSERT(this->node_alloc() == other.node_alloc());

  if (other.size_ == 0) {
    return;
  }

  this->reserve_for_insert(size_ + other.size_);

  iterator last = other.end();
  for (iterator pos = other.begin(); pos != last;) {
    const_key_type& key = other.get_key(pos.p);
    std::size_t const key_hash = this->hash(key);

    bucket_iterator itb = buckets_.at(buckets_.position(key_hash));

    if (this->find_node_impl(key, itb)) {
      // Duplicate key: node stays in `other`.
      ++pos;
      continue;
    }

    // Advance before extracting so `pos` stays valid.
    iterator old = pos;
    ++pos;

    node_pointer p = other.extract_by_iterator_unique(old);
    buckets_.insert_node(itb, p);
    ++size_;
  }
}

////////////////////////////////////////////////////////////////////////
// Insert range methods
//
// if hash function throws, or inserting > 1 element, basic exception
// safety; strong otherwise

template <class InputIt>
void insert_range_unique(no_key, InputIt i, InputIt j)
{
  hasher const& hf = this->hash_function();
  node_allocator_type alloc = this->node_alloc();

  for (; i != j; ++i) {
    // Construct first: the key is only known from the built value.
    node_tmp tmp(detail::func::construct_node(alloc, *i), alloc);

    value_type const& value = tmp.node_->value();
    const_key_type& key = extractor::extract(value);
    std::size_t const h = hf(key);

    bucket_iterator itb = buckets_.at(buckets_.position(h));
    node_pointer it = find_node_impl(key, itb);
    if (it) {
      // Duplicate: node_tmp destroys the speculative node.
      continue;
    }

    if (size_ + 1 > max_load_) {
      reserve(size_ + 1);
      itb = buckets_.at(buckets_.position(h));
    }

    node_pointer nptr = tmp.release();
    buckets_.insert_node(itb, nptr);
    ++size_;
  }
}

////////////////////////////////////////////////////////////////////////
// Extract

inline node_pointer extract_by_iterator_unique(c_iterator i)
{
  node_pointer p = i.p;
  bucket_iterator itb = i.itb;
  buckets_.extract_node(itb, p);
  --size_;
  return p;
}

////////////////////////////////////////////////////////////////////////
// Erase
//

// Erase at most one element matching `k`; returns the number erased.
template <class Key> std::size_t erase_key_unique_impl(Key const& k)
{
  bucket_iterator itb = buckets_.at(buckets_.position(this->hash(k)));
  node_pointer* pp = this->find_prev(k, itb);
  if (!pp) {
    return 0;
  }

  node_pointer p = *pp;
  buckets_.extract_node_after(itb, pp);
  this->delete_node(p);
  --size_;
  return 1;
}

// Erase the element at `pos`, returning an iterator to its successor.
iterator erase_node(c_iterator pos)
{
  c_iterator next = pos;
  ++next;

  // Walk the bucket to find the link that points at pos.p.
  bucket_iterator itb = pos.itb;
  node_pointer* pp = std::addressof(itb->next);
  while (*pp != pos.p) {
    pp = std::addressof((*pp)->next);
  }

  buckets_.extract_node_after(itb, pp);
  this->delete_node(pos.p);
  --size_;

  return iterator(next.p, next.itb);
}

iterator erase_nodes_range(c_iterator first, c_iterator last)
{
  if (first == last) {
    return iterator(last.p, last.itb);
  }

  // though `first` stores a copy of a pointer to a node, we wish to
  // mutate the pointers stored internally by the singly-linked list in
  // each bucket group so we have to retrieve it manually by iterating
  //
  bucket_iterator itb = first.itb;
  node_pointer* pp = std::addressof(itb->next);
  while (*pp != first.p) {
    pp = std::addressof((*pp)->next);
  }

  while (*pp != last.p) {
    node_pointer p = *pp;
    *pp = (*pp)->next;

    this->delete_node(p);
    --size_;

    // When the current bucket is exhausted, move on to the next one,
    // unlinking the bucket if it became empty.
    bool const at_end = !(*pp);
    bool const is_empty_bucket = !itb->next;
    if (at_end) {
      if (is_empty_bucket) {
        buckets_.unlink_bucket(itb++);
      } else {
        ++itb;
      }
      pp = std::addressof(itb->next);
    }
  }

  return iterator(last.p, last.itb);
}

////////////////////////////////////////////////////////////////////////
// fill_buckets_unique

// Copy src's elements into an (asserted-empty) *this.
void copy_buckets(table const& src, std::true_type)
{
  BOOST_ASSERT(size_ == 0);

  this->reserve_for_insert(src.size_);

  for (iterator pos = src.begin(); pos != src.end(); ++pos) {
    value_type const& value = *pos;
    const_key_type& key = extractor::extract(value);
    std::size_t const key_hash = this->hash(key);

    bucket_iterator itb = buckets_.at(buckets_.position(key_hash));

    node_allocator_type alloc = this->node_alloc();
    node_tmp tmp(detail::func::construct_node(alloc, value), alloc);

    buckets_.insert_node(itb, tmp.release());
    ++size_;
  }
}

// Move src's values into new nodes in an (asserted-empty) *this;
// caller guarantees enough capacity (max_load_ >= src.size_).
void move_assign_buckets(table& src, std::true_type)
{
  BOOST_ASSERT(size_ == 0);
  BOOST_ASSERT(max_load_ >= src.size_);

  iterator last = src.end();
  node_allocator_type alloc = this->node_alloc();

  for (iterator pos = src.begin(); pos != last; ++pos) {
    value_type value = std::move(*pos);

    const_key_type& key = extractor::extract(value);
    std::size_t const key_hash = this->hash(key);

    bucket_iterator itb = buckets_.at(buckets_.position(key_hash));

    node_tmp tmp(
      detail::func::construct_node(alloc, std::move(value)), alloc);

    buckets_.insert_node(itb, tmp.release());
    ++size_;
  }
}

////////////////////////////////////////////////////////////////////////
// Equivalent keys

// Equality

// Multiset-style equality: each equal-key group must match as an
// unordered collection of values.
bool equals_equiv(table const& other) const
{
  if (this->size_ != other.size_)
    return false;

  iterator last = this->end();
  for (iterator n1 = this->begin(); n1 != last;) {
    const_key_type& k = extractor::extract(*n1);
    iterator n2 = other.find(k);

    if (n2 == other.end()) {
      return false;
    }

    iterator end1 = this->next_group(k, n1);
    iterator end2 = other.next_group(k, n2);

    if (!group_equals_equiv(n1, end1, n2, end2)) {
      return false;
    }

    n1 = end1;
  }

  return true;
}

// Compare two equal-key groups as multisets of values.
static bool group_equals_equiv(
  iterator n1, iterator end1, iterator n2, iterator end2)
{
  // Fast path: identical order.
  for (;;) {
    if (*n1 != *n2)
      break;

    ++n1;
    ++n2;

    if (n1 == end1)
      return n2 == end2;
    if (n2 == end2)
      return false;
  }

  // Check the remaining lengths match before the quadratic comparison.
  for (iterator n1a = n1, n2a = n2;;) {
    ++n1a;
    ++n2a;

    if (n1a == end1) {
      if (n2a == end2)
        break;
      else
        return false;
    }

    if (n2a == end2)
      return false;
  }

  // Count occurrences of each distinct value in both remainders.
  iterator start = n1;
  for (; n1 != end1; ++n1) {
    value_type const& v = *n1;
    if (!find_equiv(start, n1, v)) {
      std::size_t matches = count_equal_equiv(n2, end2, v);
      if (!matches)
        return false;

      iterator t = n1;
      if (matches != 1 + count_equal_equiv(++t, end1, v))
        return false;
    }
  }

  return true;
}

static bool find_equiv(iterator n, iterator last, value_type const& v)
{
  for (; n != last; ++n)
    if (*n == v)
      return true;
  return false;
}

static std::size_t count_equal_equiv(
  iterator n, iterator last, value_type const& v)
{
  std::size_t count = 0;
  for (; n != last; ++n)
    if (*n == v)
      ++count;
  return count;
}

// Emplace/Insert

// Insert an already-constructed node (equivalent keys), grouping it
// next to any existing equal-key run; takes ownership of `n`.
iterator emplace_equiv(node_pointer n)
{
  node_tmp a(n, this->node_alloc());

  const_key_type& k = this->get_key(a.node_);
  std::size_t key_hash = this->hash(k);

  bucket_iterator itb = buckets_.at(buckets_.position(key_hash));
  node_pointer hint = this->find_node_impl(k, itb);

  if (size_ + 1 > max_load_) {
    this->reserve(size_ + 1);
    itb = buckets_.at(buckets_.position(key_hash));
  }
  node_pointer p = a.release();
  buckets_.insert_node_hint(itb, p, hint);
  ++size_;

  return iterator(p, itb);
}

// As emplace_equiv, but first tries the caller's hint; the hash is
// only computed when actually needed.
iterator emplace_hint_equiv(c_iterator hint, node_pointer n)
{
  node_tmp a(n, this->node_alloc());

  const_key_type& k = this->get_key(a.node_);

  bucket_iterator itb = hint.itb;
  node_pointer p = hint.p;
  std::size_t key_hash = 0u;

  bool const needs_rehash = (size_ + 1 > max_load_);
  bool const usable_hint = (p && this->key_eq()(k, this->get_key(p)));

  if (!usable_hint) {
    key_hash = this->hash(k);
    itb = buckets_.at(buckets_.position(key_hash));
    p = this->find_node_impl(k, itb);
  } else if (usable_hint && needs_rehash) {
    // Hint is good but a rehash will invalidate itb; remember the hash
    // so the bucket can be re-located afterwards.
    key_hash = this->hash(k);
  }

  if (needs_rehash) {
    this->reserve(size_ + 1);
    itb = buckets_.at(buckets_.position(key_hash));
  }

  a.release();
  buckets_.insert_node_hint(itb, n, p);
  ++size_;

  return iterator(n, itb);
}

// Insert without checking the load factor; the caller must already
// have reserved capacity.
void emplace_no_rehash_equiv(node_pointer n)
{
  BOOST_ASSERT(size_ + 1 <= max_load_);

  node_tmp a(n, this->node_alloc());

  const_key_type& k = this->get_key(a.node_);
  std::size_t key_hash = this->hash(k);

  bucket_iterator itb = buckets_.at(buckets_.position(key_hash));
  node_pointer hint = this->find_node_impl(k, itb);

  node_pointer p = a.release();
  buckets_.insert_node_hint(itb, p, hint);
  ++size_;
}

// Insert an extracted node handle (equivalent keys); always succeeds
// when the handle is non-empty.
template <typename NodeType>
iterator move_insert_node_type_equiv(NodeType& np)
{
  iterator result;

  if (np) {
    this->reserve_for_insert(size_ + 1);

    const_key_type& k = this->get_key(np.ptr_);
    std::size_t key_hash = this->hash(k);

    bucket_iterator itb = buckets_.at(buckets_.position(key_hash));
    node_pointer hint = this->find_node_impl(k, itb);

    buckets_.insert_node_hint(itb, np.ptr_, hint);
    ++size_;

    result = iterator(np.ptr_, itb);
    np.ptr_ = node_pointer();
  }

  return result;
}

template <typename NodeType>
iterator move_insert_node_type_with_hint_equiv(
  c_iterator hint, NodeType& np)
{
  iterator result;

  if (np) {
    bucket_iterator itb = hint.itb;
    node_pointer pos = hint.p;

    const_key_type& k = this->get_key(np.ptr_);
    std::size_t key_hash = this->hash(k);

    if (size_ + 1 > max_load_) {
      this->reserve(size_ + 1);
      itb = buckets_.at(buckets_.position(key_hash));
    }

    if (hint.p && this->key_eq()(k, this->get_key(hint.p))) {
      // Hint matches: insert next to it directly.
    } else {
      itb = buckets_.at(buckets_.position(key_hash));
      pos = this->find_node_impl(k, itb);
    }

    buckets_.insert_node_hint(itb, np.ptr_, pos);
    ++size_;

    result = iterator(np.ptr_, itb);
    np.ptr_ = node_pointer();
  }

  return result;
}
////////////////////////////////////////////////////////////////////////
// Insert range methods

// if hash function throws, or inserting > 1 element, basic exception
// safety. Strong otherwise

// Forward iterators: the distance is known, so reserve once up front.
template <class I>
typename boost::unordered::detail::enable_if_forward<I, void>::type
insert_range_equiv(I i, I j)
{
  if (i == j)
    return;

  std::size_t distance = static_cast<std::size_t>(std::distance(i, j));
  if (distance == 1) {
    emplace_equiv(boost::unordered::detail::func::construct_node(
      this->node_alloc(), *i));
  } else {
    // Only require basic exception safety here
    this->reserve_for_insert(size_ + distance);

    for (; i != j; ++i) {
      emplace_no_rehash_equiv(
        boost::unordered::detail::func::construct_node(
          this->node_alloc(), *i));
    }
  }
}

// Input iterators: the distance is unknown; insert one at a time.
template <class I>
typename boost::unordered::detail::disable_if_forward<I, void>::type
insert_range_equiv(I i, I j)
{
  for (; i != j; ++i) {
    emplace_equiv(boost::unordered::detail::func::construct_node(
      this->node_alloc(), *i));
  }
}

////////////////////////////////////////////////////////////////////////
// Extract

inline node_pointer extract_by_iterator_equiv(c_iterator n)
{
  node_pointer p = n.p;
  bucket_iterator itb = n.itb;
  buckets_.extract_node(itb, p);
  --size_;
  return p;
}

////////////////////////////////////////////////////////////////////////
// Erase
//
// no throw

// Erase every element whose key matches `k` (equal keys are stored
// contiguously within the bucket); returns the number erased.
template <class Key> std::size_t erase_key_equiv_impl(Key const& k)
{
  std::size_t deleted_count = 0;

  bucket_iterator itb = buckets_.at(buckets_.position(this->hash(k)));
  node_pointer* pp = this->find_prev(k, itb);
  if (pp) {
    while (*pp && this->key_eq()(this->get_key(*pp), k)) {
      node_pointer p = *pp;
      *pp = (*pp)->next;

      this->delete_node(p);
      --size_;
      ++deleted_count;
    }

    if (!itb->next) {
      buckets_.unlink_bucket(itb);
    }
  }
  return deleted_count;
}

std::size_t erase_key_equiv(const_key_type& k)
{
  return this->erase_key_equiv_impl(k);
}

////////////////////////////////////////////////////////////////////////
// fill_buckets

// Copy src's elements into an (asserted-empty) *this, keeping
// equal-key runs grouped via the insertion hint.
void copy_buckets(table const& src, std::false_type)
{
  BOOST_ASSERT(size_ == 0);

  this->reserve_for_insert(src.size_);

  iterator last = src.end();
  for (iterator pos = src.begin(); pos != last; ++pos) {
    value_type const& value = *pos;
    const_key_type& key = extractor::extract(value);
    std::size_t const key_hash = this->hash(key);

    bucket_iterator itb = buckets_.at(buckets_.position(key_hash));

    node_allocator_type alloc = this->node_alloc();
    node_tmp tmp(detail::func::construct_node(alloc, value), alloc);

    node_pointer hint = this->find_node_impl(key, itb);
    buckets_.insert_node_hint(itb, tmp.release(), hint);
    ++size_;
  }
}

// Move src's values into new nodes in an (asserted-empty) *this;
// caller guarantees enough capacity (max_load_ >= src.size_).
void move_assign_buckets(table& src, std::false_type)
{
  BOOST_ASSERT(size_ == 0);
  BOOST_ASSERT(max_load_ >= src.size_);

  iterator last = src.end();
  node_allocator_type alloc = this->node_alloc();

  for (iterator pos = src.begin(); pos != last; ++pos) {
    value_type value = std::move(*pos);

    const_key_type& key = extractor::extract(value);
    std::size_t const key_hash = this->hash(key);

    bucket_iterator itb = buckets_.at(buckets_.position(key_hash));

    node_pointer hint = this->find_node_impl(key, itb);
    node_tmp tmp(
      detail::func::construct_node(alloc, std::move(value)), alloc);

    buckets_.insert_node_hint(itb, tmp.release(), hint);
    ++size_;
  }
}
};

//////////////////////////////////////////////////////////////////////////
// Clear

// Delete every node, keeping the bucket array allocated.
template <typename Types> inline void table<Types>::clear_impl()
{
  bucket_iterator itb = buckets_.begin(), last = buckets_.end();
  for (; itb != last;) {
    // Grab the next bucket first: extracting the last node of a bucket
    // may unlink the bucket itself.
    bucket_iterator next_itb = itb;
    ++next_itb;

    node_pointer* pp = std::addressof(itb->next);
    while (*pp) {
      node_pointer p = *pp;
      buckets_.extract_node_after(itb, pp);
      this->delete_node(p);
      --size_;
    }
    itb = next_itb;
  }
}

//////////////////////////////////////////////////////////////////////////
// Reserve & Rehash

// if hash function throws, basic exception safety
// strong otherwise.
// Rebucket so the table has at least num_buckets buckets, clamped up to the
// minimum count required by the current size and max load factor.  No-op when
// the resulting bucket count equals the current one.
template <typename Types>
inline void table<Types>::rehash(std::size_t num_buckets)
{
  num_buckets = buckets_.bucket_count_for(
    (std::max)(min_buckets(size_, mlf_), num_buckets));

  if (num_buckets != this->bucket_count()) {
    this->rehash_impl(num_buckets);
  }
}

// Ensure capacity for num_elements elements given the current max load
// factor, delegating the clamping logic to rehash().
template <class Types>
inline void table<Types>::reserve(std::size_t num_elements)
{
  std::size_t num_buckets = min_buckets(num_elements, mlf_);
  this->rehash(num_buckets);
}

// Grow only when the pending insertion would exceed max_load_.  The +1.0f
// guards against float truncation so the new bucket count is strictly
// sufficient for num_elements at load factor mlf_.
template <class Types>
inline void table<Types>::reserve_for_insert(std::size_t num_elements)
{
  if (num_elements > max_load_) {
    std::size_t const num_buckets = static_cast<std::size_t>(
      1.0f + std::ceil(static_cast<float>(num_elements) / mlf_));

    this->rehash_impl(num_buckets);
  }
}

// Move every node into a freshly allocated bucket array.  If transferring a
// node throws (hash may be recomputed), the catch block destroys the nodes
// already moved into new_buckets and unlinks now-empty buckets, leaving the
// table valid (basic exception safety); otherwise the swap at the end is
// non-throwing, so the operation is strong when the hasher does not throw.
template <class Types>
inline void table<Types>::rehash_impl(std::size_t num_buckets)
{
  bucket_array_type new_buckets(
    num_buckets, buckets_.get_allocator());

  BOOST_TRY
  {
    boost::unordered::detail::span<bucket_type> bspan = buckets_.raw();

    bucket_type* pos = bspan.data;
    std::size_t size = bspan.size;
    bucket_type* last = pos + size;

    for (; pos != last; ++pos) {
      bucket_type& b = *pos;
      for (node_pointer p = b.next; p;) {
        node_pointer next_p = p->next;
        transfer_node(p, b, new_buckets);
        p = next_p;
        // keep the source bucket's list consistent in case the next
        // transfer throws and we have to roll back
        b.next = p;
      }
    }
  }
  BOOST_CATCH(...)
  {
    // roll back: destroy everything that already made it into new_buckets
    for (bucket_iterator pos = new_buckets.begin();
         pos != new_buckets.end(); ++pos) {
      bucket_type& b = *pos;
      for (node_pointer p = b.next; p;) {
        node_pointer next_p = p->next;
        delete_node(p);
        --size_;
        p = next_p;
      }
    }
    buckets_.unlink_empty_buckets();
    BOOST_RETHROW
  }
  BOOST_CATCH_END

  buckets_ = std::move(new_buckets);
  recalculate_max_load();
}

#if defined(BOOST_MSVC)
#pragma warning(pop)
#endif

////////////////////////////////////////////////////////////////////////
// key extractors
//
// no throw
//
// 'extract_key' is called with the emplace parameters to return a key
// if one is available, or 'no_key' if one isn't and the key will need
// to be constructed.
// This could be done by overloading the emplace implementation for the
// different cases, but that's a bit tricky on compilers without variadic
// templates.

// Compile-time test: does T (an emplace argument type) convert to Key?
// If so, 'type' is Key const& so the argument itself can serve as the
// lookup key; otherwise 'type' is no_key.
template <typename Key, typename T> struct is_key
{
  template <typename T2> static choice1::type test(T2 const&);
  static choice2::type test(Key const&);

  enum
  {
    value = sizeof(test(boost::unordered::detail::make<T>())) ==
            sizeof(choice2::type)
  };

  typedef typename std::conditional<value, Key const&, no_key>::type type;
};

// For sets the whole value is the key; any other argument combination
// yields no_key (the node must be constructed before hashing).
template <class ValueType> struct set_extractor
{
  typedef ValueType value_type;
  typedef ValueType key_type;

  static key_type const& extract(value_type const& v) { return v; }

  static key_type const& extract(value_type&& v) { return v; }

  static no_key extract() { return no_key(); }

  template <class Arg> static no_key extract(Arg const&)
  {
    return no_key();
  }

  template <class Arg1, class Arg2, class... Args>
  static no_key extract(Arg1 const&, Arg2 const&, Args const&...)
  {
    return no_key();
  }
};

// For maps the key is the (non-const) first member of the pair.  The
// overloads recover a usable key from the common emplace argument shapes
// (value_type, compatible pairs, key + one argument, piecewise_construct
// with a one-element key tuple); everything else yields no_key.
template <class ValueType> struct map_extractor
{
  typedef ValueType value_type;
  typedef typename std::remove_const<typename boost::unordered::detail::
      pair_traits<ValueType>::first_type>::type key_type;

  static key_type const& extract(value_type const& v) { return v.first; }

  template <class Second>
  static key_type const& extract(std::pair<key_type, Second> const& v)
  {
    return v.first;
  }

  template <class Second>
  static key_type const& extract(
    std::pair<key_type const, Second> const& v)
  {
    return v.first;
  }

  template <class Arg1>
  static key_type const& extract(key_type const& k, Arg1 const&)
  {
    return k;
  }

  static no_key extract() { return no_key(); }

  template <class Arg> static no_key extract(Arg const&)
  {
    return no_key();
  }

  // Two-argument emplace: if Arg1 is (or can become) the key type, report
  // converting_key so the caller may construct a temporary key; otherwise
  // no_key.
  template <class Arg1, class Arg2>
  static typename std::conditional<
    (is_similar<Arg1, key_type>::value ||
      is_complete_and_move_constructible<key_type>::value),
    converting_key, no_key>::type
  extract(Arg1 const&, Arg2 const&)
  {
    return {};
  }

  template <class Arg1, class Arg2, class Arg3, class... Args>
  static no_key extract(
    Arg1 const&, Arg2 const&, Arg3 const&, Args const&...)
  {
    return no_key();
  }

  // piecewise_construct with an empty key tuple: no key available.
  template <template <class...> class Tuple, typename T2>
  static no_key extract(
    std::piecewise_construct_t, Tuple<> const&, T2 const&)
  {
    return no_key();
  }

  // piecewise_construct with a single-element key tuple: use that element
  // as the key when it converts to key_type (SFINAE also rejects
  // boost::tuple's null_type padding).
  template <template <typename...> class Tuple, typename T, typename T2,
    typename... Args>
  static auto extract(
    std::piecewise_construct_t, Tuple<T, Args...> const& k, T2 const&) ->
    typename std::enable_if<
      !std::is_same<T, boost::tuples::null_type>::value,
      typename is_key<key_type, T>::type>::type
  {
    using std::get;
    return typename is_key<key_type, T>::type(get<0>(k));
  }
};

// Remove every element satisfying pred; returns the number of elements
// erased.  Uses erase(iterator) so iteration stays valid throughout.
template <class Container, class Predicate>
typename Container::size_type erase_if(Container& c, Predicate& pred)
{
  typedef typename Container::size_type size_type;
  typedef typename Container::iterator iterator;

  size_type const size = c.size();

  for (iterator pos = c.begin(), last = c.end(); pos != last;) {
    if (pred(*pos)) {
      pos = c.erase(pos);
    } else {
      ++pos;
    }
  }

  return (size - c.size());
}
} // namespace detail
} // namespace unordered
} // namespace boost

#endif
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/serialization_version.hpp
/* Copyright 2023 Joaquin M Lopez Munoz. * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) * * See https://www.boost.org/libs/unordered for library home page. */ #ifndef BOOST_UNORDERED_DETAIL_SERIALIZATION_VERSION_HPP #define BOOST_UNORDERED_DETAIL_SERIALIZATION_VERSION_HPP #include <boost/config.hpp> #include <boost/core/serialization.hpp> namespace boost{ namespace unordered{ namespace detail{ /* boost::serialization::load_construct_adl(ar,t,version) requires user code * to pass the serialization version for t, when this information is really * stored in the archive. serialization_version<T> circumvents this design * error by acting as a regular serializable type with the same serialization * version as T; loading/saving serialization_version<T> does nothing with * the archive data itself but captures the stored serialization version * at load() time. */ template<typename T> struct serialization_version { serialization_version(): value(boost::serialization::version<serialization_version>::value){} serialization_version& operator=(unsigned int x){value=x;return *this;}; operator unsigned int()const{return value;} private: friend class boost::serialization::access; template<class Archive> void serialize(Archive& ar,unsigned int version) { core::split_member(ar,*this,version); } template<class Archive> void save(Archive&,unsigned int)const{} template<class Archive> void load(Archive&,unsigned int version) { this->value=version; } unsigned int value; }; } /* namespace detail */ } /* namespace unordered */ namespace serialization{ template<typename T> struct version<boost::unordered::detail::serialization_version<T> > { BOOST_STATIC_CONSTANT(int,value=version<T>::value); }; } /* namespace serialization */ } /* namespace boost */ #endif
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/narrow_cast.hpp
/* Copyright 2022 Joaquin M Lopez Munoz.
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * See https://www.boost.org/libs/unordered for library home page.
 */

#ifndef BOOST_UNORDERED_DETAIL_NARROW_CAST_HPP
#define BOOST_UNORDERED_DETAIL_NARROW_CAST_HPP

#include <boost/unordered/detail/static_assert.hpp>
#include <boost/config.hpp>

#include <type_traits>

namespace boost{
namespace unordered{
namespace detail{

/* Explicit, checked-at-compile-time narrowing conversion between integral
 * types: To must be no wider than From (statically enforced); high-order
 * bits of x are simply discarded.
 */
template<typename To,typename From>
constexpr To narrow_cast(From x) noexcept
{
  BOOST_UNORDERED_STATIC_ASSERT(std::is_integral<From>::value);
  BOOST_UNORDERED_STATIC_ASSERT(std::is_integral<To>::value);
  BOOST_UNORDERED_STATIC_ASSERT(sizeof(From)>=sizeof(To));

  return static_cast<To>(
    x

#if defined(__MSVC_RUNTIME_CHECKS)
    /* Avoids VS's "Run-Time Check Failure #1 - A cast to a smaller data type
     * has caused a loss of data."  Masking with To's unsigned all-ones makes
     * the truncation explicit to the runtime checker.
     */
    &static_cast<typename std::make_unsigned<To>::type>(~static_cast<To>(0))
#endif
  );
}

} /* namespace detail */
} /* namespace unordered */
} /* namespace boost */

#endif
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/static_assert.hpp
// Copyright 2023 Christian Mazakas // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_DETAIL_STATIC_ASSERT_HPP #define BOOST_UNORDERED_DETAIL_STATIC_ASSERT_HPP #include <boost/config.hpp> #if defined(BOOST_HAS_PRAGMA_ONCE) #pragma once #endif #define BOOST_UNORDERED_STATIC_ASSERT(...) \ static_assert(__VA_ARGS__, #__VA_ARGS__) #endif // BOOST_UNORDERED_DETAIL_STATIC_ASSERT_HPP
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/allocator_constructed.hpp
/* Copyright 2024 Braden Ganetsky.
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * See https://www.boost.org/libs/unordered for library home page.
 */

#ifndef BOOST_UNORDERED_DETAIL_ALLOCATOR_CONSTRUCTED_HPP
#define BOOST_UNORDERED_DETAIL_ALLOCATOR_CONSTRUCTED_HPP

#include <boost/core/allocator_traits.hpp>
#include <boost/unordered/detail/opt_storage.hpp>

namespace boost {
  namespace unordered {
    namespace detail {

      /* Default construction policy: defer to allocator_traits-style
       * construct/destroy free functions. */
      struct allocator_policy
      {
        template <class Allocator, class T, class... Args>
        static void construct(Allocator& a, T* p, Args&&... args)
        {
          boost::allocator_construct(a, p, std::forward<Args>(args)...);
        }

        template <class Allocator, class T>
        static void destroy(Allocator& a, T* p)
        {
          boost::allocator_destroy(a, p);
        }
      };

      /* constructs a stack-based object with the given policy and
       * allocator; RAII wrapper — the destructor runs the policy's
       * destroy() on the in-place T. */
      template <class Allocator, class T, class Policy = allocator_policy>
      class allocator_constructed
      {
        opt_storage<T> storage; // raw, suitably aligned storage for T
        Allocator alloc;

      public:
        template <class... Args>
        allocator_constructed(Allocator const& alloc_, Args&&... args)
            : alloc(alloc_)
        {
          Policy::construct(
            alloc, storage.address(), std::forward<Args>(args)...);
        }

        ~allocator_constructed()
        {
          Policy::destroy(alloc, storage.address());
        }

        /* Reference to the constructed object (valid for the wrapper's
         * lifetime). */
        T& value() { return *storage.address(); }
      };

    } /* namespace detail */
  } /* namespace unordered */
} /* namespace boost */

#endif
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/bad_archive_exception.hpp
/* Copyright 2023 Joaquin M Lopez Munoz.
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * See https://www.boost.org/libs/unordered for library home page.
 */

#ifndef BOOST_UNORDERED_DETAIL_BAD_ARCHIVE_EXCEPTION_HPP
#define BOOST_UNORDERED_DETAIL_BAD_ARCHIVE_EXCEPTION_HPP

#include <stdexcept>

namespace boost{
namespace unordered{
namespace detail{

/* Thrown while loading a serialized container when the archive contents
 * are inconsistent (e.g. a duplicate element where uniqueness is
 * required). */
struct bad_archive_exception:std::runtime_error
{
  bad_archive_exception():std::runtime_error("Invalid or corrupted archive"){}
};

} /* namespace detail */
} /* namespace unordered */
} /* namespace boost */

#endif
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/map.hpp
// Copyright (C) 2005-2016 Daniel James // Copyright (C) 2022 Christian Mazakas // Copyright (C) 2024 Braden Ganetsky // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include <boost/unordered/detail/implementation.hpp> #include <boost/unordered/unordered_map_fwd.hpp> namespace boost { namespace unordered { namespace detail { template <typename A, typename K, typename M, typename H, typename P> struct map { typedef boost::unordered::detail::map<A, K, M, H, P> types; typedef std::pair<K const, M> value_type; typedef H hasher; typedef P key_equal; typedef K key_type; typedef K const const_key_type; typedef typename ::boost::unordered::detail::rebind_wrap<A, value_type>::type value_allocator; typedef boost::unordered::detail::allocator_traits<value_allocator> value_allocator_traits; typedef boost::unordered::detail::table<types> table; typedef boost::unordered::detail::map_extractor<value_type> extractor; typedef typename boost::allocator_void_pointer<value_allocator>::type void_pointer; typedef boost::unordered::node_handle_map< node<value_type, void_pointer>, K, M, A> node_type; typedef typename table::iterator iterator; typedef boost::unordered::insert_return_type_map<iterator, node_type> insert_return_type; }; template <typename K, typename M, typename H, typename P, typename A> class instantiate_map { typedef boost::unordered_map<K, M, H, P, A> container; container x; typename container::node_type node_type; typename container::insert_return_type insert_return_type; }; template <typename K, typename M, typename H, typename P, typename A> class instantiate_multimap { typedef boost::unordered_multimap<K, M, H, P, A> container; container x; typename container::node_type node_type; }; } } }
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/type_traits.hpp
// Copyright (C) 2022-2023 Christian Mazakas
// Copyright (C) 2024 Braden Ganetsky
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_UNORDERED_DETAIL_TYPE_TRAITS_HPP
#define BOOST_UNORDERED_DETAIL_TYPE_TRAITS_HPP

#include <boost/config.hpp>
#if defined(BOOST_HAS_PRAGMA_ONCE)
#pragma once
#endif

#include <boost/config/workaround.hpp>

#if !defined(BOOST_NO_CXX17_DEDUCTION_GUIDES)
#include <iterator>
#endif

#include <type_traits>
#include <utility>

// BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES: 1 when class template argument
// deduction guides can be provided (C++17 and up), 0 otherwise.

#if !defined(BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES)
#if !defined(BOOST_NO_CXX17_DEDUCTION_GUIDES)
#define BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES 1
#endif
#endif

#if !defined(BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES)
#define BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES 0
#endif

namespace boost {
  namespace unordered {
    namespace detail {
      // C++20 std::type_identity backport.
      template <class T> struct type_identity
      {
        using type = T;
      };

      // C++17 std::void_t backport (via make_void to sidestep CWG 1558).
      template <typename... Ts> struct make_void
      {
        typedef void type;
      };

      template <typename... Ts>
      using void_t = typename make_void<Ts...>::type;

      // Detects whether T is a complete type (sizeof(T) well-formed).
      template <class T, class = void> struct is_complete : std::false_type
      {
      };

      template <class T>
      struct is_complete<T, void_t<int[sizeof(T)]> > : std::true_type
      {
      };

      // Only queries move-constructibility when T is complete, avoiding a
      // hard error on incomplete types.
      template <class T>
      using is_complete_and_move_constructible =
        typename std::conditional<is_complete<T>::value,
          std::is_move_constructible<T>, std::false_type>::type;

#if BOOST_WORKAROUND(BOOST_LIBSTDCXX_VERSION, < 50000)
      /* std::is_trivially_default_constructible not provided */
      template <class T>
      struct is_trivially_default_constructible
          : public std::integral_constant<bool,
              std::is_default_constructible<T>::value &&
                std::has_trivial_default_constructor<T>::value>
      {
      };
#else
      using std::is_trivially_default_constructible;
#endif

#if BOOST_WORKAROUND(BOOST_LIBSTDCXX_VERSION, < 50000)
      /* std::is_trivially_copy_constructible not provided */
      template <class T>
      struct is_trivially_copy_constructible
          : public std::integral_constant<bool,
              std::is_copy_constructible<T>::value &&
                std::has_trivial_copy_constructor<T>::value>
      {
      };
#else
      using std::is_trivially_copy_constructible;
#endif

#if BOOST_WORKAROUND(BOOST_LIBSTDCXX_VERSION, < 50000)
      /* std::is_trivially_copy_assignable not provided */
      template <class T>
      struct is_trivially_copy_assignable
          : public std::integral_constant<bool,
              std::is_copy_assignable<T>::value &&
                std::has_trivial_copy_assign<T>::value>
      {
      };
#else
      using std::is_trivially_copy_assignable;
#endif

      namespace type_traits_detail {
        // ADL-aware is_nothrow_swappable detection (pre-C++17 backport):
        // the using-declaration makes std::swap visible alongside any
        // user-provided swap found by ADL.
        using std::swap;

        template <class T, class = void> struct is_nothrow_swappable_helper
        {
          constexpr static bool const value = false;
        };

        template <class T>
        struct is_nothrow_swappable_helper<T,
          void_t<decltype(swap(std::declval<T&>(), std::declval<T&>()))> >
        {
          constexpr static bool const value =
            noexcept(swap(std::declval<T&>(), std::declval<T&>()));
        };

      } // namespace type_traits_detail

      template <class T>
      struct is_nothrow_swappable
          : public std::integral_constant<bool,
              type_traits_detail::is_nothrow_swappable_helper<T>::value>
      {
      };

      ////////////////////////////////////////////////////////////////////////
      // Type checkers used for the transparent member functions added by
      // C++20 and up

      template <class, class = void> struct is_transparent
          : public std::false_type
      {
      };

      template <class T>
      struct is_transparent<T,
        boost::unordered::detail::void_t<typename T::is_transparent> >
          : public std::true_type
      {
      };

      // Heterogeneous lookup is allowed only when BOTH the hasher and the
      // equality predicate opt in via a nested is_transparent type.
      template <class, class Hash, class KeyEqual> struct are_transparent
      {
        static bool const value =
          is_transparent<Hash>::value && is_transparent<KeyEqual>::value;
      };

      // Additionally rejects key types convertible to (const_)iterator so
      // transparent overloads never hijack the iterator-taking overloads.
      template <class Key, class UnorderedMap> struct transparent_non_iterable
      {
        typedef typename UnorderedMap::hasher hash;
        typedef typename UnorderedMap::key_equal key_equal;
        typedef typename UnorderedMap::iterator iterator;
        typedef typename UnorderedMap::const_iterator const_iterator;

        static bool const value =
          are_transparent<Key, hash, key_equal>::value &&
          !std::is_convertible<Key, iterator>::value &&
          !std::is_convertible<Key, const_iterator>::value;
      };

      // C++20 std::remove_cvref_t backport.
      template <class T>
      using remove_cvref_t =
        typename std::remove_cv<typename std::remove_reference<T>::type>::type;

      // True when T and U are the same type modulo cv-ref qualification.
      template <class T, class U>
      using is_similar = std::is_same<remove_cvref_t<T>, remove_cvref_t<U> >;

      template <class, class...> struct is_similar_to_any : std::false_type
      {
      };

      template <class T, class U, class... Us>
      struct is_similar_to_any<T, U, Us...>
          : std::conditional<is_similar<T, U>::value, is_similar<T, U>,
              is_similar_to_any<T, Us...> >::type
      {
      };

#if BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES
      // https://eel.is/c++draft/container.requirements#container.alloc.reqmts-34
      // https://eel.is/c++draft/container.requirements#unord.req.general-243

      // Helpers that disambiguate deduction-guide overloads: per the
      // standard, anything non-integral may be an input iterator, anything
      // with value_type + allocate() is an allocator, etc.
      template <class InputIterator>
      constexpr bool const is_input_iterator_v =
        !std::is_integral<InputIterator>::value;

      template <class A, class = void> struct is_allocator
      {
        constexpr static bool const value = false;
      };

      template <class A>
      struct is_allocator<A,
        boost::unordered::detail::void_t<typename A::value_type,
          decltype(std::declval<A&>().allocate(std::size_t{}))> >
      {
        constexpr static bool const value = true;
      };

      template <class A>
      constexpr bool const is_allocator_v = is_allocator<A>::value;

      template <class H>
      constexpr bool const is_hash_v =
        !std::is_integral<H>::value && !is_allocator_v<H>;

      template <class P> constexpr bool const is_pred_v = !is_allocator_v<P>;

      // Deduce map key/mapped/allocator value types from an iterator's
      // pair-like value_type.
      template <typename T>
      using iter_key_t =
        typename std::iterator_traits<T>::value_type::first_type;
      template <typename T>
      using iter_val_t =
        typename std::iterator_traits<T>::value_type::second_type;
      template <typename T>
      using iter_to_alloc_t =
        typename std::pair<iter_key_t<T> const, iter_val_t<T> >;
#endif
    } // namespace detail
  } // namespace unordered
} // namespace boost

#endif // BOOST_UNORDERED_DETAIL_TYPE_TRAITS_HPP
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/opt_storage.hpp
// Copyright (C) 2023 Christian Mazakas // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_DETAIL_OPT_STORAGE_HPP #define BOOST_UNORDERED_DETAIL_OPT_STORAGE_HPP #include <boost/config.hpp> #include <memory> namespace boost { namespace unordered { namespace detail { template <class T> union opt_storage { BOOST_ATTRIBUTE_NO_UNIQUE_ADDRESS T t_; opt_storage() {} ~opt_storage() {} T* address() noexcept { return std::addressof(t_); } T const* address() const noexcept { return std::addressof(t_); } }; } // namespace detail } // namespace unordered } // namespace boost #endif // BOOST_UNORDERED_DETAIL_OPT_STORAGE_HPP
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/serialize_container.hpp
/* Copyright 2023 Joaquin M Lopez Munoz.
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * See https://www.boost.org/libs/unordered for library home page.
 */

#ifndef BOOST_UNORDERED_DETAIL_SERIALIZE_CONTAINER_HPP
#define BOOST_UNORDERED_DETAIL_SERIALIZE_CONTAINER_HPP

#include <boost/core/serialization.hpp>
#include <boost/throw_exception.hpp>
#include <boost/unordered/detail/archive_constructed.hpp>
#include <boost/unordered/detail/bad_archive_exception.hpp>
#include <boost/unordered/detail/serialization_version.hpp>
#include <cstddef>

namespace boost{
namespace unordered{
namespace detail{

/* serialize_container(ar,x,v) serializes any of the unordered associative
 * containers in Boost.Unordered. Iterator serialization is also supported
 * through the following protocol:
 * - At saving time, for each iterator it in [x.begin(),x.end()),
 *   serialization_track(ar,it) is ADL-called to instruct the archive to
 *   track the positions internally pointed to by the iterator via
 *   track_address().
 * - At loading time, these addresses are mapped to those of the equivalent
 *   reconstructed positions using again serialization_track(ar,it).
 * - Serializing an iterator reduces to serializing pointers to previously
 *   tracked addresses via serialize_address().
 */

/* Normalize insert()'s return: unique containers return pair<iterator,bool>,
 * non-unique ones a bare iterator (treated as always-successful). */
template<typename Iterator>
std::pair<Iterator,bool> adapt_insert_return_type(Iterator it)
{
  return std::pair<Iterator,bool>(it,true);
}

template<typename Iterator>
std::pair<Iterator,bool> adapt_insert_return_type(std::pair<Iterator,bool> p)
{
  return p;
}

template<typename Set,bool IsSaving> struct load_or_save_unordered_set;

template<typename Set> struct load_or_save_unordered_set<Set,true> /* save */
{
  template<typename Archive>
  void operator()(Archive& ar,const Set& x,unsigned int)const
  {
    typedef typename Set::value_type     value_type;
    typedef typename Set::const_iterator const_iterator;

    const std::size_t                      s=x.size();
    const serialization_version<value_type> value_version;

    ar<<core::make_nvp("count",s);
    ar<<core::make_nvp("value_version",value_version);

    for(const_iterator first=x.begin(),last=x.end();first!=last;++first){
      core::save_construct_data_adl(ar,std::addressof(*first),value_version);
      ar<<core::make_nvp("item",*first);
      serialization_track(ar,first);
    }
  }
};

template<typename Set> struct load_or_save_unordered_set<Set,false> /* load */
{
  template<typename Archive>
  void operator()(Archive& ar,Set& x,unsigned int)const
  {
    typedef typename Set::value_type value_type;
    typedef typename Set::iterator   iterator;

    std::size_t                      s;
    serialization_version<value_type> value_version;

    ar>>core::make_nvp("count",s);
    ar>>core::make_nvp("value_version",value_version);

    x.clear();
    x.reserve(s); /* critical so that iterator tracking is stable */

    for(std::size_t n=0;n<s;++n){
      /* Reconstruct each element on the stack, move it in, then remap the
       * archive's tracked address to the element's final location. */
      archive_constructed<value_type> value("item",ar,value_version);
      std::pair<iterator,bool> p=adapt_insert_return_type(
        x.insert(std::move(value.get())));
      if(!p.second)throw_exception(bad_archive_exception());
      ar.reset_object_address(
        std::addressof(*p.first),std::addressof(value.get()));
      serialization_track(ar,p.first);
    }
  }
};

template<typename Map,bool IsSaving> struct load_or_save_unordered_map;

template<typename Map> struct load_or_save_unordered_map<Map,true> /* save */
{
  template<typename Archive>
  void operator()(Archive& ar,const Map& x,unsigned int)const
  {
    typedef typename std::remove_const<
      typename Map::key_type>::type    key_type;
    typedef typename std::remove_const<
      typename Map::mapped_type>::type mapped_type;
    typedef typename Map::const_iterator const_iterator;

    const std::size_t                       s=x.size();
    const serialization_version<key_type>    key_version;
    const serialization_version<mapped_type> mapped_version;

    ar<<core::make_nvp("count",s);
    ar<<core::make_nvp("key_version",key_version);
    ar<<core::make_nvp("mapped_version",mapped_version);

    for(const_iterator first=x.begin(),last=x.end();first!=last;++first){
      /* To remain lib-independent from Boost.Serialization and not rely on
       * the user having included the serialization code for std::pair
       * (boost/serialization/utility.hpp), we serialize the key and the
       * mapped value separately.
       */
      core::save_construct_data_adl(
        ar,std::addressof(first->first),key_version);
      ar<<core::make_nvp("key",first->first);
      core::save_construct_data_adl(
        ar,std::addressof(first->second),mapped_version);
      ar<<core::make_nvp("mapped",first->second);
      serialization_track(ar,first);
    }
  }
};

template<typename Map> struct load_or_save_unordered_map<Map,false> /* load */
{
  template<typename Archive>
  void operator()(Archive& ar,Map& x,unsigned int)const
  {
    typedef typename std::remove_const<
      typename Map::key_type>::type    key_type;
    typedef typename std::remove_const<
      typename Map::mapped_type>::type mapped_type;
    typedef typename Map::iterator     iterator;

    std::size_t                       s;
    serialization_version<key_type>    key_version;
    serialization_version<mapped_type> mapped_version;

    ar>>core::make_nvp("count",s);
    ar>>core::make_nvp("key_version",key_version);
    ar>>core::make_nvp("mapped_version",mapped_version);

    x.clear();
    x.reserve(s); /* critical so that iterator tracking is stable */

    for(std::size_t n=0;n<s;++n){
      archive_constructed<key_type>    key("key",ar,key_version);
      archive_constructed<mapped_type> mapped("mapped",ar,mapped_version);
      std::pair<iterator,bool> p=adapt_insert_return_type(
        x.emplace(std::move(key.get()),std::move(mapped.get())));
      if(!p.second)throw_exception(bad_archive_exception());
      /* remap both halves of the pair so tracked pointers stay valid */
      ar.reset_object_address(
        std::addressof(p.first->first),std::addressof(key.get()));
      ar.reset_object_address(
        std::addressof(p.first->second),std::addressof(mapped.get()));
      serialization_track(ar,p.first);
    }
  }
};

/* Dispatch on set-ness (key_type == value_type) and archive direction. */
template<typename Container,bool IsSet,bool IsSaving>
struct load_or_save_container;

template<typename Set,bool IsSaving>
struct load_or_save_container<Set,true,IsSaving>:
  load_or_save_unordered_set<Set,IsSaving>{};

template<typename Map,bool IsSaving>
struct load_or_save_container<Map,false,IsSaving>:
  load_or_save_unordered_map<Map,IsSaving>{};

template<typename Archive,typename Container>
void serialize_container(Archive& ar,Container& x,unsigned int version)
{
  load_or_save_container<
    Container,
    std::is_same<
      typename Container::key_type,typename Container::value_type>::value,
    Archive::is_saving::value>()(ar,x,version);
}

} /* namespace detail */
} /* namespace unordered */
} /* namespace boost */

#endif
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/mulx.hpp
#ifndef BOOST_UNORDERED_DETAIL_MULX_HPP
#define BOOST_UNORDERED_DETAIL_MULX_HPP

// Copyright 2022 Peter Dimov.
// Copyright 2022 Joaquin M Lopez Munoz.
// Distributed under the Boost Software License, Version 1.0.
// https://www.boost.org/LICENSE_1_0.txt)

#include <boost/cstdint.hpp>
#include <climits>
#include <cstddef>

#if defined(_MSC_VER) && !defined(__clang__)
# include <intrin.h>
#endif

namespace boost {
namespace unordered {
namespace detail {

// Bit mixer based on the mulx primitive: the full 128-bit product of x and y
// is computed and its high and low 64-bit halves are XORed together.
// Implementation is chosen per platform, fastest first.

#if defined(_MSC_VER) && defined(_M_X64) && !defined(__clang__)

// MSVC x64: _umul128 yields both product halves in one intrinsic.
__forceinline boost::uint64_t mulx64( boost::uint64_t x, boost::uint64_t y )
{
    boost::uint64_t r2;
    boost::uint64_t r = _umul128( x, y, &r2 );
    return r ^ r2;
}

#elif defined(_MSC_VER) && defined(_M_ARM64) && !defined(__clang__)

// MSVC ARM64: low half via plain multiply, high half via __umulh.
__forceinline boost::uint64_t mulx64( boost::uint64_t x, boost::uint64_t y )
{
    boost::uint64_t r = x * y;
    boost::uint64_t r2 = __umulh( x, y );
    return r ^ r2;
}

#elif defined(__SIZEOF_INT128__)

// GCC/Clang: native 128-bit arithmetic.
inline boost::uint64_t mulx64( boost::uint64_t x, boost::uint64_t y )
{
    __uint128_t r = (__uint128_t)x * y;
    return (boost::uint64_t)r ^ (boost::uint64_t)( r >> 64 );
}

#else

// Portable fallback: schoolbook 64x64->128 multiplication from four
// 32x32->64 partial products.
inline boost::uint64_t mulx64( boost::uint64_t x, boost::uint64_t y )
{
    boost::uint64_t x1 = (boost::uint32_t)x;
    boost::uint64_t x2 = x >> 32;

    boost::uint64_t y1 = (boost::uint32_t)y;
    boost::uint64_t y2 = y >> 32;

    boost::uint64_t r3 = x2 * y2;

    boost::uint64_t r2a = x1 * y2;

    r3 += r2a >> 32;

    boost::uint64_t r2b = x2 * y1;

    r3 += r2b >> 32;

    boost::uint64_t r1 = x1 * y1;

    boost::uint64_t r2 = (r1 >> 32) + (boost::uint32_t)r2a + (boost::uint32_t)r2b;

    r1 = (r2 << 32) + (boost::uint32_t)r1;
    r3 += r2 >> 32;

    return r1 ^ r3;
}

#endif

// 32-bit counterpart: XOR the halves of the exact 64-bit product.
inline boost::uint32_t mulx32( boost::uint32_t x, boost::uint32_t y )
{
    boost::uint64_t r = (boost::uint64_t)x * y;

#if defined(__MSVC_RUNTIME_CHECKS)

    return (boost::uint32_t)(r & UINT32_MAX) ^ (boost::uint32_t)(r >> 32);

#else

    return (boost::uint32_t)r ^ (boost::uint32_t)(r >> 32);

#endif
}

// Detect (approximately) whether std::size_t is at least 64 bits wide,
// using shifts of <=16 to stay within preprocessor arithmetic limits.
#if defined(SIZE_MAX)
#if ((((SIZE_MAX >> 16) >> 16) >> 16) >> 15) != 0
#define BOOST_UNORDERED_64B_ARCHITECTURE /* >64 bits assumed as 64 bits */
#endif
#elif defined(UINTPTR_MAX) /* used as proxy for std::size_t */
#if ((((UINTPTR_MAX >> 16) >> 16) >> 16) >> 15) != 0
#define BOOST_UNORDERED_64B_ARCHITECTURE
#endif
#endif

// Hash post-mixer used to spread entropy across all bits of a size_t hash.
inline std::size_t mulx( std::size_t x ) noexcept
{
#if defined(BOOST_UNORDERED_64B_ARCHITECTURE)

    // multiplier is phi
    return (std::size_t)mulx64( (boost::uint64_t)x, 0x9E3779B97F4A7C15ull );

#else /* 32 bits assumed */

    // multiplier from https://arxiv.org/abs/2001.05304
    return mulx32( x, 0xE817FB2Du );

#endif
}

#ifdef BOOST_UNORDERED_64B_ARCHITECTURE
#undef BOOST_UNORDERED_64B_ARCHITECTURE
#endif

} // namespace detail
} // namespace unordered
} // namespace boost

#endif // #ifndef BOOST_UNORDERED_DETAIL_MULX_HPP
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/throw_exception.hpp
// Copyright (C) 2023 Braden Ganetsky // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_DETAIL_THROW_EXCEPTION_HPP #define BOOST_UNORDERED_DETAIL_THROW_EXCEPTION_HPP #include <boost/config.hpp> #if defined(BOOST_HAS_PRAGMA_ONCE) #pragma once #endif #include <boost/throw_exception.hpp> #include <stdexcept> namespace boost { namespace unordered { namespace detail { BOOST_NOINLINE BOOST_NORETURN inline void throw_out_of_range( char const* message) { boost::throw_exception(std::out_of_range(message)); } } // namespace detail } // namespace unordered } // namespace boost #endif // BOOST_UNORDERED_DETAIL_THROW_EXCEPTION_HPP
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/set.hpp
// Copyright (C) 2005-2016 Daniel James // Copyright (C) 2022 Christian Mazakas // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include <boost/unordered/detail/implementation.hpp> #include <boost/unordered/unordered_set_fwd.hpp> namespace boost { namespace unordered { namespace detail { template <typename A, typename T, typename H, typename P> struct set { typedef boost::unordered::detail::set<A, T, H, P> types; typedef T value_type; typedef H hasher; typedef P key_equal; typedef T const const_key_type; typedef typename ::boost::unordered::detail::rebind_wrap<A, value_type>::type value_allocator; typedef boost::unordered::detail::allocator_traits<value_allocator> value_allocator_traits; typedef boost::unordered::detail::table<types> table; typedef boost::unordered::detail::set_extractor<value_type> extractor; typedef typename boost::allocator_void_pointer<value_allocator>::type void_pointer; typedef boost::unordered::node_handle_set< node<value_type, void_pointer>, T, A> node_type; typedef typename table::c_iterator iterator; typedef boost::unordered::insert_return_type_set<iterator, node_type> insert_return_type; }; template <typename T, typename H, typename P, typename A> class instantiate_set { typedef boost::unordered_set<T, H, P, A> container; container x; typename container::node_type node_type; typename container::insert_return_type insert_return_type; }; template <typename T, typename H, typename P, typename A> class instantiate_multiset { typedef boost::unordered_multiset<T, H, P, A> container; container x; typename container::node_type node_type; }; } } }
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/archive_constructed.hpp
/* Copyright 2023 Joaquin M Lopez Munoz.
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * See https://www.boost.org/libs/unordered for library home page.
 */

#ifndef BOOST_UNORDERED_DETAIL_ARCHIVE_CONSTRUCTED_HPP
#define BOOST_UNORDERED_DETAIL_ARCHIVE_CONSTRUCTED_HPP

#include <boost/unordered/detail/opt_storage.hpp>
#include <boost/config.hpp>
#include <boost/core/no_exceptions_support.hpp>
#include <boost/core/noncopyable.hpp>
#include <boost/core/serialization.hpp>

namespace boost{
namespace unordered{
namespace detail{

/* constructs a stack-based object from a serialization archive */

template<typename T>
struct archive_constructed:private noncopyable
{
  /* Constructs a T inside `space` via load_construct_data_adl, then loads
   * its state from the archive under `name`. If loading throws, the
   * already-constructed T is destroyed before rethrowing, so a fully
   * constructed archive_constructed always holds a live T.
   */
  template<class Archive>
  archive_constructed(const char* name,Archive& ar,unsigned int version)
  {
    core::load_construct_data_adl(ar,std::addressof(get()),version);
    BOOST_TRY{
      ar>>core::make_nvp(name,get());
    }
    BOOST_CATCH(...){
      get().~T();
      BOOST_RETHROW;
    }
    BOOST_CATCH_END
  }

  /* T lives in raw opt_storage, so its destructor must be run manually. */
  ~archive_constructed()
  {
    get().~T();
  }

#if defined(BOOST_GCC)&&(BOOST_GCC>=4*10000+6*100)
#define BOOST_UNORDERED_IGNORE_WSTRICT_ALIASING
#endif

#if defined(BOOST_UNORDERED_IGNORE_WSTRICT_ALIASING)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif

  /* Access to the embedded T. GCC >= 4.6 warns about strict aliasing on the
   * cast inside opt_storage; the warning is suppressed locally here. */
  T& get(){return *space.address();}

#if defined(BOOST_UNORDERED_IGNORE_WSTRICT_ALIASING)
#pragma GCC diagnostic pop
#undef BOOST_UNORDERED_IGNORE_WSTRICT_ALIASING
#endif

private:
  opt_storage<T> space; /* raw storage for the T (see opt_storage) */
};

} /* namespace detail */
} /* namespace unordered */
} /* namespace boost */

#endif
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/concurrent_static_asserts.hpp
/* Copyright 2023 Christian Mazakas.
 * Copyright 2023 Joaquin M Lopez Munoz.
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * See https://www.boost.org/libs/unordered for library home page.
 */

#ifndef BOOST_UNORDERED_DETAIL_CONCURRENT_STATIC_ASSERTS_HPP
#define BOOST_UNORDERED_DETAIL_CONCURRENT_STATIC_ASSERTS_HPP

#include <boost/config.hpp>
#include <boost/mp11/algorithm.hpp>
#include <boost/mp11/list.hpp>

#include <functional>
#include <iterator>
#include <type_traits>

// Rejects visitation callables that cannot be invoked with value_type&
// (resp. value_type const&). Expanded inside container members, where
// value_type is in scope.
#define BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(F)                             \
  static_assert(boost::unordered::detail::is_invocable<F, value_type&>::value, \
    "The provided Callable must be invocable with value_type&");

#define BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)                       \
  static_assert(                                                               \
    boost::unordered::detail::is_invocable<F, value_type const&>::value,       \
    "The provided Callable must be invocable with value_type const&");

// Rejects parallel/unsequenced execution policies (the asserted ExecPolicy
// is expected in scope at the expansion site, as is <execution> --
// presumably included by the code using these macros; confirm at call
// sites). std::execution::unsequenced_policy only exists from C++20 on,
// hence the two variants.
#if BOOST_CXX_VERSION >= 202002L

#define BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(P)                           \
  static_assert(!std::is_base_of<std::execution::parallel_unsequenced_policy,  \
                  ExecPolicy>::value,                                          \
    "ExecPolicy must be sequenced.");                                          \
  static_assert(                                                               \
    !std::is_base_of<std::execution::unsequenced_policy, ExecPolicy>::value,   \
    "ExecPolicy must be sequenced.");
#else
#define BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(P)                           \
  static_assert(!std::is_base_of<std::execution::parallel_unsequenced_policy,  \
                  ExecPolicy>::value,                                          \
    "ExecPolicy must be sequenced.");
#endif

#define BOOST_UNORDERED_DETAIL_COMMA ,

// Extracts the last type of an (Arg, Args...) parameter pack -- the trailing
// callable of a bulk-visitation overload. The COMMA indirection lets a comma
// survive macro argument splitting.
#define BOOST_UNORDERED_DETAIL_LAST_ARG(Arg, Args)                             \
  mp11::mp_back<mp11::mp_list<Arg BOOST_UNORDERED_DETAIL_COMMA Args> >

#define BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_INVOCABLE(Arg, Args)            \
  BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(                                     \
    BOOST_UNORDERED_DETAIL_LAST_ARG(Arg, Args))

#define BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_CONST_INVOCABLE(Arg, Args)      \
  BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(                               \
    BOOST_UNORDERED_DETAIL_LAST_ARG(Arg, Args))

namespace boost {
  namespace unordered {
    namespace detail {
      // Portable stand-in for std::is_invocable: F is considered invocable
      // with Args... iff std::function<void(Args...)> can be constructed
      // from a reference_wrapper around F (return values are discarded by
      // the void signature).
      template <class F, class... Args>
      struct is_invocable
          : std::is_constructible<std::function<void(Args...)>,
              std::reference_wrapper<typename std::remove_reference<F>::type> >
      {
      };

    } // namespace detail

  } // namespace unordered

} // namespace boost

// Requires at least a forward iterator; uses the C++20 concept when the
// standard library provides it, the iterator_category tag otherwise.
#if defined(BOOST_NO_CXX20_HDR_CONCEPTS)
#define BOOST_UNORDERED_STATIC_ASSERT_FWD_ITERATOR(Iterator)                   \
  static_assert(                                                               \
    std::is_base_of<                                                           \
      std::forward_iterator_tag,                                               \
      typename std::iterator_traits<Iterator>::iterator_category>::value,      \
    "The provided iterator must be at least forward");
#else
#define BOOST_UNORDERED_STATIC_ASSERT_FWD_ITERATOR(Iterator)                   \
  static_assert(std::forward_iterator<Iterator>,                               \
    "The provided iterator must be at least forward");
#endif

// The iterator must dereference to key_type itself, or to a type accepted by
// a transparent hasher/key_equal pair (heterogeneous lookup).
#define BOOST_UNORDERED_STATIC_ASSERT_KEY_COMPATIBLE_ITERATOR(Iterator)        \
  static_assert(                                                               \
    std::is_same<                                                              \
      typename std::iterator_traits<Iterator>::value_type,                     \
      key_type>::value ||                                                      \
      detail::are_transparent<                                                 \
        typename std::iterator_traits<Iterator>::value_type,                   \
        hasher, key_equal>::value,                                             \
    "The provided iterator must dereference to a compatible key value");

#define BOOST_UNORDERED_STATIC_ASSERT_BULK_VISIT_ITERATOR(Iterator)            \
  BOOST_UNORDERED_STATIC_ASSERT_FWD_ITERATOR(Iterator)                         \
  BOOST_UNORDERED_STATIC_ASSERT_KEY_COMPATIBLE_ITERATOR(Iterator)

#endif // BOOST_UNORDERED_DETAIL_CONCURRENT_STATIC_ASSERTS_HPP
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/serialize_fca_container.hpp
/* Copyright 2023 Joaquin M Lopez Munoz.
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * See https://www.boost.org/libs/unordered for library home page.
 */

#ifndef BOOST_UNORDERED_DETAIL_SERIALIZE_FCA_CONTAINER_HPP
#define BOOST_UNORDERED_DETAIL_SERIALIZE_FCA_CONTAINER_HPP

#include <boost/unordered/detail/serialize_container.hpp>

#if defined(BOOST_UNORDERED_ENABLE_SERIALIZATION_COMPATIBILITY_V0)

/* Boost.Serialization headers are pulled in through a macro indirection,
 * presumably so that dependency scanners don't register a hard dependency
 * for this optional compatibility mode -- verify against the build docs.
 */

#define BOOST_UNORDERED_BLOCK_BOOSTDEP_HEADER \
<boost/serialization/archive_input_unordered_map.hpp>
#include BOOST_UNORDERED_BLOCK_BOOSTDEP_HEADER
#undef BOOST_UNORDERED_BLOCK_BOOSTDEP_HEADER

#define BOOST_UNORDERED_BLOCK_BOOSTDEP_HEADER \
<boost/serialization/archive_input_unordered_set.hpp>
#include BOOST_UNORDERED_BLOCK_BOOSTDEP_HEADER
#undef BOOST_UNORDERED_BLOCK_BOOSTDEP_HEADER

#define BOOST_UNORDERED_BLOCK_BOOSTDEP_HEADER \
<boost/serialization/unordered_collections_load_imp.hpp>
#include BOOST_UNORDERED_BLOCK_BOOSTDEP_HEADER
#undef BOOST_UNORDERED_BLOCK_BOOSTDEP_HEADER

#define BOOST_UNORDERED_BLOCK_BOOSTDEP_HEADER \
<boost/serialization/utility.hpp>
#include BOOST_UNORDERED_BLOCK_BOOSTDEP_HEADER
#undef BOOST_UNORDERED_BLOCK_BOOSTDEP_HEADER

#include <boost/unordered/unordered_map_fwd.hpp>
#include <boost/unordered/unordered_set_fwd.hpp>

#else

#include <boost/throw_exception.hpp>
#include <stdexcept>

#endif

namespace boost{
namespace unordered{
namespace detail{

/* Support for boost::unordered_[multi](map|set) loading from legacy archives.
 * Until Boost 1.84, serialization of these containers was provided from
 * Boost.Serialization via boost/serialization/boost_unordered_(map|set).hpp,
 * from that release on support is native in Boost.Unordered. To enable legacy
 * archive loading, BOOST_UNORDERED_ENABLE_SERIALIZATION_COMPATIBILITY_V0
 * must be defined (it implies header dependency from Boost.Serialization).
 */

#if defined(BOOST_UNORDERED_ENABLE_SERIALIZATION_COMPATIBILITY_V0)

/* Maps each container to the Boost.Serialization loader used by the legacy
 * (version 0) archive format. */

template<typename Archive,typename Container>
struct archive_input;

template<
  typename Archive,typename K,typename T,typename H,typename P,typename A
>
struct archive_input<Archive,boost::unordered_map<K,T,H,P,A> >:
  boost::serialization::stl::archive_input_unordered_map<
    Archive,
    boost::unordered_map<K,T,H,P,A>
  >
{};

template<
  typename Archive,typename K,typename T,typename H,typename P,typename A
>
struct archive_input<Archive,boost::unordered_multimap<K,T,H,P,A> >:
  boost::serialization::stl::archive_input_unordered_multimap<
    Archive,
    boost::unordered_multimap<K,T,H,P,A>
  >
{};

template<
  typename Archive,typename K,typename H,typename P,typename A
>
struct archive_input<Archive,boost::unordered_set<K,H,P,A> >:
  boost::serialization::stl::archive_input_unordered_set<
    Archive,
    boost::unordered_set<K,H,P,A>
  >
{};

template<
  typename Archive,typename K,typename H,typename P,typename A
>
struct archive_input<Archive,boost::unordered_multiset<K,H,P,A> >:
  boost::serialization::stl::archive_input_unordered_multiset<
    Archive,
    boost::unordered_multiset<K,H,P,A>
  >
{};

#else

/* Raised when a version-0 archive is loaded without compatibility support
 * compiled in. */

struct legacy_archive_exception:std::runtime_error
{
  legacy_archive_exception():std::runtime_error(
    "Legacy archive detected, define "
    "BOOST_UNORDERED_ENABLE_SERIALIZATION_COMPATIBILITY_V0 to load"){}
};

#endif

/* Dispatches on Archive::is_saving at compile time: saving always uses the
 * native format; loading additionally understands legacy version-0 archives
 * (when compatibility support is enabled). */

template<typename Container,bool IsSaving>
struct load_or_save_fca_container;

template<typename Container>
struct load_or_save_fca_container<Container,true> /* save */
{
  template<typename Archive>
  void operator()(Archive& ar,Container& x,unsigned int version)const
  {
    serialize_container(ar,x,version);
  }
};

template<typename Container>
struct load_or_save_fca_container<Container,false> /* load */
{
  template<typename Archive>
  void operator()(Archive& ar,Container& x,unsigned int version)const
  {
    if(version==0){
#if defined(BOOST_UNORDERED_ENABLE_SERIALIZATION_COMPATIBILITY_V0)
      boost::serialization::stl::load_unordered_collection<
        Archive,Container,archive_input<Archive,Container> >(ar,x);
#else
      throw_exception(legacy_archive_exception());
#endif
    }
    else{
      serialize_container(ar,x,version);
    }
  }
};

/* Entry point used by the containers' serialize() implementations. */

template<typename Archive,typename Container>
void serialize_fca_container(Archive& ar,Container& x,unsigned int version)
{
  load_or_save_fca_container<Container,Archive::is_saving::value>()(
    ar,x,version);
}

} /* namespace detail */
} /* namespace unordered */
} /* namespace boost */

#endif
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/prime_fmod.hpp
// Copyright (C) 2022 Joaquin M Lopez Munoz.
// Copyright (C) 2022-2023 Christian Mazakas
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_UNORDERED_DETAIL_PRIME_FMOD_HPP
#define BOOST_UNORDERED_DETAIL_PRIME_FMOD_HPP

#include <boost/unordered/detail/narrow_cast.hpp>

#include <boost/config.hpp>
#include <boost/cstdint.hpp>

#include <climits>
#include <cstddef>

// Detect a 64-bit std::size_t (UINTPTR_MAX as a fallback proxy); staged
// shifts keep the preprocessor arithmetic portable.
#if defined(SIZE_MAX)
#if ((((SIZE_MAX >> 16) >> 16) >> 16) >> 15) != 0
#define BOOST_UNORDERED_FCA_HAS_64B_SIZE_T
#endif
#elif defined(UINTPTR_MAX) /* used as proxy for std::size_t */
#if ((((UINTPTR_MAX >> 16) >> 16) >> 16) >> 15) != 0
#define BOOST_UNORDERED_FCA_HAS_64B_SIZE_T
#endif
#endif

#if defined(BOOST_UNORDERED_FCA_HAS_64B_SIZE_T) && defined(_MSC_VER)
#include <intrin.h>
#endif

namespace boost {
  namespace unordered {
    namespace detail {
      // Prime-sized bucket-count policy: maps a requested bucket count to the
      // next prime in a fixed table, and reduces hashes to bucket positions
      // with a fast modulo. The dummy template parameter exists so the static
      // constexpr arrays can be defined in this header pre-C++17 (see the
      // BOOST_NO_CXX17_INLINE_VARIABLES block at the bottom).
      template <class = void> struct prime_fmod_size
      {
        // Roughly doubling sequence of primes; entries above 2^32 only make
        // sense when std::size_t is 64 bits wide.
        constexpr static std::size_t const sizes[] = {13ul, 29ul, 53ul, 97ul,
          193ul, 389ul, 769ul, 1543ul, 3079ul, 6151ul, 12289ul, 24593ul,
          49157ul, 98317ul, 196613ul, 393241ul, 786433ul, 1572869ul, 3145739ul,
          6291469ul, 12582917ul, 25165843ul, 50331653ul, 100663319ul,
          201326611ul, 402653189ul, 805306457ul, 1610612741ul, 3221225473ul,
#if !defined(BOOST_UNORDERED_FCA_HAS_64B_SIZE_T)
          4294967291ul
#else
          6442450939ull, 12884901893ull, 25769803751ull, 51539607551ull,
          103079215111ull, 206158430209ull, 412316860441ull, 824633720831ull,
          1649267441651ull
#endif
        };

        constexpr static std::size_t const sizes_len =
          sizeof(sizes) / sizeof(sizes[0]);

#if defined(BOOST_UNORDERED_FCA_HAS_64B_SIZE_T)
        // Precomputed magic numbers M = 2^64 / d + 1 for each size d that
        // fits in 32 bits, used by fast_modulo below (one entry per size up
        // to and including 3221225473).
        constexpr static boost::uint64_t const inv_sizes32[] = {
          1418980313362273202ull, 636094623231363849ull,
          348051774975651918ull, 190172619316593316ull, 95578984837873325ull,
          47420935922132524ull, 23987963684927896ull, 11955116055547344ull,
          5991147799191151ull, 2998982941588287ull, 1501077717772769ull,
          750081082979285ull, 375261795343686ull, 187625172388393ull,
          93822606204624ull, 46909513691883ull, 23456218233098ull,
          11728086747027ull, 5864041509391ull, 2932024948977ull,
          1466014921160ull, 733007198436ull, 366503839517ull, 183251896093ull,
          91625960335ull, 45812983922ull, 22906489714ull, 11453246088ull,
          5726623060ull};

        constexpr static std::size_t const inv_sizes32_len =
          sizeof(inv_sizes32) / sizeof(inv_sizes32[0]);
#endif /* defined(BOOST_UNORDERED_FCA_HAS_64B_SIZE_T) */

        // Plain modulo, instantiated once per table entry so the compiler
        // can turn the division by the constant Size into multiplications.
        template <std::size_t SizeIndex, std::size_t Size = sizes[SizeIndex]>
        static std::size_t position(std::size_t hash)
        {
          return hash % Size;
        }

        // Jump table of the instantiations above. On 64-bit targets only the
        // sizes too big for fast_modulo are listed (indices 29 and up), so
        // callers index it with (size_index - inv_sizes32_len).
        constexpr static std::size_t (*positions[])(std::size_t) = {
#if !defined(BOOST_UNORDERED_FCA_HAS_64B_SIZE_T)
          position<0, sizes[0]>,
          position<1, sizes[1]>,
          position<2, sizes[2]>,
          position<3, sizes[3]>,
          position<4, sizes[4]>,
          position<5, sizes[5]>,
          position<6, sizes[6]>,
          position<7, sizes[7]>,
          position<8, sizes[8]>,
          position<9, sizes[9]>,
          position<10, sizes[10]>,
          position<11, sizes[11]>,
          position<12, sizes[12]>,
          position<13, sizes[13]>,
          position<14, sizes[14]>,
          position<15, sizes[15]>,
          position<16, sizes[16]>,
          position<17, sizes[17]>,
          position<18, sizes[18]>,
          position<19, sizes[19]>,
          position<20, sizes[20]>,
          position<21, sizes[21]>,
          position<22, sizes[22]>,
          position<23, sizes[23]>,
          position<24, sizes[24]>,
          position<25, sizes[25]>,
          position<26, sizes[26]>,
          position<27, sizes[27]>,
          position<28, sizes[28]>,
          position<29, sizes[29]>,
#else
          position<29, sizes[29]>,
          position<30, sizes[30]>,
          position<31, sizes[31]>,
          position<32, sizes[32]>,
          position<33, sizes[33]>,
          position<34, sizes[34]>,
          position<35, sizes[35]>,
          position<36, sizes[36]>,
          position<37, sizes[37]>,
#endif
        };

        // Smallest table index whose size is >= n (clamped to the last entry).
        static inline std::size_t size_index(std::size_t n)
        {
          std::size_t i = 0;
          for (; i < (sizes_len - 1); ++i) {
            if (sizes[i] >= n) {
              break;
            }
          }
          return i;
        }

        static inline std::size_t size(std::size_t size_index)
        {
          return sizes[size_index];
        }

#if defined(BOOST_UNORDERED_FCA_HAS_64B_SIZE_T)
        // We emulate the techniques taken from:
        // Faster Remainder by Direct Computation: Applications to Compilers
        // and Software Libraries
        // https://arxiv.org/abs/1902.01961
        //
        // In essence, use fancy math to directly calculate the remainder (aka
        // modulo) exploiting how compilers transform division
        //

        // High 64 bits of fractional * d (d < 2^32).
        static inline boost::uint64_t get_remainder(
          boost::uint64_t fractional, boost::uint32_t d)
        {
#if defined(_MSC_VER)
          // use MSVC intrinsics when available to avoid promotion to 128 bits

          return __umulh(fractional, d);
#elif defined(BOOST_HAS_INT128)
          return static_cast<boost::uint64_t>(
            ((boost::uint128_type)fractional * d) >> 64);
#else
          // portable implementation in the absence of boost::uint128_type on
          // 64 bits, which happens at least in GCC 4.5 and prior

          boost::uint64_t r1 = (fractional & UINT32_MAX) * d;
          boost::uint64_t r2 = (fractional >> 32) * d;

          r2 += r1 >> 32;
          return r2 >> 32;
#endif /* defined(_MSC_VER) */
        }

        // a % d, where M is the precomputed magic number for d
        // (inv_sizes32 entry). M * a wraps to the fractional part of a/d.
        static inline boost::uint32_t fast_modulo(
          boost::uint32_t a, boost::uint64_t M, boost::uint32_t d)
        {
          boost::uint64_t fractional = M * a;
          return (boost::uint32_t)(get_remainder(fractional, d));
        }
#endif /* defined(BOOST_UNORDERED_FCA_HAS_64B_SIZE_T) */

        // Bucket position of `hash` for the table size at size_index. On
        // 64-bit targets, sizes that fit in 32 bits take the fast path after
        // folding the hash's high half into its low half; bigger sizes fall
        // back to the positions jump table (offset by inv_sizes32_len).
        static inline std::size_t position(
          std::size_t hash, std::size_t size_index)
        {
#if defined(BOOST_UNORDERED_FCA_HAS_64B_SIZE_T)
          std::size_t sizes_under_32bit = inv_sizes32_len;
          if (BOOST_LIKELY(size_index < sizes_under_32bit)) {
            return fast_modulo(narrow_cast<boost::uint32_t>(hash) +
                                 narrow_cast<boost::uint32_t>(hash >> 32),
              inv_sizes32[size_index], boost::uint32_t(sizes[size_index]));
          } else {
            return positions[size_index - sizes_under_32bit](hash);
          }
#else
          return positions[size_index](hash);
#endif /* defined(BOOST_UNORDERED_FCA_HAS_64B_SIZE_T) */
        }
      }; // prime_fmod_size

#if defined(BOOST_NO_CXX17_INLINE_VARIABLES)
      // https://en.cppreference.com/w/cpp/language/static#Constant_static_members
      // If a const non-inline (since C++17) static data member or a constexpr
      // static data member (since C++11)(until C++17) is odr-used, a
      // definition at namespace scope is still required, but it cannot have
      // an initializer.
      template <class T> constexpr std::size_t prime_fmod_size<T>::sizes[];

#if defined(BOOST_UNORDERED_FCA_HAS_64B_SIZE_T)
      template <class T>
      constexpr boost::uint64_t prime_fmod_size<T>::inv_sizes32[];
#endif

      template <class T>
      constexpr std::size_t (*prime_fmod_size<T>::positions[])(std::size_t);
#endif

    } // namespace detail
  } // namespace unordered
} // namespace boost

#endif // BOOST_UNORDERED_DETAIL_PRIME_FMOD_HPP
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/fca.hpp
// Copyright (C) 2022-2024 Joaquin M Lopez Munoz. // Copyright (C) 2022 Christian Mazakas // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_DETAIL_FCA_HPP #define BOOST_UNORDERED_DETAIL_FCA_HPP /* The general structure of the fast closed addressing implementation is that we use straight-forward separate chaining (i.e. each bucket contains its own linked list) and then improve iteration time by adding an array of "bucket groups". A bucket group is a constant-width view into a subsection of the buckets array, containing a bitmask that indicates which one of the buckets in the subsection contains a list of nodes. This allows the code to test N buckets for occupancy in a single operation. Additional speed can be found by inter-linking occupied bucket groups with one another in a doubly-linked list. To this end, large swathes of the bucket groups array no longer need to be iterated and have their bitmasks examined for occupancy. A bucket group iterator contains a pointer to a bucket group along with a pointer into the buckets array. The iterator's bucket pointer is guaranteed to point to a bucket within the bucket group's view of the array. To advance the iterator, we need to determine if we need to skip to the next bucket group or simply move to the next occupied bucket as denoted by the bitmask. To accomplish this, we perform something roughly equivalent to this: ``` bucket_iterator itb = ... 
bucket_pointer p = itb.p bucket_group_pointer pbg = itb.pbg offset = p - pbg->buckets // because we wish to see if the _next_ bit in the mask is occupied, we'll // generate a testing mask from the current offset + 1 // testing_mask = reset_first_bits(offset + 1) n = ctz(pbg->bitmask & testing_mask) if (n < N) { p = pbg->buckets + n } else { pbg = pbg->next p = pbg->buckets + ctz(pbg->bitmask) } ``` `reset_first_bits` yields an unsigned integral with the first n bits set to 0 and then by counting the number of trailing zeroes when AND'd against the bucket group's bitmask, we can derive the offset into the buckets array. When the calculated offset is equal to N, we know we've reached the end of a bucket group and we can advance to the next one. This is a rough explanation for how iterator incrementation should work for a fixed width size of N as 3 for the bucket groups ``` N = 3 p = buckets pbg->bitmask = 0b101 pbg->buckets = buckets offset = p - pbg->buckets // => 0 testing_mask = reset_first_bits(offset + 1) // reset_first_bits(1) => 0b110 x = bitmask & testing_mask // => 0b101 & 0b110 => 0b100 ctz(x) // ctz(0b100) => 2 // 2 < 3 => p = pbg->buckets + 2 // increment again... offset = p - pbg->buckets // => 2 testing_mask = reset_first_bits(offset + 1) // reset_first_bits(3) => 0b000 bitmask & testing_mask // 0b101 & 0b000 => 0b000 ctz(0b000) => 3 // 3 < 3 is false now pbg = pbg->next initial_offset = ctz(pbg->bitmask) p = pbg->buckets + initial_offset ``` For `size_` number of buckets, there are `1 + (size_ / N)` bucket groups where `N` is the width of a bucket group, determined at compile-time. We allocate space for `size_ + 1` buckets, using the last one as a dummy bucket which is kept permanently empty so it can act as a sentinel value in the implementation of `iterator end();`. We set the last bucket group to act as a sentinel. 
``` num_groups = size_ / N + 1 groups = allocate(num_groups) pbg = groups + (num_groups - 1) // not guaranteed to point to exactly N buckets pbg->buckets = buckets + N * (size_ / N) // this marks the true end of the bucket array buckets pbg->bitmask = set_bit(size_ % N) // links in on itself pbg->next = pbg->prev = pbg ``` To this end, we can devise a safe iteration scheme while also creating a useful sentinel to use as the end iterator. Otherwise, usage of the data structure is relatively straight-forward compared to normal separate chaining implementations. */ #include <boost/unordered/detail/prime_fmod.hpp> #include <boost/unordered/detail/serialize_tracked_address.hpp> #include <boost/unordered/detail/opt_storage.hpp> #include <boost/assert.hpp> #include <boost/core/allocator_access.hpp> #include <boost/core/bit.hpp> #include <boost/core/empty_value.hpp> #include <boost/core/invoke_swap.hpp> #include <boost/core/no_exceptions_support.hpp> #include <boost/core/serialization.hpp> #include <boost/cstdint.hpp> #include <boost/config.hpp> #include <iterator> namespace boost { namespace unordered { namespace detail { template <class ValueType, class VoidPtr> struct node { typedef ValueType value_type; typedef typename boost::pointer_traits<VoidPtr>::template rebind_to< node>::type node_pointer; node_pointer next; opt_storage<value_type> buf; node() noexcept : next(), buf() {} value_type* value_ptr() noexcept { return buf.address(); } value_type& value() noexcept { return *buf.address(); } }; template <class Node, class VoidPtr> struct bucket { typedef typename boost::pointer_traits<VoidPtr>::template rebind_to< Node>::type node_pointer; typedef typename boost::pointer_traits<VoidPtr>::template rebind_to< bucket>::type bucket_pointer; node_pointer next; bucket() noexcept : next() {} }; template <class Bucket> struct bucket_group { typedef typename Bucket::bucket_pointer bucket_pointer; typedef typename boost::pointer_traits<bucket_pointer>::template rebind_to< 
bucket_group>::type bucket_group_pointer; BOOST_STATIC_CONSTANT(std::size_t, N = sizeof(std::size_t) * CHAR_BIT); bucket_pointer buckets; std::size_t bitmask; bucket_group_pointer next, prev; bucket_group() noexcept : buckets(), bitmask(0), next(), prev() {} ~bucket_group() {} }; inline std::size_t set_bit(std::size_t n) { return std::size_t(1) << n; } inline std::size_t reset_bit(std::size_t n) { return ~(std::size_t(1) << n); } inline std::size_t reset_first_bits(std::size_t n) // n>0 { return ~(~(std::size_t(0)) >> (sizeof(std::size_t) * 8 - n)); } template <class Bucket> struct grouped_bucket_iterator { public: typedef typename Bucket::bucket_pointer bucket_pointer; typedef typename boost::pointer_traits<bucket_pointer>::template rebind_to< bucket_group<Bucket> >::type bucket_group_pointer; typedef Bucket value_type; typedef typename boost::pointer_traits<bucket_pointer>::difference_type difference_type; typedef Bucket& reference; typedef Bucket* pointer; typedef std::forward_iterator_tag iterator_category; private: bucket_pointer p; bucket_group_pointer pbg; public: grouped_bucket_iterator() : p(), pbg() {} reference operator*() const noexcept { return dereference(); } pointer operator->() const noexcept { return boost::to_address(p); } grouped_bucket_iterator& operator++() noexcept { increment(); return *this; } grouped_bucket_iterator operator++(int) noexcept { grouped_bucket_iterator old = *this; increment(); return old; } bool operator==(grouped_bucket_iterator const& other) const noexcept { return equal(other); } bool operator!=(grouped_bucket_iterator const& other) const noexcept { return !equal(other); } private: template <typename, typename, typename> friend class grouped_bucket_array; BOOST_STATIC_CONSTANT(std::size_t, N = bucket_group<Bucket>::N); grouped_bucket_iterator(bucket_pointer p_, bucket_group_pointer pbg_) : p(p_), pbg(pbg_) { } Bucket& dereference() const noexcept { return *p; } bool equal(const grouped_bucket_iterator& x) const noexcept { 
return p == x.p; } void increment() noexcept { std::size_t const offset = static_cast<std::size_t>(p - pbg->buckets); std::size_t n = std::size_t(boost::core::countr_zero( pbg->bitmask & reset_first_bits(offset + 1))); if (n < N) { p = pbg->buckets + static_cast<difference_type>(n); } else { pbg = pbg->next; std::ptrdiff_t x = boost::core::countr_zero(pbg->bitmask); p = pbg->buckets + x; } } template <typename Archive> friend void serialization_track( Archive& ar, grouped_bucket_iterator const& x) { // requires: not at end() position track_address(ar, x.p); track_address(ar, x.pbg); } friend class boost::serialization::access; template <typename Archive> void serialize(Archive& ar, unsigned int) { // requires: not at end() position serialize_tracked_address(ar, p); serialize_tracked_address(ar, pbg); } }; template <class Node> struct const_grouped_local_bucket_iterator; template <class Node> struct grouped_local_bucket_iterator { typedef typename Node::node_pointer node_pointer; public: typedef typename Node::value_type value_type; typedef value_type element_type; typedef value_type* pointer; typedef value_type& reference; typedef std::ptrdiff_t difference_type; typedef std::forward_iterator_tag iterator_category; grouped_local_bucket_iterator() : p() {} reference operator*() const noexcept { return dereference(); } pointer operator->() const noexcept { return std::addressof(dereference()); } grouped_local_bucket_iterator& operator++() noexcept { increment(); return *this; } grouped_local_bucket_iterator operator++(int) noexcept { grouped_local_bucket_iterator old = *this; increment(); return old; } bool operator==( grouped_local_bucket_iterator const& other) const noexcept { return equal(other); } bool operator!=( grouped_local_bucket_iterator const& other) const noexcept { return !equal(other); } private: template <typename, typename, typename> friend class grouped_bucket_array; template <class> friend struct const_grouped_local_bucket_iterator; 
grouped_local_bucket_iterator(node_pointer p_) : p(p_) {} value_type& dereference() const noexcept { return p->value(); } bool equal(const grouped_local_bucket_iterator& x) const noexcept { return p == x.p; } void increment() noexcept { p = p->next; } node_pointer p; }; template <class Node> struct const_grouped_local_bucket_iterator { typedef typename Node::node_pointer node_pointer; public: typedef typename Node::value_type const value_type; typedef value_type const element_type; typedef value_type const* pointer; typedef value_type const& reference; typedef std::ptrdiff_t difference_type; typedef std::forward_iterator_tag iterator_category; const_grouped_local_bucket_iterator() : p() {} const_grouped_local_bucket_iterator( grouped_local_bucket_iterator<Node> it) : p(it.p) { } reference operator*() const noexcept { return dereference(); } pointer operator->() const noexcept { return std::addressof(dereference()); } const_grouped_local_bucket_iterator& operator++() noexcept { increment(); return *this; } const_grouped_local_bucket_iterator operator++(int) noexcept { const_grouped_local_bucket_iterator old = *this; increment(); return old; } bool operator==( const_grouped_local_bucket_iterator const& other) const noexcept { return equal(other); } bool operator!=( const_grouped_local_bucket_iterator const& other) const noexcept { return !equal(other); } private: template <typename, typename, typename> friend class grouped_bucket_array; const_grouped_local_bucket_iterator(node_pointer p_) : p(p_) {} value_type& dereference() const noexcept { return p->value(); } bool equal(const const_grouped_local_bucket_iterator& x) const noexcept { return p == x.p; } void increment() noexcept { p = p->next; } node_pointer p; }; template <class T> struct span { T* begin() const noexcept { return data; } T* end() const noexcept { return data + size; } T* data; std::size_t size; span(T* data_, std::size_t size_) : data(data_), size(size_) {} }; template <class Bucket, class 
Allocator, class SizePolicy> class grouped_bucket_array : boost::empty_value<typename boost::allocator_rebind<Allocator, node<typename boost::allocator_value_type<Allocator>::type, typename boost::allocator_void_pointer<Allocator>::type> >:: type> { typedef typename boost::allocator_value_type<Allocator>::type allocator_value_type; typedef typename boost::allocator_void_pointer<Allocator>::type void_pointer; typedef typename boost::allocator_difference_type<Allocator>::type difference_type; public: typedef typename boost::allocator_rebind<Allocator, node<allocator_value_type, void_pointer> >::type node_allocator_type; typedef node<allocator_value_type, void_pointer> node_type; typedef typename boost::allocator_pointer<node_allocator_type>::type node_pointer; typedef SizePolicy size_policy; private: typedef typename boost::allocator_rebind<Allocator, Bucket>::type bucket_allocator_type; typedef typename boost::allocator_pointer<bucket_allocator_type>::type bucket_pointer; typedef boost::pointer_traits<bucket_pointer> bucket_pointer_traits; typedef bucket_group<Bucket> group; typedef typename boost::allocator_rebind<Allocator, group>::type group_allocator_type; typedef typename boost::allocator_pointer<group_allocator_type>::type group_pointer; typedef typename boost::pointer_traits<group_pointer> group_pointer_traits; public: typedef Bucket value_type; typedef Bucket bucket_type; typedef std::size_t size_type; typedef Allocator allocator_type; typedef grouped_bucket_iterator<Bucket> iterator; typedef grouped_local_bucket_iterator<node_type> local_iterator; typedef const_grouped_local_bucket_iterator<node_type> const_local_iterator; private: std::size_t size_index_, size_; bucket_pointer buckets; group_pointer groups; public: static std::size_t bucket_count_for(std::size_t num_buckets) { if (num_buckets == 0) { return 0; } return size_policy::size(size_policy::size_index(num_buckets)); } grouped_bucket_array() : empty_value<node_allocator_type>( empty_init_t(), 
node_allocator_type()), size_index_(0), size_(0), buckets(), groups() { } grouped_bucket_array(size_type n, const Allocator& al) : empty_value<node_allocator_type>( empty_init_t(), node_allocator_type(al)), size_index_(0), size_(0), buckets(), groups() { if (n == 0) { return; } size_index_ = size_policy::size_index(n); size_ = size_policy::size(size_index_); bucket_allocator_type bucket_alloc = this->get_bucket_allocator(); group_allocator_type group_alloc = this->get_group_allocator(); size_type const num_buckets = buckets_len(); size_type const num_groups = groups_len(); buckets = boost::allocator_allocate(bucket_alloc, num_buckets); BOOST_TRY { groups = boost::allocator_allocate(group_alloc, num_groups); bucket_type* pb = boost::to_address(buckets); for (size_type i = 0; i < num_buckets; ++i) { new (pb + i) bucket_type(); } group* pg = boost::to_address(groups); for (size_type i = 0; i < num_groups; ++i) { new (pg + i) group(); } } BOOST_CATCH(...) { boost::allocator_deallocate(bucket_alloc, buckets, num_buckets); BOOST_RETHROW } BOOST_CATCH_END size_type const N = group::N; group_pointer pbg = groups + static_cast<difference_type>(num_groups - 1); pbg->buckets = buckets + static_cast<difference_type>(N * (size_ / N)); pbg->bitmask = set_bit(size_ % N); pbg->next = pbg->prev = pbg; } ~grouped_bucket_array() { this->deallocate(); } grouped_bucket_array(grouped_bucket_array const&) = delete; grouped_bucket_array& operator=(grouped_bucket_array const&) = delete; grouped_bucket_array(grouped_bucket_array&& other) noexcept : empty_value<node_allocator_type>( empty_init_t(), other.get_node_allocator()), size_index_(other.size_index_), size_(other.size_), buckets(other.buckets), groups(other.groups) { other.size_ = 0; other.size_index_ = 0; other.buckets = bucket_pointer(); other.groups = group_pointer(); } grouped_bucket_array& operator=(grouped_bucket_array&& other) noexcept { BOOST_ASSERT( this->get_node_allocator() == other.get_node_allocator()); if (this == 
std::addressof(other)) { return *this; } this->deallocate(); size_index_ = other.size_index_; size_ = other.size_; buckets = other.buckets; groups = other.groups; other.size_index_ = 0; other.size_ = 0; other.buckets = bucket_pointer(); other.groups = group_pointer(); return *this; } #if defined(BOOST_MSVC) #pragma warning(push) #pragma warning(disable : 4100) // unreferenced formal parameter (dtor calls) #endif void deallocate() noexcept { if (buckets) { size_type const num_buckets = buckets_len(); bucket_type* pb = boost::to_address(buckets); (void)pb; // VS complains when dtor is trivial for (size_type i = 0; i < num_buckets; ++i) { (pb + i)->~bucket_type(); } bucket_allocator_type bucket_alloc = this->get_bucket_allocator(); boost::allocator_deallocate(bucket_alloc, buckets, num_buckets); buckets = bucket_pointer(); } if (groups) { size_type const num_groups = groups_len(); group* pg = boost::to_address(groups); (void)pg; // VS complains when dtor is trivial for (size_type i = 0; i < num_groups; ++i) { (pg + i)->~group(); } group_allocator_type group_alloc = this->get_group_allocator(); boost::allocator_deallocate(group_alloc, groups, num_groups); groups = group_pointer(); } } #if defined(BOOST_MSVC) #pragma warning(pop) #endif void swap(grouped_bucket_array& other) { std::swap(size_index_, other.size_index_); std::swap(size_, other.size_); std::swap(buckets, other.buckets); std::swap(groups, other.groups); bool b = boost::allocator_propagate_on_container_swap< allocator_type>::type::value; if (b) { boost::core::invoke_swap( get_node_allocator(), other.get_node_allocator()); } } node_allocator_type const& get_node_allocator() const { return empty_value<node_allocator_type>::get(); } node_allocator_type& get_node_allocator() { return empty_value<node_allocator_type>::get(); } bucket_allocator_type get_bucket_allocator() const { return bucket_allocator_type(this->get_node_allocator()); } group_allocator_type get_group_allocator() const { return 
group_allocator_type(this->get_node_allocator()); } Allocator get_allocator() const { return Allocator(this->get_node_allocator()); } size_type buckets_len() const noexcept { return size_ + 1; } size_type groups_len() const noexcept { return size_ / group::N + 1; } void reset_allocator(Allocator const& allocator_) { this->get_node_allocator() = node_allocator_type(allocator_); } size_type bucket_count() const { return size_; } iterator begin() const { return size_ == 0 ? end() : ++at(size_); } iterator end() const { // micro optimization: no need to return the bucket group // as end() is not incrementable iterator pbg; pbg.p = buckets + static_cast<difference_type>(this->buckets_len() - 1); return pbg; } local_iterator begin(size_type n) const { if (size_ == 0) { return this->end(n); } return local_iterator( (buckets + static_cast<difference_type>(n))->next); } local_iterator end(size_type) const { return local_iterator(); } size_type capacity() const noexcept { return size_; } iterator at(size_type n) const { if (size_ > 0) { std::size_t const N = group::N; iterator pbg(buckets + static_cast<difference_type>(n), groups + static_cast<difference_type>(n / N)); return pbg; } else { return this->end(); } } span<Bucket> raw() { BOOST_ASSERT(size_ == 0 || size_ < this->buckets_len()); return span<Bucket>(boost::to_address(buckets), size_); } size_type position(std::size_t hash) const { return size_policy::position(hash, size_index_); } void clear() { this->deallocate(); size_index_ = 0; size_ = 0; } void append_bucket_group(iterator itb) noexcept { std::size_t const N = group::N; bool const is_empty_bucket = (!itb->next); if (is_empty_bucket) { bucket_pointer pb = itb.p; group_pointer pbg = itb.pbg; std::size_t n = static_cast<std::size_t>(boost::to_address(pb) - &buckets[0]); bool const is_empty_group = (!pbg->bitmask); if (is_empty_group) { size_type const num_groups = this->groups_len(); group_pointer last_group = groups + static_cast<difference_type>(num_groups - 
1); pbg->buckets = buckets + static_cast<difference_type>(N * (n / N)); pbg->next = last_group->next; pbg->next->prev = pbg; pbg->prev = last_group; pbg->prev->next = pbg; } pbg->bitmask |= set_bit(n % N); } } void insert_node(iterator itb, node_pointer p) noexcept { this->append_bucket_group(itb); p->next = itb->next; itb->next = p; } void insert_node_hint( iterator itb, node_pointer p, node_pointer hint) noexcept { this->append_bucket_group(itb); if (hint) { p->next = hint->next; hint->next = p; } else { p->next = itb->next; itb->next = p; } } void extract_node(iterator itb, node_pointer p) noexcept { node_pointer* pp = std::addressof(itb->next); while ((*pp) != p) pp = std::addressof((*pp)->next); *pp = p->next; if (!itb->next) unlink_bucket(itb); } void extract_node_after(iterator itb, node_pointer* pp) noexcept { *pp = (*pp)->next; if (!itb->next) unlink_bucket(itb); } void unlink_empty_buckets() noexcept { std::size_t const N = group::N; group_pointer pbg = groups, last = groups + static_cast<difference_type>( this->groups_len() - 1); for (; pbg != last; ++pbg) { if (!pbg->buckets) { continue; } for (std::size_t n = 0; n < N; ++n) { bucket_pointer bs = pbg->buckets; bucket_type& b = bs[static_cast<std::ptrdiff_t>(n)]; if (!b.next) pbg->bitmask &= reset_bit(n); } if (!pbg->bitmask && pbg->next) unlink_group(pbg); } // do not check end bucket for (std::size_t n = 0; n < size_ % N; ++n) { if (!pbg->buckets[static_cast<std::ptrdiff_t>(n)].next) pbg->bitmask &= reset_bit(n); } } void unlink_bucket(iterator itb) { typename iterator::bucket_pointer p = itb.p; typename iterator::bucket_group_pointer pbg = itb.pbg; if (!(pbg->bitmask &= reset_bit(static_cast<std::size_t>(p - pbg->buckets)))) unlink_group(pbg); } private: void unlink_group(group_pointer pbg) { pbg->next->prev = pbg->prev; pbg->prev->next = pbg->next; pbg->prev = pbg->next = group_pointer(); } }; } // namespace detail } // namespace unordered } // namespace boost #endif // BOOST_UNORDERED_DETAIL_FCA_HPP
0
repos/unordered/include/boost/unordered
repos/unordered/include/boost/unordered/detail/xmx.hpp
/* 32b/64b xmx mix function.
 *
 * Copyright 2022 Peter Dimov.
 * Copyright 2022 Joaquin M Lopez Munoz.
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * See https://www.boost.org/libs/unordered for library home page.
 */

#ifndef BOOST_UNORDERED_DETAIL_XMX_HPP
#define BOOST_UNORDERED_DETAIL_XMX_HPP

#include <boost/cstdint.hpp>
#include <climits>
#include <cstddef>

namespace boost{
namespace unordered{
namespace detail{

/* Bit mixer for improvement of statistical properties of hash functions.
 * The implementation is different on 64bit and 32bit architectures:
 *
 * - 64bit: same as xmx function in
 *   http://jonkagstrom.com/bit-mixer-construction/index.html
 * - 32bit: generated by Hash Function Prospector
 *   (https://github.com/skeeto/hash-prospector) and selected as the
 *   best overall performer in benchmarks of Boost.Unordered flat containers.
 *   Score assigned by Hash Prospector: 333.7934929677524
 */

/* Architecture detection: the chained 16+16+16+15 bit shifts test whether
 * SIZE_MAX (or UINTPTR_MAX, used as a stand-in when SIZE_MAX is not
 * defined) requires 64 or more value bits, without ever shifting by an
 * amount that could exceed the width of the preprocessor's arithmetic.
 */
#if defined(SIZE_MAX)
#if ((((SIZE_MAX >> 16) >> 16) >> 16) >> 15) != 0
#define BOOST_UNORDERED_64B_ARCHITECTURE /* >64 bits assumed as 64 bits */
#endif
#elif defined(UINTPTR_MAX) /* used as proxy for std::size_t */
#if ((((UINTPTR_MAX >> 16) >> 16) >> 16) >> 15) != 0
#define BOOST_UNORDERED_64B_ARCHITECTURE
#endif
#endif

/* Mixes the bits of x. Pure and deterministic: equal inputs always produce
 * equal outputs on a given architecture.
 */
static inline std::size_t xmx(std::size_t x)noexcept
{
#if defined(BOOST_UNORDERED_64B_ARCHITECTURE)
  /* xor-shift / multiply / xor-shift round over the full 64-bit value */
  boost::uint64_t z=(boost::uint64_t)x;

  z^=z>>23;
  z*=0xff51afd7ed558ccdull;
  z^=z>>23;

  return (std::size_t)z;
#else /* 32 bits assumed */
  /* same xor-multiply-xor shape with 32-bit shift amounts and multiplier */
  x^=x>>18;
  x*=0x56b5aaadu;
  x^=x>>16;

  return x;
#endif
}

/* the detection macro is private to this header */
#ifdef BOOST_UNORDERED_64B_ARCHITECTURE
#undef BOOST_UNORDERED_64B_ARCHITECTURE
#endif

} /* namespace detail */
} /* namespace unordered */
} /* namespace boost */

#endif
0
repos/unordered/include/boost/unordered/detail
repos/unordered/include/boost/unordered/detail/foa/tuple_rotate_right.hpp
/* Copyright 2023 Joaquin M Lopez Munoz. * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) * * See https://www.boost.org/libs/unordered for library home page. */ #ifndef BOOST_UNORDERED_DETAIL_FOA_TUPLE_ROTATE_RIGHT_HPP #define BOOST_UNORDERED_DETAIL_FOA_TUPLE_ROTATE_RIGHT_HPP #include <boost/mp11/algorithm.hpp> #include <boost/mp11/integer_sequence.hpp> #include <tuple> #include <utility> namespace boost{ namespace unordered{ namespace detail{ namespace foa{ template<typename Tuple> using tuple_rotate_right_return_type=mp11::mp_rotate_right_c< typename std::remove_cv<typename std::remove_reference<Tuple>::type>::type, 1 >; template<std::size_t... Is,typename Tuple> tuple_rotate_right_return_type<Tuple> tuple_rotate_right_aux(mp11::index_sequence<Is...>,Tuple&& x) { return tuple_rotate_right_return_type<Tuple>{ std::get<(Is+sizeof...(Is)-1)%sizeof...(Is)>(std::forward<Tuple>(x))...}; } template<typename Tuple> tuple_rotate_right_return_type<Tuple> tuple_rotate_right(Tuple&& x) { using RawTuple=typename std::remove_cv< typename std::remove_reference<Tuple>::type>::type; return tuple_rotate_right_aux( mp11::make_index_sequence<std::tuple_size<RawTuple>::value>{}, std::forward<Tuple>(x)); } } /* namespace foa */ } /* namespace detail */ } /* namespace unordered */ } /* namespace boost */ #endif
0
repos/unordered/include/boost/unordered/detail
repos/unordered/include/boost/unordered/detail/foa/flat_set_types.hpp
// Copyright (C) 2023 Christian Mazakas // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_DETAIL_FOA_FLAT_SET_TYPES_HPP #define BOOST_UNORDERED_DETAIL_FOA_FLAT_SET_TYPES_HPP #include <boost/core/allocator_access.hpp> namespace boost { namespace unordered { namespace detail { namespace foa { template <class Key> struct flat_set_types { using key_type = Key; using init_type = Key; using value_type = Key; static Key const& extract(value_type const& key) { return key; } using element_type = value_type; static Key& value_from(element_type& x) { return x; } static element_type&& move(element_type& x) { return std::move(x); } template <class A, class... Args> static void construct(A& al, value_type* p, Args&&... args) { boost::allocator_construct(al, p, std::forward<Args>(args)...); } template <class A> static void destroy(A& al, value_type* p) noexcept { boost::allocator_destroy(al, p); } }; } // namespace foa } // namespace detail } // namespace unordered } // namespace boost #endif // BOOST_UNORDERED_DETAIL_FOA_FLAT_SET_TYPES_HPP
0
repos/unordered/include/boost/unordered/detail
repos/unordered/include/boost/unordered/detail/foa/node_map_types.hpp
// Copyright (C) 2023 Christian Mazakas // Copyright (C) 2024 Braden Ganetsky // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_DETAIL_FOA_NODE_MAP_TYPES_HPP #define BOOST_UNORDERED_DETAIL_FOA_NODE_MAP_TYPES_HPP #include <boost/unordered/detail/foa/element_type.hpp> #include <boost/core/allocator_access.hpp> #include <boost/core/no_exceptions_support.hpp> #include <boost/core/pointer_traits.hpp> namespace boost { namespace unordered { namespace detail { namespace foa { template <class Key, class T, class VoidPtr> struct node_map_types { using key_type = Key; using mapped_type = T; using raw_key_type = typename std::remove_const<Key>::type; using raw_mapped_type = typename std::remove_const<T>::type; using init_type = std::pair<raw_key_type, raw_mapped_type>; using value_type = std::pair<Key const, T>; using moved_type = std::pair<raw_key_type&&, raw_mapped_type&&>; using element_type = foa::element_type<value_type, VoidPtr>; static value_type& value_from(element_type const& x) { return *(x.p); } template <class K, class V> static raw_key_type const& extract(std::pair<K, V> const& kv) { return kv.first; } static raw_key_type const& extract(element_type const& kv) { return kv.p->first; } static element_type&& move(element_type& x) { return std::move(x); } static moved_type move(init_type& x) { return {std::move(x.first), std::move(x.second)}; } static moved_type move(value_type& x) { return {std::move(const_cast<raw_key_type&>(x.first)), std::move(const_cast<raw_mapped_type&>(x.second))}; } template <class A> static void construct(A&, element_type* p, element_type&& x) noexcept { p->p = x.p; x.p = nullptr; } template <class A> static void construct( A& al, element_type* p, element_type const& copy) { construct(al, p, *copy.p); } template <class A, class... Args> static void construct(A& al, init_type* p, Args&&... 
args) { boost::allocator_construct(al, p, std::forward<Args>(args)...); } template <class A, class... Args> static void construct(A& al, value_type* p, Args&&... args) { boost::allocator_construct(al, p, std::forward<Args>(args)...); } template <class A, class... Args> static void construct(A& al, key_type* p, Args&&... args) { boost::allocator_construct(al, p, std::forward<Args>(args)...); } template <class A, class... Args> static void construct(A& al, element_type* p, Args&&... args) { p->p = boost::allocator_allocate(al, 1); BOOST_TRY { boost::allocator_construct( al, boost::to_address(p->p), std::forward<Args>(args)...); } BOOST_CATCH(...) { boost::allocator_deallocate(al, p->p, 1); BOOST_RETHROW } BOOST_CATCH_END } template <class A> static void destroy(A& al, value_type* p) noexcept { boost::allocator_destroy(al, p); } template <class A> static void destroy(A& al, init_type* p) noexcept { boost::allocator_destroy(al, p); } template <class A> static void destroy(A& al, key_type* p) noexcept { boost::allocator_destroy(al, p); } template <class A> static void destroy(A& al, element_type* p) noexcept { if (p->p) { destroy(al, boost::to_address(p->p)); boost::allocator_deallocate(al, p->p, 1); } } }; } // namespace foa } // namespace detail } // namespace unordered } // namespace boost #endif // BOOST_UNORDERED_DETAIL_FOA_NODE_MAP_TYPES_HPP
0
repos/unordered/include/boost/unordered/detail
repos/unordered/include/boost/unordered/detail/foa/core.hpp
/* Common base for Boost.Unordered open-addressing tables. * * Copyright 2022-2024 Joaquin M Lopez Munoz. * Copyright 2023 Christian Mazakas. * Copyright 2024 Braden Ganetsky. * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) * * See https://www.boost.org/libs/unordered for library home page. */ #ifndef BOOST_UNORDERED_DETAIL_FOA_CORE_HPP #define BOOST_UNORDERED_DETAIL_FOA_CORE_HPP #include <boost/assert.hpp> #include <boost/config.hpp> #include <boost/config/workaround.hpp> #include <boost/core/allocator_traits.hpp> #include <boost/core/bit.hpp> #include <boost/core/empty_value.hpp> #include <boost/core/no_exceptions_support.hpp> #include <boost/core/pointer_traits.hpp> #include <boost/cstdint.hpp> #include <boost/predef.h> #include <boost/unordered/detail/allocator_constructed.hpp> #include <boost/unordered/detail/narrow_cast.hpp> #include <boost/unordered/detail/mulx.hpp> #include <boost/unordered/detail/static_assert.hpp> #include <boost/unordered/detail/type_traits.hpp> #include <boost/unordered/hash_traits.hpp> #include <climits> #include <cmath> #include <cstddef> #include <cstring> #include <limits> #include <memory> #include <new> #include <tuple> #include <type_traits> #include <utility> #if !defined(BOOST_UNORDERED_DISABLE_SSE2) #if defined(BOOST_UNORDERED_ENABLE_SSE2)|| \ defined(__SSE2__)|| \ defined(_M_X64)||(defined(_M_IX86_FP)&&_M_IX86_FP>=2) #define BOOST_UNORDERED_SSE2 #endif #endif #if !defined(BOOST_UNORDERED_DISABLE_NEON) #if defined(BOOST_UNORDERED_ENABLE_NEON)||\ (defined(__ARM_NEON)&&!defined(__ARM_BIG_ENDIAN)) #define BOOST_UNORDERED_LITTLE_ENDIAN_NEON #endif #endif #if defined(BOOST_UNORDERED_SSE2) #include <emmintrin.h> #elif defined(BOOST_UNORDERED_LITTLE_ENDIAN_NEON) #include <arm_neon.h> #endif #ifdef __has_builtin #define BOOST_UNORDERED_HAS_BUILTIN(x) __has_builtin(x) #else #define BOOST_UNORDERED_HAS_BUILTIN(x) 0 #endif #if 
!defined(NDEBUG) #define BOOST_UNORDERED_ASSUME(cond) BOOST_ASSERT(cond) #elif BOOST_UNORDERED_HAS_BUILTIN(__builtin_assume) #define BOOST_UNORDERED_ASSUME(cond) __builtin_assume(cond) #elif defined(__GNUC__) || BOOST_UNORDERED_HAS_BUILTIN(__builtin_unreachable) #define BOOST_UNORDERED_ASSUME(cond) \ do{ \ if(!(cond))__builtin_unreachable(); \ }while(0) #elif defined(_MSC_VER) #define BOOST_UNORDERED_ASSUME(cond) __assume(cond) #else #define BOOST_UNORDERED_ASSUME(cond) \ do{ \ static_cast<void>(false&&(cond)); \ }while(0) #endif /* We use BOOST_UNORDERED_PREFETCH[_ELEMENTS] macros rather than proper * functions because of https://gcc.gnu.org/bugzilla/show_bug.cgi?id=109985 */ #if defined(BOOST_GCC)||defined(BOOST_CLANG) #define BOOST_UNORDERED_PREFETCH(p) __builtin_prefetch((const char*)(p)) #elif defined(BOOST_UNORDERED_SSE2) #define BOOST_UNORDERED_PREFETCH(p) _mm_prefetch((const char*)(p),_MM_HINT_T0) #else #define BOOST_UNORDERED_PREFETCH(p) ((void)(p)) #endif /* We have experimentally confirmed that ARM architectures get a higher * speedup when around the first half of the element slots in a group are * prefetched, whereas for Intel just the first cache line is best. * Please report back if you find better tunings for some particular * architectures. */ #if BOOST_ARCH_ARM /* Cache line size can't be known at compile time, so we settle on * the very frequent value of 64B. 
*/ #define BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N) \ do{ \ auto BOOST_UNORDERED_P=(p); \ constexpr int cache_line=64; \ const char *p0=reinterpret_cast<const char*>(BOOST_UNORDERED_P), \ *p1=p0+sizeof(*BOOST_UNORDERED_P)*(N)/2; \ for(;p0<p1;p0+=cache_line)BOOST_UNORDERED_PREFETCH(p0); \ }while(0) #else #define BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N) BOOST_UNORDERED_PREFETCH(p) #endif #ifdef __has_feature #define BOOST_UNORDERED_HAS_FEATURE(x) __has_feature(x) #else #define BOOST_UNORDERED_HAS_FEATURE(x) 0 #endif #if BOOST_UNORDERED_HAS_FEATURE(thread_sanitizer)|| \ defined(__SANITIZE_THREAD__) #define BOOST_UNORDERED_THREAD_SANITIZER #endif #define BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED(Hash, Pred) \ static_assert(boost::unordered::detail::is_nothrow_swappable<Hash>::value, \ "Template parameter Hash is required to be nothrow Swappable."); \ static_assert(boost::unordered::detail::is_nothrow_swappable<Pred>::value, \ "Template parameter Pred is required to be nothrow Swappable"); namespace boost{ namespace unordered{ namespace detail{ namespace foa{ static constexpr std::size_t default_bucket_count=0; /* foa::table_core is the common base of foa::table and foa::concurrent_table, * which in their turn serve as the foundational core of * boost::unordered_(flat|node)_(map|set) and boost::concurrent_flat_(map|set), * respectively. Its main internal design aspects are: * * - Element slots are logically split into groups of size N=15. The number * of groups is always a power of two, so the number of allocated slots is of the form (N*2^n)-1 (final slot reserved for a sentinel mark). * - Positioning is done at the group level rather than the slot level, that * is, for any given element its hash value is used to locate a group and * insertion is performed on the first available element of that group; * if the group is full (overflow), further groups are tried using * quadratic probing. 
 * - Each group has an associated 16B metadata word holding reduced hash
 *   values and overflow information. Reduced hash values are used to
 *   accelerate lookup within the group by using 128-bit SIMD or 64-bit word
 *   operations.
 */

/* group15 controls metadata information of a group of N=15 element slots.
 * The 16B metadata word is organized as follows (LSB depicted rightmost):
 *
 *   +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *   |ofw|h14|h13|h12|h11|h10|h09|h08|h07|h06|h05|h04|h03|h02|h01|h00|
 *   +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *
 * hi is 0 if the i-th element slot is available, 1 to mark a sentinel and,
 * when the slot is occupied, a value in the range [2,255] obtained from the
 * element's original hash value.
 * ofw is the so-called overflow byte. If insertion of an element with hash
 * value h is tried on a full group, then the (h%8)-th bit of the overflow
 * byte is set to 1 and a further group is probed. Having an overflow byte
 * brings two advantages:
 *
 * - There's no need to reserve a special value of hi to mark tombstone
 *   slots; each reduced hash value keeps then log2(254)=7.99 bits of the
 *   original hash (alternative approaches reserve one full bit to mark
 *   if the slot is available/deleted, so their reduced hash values are 7 bit
 *   strong only).
 * - When doing an unsuccessful lookup (i.e. the element is not present in
 *   the table), probing stops at the first non-overflowed group. Having 8
 *   bits for signalling overflow makes it very likely that we stop at the
 *   current group (this happens when no element with the same (h%8) value
 *   has overflowed in the group), saving us an additional group check even
 *   under high-load/high-erase conditions. It is critical that hash
 *   reduction is invariant under modulo 8 (see maybe_caused_overflow).
 *
 * When looking for an element with hash value h, match(h) returns a bitmask
 * signalling which slots have the same reduced hash value.
If available, * match uses SSE2 or (little endian) Neon 128-bit SIMD operations. On non-SIMD * scenarios, the logical layout described above is physically mapped to two * 64-bit words with *bit interleaving*, i.e. the least significant 16 bits of * the first 64-bit word contain the least significant bits of each byte in the * "logical" 128-bit word, and so forth. With this layout, match can be * implemented with 4 ANDs, 3 shifts, 2 XORs, 1 OR and 1 NOT. * * IntegralWrapper<Integral> is used to implement group15's underlying * metadata: it behaves as a plain integral for foa::table or introduces * atomic ops for foa::concurrent_table. If IntegralWrapper<...> is trivially * constructible, so is group15, in which case it can be initialized via memset * etc. Where needed, group15::initialize resets the metadata to the all * zeros (default state). */ #if defined(BOOST_UNORDERED_SSE2) template<template<typename> class IntegralWrapper> struct group15 { static constexpr std::size_t N=15; static constexpr bool regular_layout=true; struct dummy_group_type { alignas(16) unsigned char storage[N+1]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0}; }; inline void initialize() { _mm_store_si128( reinterpret_cast<__m128i*>(m),_mm_setzero_si128()); } inline void set(std::size_t pos,std::size_t hash) { BOOST_ASSERT(pos<N); at(pos)=reduced_hash(hash); } inline void set_sentinel() { at(N-1)=sentinel_; } inline bool is_sentinel(std::size_t pos)const { BOOST_ASSERT(pos<N); return at(pos)==sentinel_; } static inline bool is_sentinel(unsigned char* pc)noexcept { return *pc==sentinel_; } inline void reset(std::size_t pos) { BOOST_ASSERT(pos<N); at(pos)=available_; } static inline void reset(unsigned char* pc) { *reinterpret_cast<slot_type*>(pc)=available_; } inline int match(std::size_t hash)const { return _mm_movemask_epi8( _mm_cmpeq_epi8(load_metadata(),_mm_set1_epi32(match_word(hash))))&0x7FFF; } inline bool is_not_overflowed(std::size_t hash)const { static constexpr unsigned char 
shift[]={1,2,4,8,16,32,64,128}; return !(overflow()&shift[hash%8]); } inline void mark_overflow(std::size_t hash) { overflow()|=static_cast<unsigned char>(1<<(hash%8)); } static inline bool maybe_caused_overflow(unsigned char* pc) { std::size_t pos=reinterpret_cast<uintptr_t>(pc)%sizeof(group15); group15 *pg=reinterpret_cast<group15*>(pc-pos); return !pg->is_not_overflowed(*pc); } inline int match_available()const { return _mm_movemask_epi8( _mm_cmpeq_epi8(load_metadata(),_mm_setzero_si128()))&0x7FFF; } inline bool is_occupied(std::size_t pos)const { BOOST_ASSERT(pos<N); return at(pos)!=available_; } static inline bool is_occupied(unsigned char* pc)noexcept { return *reinterpret_cast<slot_type*>(pc)!=available_; } inline int match_occupied()const { return (~match_available())&0x7FFF; } private: using slot_type=IntegralWrapper<unsigned char>; BOOST_UNORDERED_STATIC_ASSERT(sizeof(slot_type)==1); static constexpr unsigned char available_=0, sentinel_=1; inline __m128i load_metadata()const { #if defined(BOOST_UNORDERED_THREAD_SANITIZER) /* ThreadSanitizer complains on 1-byte atomic writes combined with * 16-byte atomic reads. 
*/ return _mm_set_epi8( (char)m[15],(char)m[14],(char)m[13],(char)m[12], (char)m[11],(char)m[10],(char)m[ 9],(char)m[ 8], (char)m[ 7],(char)m[ 6],(char)m[ 5],(char)m[ 4], (char)m[ 3],(char)m[ 2],(char)m[ 1],(char)m[ 0]); #else return _mm_load_si128(reinterpret_cast<const __m128i*>(m)); #endif } inline static int match_word(std::size_t hash) { static constexpr boost::uint32_t word[]= { 0x08080808u,0x09090909u,0x02020202u,0x03030303u,0x04040404u,0x05050505u, 0x06060606u,0x07070707u,0x08080808u,0x09090909u,0x0A0A0A0Au,0x0B0B0B0Bu, 0x0C0C0C0Cu,0x0D0D0D0Du,0x0E0E0E0Eu,0x0F0F0F0Fu,0x10101010u,0x11111111u, 0x12121212u,0x13131313u,0x14141414u,0x15151515u,0x16161616u,0x17171717u, 0x18181818u,0x19191919u,0x1A1A1A1Au,0x1B1B1B1Bu,0x1C1C1C1Cu,0x1D1D1D1Du, 0x1E1E1E1Eu,0x1F1F1F1Fu,0x20202020u,0x21212121u,0x22222222u,0x23232323u, 0x24242424u,0x25252525u,0x26262626u,0x27272727u,0x28282828u,0x29292929u, 0x2A2A2A2Au,0x2B2B2B2Bu,0x2C2C2C2Cu,0x2D2D2D2Du,0x2E2E2E2Eu,0x2F2F2F2Fu, 0x30303030u,0x31313131u,0x32323232u,0x33333333u,0x34343434u,0x35353535u, 0x36363636u,0x37373737u,0x38383838u,0x39393939u,0x3A3A3A3Au,0x3B3B3B3Bu, 0x3C3C3C3Cu,0x3D3D3D3Du,0x3E3E3E3Eu,0x3F3F3F3Fu,0x40404040u,0x41414141u, 0x42424242u,0x43434343u,0x44444444u,0x45454545u,0x46464646u,0x47474747u, 0x48484848u,0x49494949u,0x4A4A4A4Au,0x4B4B4B4Bu,0x4C4C4C4Cu,0x4D4D4D4Du, 0x4E4E4E4Eu,0x4F4F4F4Fu,0x50505050u,0x51515151u,0x52525252u,0x53535353u, 0x54545454u,0x55555555u,0x56565656u,0x57575757u,0x58585858u,0x59595959u, 0x5A5A5A5Au,0x5B5B5B5Bu,0x5C5C5C5Cu,0x5D5D5D5Du,0x5E5E5E5Eu,0x5F5F5F5Fu, 0x60606060u,0x61616161u,0x62626262u,0x63636363u,0x64646464u,0x65656565u, 0x66666666u,0x67676767u,0x68686868u,0x69696969u,0x6A6A6A6Au,0x6B6B6B6Bu, 0x6C6C6C6Cu,0x6D6D6D6Du,0x6E6E6E6Eu,0x6F6F6F6Fu,0x70707070u,0x71717171u, 0x72727272u,0x73737373u,0x74747474u,0x75757575u,0x76767676u,0x77777777u, 0x78787878u,0x79797979u,0x7A7A7A7Au,0x7B7B7B7Bu,0x7C7C7C7Cu,0x7D7D7D7Du, 0x7E7E7E7Eu,0x7F7F7F7Fu,0x80808080u,0x81818181u,0x82828282u,0x83838383u, 
0x84848484u,0x85858585u,0x86868686u,0x87878787u,0x88888888u,0x89898989u, 0x8A8A8A8Au,0x8B8B8B8Bu,0x8C8C8C8Cu,0x8D8D8D8Du,0x8E8E8E8Eu,0x8F8F8F8Fu, 0x90909090u,0x91919191u,0x92929292u,0x93939393u,0x94949494u,0x95959595u, 0x96969696u,0x97979797u,0x98989898u,0x99999999u,0x9A9A9A9Au,0x9B9B9B9Bu, 0x9C9C9C9Cu,0x9D9D9D9Du,0x9E9E9E9Eu,0x9F9F9F9Fu,0xA0A0A0A0u,0xA1A1A1A1u, 0xA2A2A2A2u,0xA3A3A3A3u,0xA4A4A4A4u,0xA5A5A5A5u,0xA6A6A6A6u,0xA7A7A7A7u, 0xA8A8A8A8u,0xA9A9A9A9u,0xAAAAAAAAu,0xABABABABu,0xACACACACu,0xADADADADu, 0xAEAEAEAEu,0xAFAFAFAFu,0xB0B0B0B0u,0xB1B1B1B1u,0xB2B2B2B2u,0xB3B3B3B3u, 0xB4B4B4B4u,0xB5B5B5B5u,0xB6B6B6B6u,0xB7B7B7B7u,0xB8B8B8B8u,0xB9B9B9B9u, 0xBABABABAu,0xBBBBBBBBu,0xBCBCBCBCu,0xBDBDBDBDu,0xBEBEBEBEu,0xBFBFBFBFu, 0xC0C0C0C0u,0xC1C1C1C1u,0xC2C2C2C2u,0xC3C3C3C3u,0xC4C4C4C4u,0xC5C5C5C5u, 0xC6C6C6C6u,0xC7C7C7C7u,0xC8C8C8C8u,0xC9C9C9C9u,0xCACACACAu,0xCBCBCBCBu, 0xCCCCCCCCu,0xCDCDCDCDu,0xCECECECEu,0xCFCFCFCFu,0xD0D0D0D0u,0xD1D1D1D1u, 0xD2D2D2D2u,0xD3D3D3D3u,0xD4D4D4D4u,0xD5D5D5D5u,0xD6D6D6D6u,0xD7D7D7D7u, 0xD8D8D8D8u,0xD9D9D9D9u,0xDADADADAu,0xDBDBDBDBu,0xDCDCDCDCu,0xDDDDDDDDu, 0xDEDEDEDEu,0xDFDFDFDFu,0xE0E0E0E0u,0xE1E1E1E1u,0xE2E2E2E2u,0xE3E3E3E3u, 0xE4E4E4E4u,0xE5E5E5E5u,0xE6E6E6E6u,0xE7E7E7E7u,0xE8E8E8E8u,0xE9E9E9E9u, 0xEAEAEAEAu,0xEBEBEBEBu,0xECECECECu,0xEDEDEDEDu,0xEEEEEEEEu,0xEFEFEFEFu, 0xF0F0F0F0u,0xF1F1F1F1u,0xF2F2F2F2u,0xF3F3F3F3u,0xF4F4F4F4u,0xF5F5F5F5u, 0xF6F6F6F6u,0xF7F7F7F7u,0xF8F8F8F8u,0xF9F9F9F9u,0xFAFAFAFAu,0xFBFBFBFBu, 0xFCFCFCFCu,0xFDFDFDFDu,0xFEFEFEFEu,0xFFFFFFFFu, }; return (int)word[narrow_cast<unsigned char>(hash)]; } inline static unsigned char reduced_hash(std::size_t hash) { return narrow_cast<unsigned char>(match_word(hash)); } inline slot_type& at(std::size_t pos) { return m[pos]; } inline const slot_type& at(std::size_t pos)const { return m[pos]; } inline slot_type& overflow() { return at(N); } inline const slot_type& overflow()const { return at(N); } alignas(16) slot_type m[16]; }; #elif defined(BOOST_UNORDERED_LITTLE_ENDIAN_NEON) 
/* group15 variant for little-endian ARM NEON: the 15 reduced-hash slots plus
 * the overflow byte are kept as 16 contiguous bytes and probed with 128-bit
 * NEON comparisons, emulating the SSE2 path above.
 */

template<template<typename> class IntegralWrapper>
struct group15
{
  /* number of element slots per group (the 16th byte holds overflow bits) */
  static constexpr std::size_t N=15;
  static constexpr bool        regular_layout=true;

  /* static image of an empty group used by dummy_groups(); byte N-1 set to 1
   * (sentinel_) marks the sentinel slot */
  struct dummy_group_type
  {
    alignas(16) unsigned char storage[N+1]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0};
  };

  /* zeroes all metadata (slots available, overflow cleared) */
  inline void initialize()
  {
    vst1q_u8(reinterpret_cast<uint8_t*>(m),vdupq_n_u8(0));
  }

  /* records the reduced hash of the element inserted at slot pos */
  inline void set(std::size_t pos,std::size_t hash)
  {
    BOOST_ASSERT(pos<N);
    at(pos)=reduced_hash(hash);
  }

  inline void set_sentinel()
  {
    at(N-1)=sentinel_;
  }

  inline bool is_sentinel(std::size_t pos)const
  {
    BOOST_ASSERT(pos<N);
    return pos==N-1&&at(N-1)==sentinel_;
  }

  /* pc points directly at a slot's metadata byte */
  static inline bool is_sentinel(unsigned char* pc)noexcept
  {
    return *reinterpret_cast<slot_type*>(pc)==sentinel_;
  }

  /* marks slot pos as empty (available_) again */
  inline void reset(std::size_t pos)
  {
    BOOST_ASSERT(pos<N);
    at(pos)=available_;
  }

  static inline void reset(unsigned char* pc)
  {
    *reinterpret_cast<slot_type*>(pc)=available_;
  }

  /* 15-bit mask of slots whose metadata byte equals reduced_hash(hash);
   * bit 15 (the overflow byte's lane) is masked out */
  inline int match(std::size_t hash)const
  {
    return simde_mm_movemask_epi8(vceqq_u8(
      load_metadata(),vdupq_n_u8(reduced_hash(hash))))&0x7FFF;
  }

  /* the overflow byte holds one bit per hash%8 residue class; tested here
   * and set in mark_overflow */
  inline bool is_not_overflowed(std::size_t hash)const
  {
    static constexpr unsigned char shift[]={1,2,4,8,16,32,64,128};

    return !(overflow()&shift[hash%8]);
  }

  inline void mark_overflow(std::size_t hash)
  {
    overflow()|=static_cast<unsigned char>(1<<(hash%8));
  }

  /* pc points at a metadata byte; recovers the enclosing group (groups are
   * sizeof(group15)-aligned) and checks the overflow bit keyed by the byte's
   * own reduced-hash value */
  static inline bool maybe_caused_overflow(unsigned char* pc)
  {
    std::size_t pos=reinterpret_cast<uintptr_t>(pc)%sizeof(group15);
    group15    *pg=reinterpret_cast<group15*>(pc-pos);
    return !pg->is_not_overflowed(*pc);
  };

  /* 15-bit mask of empty (available_==0) slots */
  inline int match_available()const
  {
    return simde_mm_movemask_epi8(vceqq_u8(
      load_metadata(),vdupq_n_u8(0)))&0x7FFF;
  }

  inline bool is_occupied(std::size_t pos)const
  {
    BOOST_ASSERT(pos<N);
    return at(pos)!=available_;
  }

  static inline bool is_occupied(unsigned char* pc)noexcept
  {
    return *reinterpret_cast<slot_type*>(pc)!=available_;
  }

  /* 15-bit mask of non-empty slots (metadata byte > 0) */
  inline int match_occupied()const
  {
    return simde_mm_movemask_epi8(vcgtq_u8(
      load_metadata(),vdupq_n_u8(0)))&0x7FFF;
  }

private:
  /* IntegralWrapper allows the concurrent containers to substitute an
   * atomic-like byte type; plain containers use the raw type */
  using slot_type=IntegralWrapper<unsigned char>;
  BOOST_UNORDERED_STATIC_ASSERT(sizeof(slot_type)==1);

  static constexpr unsigned char available_=0,
                                 sentinel_=1;

  inline uint8x16_t load_metadata()const
  {
#if defined(BOOST_UNORDERED_THREAD_SANITIZER)
    /* ThreadSanitizer complains on 1-byte atomic writes combined with
     * 16-byte atomic reads.
     */

    alignas(16) uint8_t data[16]={
      m[ 0],m[ 1],m[ 2],m[ 3],m[ 4],m[ 5],m[ 6],m[ 7],
      m[ 8],m[ 9],m[10],m[11],m[12],m[13],m[14],m[15]};
    return vld1q_u8(data);
#else
    return vld1q_u8(reinterpret_cast<const uint8_t*>(m));
#endif
  }

  /* maps hash to a metadata byte; the low byte of hash is used except that
   * 0 and 1 (reserved for available_/sentinel_) are remapped to 8 and 9 */
  inline static unsigned char reduced_hash(std::size_t hash)
  {
    static constexpr unsigned char table[]={
      8,9,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
      16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
      32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
      48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
      64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
      80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
      96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
      112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
      128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
      144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,
      160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,
      176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,
      192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,
      208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,
      224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,
      240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,
    };

    return table[(unsigned char)hash];
  }

  /* Copied from
   * https://github.com/simd-everywhere/simde/blob/master/simde/x86/
   * sse2.h#L3763
   */

  static inline int simde_mm_movemask_epi8(uint8x16_t a)
  {
    static constexpr uint8_t md[16]={
      1 << 0, 1 << 1, 1 << 2, 1 << 3,
      1 << 4, 1 << 5, 1 << 6, 1 << 7,
      1 << 0, 1 << 1, 1 << 2, 1 << 3,
      1 << 4, 1 << 5, 1 << 6, 1 << 7,
    };

    uint8x16_t  masked=vandq_u8(vld1q_u8(md),a);
    uint8x8x2_t tmp=vzip_u8(vget_low_u8(masked),vget_high_u8(masked));
    uint16x8_t  x=vreinterpretq_u16_u8(vcombine_u8(tmp.val[0],tmp.val[1]));

#if defined(__ARM_ARCH_ISA_A64)
    return vaddvq_u16(x);
#else
    uint64x2_t t64=vpaddlq_u32(vpaddlq_u16(x));
    return int(vgetq_lane_u64(t64,0))+int(vgetq_lane_u64(t64,1));
#endif
  }

  inline slot_type& at(std::size_t pos)
  {
    return m[pos];
  }

  inline const slot_type& at(std::size_t pos)const
  {
    return m[pos];
  }

  /* the 16th byte doubles as the group's overflow byte */
  inline slot_type& overflow()
  {
    return at(N);
  }

  inline const slot_type& overflow()const
  {
    return at(N);
  }

  alignas(16) slot_type m[16];
};

#else /* non-SIMD */

/* Portable (SWAR) group15: metadata is bit-sliced across two 64-bit words.
 * m[0] holds the low nibble and m[1] the high nibble of each slot's metadata
 * byte; within each word, bit i of a nibble lives at bit pos of the i-th
 * 16-bit lane (see set_impl's mask tables).
 */

template<template<typename> class IntegralWrapper>
struct group15
{
  static constexpr std::size_t N=15;
  static constexpr bool        regular_layout=false;

  /* empty-group image: the 0x4000 bit pattern encodes sentinel_ (1) in the
   * low-nibble word at slot 14, matching is_sentinel below */
  struct dummy_group_type
  {
    alignas(16) boost::uint64_t m[2]=
      {0x0000000000004000ull,0x0000000000000000ull};
  };

  inline void initialize(){m[0]=0;m[1]=0;}

  inline void set(std::size_t pos,std::size_t hash)
  {
    BOOST_ASSERT(pos<N);
    set_impl(pos,reduced_hash(hash));
  }

  inline void set_sentinel()
  {
    set_impl(N-1,sentinel_);
  }

  inline bool is_sentinel(std::size_t pos)const
  {
    BOOST_ASSERT(pos<N);
    /* slot N-1's bit column is 0x4000...; sentinel_==1 means only the lowest
     * lane of m[0] carries that bit and m[1] carries none */
    return pos==N-1&&
      (m[0] & boost::uint64_t(0x4000400040004000ull))==
        boost::uint64_t(0x4000ull)&&
      (m[1] & boost::uint64_t(0x4000400040004000ull))==0;
  }

  inline void reset(std::size_t pos)
  {
    BOOST_ASSERT(pos<N);
    set_impl(pos,available_);
  }

  /* pc points at byte pos of a sizeof(group15)-aligned group */
  static inline void reset(unsigned char* pc)
  {
    std::size_t pos=reinterpret_cast<uintptr_t>(pc)%sizeof(group15);
    pc-=pos;
    reinterpret_cast<group15*>(pc)->reset(pos);
  }

  inline int match(std::size_t hash)const
  {
    return match_impl(reduced_hash(hash));
  }

  /* overflow information is kept in bit 15 of each of the eight 16-bit
   * lanes, indexed by hash%8 */
  inline bool is_not_overflowed(std::size_t hash)const
  {
    return !(reinterpret_cast<const boost::uint16_t*>(m)[hash%8] & 0x8000u);
  }

  inline void mark_overflow(std::size_t hash)
  {
    reinterpret_cast<boost::uint16_t*>(m)[hash%8]|=0x8000u;
  }

  /* reconstructs (part of) the slot's metadata value from its bit columns
   * and checks the corresponding overflow bit */
  static inline bool maybe_caused_overflow(unsigned char* pc)
  {
    std::size_t     pos=reinterpret_cast<uintptr_t>(pc)%sizeof(group15);
    group15        *pg=reinterpret_cast<group15*>(pc-pos);
    boost::uint64_t x=((pg->m[0])>>pos)&0x000100010001ull;
    boost::uint32_t y=narrow_cast<boost::uint32_t>(x|(x>>15)|(x>>30));
    return !pg->is_not_overflowed(y);
  };

  /* a slot is available iff all its metadata bits are zero in both words */
  inline int match_available()const
  {
    boost::uint64_t x=~(m[0]|m[1]);
    boost::uint32_t y=static_cast<boost::uint32_t>(x&(x>>32));
    y&=y>>16;
    return y&0x7FFF;
  }

  inline bool is_occupied(std::size_t pos)const
  {
    BOOST_ASSERT(pos<N);
    boost::uint64_t x=m[0]|m[1];
    return (x&(0x0001000100010001ull<<pos))!=0;
  }

  inline int match_occupied()const
  {
    boost::uint64_t x=m[0]|m[1];
    boost::uint32_t y=narrow_cast<boost::uint32_t>(x|(x>>32));
    y|=y>>16;
    return y&0x7FFF;
  }

private:
  using word_type=IntegralWrapper<uint64_t>;
  BOOST_UNORDERED_STATIC_ASSERT(sizeof(word_type)==8);

  static constexpr unsigned char available_=0,
                                 sentinel_=1;

  /* same remapping as the SIMD variants: low byte of hash, with 0 and 1
   * (reserved values) sent to 8 and 9 */
  inline static unsigned char reduced_hash(std::size_t hash)
  {
    static constexpr unsigned char table[]={
      8,9,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
      16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
      32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
      48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
      64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
      80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
      96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
      112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
      128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
      144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,
      160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,
      176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,
      192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,
      208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,
      224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,
      240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,
    };

    return table[narrow_cast<unsigned char>(hash)];
  }

  /* stores metadata byte n at slot pos: low nibble into m[0], high into m[1] */
  inline void set_impl(std::size_t pos,std::size_t n)
  {
    BOOST_ASSERT(n<256);
    set_impl(m[0],pos,n&0xFu);
    set_impl(m[1],pos,n>>4);
  }

  /* writes nibble n into bit column pos of x: mask[n] sets the bits of n
   * across the four 16-bit lanes, imask[n] clears the complementary bits */
  static inline void set_impl(word_type& x,std::size_t pos,std::size_t n)
  {
    static constexpr boost::uint64_t mask[]=
    {
      0x0000000000000000ull,0x0000000000000001ull,0x0000000000010000ull,
      0x0000000000010001ull,0x0000000100000000ull,0x0000000100000001ull,
      0x0000000100010000ull,0x0000000100010001ull,0x0001000000000000ull,
      0x0001000000000001ull,0x0001000000010000ull,0x0001000000010001ull,
      0x0001000100000000ull,0x0001000100000001ull,0x0001000100010000ull,
      0x0001000100010001ull,
    };
    static constexpr boost::uint64_t imask[]=
    {
      0x0001000100010001ull,0x0001000100010000ull,0x0001000100000001ull,
      0x0001000100000000ull,0x0001000000010001ull,0x0001000000010000ull,
      0x0001000000000001ull,0x0001000000000000ull,0x0000000100010001ull,
      0x0000000100010000ull,0x0000000100000001ull,0x0000000100000000ull,
      0x0000000000010001ull,0x0000000000010000ull,0x0000000000000001ull,
      0x0000000000000000ull,
    };

    BOOST_ASSERT(pos<16&&n<16);
    x|=   mask[n]<<pos;
    x&=~(imask[n]<<pos);
  }

  /* returns the 15-bit mask of slots whose metadata byte equals n; mask[k]
   * replicates nibble k across the four lanes so the XOR zeroes matching
   * columns, which the fold then detects */
  inline int match_impl(std::size_t n)const
  {
    static constexpr boost::uint64_t mask[]=
    {
      0x0000000000000000ull,0x000000000000ffffull,0x00000000ffff0000ull,
      0x00000000ffffffffull,0x0000ffff00000000ull,0x0000ffff0000ffffull,
      0x0000ffffffff0000ull,0x0000ffffffffffffull,0xffff000000000000ull,
      0xffff00000000ffffull,0xffff0000ffff0000ull,0xffff0000ffffffffull,
      0xffffffff00000000ull,0xffffffff0000ffffull,0xffffffffffff0000ull,
      0xffffffffffffffffull,
    };

    BOOST_ASSERT(n<256);
    boost::uint64_t x=m[0]^mask[n&0xFu];
                    x=~((m[1]^mask[n>>4])|x);
    boost::uint32_t y=static_cast<boost::uint32_t>(x&(x>>32));
                    y&=y>>16;
    return          y&0x7FFF;
  }

  alignas(16) word_type m[2];
};

#endif

/* foa::table_core uses a size policy to obtain the permissible sizes of the
 * group array (and, by implication, the element array) and to do the
 * hash->group mapping.
 *
 * - size_index(n) returns an unspecified "index" number used in other policy
 *   operations.
 * - size(size_index_) returns the number of groups for the given index.
It * is guaranteed that size(size_index(n)) >= n. * - min_size() is the minimum number of groups permissible, i.e. * size(size_index(0)). * - position(hash,size_index_) maps hash to a position in the range * [0,size(size_index_)). * * The reason we're introducing the intermediate index value for calculating * sizes and positions is that it allows us to optimize the implementation of * position, which is in the hot path of lookup and insertion operations: * pow2_size_policy, the actual size policy used by foa::table, returns 2^n * (n>0) as permissible sizes and returns the n most significant bits * of the hash value as the position in the group array; using a size index * defined as i = (bits in std::size_t) - n, we have an unbeatable * implementation of position(hash) as hash>>i. * There's a twofold reason for choosing the high bits of hash for positioning: * - Multiplication-based mixing tends to yield better entropy in the high * part of its result. * - group15 reduced-hash values take the *low* bits of hash, and we want * these values and positioning to be as uncorrelated as possible. 
*/

struct pow2_size_policy
{
  /* index i is defined as (bits in std::size_t) - n for a group array of
   * size 2^n, so position() below is just hash>>i */
  static inline std::size_t size_index(std::size_t n)
  {
    // TODO: min size is 2, see if we can bring it down to 1 without loss
    // of performance

    return sizeof(std::size_t)*CHAR_BIT-
      (n<=2?1:((std::size_t)(boost::core::bit_width(n-1))));
  }

  static inline std::size_t size(std::size_t size_index_)
  {
     return std::size_t(1)<<(sizeof(std::size_t)*CHAR_BIT-size_index_);  
  }

  static constexpr std::size_t min_size(){return 2;}

  /* maps hash to [0,size(size_index_)) via its most significant bits */
  static inline std::size_t position(std::size_t hash,std::size_t size_index_)
  {
    return hash>>size_index_;
  }
};

/* size index of a group array for a given *element* capacity */

template<typename Group,typename SizePolicy>
static inline std::size_t size_index_for(std::size_t n)
{
  /* n/N+1 == ceil((n+1)/N) (extra +1 for the sentinel) */
  return SizePolicy::size_index(n/Group::N+1);
}

/* Quadratic prober over a power-of-two range using triangular numbers.
 * mask in next(mask) must be the range size minus one (and since size is 2^n,
 * mask has exactly its n first bits set to 1).
 */

struct pow2_quadratic_prober
{
  pow2_quadratic_prober(std::size_t pos_):pos{pos_}{}

  inline std::size_t get()const{return pos;}

  /* next returns false when the whole array has been traversed, which ends
   * probing (in practice, full-table probing will only happen with very small
   * arrays).
   */

  inline bool next(std::size_t mask)
  {
    step+=1;
    pos=(pos+step)&mask;
    return step<=mask;
  }

private:
  std::size_t pos,step=0;
};

/* Mixing policies: no_mix is the identity function, and mulx_mix
 * uses the mulx function from <boost/unordered/detail/mulx.hpp>.
 *
 * foa::table_core mixes hash results with mulx_mix unless the hash is marked
 * as avalanching, i.e. of good quality
 * (see <boost/unordered/hash_traits.hpp>).
*/

struct no_mix
{
  template<typename Hash,typename T>
  static inline std::size_t mix(const Hash& h,const T& x)
  {
    return h(x);
  }
};

struct mulx_mix
{
  template<typename Hash,typename T>
  static inline std::size_t mix(const Hash& h,const T& x)
  {
    return mulx(h(x));
  }
};

/* boost::core::countr_zero has a potentially costly check for
 * the case x==0.
 */

inline unsigned int unchecked_countr_zero(int x)
{
#if defined(BOOST_MSVC)
  unsigned long r;
  _BitScanForward(&r,(unsigned long)x);
  return (unsigned int)r;
#else
  BOOST_UNORDERED_ASSUME(x!=0);
  return (unsigned int)boost::core::countr_zero((unsigned int)x);
#endif
}

/* table_arrays controls allocation, initialization and deallocation of
 * paired arrays of groups and element slots. Only one chunk of memory is
 * allocated to place both arrays: this is not done for efficiency reasons,
 * but in order to be able to properly align the group array without storing
 * additional offset information --the alignment required (16B) is usually
 * greater than alignof(std::max_align_t) and thus not guaranteed by
 * allocators.
 */

template<typename Group,std::size_t Size>
Group* dummy_groups()
{
  /* Dummy storage initialized as if in an empty container (actually, each
   * of its groups is initialized like a separate empty container).
   * We make table_arrays::groups point to this when capacity()==0, so that
   * we are not allocating any dynamic memory and yet lookup can be implemented
   * without checking for groups==nullptr. This space won't ever be used for
   * insertion as the container's capacity is precisely zero.
   */

  static constexpr typename Group::dummy_group_type
  storage[Size]={typename Group::dummy_group_type(),};

  return reinterpret_cast<Group*>(
    const_cast<typename Group::dummy_group_type*>(storage));
}

/* converts between raw and (possibly fancy) pointer types; the overload for
 * distinct types maps null to null and rebinds through pointer_traits */

template<
  typename Ptr,typename Ptr2,
  typename std::enable_if<!std::is_same<Ptr,Ptr2>::value>::type* = nullptr
>
Ptr to_pointer(Ptr2 p)
{
  if(!p){return nullptr;}
  return boost::pointer_traits<Ptr>::pointer_to(*p);
}

template<typename Ptr>
Ptr to_pointer(Ptr p)
{
  return p;
}

/* RAII guard: deletes the held arrays on destruction unless release() has
 * been called (used for exception safety during (re)allocation) */

template<typename Arrays,typename Allocator>
struct arrays_holder
{
  arrays_holder(const Arrays& arrays,const Allocator& al):
    arrays_{arrays},al_{al}
  {}

  /* not defined but VS in pre-C++17 mode needs to see it for RVO */
  arrays_holder(arrays_holder const&);
  arrays_holder& operator=(arrays_holder const&)=delete;

  ~arrays_holder()
  {
    if(!released_){
      arrays_.delete_(typename Arrays::allocator_type(al_),arrays_);
    }
  }

  /* disarms the guard and hands back the arrays */
  const Arrays& release()
  {
    released_=true;
    return arrays_;
  }

private:
  Arrays    arrays_;
  Allocator al_;
  bool      released_=false;
};

template<typename Value,typename Group,typename SizePolicy,typename Allocator>
struct table_arrays
{
  using allocator_type=typename boost::allocator_rebind<Allocator,Value>::type;

  using value_type=Value;
  using group_type=Group;
  static constexpr auto N=group_type::N;
  using size_policy=SizePolicy;
  using value_type_pointer=
    typename boost::allocator_pointer<allocator_type>::type;
  using group_type_pointer=
    typename boost::pointer_traits<value_type_pointer>::template
      rebind<group_type>;
  using group_type_pointer_traits=boost::pointer_traits<group_type_pointer>;

  table_arrays(
    std::size_t gsi,std::size_t gsm,
    group_type_pointer pg,value_type_pointer pe):
    groups_size_index{gsi},groups_size_mask{gsm},groups_{pg},elements_{pe}{}

  value_type* elements()const noexcept{return boost::to_address(elements_);}
  group_type* groups()const noexcept{return boost::to_address(groups_);}

  /* dispatches on whether group_type_pointer is a plain pointer: only then
   * can groups_ point into the static dummy_groups storage for n==0 */
  static void set_arrays(table_arrays& arrays,allocator_type al,std::size_t n)
  {
    return set_arrays(
      arrays,al,n,std::is_same<group_type*,group_type_pointer>{});
  }

  static void set_arrays(
    table_arrays& arrays,allocator_type al,std::size_t,
    std::false_type /* always allocate */)
  {
    using storage_traits=boost::allocator_traits<allocator_type>;
    auto groups_size_index=arrays.groups_size_index;
    auto groups_size=size_policy::size(groups_size_index);

    auto sal=allocator_type(al);
    arrays.elements_=storage_traits::allocate(sal,buffer_size(groups_size));

    /* Align arrays.groups to sizeof(group_type). table_iterator critically
     * depends on such alignment for its increment operation.
     */

    auto p=reinterpret_cast<unsigned char*>(arrays.elements()+groups_size*N-1);
    p+=(uintptr_t(sizeof(group_type))-
        reinterpret_cast<uintptr_t>(p))%sizeof(group_type);
    arrays.groups_=
      group_type_pointer_traits::pointer_to(*reinterpret_cast<group_type*>(p));

    initialize_groups(
      arrays.groups(),groups_size,
      is_trivially_default_constructible<group_type>{});
    arrays.groups()[groups_size-1].set_sentinel();
  }

  static void set_arrays(
    table_arrays& arrays,allocator_type al,std::size_t n,
    std::true_type /* optimize for n==0*/)
  {
    if(!n){
      arrays.groups_=dummy_groups<group_type,size_policy::min_size()>();
    }
    else{
      set_arrays(arrays,al,n,std::false_type{});
    }
  }

  /* creates arrays sized for at least n elements */
  static table_arrays new_(allocator_type al,std::size_t n)
  {
    auto         groups_size_index=size_index_for<group_type,size_policy>(n);
    auto         groups_size=size_policy::size(groups_size_index);
    table_arrays arrays{groups_size_index,groups_size-1,nullptr,nullptr};

    set_arrays(arrays,al,n);
    return arrays;
  }

  static void delete_(allocator_type al,table_arrays& arrays)noexcept
  {
    using storage_traits=boost::allocator_traits<allocator_type>;

    auto sal=allocator_type(al);
    /* elements() is null when groups_ points to the static dummy storage */
    if(arrays.elements()){
      storage_traits::deallocate(
        sal,arrays.elements_,buffer_size(arrays.groups_size_mask+1));
    }
  }

  /* combined space for elements and groups measured in sizeof(value_type)s */

  static std::size_t buffer_size(std::size_t groups_size)
  {
    auto buffer_bytes=
      /* space for elements (we subtract 1 because of the sentinel) */
      sizeof(value_type)*(groups_size*N-1)+
      /* space for groups + padding for group alignment */
      sizeof(group_type)*(groups_size+1)-1;

    /* ceil(buffer_bytes/sizeof(value_type)) */
    return (buffer_bytes+sizeof(value_type)-1)/sizeof(value_type);
  }

  static void initialize_groups(
    group_type* pg,std::size_t size,std::true_type /* memset */)
  {
    /* memset faster/not slower than manual, assumes all zeros is group_type's
     * default layout.
     * reinterpret_cast: GCC may complain about group_type not being trivially
     * copy-assignable when we're relying on trivial copy constructibility.
     */

    std::memset(
      reinterpret_cast<unsigned char*>(pg),0,sizeof(group_type)*size);
  }

  static void initialize_groups(
    group_type* pg,std::size_t size,std::false_type /* manual */)
  {
    while(size--!=0)::new (pg++) group_type();
  }

  std::size_t        groups_size_index;
  std::size_t        groups_size_mask;
  group_type_pointer groups_;
  value_type_pointer elements_;
};

/* pre-C++17 emulation of if constexpr: calls f when B, else g (no-op by
 * default) */

struct if_constexpr_void_else{void operator()()const{}};

template<bool B,typename F,typename G=if_constexpr_void_else>
void if_constexpr(F f,G g={})
{
  std::get<B?0:1>(std::forward_as_tuple(f,g))();
}

/* conditional assignment/swap helpers used to implement allocator
 * propagation traits (POCCA/POCMA/POCS) */

template<bool B,typename T,typename std::enable_if<B>::type* =nullptr>
void copy_assign_if(T& x,const T& y){x=y;}

template<bool B,typename T,typename std::enable_if<!B>::type* =nullptr>
void copy_assign_if(T&,const T&){}

template<bool B,typename T,typename std::enable_if<B>::type* =nullptr>
void move_assign_if(T& x,T& y){x=std::move(y);}

template<bool B,typename T,typename std::enable_if<!B>::type* =nullptr>
void move_assign_if(T&,T&){}

template<bool B,typename T,typename std::enable_if<B>::type* =nullptr>
void swap_if(T& x,T& y){using std::swap; swap(x,y);}

template<bool B,typename T,typename std::enable_if<!B>::type* =nullptr>
void swap_if(T&,T&){}

template<typename Allocator>
struct is_std_allocator:std::false_type{};

template<typename T>
struct is_std_allocator<std::allocator<T>>:std::true_type{};

/* std::allocator::construct marked as deprecated
*/

#if defined(_LIBCPP_SUPPRESS_DEPRECATED_PUSH)
_LIBCPP_SUPPRESS_DEPRECATED_PUSH
#elif defined(_STL_DISABLE_DEPRECATED_WARNING)
_STL_DISABLE_DEPRECATED_WARNING
#elif defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable:4996)
#endif

/* detects whether Allocator provides a construct(Ptr,Args...) member
 * (classic detection idiom; the pragmas above silence deprecation warnings
 * triggered by probing std::allocator::construct) */

template<typename Allocator,typename Ptr,typename... Args>
struct alloc_has_construct
{
private:
  template<typename Allocator2>
  static decltype(
    std::declval<Allocator2&>().construct(
      std::declval<Ptr>(),std::declval<Args&&>()...),
    std::true_type{}
  ) check(int);

  template<typename> static std::false_type check(...);

public:
  static constexpr bool value=decltype(check<Allocator>(0))::value;
};

#if defined(_LIBCPP_SUPPRESS_DEPRECATED_POP)
_LIBCPP_SUPPRESS_DEPRECATED_POP
#elif defined(_STL_RESTORE_DEPRECATED_WARNING)
_STL_RESTORE_DEPRECATED_WARNING
#elif defined(_MSC_VER)
#pragma warning(pop)
#endif

/* We expose the hard-coded max load factor so that tests can use it without
 * needing to pull it from an instantiated class template such as the table
 * class.
 */

static constexpr float mlf=0.875f;

/* result of a lookup: group, in-group slot index and element pointer;
 * contextually convertible to bool (found/not found) */

template<typename Group,typename Element>
struct table_locator
{
  table_locator()=default;
  table_locator(Group* pg_,unsigned int n_,Element* p_):pg{pg_},n{n_},p{p_}{}

  explicit operator bool()const noexcept{return p!=nullptr;}

  Group        *pg=nullptr;
  unsigned int  n=0;
  Element      *p=nullptr;
};

/* tag type disambiguating try_emplace-style construction */

struct try_emplace_args_t{};

/* holds a value allocator-constructed from Args, choosing the cheapest of
 * init_type/value_type/element_type that the arguments can construct */

template<typename TypePolicy,typename Allocator,typename... Args>
class alloc_cted_insert_type
{
  using emplace_type=typename std::conditional<
    std::is_constructible<typename TypePolicy::init_type,Args...>::value,
    typename TypePolicy::init_type,
    typename TypePolicy::value_type
  >::type;

  using insert_type=typename std::conditional<
    std::is_constructible<typename TypePolicy::value_type,emplace_type>::value,
    emplace_type,typename TypePolicy::element_type
  >::type;

  using alloc_cted = allocator_constructed<Allocator, insert_type, TypePolicy>;
  alloc_cted val;

public:
  alloc_cted_insert_type(const Allocator& al_,Args&&... args):val{al_,std::forward<Args>(args)...}
  {
  }

  insert_type& value(){return val.value();}
};

template<typename TypePolicy,typename Allocator,typename... Args>
alloc_cted_insert_type<TypePolicy,Allocator,Args...>
alloc_make_insert_type(const Allocator& al,Args&&... args)
{
  return {al,std::forward<Args>(args)...};
}

/* primary template: materializes a key_type from KFwdRef via the allocator
 * and surrenders it with move_or_fwd() */

template <typename TypePolicy, typename Allocator, typename KFwdRef,
  typename = void>
class alloc_cted_or_fwded_key_type
{
  using key_type = typename TypePolicy::key_type;
  allocator_constructed<Allocator, key_type, TypePolicy> val;

public:
  alloc_cted_or_fwded_key_type(const Allocator& al_, KFwdRef k)
      : val(al_, std::forward<KFwdRef>(k))
  {
  }

  key_type&& move_or_fwd() { return std::move(val.value()); }
};

template <typename TypePolicy, typename Allocator, typename KFwdRef>
class alloc_cted_or_fwded_key_type<TypePolicy, Allocator, KFwdRef,
  typename std::enable_if<
    is_similar<KFwdRef, typename TypePolicy::key_type>::value>::type>
{
  // This specialization acts as a forwarding-reference wrapper
  BOOST_UNORDERED_STATIC_ASSERT(std::is_reference<KFwdRef>::value);
  KFwdRef ref;

public:
  alloc_cted_or_fwded_key_type(const Allocator&, KFwdRef k)
      : ref(std::forward<KFwdRef>(k))
  {
  }

  KFwdRef move_or_fwd() { return std::forward<KFwdRef>(ref); }
};

/* a container is a map iff its key_type and value_type differ */

template <typename Container>
using is_map =
  std::integral_constant<bool, !std::is_same<typename Container::key_type,
                                 typename Container::value_type>::value>;

template <typename Container, typename K>
using is_emplace_kv_able = std::integral_constant<bool,
  is_map<Container>::value &&
    (is_similar<K, typename Container::key_type>::value ||
      is_complete_and_move_constructible<typename Container::key_type>::value)>;

/* table_core. The TypePolicy template parameter is used to generate
 * instantiations suitable for either maps or sets, and introduces non-standard
 * init_type and element_type:
 *
 * - TypePolicy::key_type and TypePolicy::value_type have the obvious
 * meaning.
TypePolicy::mapped_type is expected to be provided as well * when key_type and value_type are not the same. * * - TypePolicy::init_type is the type implicitly converted to when * writing x.insert({...}). For maps, this is std::pair<Key,T> rather * than std::pair<const Key,T> so that, for instance, x.insert({"hello",0}) * produces a cheaply moveable std::string&& ("hello") rather than * a copyable const std::string&&. foa::table::insert is extended to accept * both init_type and value_type references. * * - TypePolicy::construct and TypePolicy::destroy are used for the * construction and destruction of the internal types: value_type, * init_type, element_type, and key_type. * * - TypePolicy::move is used to provide move semantics for the internal * types used by the container during rehashing and emplace. These types * are init_type, value_type and emplace_type. During insertion, a * stack-local type will be created based on the constructibility of the * value_type and the supplied arguments. TypePolicy::move is used here * for transfer of ownership. Similarly, TypePolicy::move is also used * during rehashing when elements are moved to the new table. * * - TypePolicy::extract returns a const reference to the key part of * a value of type value_type, init_type, element_type or * decltype(TypePolicy::move(...)). * * - TypePolicy::element_type is the type that table_arrays uses when * allocating buckets, which allows us to have flat and node container. * For flat containers, element_type is value_type. For node * containers, it is a strong typedef to value_type*. * * - TypePolicy::value_from returns a mutable reference to value_type from * a given element_type. This is used when elements of the table themselves * need to be moved, such as during move construction/assignment when * allocators are unequal and there is no propagation. For all other cases, * the element_type itself is moved. 
*/ #include <boost/unordered/detail/foa/ignore_wshadow.hpp> #if defined(BOOST_MSVC) #pragma warning(push) #pragma warning(disable:4714) /* marked as __forceinline not inlined */ #endif #if BOOST_WORKAROUND(BOOST_MSVC,<=1900) /* VS2015 marks as unreachable generic catch clauses around non-throwing * code. */ #pragma warning(push) #pragma warning(disable:4702) #endif template< typename TypePolicy,typename Group,template<typename...> class Arrays, typename SizeControl,typename Hash,typename Pred,typename Allocator > class #if defined(_MSC_VER)&&_MSC_FULL_VER>=190023918 __declspec(empty_bases) /* activate EBO with multiple inheritance */ #endif table_core:empty_value<Hash,0>,empty_value<Pred,1>,empty_value<Allocator,2> { public: using type_policy=TypePolicy; using group_type=Group; static constexpr auto N=group_type::N; using size_policy=pow2_size_policy; using prober=pow2_quadratic_prober; using mix_policy=typename std::conditional< hash_is_avalanching<Hash>::value, no_mix, mulx_mix >::type; using alloc_traits=boost::allocator_traits<Allocator>; using element_type=typename type_policy::element_type; using arrays_type=Arrays<element_type,group_type,size_policy,Allocator>; using size_ctrl_type=SizeControl; static constexpr auto uses_fancy_pointers=!std::is_same< typename alloc_traits::pointer, typename alloc_traits::value_type* >::value; using key_type=typename type_policy::key_type; using init_type=typename type_policy::init_type; using value_type=typename type_policy::value_type; using hasher=Hash; using key_equal=Pred; using allocator_type=Allocator; using pointer=value_type*; using const_pointer=const value_type*; using reference=value_type&; using const_reference=const value_type&; using size_type=std::size_t; using difference_type=std::ptrdiff_t; using locator=table_locator<group_type,element_type>; using arrays_holder_type=arrays_holder<arrays_type,Allocator>; table_core( std::size_t n=default_bucket_count,const Hash& h_=Hash(), const Pred& pred_=Pred(),const 
Allocator& al_=Allocator()): hash_base{empty_init,h_},pred_base{empty_init,pred_}, allocator_base{empty_init,al_},arrays(new_arrays(n)), size_ctrl{initial_max_load(),0} {} /* genericize on an ArraysFn so that we can do things like delay an * allocation for the group_access data required by cfoa after the move * constructors of Hash, Pred have been invoked */ template<typename ArraysFn> table_core( Hash&& h_,Pred&& pred_,Allocator&& al_, ArraysFn arrays_fn,const size_ctrl_type& size_ctrl_): hash_base{empty_init,std::move(h_)}, pred_base{empty_init,std::move(pred_)}, allocator_base{empty_init,std::move(al_)}, arrays(arrays_fn()),size_ctrl(size_ctrl_) {} table_core(const table_core& x): table_core{x,alloc_traits::select_on_container_copy_construction(x.al())}{} template<typename ArraysFn> table_core(table_core&& x,arrays_holder_type&& ah,ArraysFn arrays_fn): table_core( std::move(x.h()),std::move(x.pred()),std::move(x.al()), arrays_fn,x.size_ctrl) { x.arrays=ah.release(); x.size_ctrl.ml=x.initial_max_load(); x.size_ctrl.size=0; } table_core(table_core&& x) noexcept( std::is_nothrow_move_constructible<Hash>::value&& std::is_nothrow_move_constructible<Pred>::value&& std::is_nothrow_move_constructible<Allocator>::value&& !uses_fancy_pointers): table_core{ std::move(x),x.make_empty_arrays(),[&x]{return x.arrays;}} {} table_core(const table_core& x,const Allocator& al_): table_core{std::size_t(std::ceil(float(x.size())/mlf)),x.h(),x.pred(),al_} { copy_elements_from(x); } table_core(table_core&& x,const Allocator& al_): table_core{std::move(x.h()),std::move(x.pred()),al_} { if(al()==x.al()){ using std::swap; swap(arrays,x.arrays); swap(size_ctrl,x.size_ctrl); } else{ reserve(x.size()); clear_on_exit c{x}; (void)c; /* unused var warning */ /* This works because subsequent x.clear() does not depend on the * elements' values. 
*/ x.for_all_elements([this](element_type* p){ unchecked_insert(type_policy::move(type_policy::value_from(*p))); }); } } ~table_core()noexcept { for_all_elements([this](element_type* p){ destroy_element(p); }); delete_arrays(arrays); } std::size_t initial_max_load()const { static constexpr std::size_t small_capacity=2*N-1; auto capacity_=capacity(); if(capacity_<=small_capacity){ return capacity_; /* we allow 100% usage */ } else{ return (std::size_t)(mlf*(float)(capacity_)); } } arrays_holder_type make_empty_arrays()const { return make_arrays(0); } table_core& operator=(const table_core& x) { BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED(Hash, Pred) static constexpr auto pocca= alloc_traits::propagate_on_container_copy_assignment::value; if(this!=std::addressof(x)){ /* If copy construction here winds up throwing, the container is still * left intact so we perform these operations first. */ hasher tmp_h=x.h(); key_equal tmp_p=x.pred(); clear(); /* Because we've asserted at compile-time that Hash and Pred are nothrow * swappable, we can safely mutate our source container and maintain * consistency between the Hash, Pred compatibility. 
*/ using std::swap; swap(h(),tmp_h); swap(pred(),tmp_p); if_constexpr<pocca>([&,this]{ if(al()!=x.al()){ auto ah=x.make_arrays(std::size_t(std::ceil(float(x.size())/mlf))); delete_arrays(arrays); arrays=ah.release(); size_ctrl.ml=initial_max_load(); } copy_assign_if<pocca>(al(),x.al()); }); /* noshrink: favor memory reuse over tightness */ noshrink_reserve(x.size()); copy_elements_from(x); } return *this; } #if defined(BOOST_MSVC) #pragma warning(push) #pragma warning(disable:4127) /* conditional expression is constant */ #endif table_core& operator=(table_core&& x) noexcept( (alloc_traits::propagate_on_container_move_assignment::value|| alloc_traits::is_always_equal::value)&&!uses_fancy_pointers) { BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED(Hash, Pred) static constexpr auto pocma= alloc_traits::propagate_on_container_move_assignment::value; if(this!=std::addressof(x)){ /* Given ambiguity in implementation strategies briefly discussed here: * https://www.open-std.org/jtc1/sc22/wg21/docs/lwg-active.html#2227 * * we opt into requiring nothrow swappability and eschew the move * operations associated with Hash, Pred. * * To this end, we ensure that the user never has to consider the * moved-from state of their Hash, Pred objects */ using std::swap; clear(); if(pocma||al()==x.al()){ auto ah=x.make_empty_arrays(); swap(h(),x.h()); swap(pred(),x.pred()); delete_arrays(arrays); move_assign_if<pocma>(al(),x.al()); arrays=x.arrays; size_ctrl.ml=std::size_t(x.size_ctrl.ml); size_ctrl.size=std::size_t(x.size_ctrl.size); x.arrays=ah.release(); x.size_ctrl.ml=x.initial_max_load(); x.size_ctrl.size=0; } else{ swap(h(),x.h()); swap(pred(),x.pred()); /* noshrink: favor memory reuse over tightness */ noshrink_reserve(x.size()); clear_on_exit c{x}; (void)c; /* unused var warning */ /* This works because subsequent x.clear() does not depend on the * elements' values. 
*/ x.for_all_elements([this](element_type* p){ unchecked_insert(type_policy::move(type_policy::value_from(*p))); }); } } return *this; } #if defined(BOOST_MSVC) #pragma warning(pop) /* C4127 */ #endif allocator_type get_allocator()const noexcept{return al();} bool empty()const noexcept{return size()==0;} std::size_t size()const noexcept{return size_ctrl.size;} std::size_t max_size()const noexcept{return SIZE_MAX;} BOOST_FORCEINLINE void erase(group_type* pg,unsigned int pos,element_type* p)noexcept { destroy_element(p); recover_slot(pg,pos); } BOOST_FORCEINLINE void erase(unsigned char* pc,element_type* p)noexcept { destroy_element(p); recover_slot(pc); } template<typename Key> BOOST_FORCEINLINE locator find(const Key& x)const { auto hash=hash_for(x); return find(x,position_for(hash),hash); } #if defined(BOOST_MSVC) /* warning: forcing value to bool 'true' or 'false' in bool(pred()...) */ #pragma warning(push) #pragma warning(disable:4800) #endif template<typename Key> BOOST_FORCEINLINE locator find( const Key& x,std::size_t pos0,std::size_t hash)const { prober pb(pos0); do{ auto pos=pb.get(); auto pg=arrays.groups()+pos; auto mask=pg->match(hash); if(mask){ auto elements=arrays.elements(); BOOST_UNORDERED_ASSUME(elements!=nullptr); auto p=elements+pos*N; BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N); do{ auto n=unchecked_countr_zero(mask); if(BOOST_LIKELY(bool(pred()(x,key_from(p[n]))))){ return {pg,n,p+n}; } mask&=mask-1; }while(mask); } if(BOOST_LIKELY(pg->is_not_overflowed(hash))){ return {}; } } while(BOOST_LIKELY(pb.next(arrays.groups_size_mask))); return {}; } #if defined(BOOST_MSVC) #pragma warning(pop) /* C4800 */ #endif void swap(table_core& x) noexcept( alloc_traits::propagate_on_container_swap::value|| alloc_traits::is_always_equal::value) { BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED(Hash, Pred) static constexpr auto pocs= alloc_traits::propagate_on_container_swap::value; using std::swap; if_constexpr<pocs>([&,this]{ swap_if<pocs>(al(),x.al()); }, [&,this]{ /* 
else */ BOOST_ASSERT(al()==x.al()); (void)this; /* makes sure captured this is used */ }); swap(h(),x.h()); swap(pred(),x.pred()); swap(arrays,x.arrays); swap(size_ctrl,x.size_ctrl); } void clear()noexcept { auto p=arrays.elements(); if(p){ for(auto pg=arrays.groups(),last=pg+arrays.groups_size_mask+1; pg!=last;++pg,p+=N){ auto mask=match_really_occupied(pg,last); while(mask){ destroy_element(p+unchecked_countr_zero(mask)); mask&=mask-1; } /* we wipe the entire metadata to reset the overflow byte as well */ pg->initialize(); } arrays.groups()[arrays.groups_size_mask].set_sentinel(); size_ctrl.ml=initial_max_load(); size_ctrl.size=0; } } hasher hash_function()const{return h();} key_equal key_eq()const{return pred();} std::size_t capacity()const noexcept { return arrays.elements()?(arrays.groups_size_mask+1)*N-1:0; } float load_factor()const noexcept { if(capacity()==0)return 0; else return float(size())/float(capacity()); } float max_load_factor()const noexcept{return mlf;} std::size_t max_load()const noexcept{return size_ctrl.ml;} void rehash(std::size_t n) { auto m=size_t(std::ceil(float(size())/mlf)); if(m>n)n=m; if(n)n=capacity_for(n); /* exact resulting capacity */ if(n!=capacity())unchecked_rehash(n); } void reserve(std::size_t n) { rehash(std::size_t(std::ceil(float(n)/mlf))); } friend bool operator==(const table_core& x,const table_core& y) { return x.size()==y.size()&& x.for_all_elements_while([&](element_type* p){ auto loc=y.find(key_from(*p)); return loc&& const_cast<const value_type&>(type_policy::value_from(*p))== const_cast<const value_type&>(type_policy::value_from(*loc.p)); }); } friend bool operator!=(const table_core& x,const table_core& y) { return !(x==y); } struct clear_on_exit { ~clear_on_exit(){x.clear();} table_core& x; }; Hash& h(){return hash_base::get();} const Hash& h()const{return hash_base::get();} Pred& pred(){return pred_base::get();} const Pred& pred()const{return pred_base::get();} Allocator& al(){return allocator_base::get();} 
const Allocator& al()const{return allocator_base::get();} template<typename... Args> void construct_element(element_type* p,Args&&... args) { type_policy::construct(al(),p,std::forward<Args>(args)...); } template<typename... Args> void construct_element(element_type* p,try_emplace_args_t,Args&&... args) { construct_element_from_try_emplace_args( p, std::integral_constant<bool,std::is_same<key_type,value_type>::value>{}, std::forward<Args>(args)...); } void destroy_element(element_type* p)noexcept { type_policy::destroy(al(),p); } struct destroy_element_on_exit { ~destroy_element_on_exit(){this_->destroy_element(p);} table_core *this_; element_type *p; }; template<typename T> static inline auto key_from(const T& x) ->decltype(type_policy::extract(x)) { return type_policy::extract(x); } template<typename Key,typename... Args> static inline const Key& key_from( try_emplace_args_t,const Key& x,const Args&...) { return x; } template<typename Key> inline std::size_t hash_for(const Key& x)const { return mix_policy::mix(h(),x); } inline std::size_t position_for(std::size_t hash)const { return position_for(hash,arrays); } static inline std::size_t position_for( std::size_t hash,const arrays_type& arrays_) { return size_policy::position(hash,arrays_.groups_size_index); } static inline int match_really_occupied(group_type* pg,group_type* last) { /* excluding the sentinel */ return pg->match_occupied()&~(int(pg==last-1)<<(N-1)); } template<typename... Args> locator unchecked_emplace_at( std::size_t pos0,std::size_t hash,Args&&... args) { auto res=nosize_unchecked_emplace_at( arrays,pos0,hash,std::forward<Args>(args)...); ++size_ctrl.size; return res; } BOOST_NOINLINE void unchecked_rehash_for_growth() { auto new_arrays_=new_arrays_for_growth(); unchecked_rehash(new_arrays_); } template<typename... Args> BOOST_NOINLINE locator unchecked_emplace_with_rehash(std::size_t hash,Args&&... 
args) { auto new_arrays_=new_arrays_for_growth(); locator it; BOOST_TRY{ /* strong exception guarantee -> try insertion before rehash */ it=nosize_unchecked_emplace_at( new_arrays_,position_for(hash,new_arrays_), hash,std::forward<Args>(args)...); } BOOST_CATCH(...){ delete_arrays(new_arrays_); BOOST_RETHROW } BOOST_CATCH_END /* new_arrays_ lifetime taken care of by unchecked_rehash */ unchecked_rehash(new_arrays_); ++size_ctrl.size; return it; } void noshrink_reserve(std::size_t n) { /* used only on assignment after element clearance */ BOOST_ASSERT(empty()); if(n){ n=std::size_t(std::ceil(float(n)/mlf)); /* elements -> slots */ n=capacity_for(n); /* exact resulting capacity */ if(n>capacity()){ auto new_arrays_=new_arrays(n); delete_arrays(arrays); arrays=new_arrays_; size_ctrl.ml=initial_max_load(); } } } template<typename F> void for_all_elements(F f)const { for_all_elements(arrays,f); } template<typename F> static auto for_all_elements(const arrays_type& arrays_,F f) ->decltype(f(nullptr),void()) { for_all_elements_while(arrays_,[&](element_type* p){f(p);return true;}); } template<typename F> static auto for_all_elements(const arrays_type& arrays_,F f) ->decltype(f(nullptr,0,nullptr),void()) { for_all_elements_while( arrays_,[&](group_type* pg,unsigned int n,element_type* p) {f(pg,n,p);return true;}); } template<typename F> bool for_all_elements_while(F f)const { return for_all_elements_while(arrays,f); } template<typename F> static auto for_all_elements_while(const arrays_type& arrays_,F f) ->decltype(f(nullptr),bool()) { return for_all_elements_while( arrays_,[&](group_type*,unsigned int,element_type* p){return f(p);}); } template<typename F> static auto for_all_elements_while(const arrays_type& arrays_,F f) ->decltype(f(nullptr,0,nullptr),bool()) { auto p=arrays_.elements(); if(p){ for(auto pg=arrays_.groups(),last=pg+arrays_.groups_size_mask+1; pg!=last;++pg,p+=N){ auto mask=match_really_occupied(pg,last); while(mask){ auto n=unchecked_countr_zero(mask); 
if(!f(pg,n,p+n))return false; mask&=mask-1; } } } return true; } arrays_type arrays; size_ctrl_type size_ctrl; private: template< typename,typename,template<typename...> class, typename,typename,typename,typename > friend class table_core; using hash_base=empty_value<Hash,0>; using pred_base=empty_value<Pred,1>; using allocator_base=empty_value<Allocator,2>; /* used by allocator-extended move ctor */ table_core(Hash&& h_,Pred&& pred_,const Allocator& al_): hash_base{empty_init,std::move(h_)}, pred_base{empty_init,std::move(pred_)}, allocator_base{empty_init,al_},arrays(new_arrays(0)), size_ctrl{initial_max_load(),0} { } arrays_type new_arrays(std::size_t n)const { return arrays_type::new_(typename arrays_type::allocator_type(al()),n); } arrays_type new_arrays_for_growth()const { /* Due to the anti-drift mechanism (see recover_slot), the new arrays may * be of the same size as the old arrays; in the limit, erasing one * element at full load and then inserting could bring us back to the same * capacity after a costly rehash. To avoid this, we jump to the next * capacity level when the number of erased elements is <= 10% of total * elements at full load, which is implemented by requesting additional * F*size elements, with F = P * 10% / (1 - P * 10%), where P is the * probability of an element having caused overflow; P has been measured as * ~0.162 under ideal conditions, yielding F ~ 0.0165 ~ 1/61. */ return new_arrays(std::size_t( std::ceil(static_cast<float>(size()+size()/61+1)/mlf))); } void delete_arrays(arrays_type& arrays_)noexcept { arrays_type::delete_(typename arrays_type::allocator_type(al()),arrays_); } arrays_holder_type make_arrays(std::size_t n)const { return {new_arrays(n),al()}; } template<typename Key,typename... Args> void construct_element_from_try_emplace_args( element_type* p,std::false_type,Key&& x,Args&&... 
args) { type_policy::construct( this->al(),p, std::piecewise_construct, std::forward_as_tuple(std::forward<Key>(x)), std::forward_as_tuple(std::forward<Args>(args)...)); } /* This overload allows boost::unordered_flat_set to internally use * try_emplace to implement heterogeneous insert (P2363). */ template<typename Key> void construct_element_from_try_emplace_args( element_type* p,std::true_type,Key&& x) { type_policy::construct(this->al(),p,std::forward<Key>(x)); } void copy_elements_from(const table_core& x) { BOOST_ASSERT(empty()); BOOST_ASSERT(this!=std::addressof(x)); if(arrays.groups_size_mask==x.arrays.groups_size_mask){ fast_copy_elements_from(x); } else{ x.for_all_elements([this](const element_type* p){ unchecked_insert(*p); }); } } void fast_copy_elements_from(const table_core& x) { if(arrays.elements()&&x.arrays.elements()){ copy_elements_array_from(x); copy_groups_array_from(x); size_ctrl.ml=std::size_t(x.size_ctrl.ml); size_ctrl.size=std::size_t(x.size_ctrl.size); } } void copy_elements_array_from(const table_core& x) { copy_elements_array_from( x, std::integral_constant< bool, is_trivially_copy_constructible<element_type>::value&&( is_std_allocator<Allocator>::value|| !alloc_has_construct<Allocator,value_type*,const value_type&>::value) >{} ); } void copy_elements_array_from( const table_core& x,std::true_type /* -> memcpy */) { /* reinterpret_cast: GCC may complain about value_type not being trivially * copy-assignable when we're relying on trivial copy constructibility. 
*/ std::memcpy( reinterpret_cast<unsigned char*>(arrays.elements()), reinterpret_cast<unsigned char*>(x.arrays.elements()), x.capacity()*sizeof(value_type)); } void copy_elements_array_from( const table_core& x,std::false_type /* -> manual */) { std::size_t num_constructed=0; BOOST_TRY{ x.for_all_elements([&,this](const element_type* p){ construct_element(arrays.elements()+(p-x.arrays.elements()),*p); ++num_constructed; }); } BOOST_CATCH(...){ if(num_constructed){ x.for_all_elements_while([&,this](const element_type* p){ destroy_element(arrays.elements()+(p-x.arrays.elements())); return --num_constructed!=0; }); } BOOST_RETHROW } BOOST_CATCH_END } void copy_groups_array_from(const table_core& x) { copy_groups_array_from(x,is_trivially_copy_assignable<group_type>{}); } void copy_groups_array_from( const table_core& x, std::true_type /* -> memcpy */) { std::memcpy( arrays.groups(),x.arrays.groups(), (arrays.groups_size_mask+1)*sizeof(group_type)); } void copy_groups_array_from( const table_core& x, std::false_type /* -> manual */) { auto pg=arrays.groups(); auto xpg=x.arrays.groups(); for(std::size_t i=0;i<arrays.groups_size_mask+1;++i){ pg[i]=xpg[i]; } } void recover_slot(unsigned char* pc) { /* If this slot potentially caused overflow, we decrease the maximum load * so that average probe length won't increase unboundedly in repeated * insert/erase cycles (drift). 
*/ size_ctrl.ml-=group_type::maybe_caused_overflow(pc); group_type::reset(pc); --size_ctrl.size; } void recover_slot(group_type* pg,std::size_t pos) { recover_slot(reinterpret_cast<unsigned char*>(pg)+pos); } static std::size_t capacity_for(std::size_t n) { return size_policy::size(size_index_for<group_type,size_policy>(n))*N-1; } BOOST_NOINLINE void unchecked_rehash(std::size_t n) { auto new_arrays_=new_arrays(n); unchecked_rehash(new_arrays_); } BOOST_NOINLINE void unchecked_rehash(arrays_type& new_arrays_) { std::size_t num_destroyed=0; BOOST_TRY{ for_all_elements([&,this](element_type* p){ nosize_transfer_element(p,new_arrays_,num_destroyed); }); } BOOST_CATCH(...){ if(num_destroyed){ for_all_elements_while( [&,this](group_type* pg,unsigned int n,element_type*){ recover_slot(pg,n); return --num_destroyed!=0; } ); } for_all_elements(new_arrays_,[this](element_type* p){ destroy_element(p); }); delete_arrays(new_arrays_); BOOST_RETHROW } BOOST_CATCH_END /* either all moved and destroyed or all copied */ BOOST_ASSERT(num_destroyed==size()||num_destroyed==0); if(num_destroyed!=size()){ for_all_elements([this](element_type* p){ destroy_element(p); }); } delete_arrays(arrays); arrays=new_arrays_; size_ctrl.ml=initial_max_load(); } template<typename Value> void unchecked_insert(Value&& x) { auto hash=hash_for(key_from(x)); unchecked_emplace_at(position_for(hash),hash,std::forward<Value>(x)); } void nosize_transfer_element( element_type* p,const arrays_type& arrays_,std::size_t& num_destroyed) { nosize_transfer_element( p,hash_for(key_from(*p)),arrays_,num_destroyed, std::integral_constant< /* std::move_if_noexcept semantics */ bool, std::is_nothrow_move_constructible<init_type>::value|| !std::is_same<element_type,value_type>::value|| !std::is_copy_constructible<element_type>::value>{}); } void nosize_transfer_element( element_type* p,std::size_t hash,const arrays_type& arrays_, std::size_t& num_destroyed,std::true_type /* ->move */) { /* Destroy p even if an an 
exception is thrown in the middle of move * construction, which could leave the source half-moved. */ ++num_destroyed; destroy_element_on_exit d{this,p}; (void)d; /* unused var warning */ nosize_unchecked_emplace_at( arrays_,position_for(hash,arrays_),hash,type_policy::move(*p)); } void nosize_transfer_element( element_type* p,std::size_t hash,const arrays_type& arrays_, std::size_t& /*num_destroyed*/,std::false_type /* ->copy */) { nosize_unchecked_emplace_at( arrays_,position_for(hash,arrays_),hash, const_cast<const element_type&>(*p)); } template<typename... Args> locator nosize_unchecked_emplace_at( const arrays_type& arrays_,std::size_t pos0,std::size_t hash, Args&&... args) { for(prober pb(pos0);;pb.next(arrays_.groups_size_mask)){ auto pos=pb.get(); auto pg=arrays_.groups()+pos; auto mask=pg->match_available(); if(BOOST_LIKELY(mask!=0)){ auto n=unchecked_countr_zero(mask); auto p=arrays_.elements()+pos*N+n; construct_element(p,std::forward<Args>(args)...); pg->set(n,hash); return {pg,n,p}; } else pg->mark_overflow(hash); } } }; #if BOOST_WORKAROUND(BOOST_MSVC,<=1900) #pragma warning(pop) /* C4702 */ #endif #if defined(BOOST_MSVC) #pragma warning(pop) /* C4714 */ #endif #include <boost/unordered/detail/foa/restore_wshadow.hpp> } /* namespace foa */ } /* namespace detail */ } /* namespace unordered */ } /* namespace boost */ #undef BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED #undef BOOST_UNORDERED_HAS_FEATURE #undef BOOST_UNORDERED_HAS_BUILTIN #endif
0
repos/unordered/include/boost/unordered/detail
repos/unordered/include/boost/unordered/detail/foa/ignore_wshadow.hpp
/* Copyright 2023 Joaquin M Lopez Munoz. * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) * * See https://www.boost.org/libs/unordered for library home page. */ #include <boost/config.hpp> #if defined(BOOST_GCC) #if !defined(BOOST_UNORDERED_DETAIL_RESTORE_WSHADOW) /* GCC's -Wshadow triggers at scenarios like this: * * struct foo{}; * template<typename Base> * struct derived:Base * { * void f(){int foo;} * }; * * derived<foo>x; * x.f(); // declaration of "foo" in derived::f shadows base type "foo" * * This makes shadowing warnings unavoidable in general when a class template * derives from user-provided classes, as is the case with foa::table_core * deriving from empty_value. */ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wshadow" #else #pragma GCC diagnostic pop #endif #endif
0
repos/unordered/include/boost/unordered/detail
repos/unordered/include/boost/unordered/detail/foa/flat_map_types.hpp
// Copyright (C) 2023 Christian Mazakas // Copyright (C) 2024 Braden Ganetsky // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_DETAIL_FOA_FLAT_MAP_TYPES_HPP #define BOOST_UNORDERED_DETAIL_FOA_FLAT_MAP_TYPES_HPP #include <boost/core/allocator_access.hpp> namespace boost { namespace unordered { namespace detail { namespace foa { template <class Key, class T> struct flat_map_types { using key_type = Key; using mapped_type = T; using raw_key_type = typename std::remove_const<Key>::type; using raw_mapped_type = typename std::remove_const<T>::type; using init_type = std::pair<raw_key_type, raw_mapped_type>; using moved_type = std::pair<raw_key_type&&, raw_mapped_type&&>; using value_type = std::pair<Key const, T>; using element_type = value_type; static value_type& value_from(element_type& x) { return x; } template <class K, class V> static raw_key_type const& extract(std::pair<K, V> const& kv) { return kv.first; } static moved_type move(init_type& x) { return {std::move(x.first), std::move(x.second)}; } static moved_type move(element_type& x) { // TODO: we probably need to launder here return {std::move(const_cast<raw_key_type&>(x.first)), std::move(const_cast<raw_mapped_type&>(x.second))}; } template <class A, class... Args> static void construct(A& al, init_type* p, Args&&... args) { boost::allocator_construct(al, p, std::forward<Args>(args)...); } template <class A, class... Args> static void construct(A& al, value_type* p, Args&&... args) { boost::allocator_construct(al, p, std::forward<Args>(args)...); } template <class A, class... Args> static void construct(A& al, key_type* p, Args&&... 
args) { boost::allocator_construct(al, p, std::forward<Args>(args)...); } template <class A> static void destroy(A& al, init_type* p) noexcept { boost::allocator_destroy(al, p); } template <class A> static void destroy(A& al, value_type* p) noexcept { boost::allocator_destroy(al, p); } template <class A> static void destroy(A& al, key_type* p) noexcept { boost::allocator_destroy(al, p); } }; } // namespace foa } // namespace detail } // namespace unordered } // namespace boost #endif // BOOST_UNORDERED_DETAIL_FOA_FLAT_MAP_TYPES_HPP
0
repos/unordered/include/boost/unordered/detail
repos/unordered/include/boost/unordered/detail/foa/node_handle.hpp
/* Copyright 2023 Christian Mazakas.
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * See https://www.boost.org/libs/unordered for library home page.
 */

#ifndef BOOST_UNORDERED_DETAIL_FOA_NODE_HANDLE_HPP
#define BOOST_UNORDERED_DETAIL_FOA_NODE_HANDLE_HPP

#include <boost/unordered/detail/opt_storage.hpp>

#include <boost/config.hpp>
#include <boost/core/allocator_access.hpp>

namespace boost{
namespace unordered{
namespace detail{
namespace foa{

/* Aggregate returned by node-based insert(node_type&&), mirroring the
 * shape of std::unordered_map::insert_return_type. */
template <class Iterator,class NodeType>
struct insert_return_type
{
  Iterator position;
  bool     inserted;
  NodeType node;
};

/* Common implementation of the node handle returned by extract(): owns one
 * detached element (p_) together with a copy of the source container's
 * allocator (a_). The allocator copy is alive only while the handle is
 * non-empty; emplace()/reset() construct/destroy it manually inside the
 * opt_storage raw buffer, so al() must never be touched on an empty handle.
 */
template <class TypePolicy,class Allocator>
struct node_handle_base
{
  protected:
    using type_policy=TypePolicy;
    using element_type=typename type_policy::element_type;

  public:
    using allocator_type = Allocator;

  private:
    using node_value_type=typename type_policy::value_type;
    /* owned element; p_.p==nullptr <=> empty() */
    element_type p_;
    BOOST_ATTRIBUTE_NO_UNIQUE_ADDRESS opt_storage<Allocator> a_;

  protected:
    /* precondition for data()/element()/al(): !empty() */
    node_value_type& data()noexcept
    {
      return *(p_.p);
    }

    node_value_type const& data()const noexcept
    {
      return *(p_.p);
    }

    element_type& element()noexcept
    {
      BOOST_ASSERT(!empty());
      return p_;
    }

    element_type const& element()const noexcept
    {
      BOOST_ASSERT(!empty());
      return p_;
    }

    Allocator& al()noexcept
    {
      BOOST_ASSERT(!empty());
      return a_.t_;
    }

    Allocator const& al()const noexcept
    {
      BOOST_ASSERT(!empty());
      return a_.t_;
    }

    /* Adopts x's node and constructs the allocator copy in place;
     * x is left disengaged. */
    void emplace(element_type&& x,Allocator a)
    {
      BOOST_ASSERT(empty());
      auto* p=x.p;
      p_.p=p;
      new(&a_.t_)Allocator(a);
      x.p=nullptr;
    }

    /* Destroys the allocator copy and marks the handle empty. Does NOT
     * destroy the element itself; callers destroy it first when needed. */
    void reset()
    {
      a_.t_.~Allocator();
      p_.p=nullptr;
    }

  public:
    constexpr node_handle_base()noexcept:p_{nullptr}{}

    node_handle_base(node_handle_base&& nh) noexcept
    {
      p_.p = nullptr;
      if (!nh.empty()){
        emplace(std::move(nh.p_),nh.al());
        nh.reset();
      }
    }

    /* Case analysis over (empty(), nh.empty()): any owned element is
     * destroyed first, then nh's element (if any) is adopted. With pocma
     * the allocator is moved over as well, otherwise the allocators are
     * asserted equal. Self-move-assignment leaves the handle empty. */
    node_handle_base& operator=(node_handle_base&& nh)noexcept
    {
      if(this!=&nh){
        if(empty()){
          if(nh.empty()){ /* empty(), nh.empty() */
            /* nothing to do */
          }else{ /* empty(), !nh.empty() */
            emplace(std::move(nh.p_),std::move(nh.al()));
            nh.reset();
          }
        }else{
          if(nh.empty()){ /* !empty(), nh.empty() */
            type_policy::destroy(al(),&p_);
            reset();
          }else{ /* !empty(), !nh.empty() */
            bool const pocma=
              boost::allocator_propagate_on_container_move_assignment<
                Allocator>::type::value;

            BOOST_ASSERT(pocma||al()==nh.al());

            type_policy::destroy(al(),&p_);
            if(pocma){
              al()=std::move(nh.al());
            }

            p_=std::move(nh.p_);
            nh.reset();
          }
        }
      }else{
        if(empty()){ /* empty(), nh.empty() */
          /* nothing to do */
        }else{ /* !empty(), !nh.empty() */
          type_policy::destroy(al(),&p_);
          reset();
        }
      }
      return *this;
    }

    ~node_handle_base()
    {
      if(!empty()){
        type_policy::destroy(al(),&p_);
        reset();
      }
    }

    allocator_type get_allocator()const noexcept{return al();}
    explicit operator bool()const noexcept{ return !empty();}
    BOOST_ATTRIBUTE_NODISCARD bool empty()const noexcept{return p_.p==nullptr;}

    /* Swaps ownership; allocators are swapped only when propagation is
     * enabled, otherwise they are asserted equal. */
    void swap(node_handle_base& nh) noexcept(
      boost::allocator_is_always_equal<Allocator>::type::value||
      boost::allocator_propagate_on_container_swap<Allocator>::type::value)
    {
      if(this!=&nh){
        if(empty()){
          if(nh.empty()) {
            /* nothing to do here */
          } else {
            emplace(std::move(nh.p_), nh.al());
            nh.reset();
          }
        }else{
          if(nh.empty()){
            nh.emplace(std::move(p_),al());
            reset();
          }else{
            bool const pocs=
              boost::allocator_propagate_on_container_swap<
                Allocator>::type::value;

            BOOST_ASSERT(pocs || al()==nh.al());

            using std::swap;
            p_.swap(nh.p_);
            if(pocs)swap(al(),nh.al());
          }
        }
      }
    }

    friend void swap(node_handle_base& lhs,node_handle_base& rhs)
      noexcept(noexcept(lhs.swap(rhs)))
    {
      return lhs.swap(rhs);
    }
};

}
}
}
}

#endif // BOOST_UNORDERED_DETAIL_FOA_NODE_HANDLE_HPP
0
repos/unordered/include/boost/unordered/detail
repos/unordered/include/boost/unordered/detail/foa/node_set_types.hpp
// Copyright (C) 2023 Christian Mazakas // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_DETAIL_FOA_NODE_SET_TYPES_HPP #define BOOST_UNORDERED_DETAIL_FOA_NODE_SET_TYPES_HPP #include <boost/unordered/detail/foa/element_type.hpp> #include <boost/core/allocator_access.hpp> #include <boost/core/no_exceptions_support.hpp> #include <boost/core/pointer_traits.hpp> namespace boost { namespace unordered { namespace detail { namespace foa { template <class Key, class VoidPtr> struct node_set_types { using key_type = Key; using init_type = Key; using value_type = Key; static Key const& extract(value_type const& key) { return key; } using element_type = foa::element_type<value_type, VoidPtr>; static value_type& value_from(element_type const& x) { return *x.p; } static Key const& extract(element_type const& k) { return *k.p; } static element_type&& move(element_type& x) { return std::move(x); } static value_type&& move(value_type& x) { return std::move(x); } template <class A> static void construct( A& al, element_type* p, element_type const& copy) { construct(al, p, *copy.p); } template <typename Allocator> static void construct( Allocator&, element_type* p, element_type&& x) noexcept { p->p = x.p; x.p = nullptr; } template <class A, class... Args> static void construct(A& al, value_type* p, Args&&... args) { boost::allocator_construct(al, p, std::forward<Args>(args)...); } template <class A, class... Args> static void construct(A& al, element_type* p, Args&&... args) { p->p = boost::allocator_allocate(al, 1); BOOST_TRY { boost::allocator_construct( al, boost::to_address(p->p), std::forward<Args>(args)...); } BOOST_CATCH(...) 
{ boost::allocator_deallocate(al, p->p, 1); BOOST_RETHROW } BOOST_CATCH_END } template <class A> static void destroy(A& al, value_type* p) noexcept { boost::allocator_destroy(al, p); } template <class A> static void destroy(A& al, element_type* p) noexcept { if (p->p) { destroy(al, boost::to_address(p->p)); boost::allocator_deallocate(al, p->p, 1); } } }; } // namespace foa } // namespace detail } // namespace unordered } // namespace boost #endif // BOOST_UNORDERED_DETAIL_FOA_NODE_SET_TYPES_HPP
0
repos/unordered/include/boost/unordered/detail
repos/unordered/include/boost/unordered/detail/foa/element_type.hpp
/* Copyright 2023 Christian Mazakas. * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) * * See https://www.boost.org/libs/unordered for library home page. */ #ifndef BOOST_UNORDERED_DETAIL_FOA_ELEMENT_TYPE_HPP #define BOOST_UNORDERED_DETAIL_FOA_ELEMENT_TYPE_HPP #include <boost/core/pointer_traits.hpp> namespace boost{ namespace unordered{ namespace detail{ namespace foa{ template<class T,class VoidPtr> struct element_type { using value_type=T; using pointer=typename boost::pointer_traits<VoidPtr>::template rebind<T>; pointer p; /* * we use a deleted copy constructor here so the type is no longer * trivially copy-constructible which inhibits our memcpy * optimizations when copying the tables */ element_type()=default; element_type(pointer p_):p(p_){} element_type(element_type const&)=delete; element_type(element_type&& rhs)noexcept { p = rhs.p; rhs.p = nullptr; } element_type& operator=(element_type const&)=delete; element_type& operator=(element_type&& rhs)noexcept { if (this!=&rhs){ p=rhs.p; rhs.p=nullptr; } return *this; } void swap(element_type& rhs)noexcept { auto tmp=p; p=rhs.p; rhs.p=tmp; } }; } } } } #endif // BOOST_UNORDERED_DETAIL_FOA_ELEMENT_TYPE_HPP
0
repos/unordered/include/boost/unordered/detail
repos/unordered/include/boost/unordered/detail/foa/rw_spinlock.hpp
#ifndef BOOST_UNORDERED_DETAIL_FOA_RW_SPINLOCK_HPP_INCLUDED
#define BOOST_UNORDERED_DETAIL_FOA_RW_SPINLOCK_HPP_INCLUDED

// Copyright 2023 Peter Dimov
// Distributed under the Boost Software License, Version 1.0.
// https://www.boost.org/LICENSE_1_0.txt

#include <boost/core/yield_primitives.hpp>

#include <atomic>
#include <cstdint>

namespace boost{
namespace unordered{
namespace detail{
namespace foa{

// Reader-writer spinlock packed into one 32-bit atomic word (bit layout
// below). Writers get preference: once the writer-pending bit is set,
// try_lock_shared refuses new readers, so the writer eventually acquires
// the lock when the reader count drains to zero.
class rw_spinlock
{
private:

    // bit 31: locked exclusive
    // bit 30: writer pending
    // bit 29..0: reader lock count

    static constexpr std::uint32_t locked_exclusive_mask = 1u << 31; // 0x8000'0000
    static constexpr std::uint32_t writer_pending_mask = 1u << 30; // 0x4000'0000
    static constexpr std::uint32_t reader_lock_count_mask = writer_pending_mask - 1; // 0x3FFF'FFFF

    std::atomic<std::uint32_t> state_ = {};

private:

    // Effects: Provides a hint to the implementation that the current thread
    // has been unable to make progress for k+1 iterations.

    static void yield( unsigned k ) noexcept
    {
        unsigned const sleep_every = 1024; // see below

        k %= sleep_every;

        if( k < 5 )
        {
            // Intel recommendation from the Optimization Reference Manual
            // Exponentially increase number of PAUSE instructions each
            // iteration until reaching a maximum which is approximately
            // one timeslice long (2^4 == 16 in our case)

            unsigned const pause_count = 1u << k;

            for( unsigned i = 0; i < pause_count; ++i )
            {
                boost::core::sp_thread_pause();
            }
        }
        else if( k < sleep_every - 1 )
        {
            // Once the maximum number of PAUSE instructions is reached,
            // we switch to yielding the timeslice immediately

            boost::core::sp_thread_yield();
        }
        else
        {
            // After `sleep_every` iterations of no progress, we sleep,
            // to avoid a deadlock if a lower priority thread has the lock

            boost::core::sp_thread_sleep();
        }
    }

public:

    bool try_lock_shared() noexcept
    {
        std::uint32_t st = state_.load( std::memory_order_relaxed );

        if( st >= reader_lock_count_mask )
        {
            // either bit 31 set, bit 30 set, or reader count is max
            return false;
        }

        std::uint32_t newst = st + 1;

        // acquire on success pairs with the release stores in
        // unlock()/unlock_shared()
        return state_.compare_exchange_strong( st, newst, std::memory_order_acquire, std::memory_order_relaxed );
    }

    void lock_shared() noexcept
    {
        for( unsigned k = 0; ; ++k )
        {
            std::uint32_t st = state_.load( std::memory_order_relaxed );

            // same admission test as try_lock_shared, retried with backoff
            if( st < reader_lock_count_mask )
            {
                std::uint32_t newst = st + 1;
                if( state_.compare_exchange_weak( st, newst, std::memory_order_acquire, std::memory_order_relaxed ) ) return;
            }

            yield( k );
        }
    }

    void unlock_shared() noexcept
    {
        // pre: locked shared, not locked exclusive

        state_.fetch_sub( 1, std::memory_order_release );

        // if the writer pending bit is set, there's a writer waiting
        // let it acquire the lock; it will clear the bit on unlock
    }

    bool try_lock() noexcept
    {
        std::uint32_t st = state_.load( std::memory_order_relaxed );

        if( st & locked_exclusive_mask )
        {
            // locked exclusive
            return false;
        }

        if( st & reader_lock_count_mask )
        {
            // locked shared
            return false;
        }

        // note: a pending writer bit does not block try_lock; the CAS
        // below simply overwrites it on success
        std::uint32_t newst = locked_exclusive_mask;
        return state_.compare_exchange_strong( st, newst, std::memory_order_acquire, std::memory_order_relaxed );
    }

    void lock() noexcept
    {
        for( unsigned k = 0; ; ++k )
        {
            std::uint32_t st = state_.load( std::memory_order_relaxed );

            if( st & locked_exclusive_mask )
            {
                // locked exclusive, spin
            }
            else if( ( st & reader_lock_count_mask ) == 0 )
            {
                // not locked exclusive, not locked shared, try to lock

                std::uint32_t newst = locked_exclusive_mask;
                if( state_.compare_exchange_weak( st, newst, std::memory_order_acquire, std::memory_order_relaxed ) ) return;
            }
            else if( st & writer_pending_mask )
            {
                // writer pending bit already set, nothing to do
            }
            else
            {
                // locked shared, set writer pending bit
                // (relaxed is fine: the bit only gates new readers,
                // it publishes no data)

                std::uint32_t newst = st | writer_pending_mask;
                state_.compare_exchange_weak( st, newst, std::memory_order_relaxed, std::memory_order_relaxed );
            }

            yield( k );
        }
    }

    void unlock() noexcept
    {
        // pre: locked exclusive, not locked shared
        // storing 0 also clears the writer pending bit, if set
        state_.store( 0, std::memory_order_release );
    }
};

} /* namespace foa */
} /* namespace detail */
} /* namespace unordered */
} /* namespace boost */

#endif // BOOST_UNORDERED_DETAIL_FOA_RW_SPINLOCK_HPP_INCLUDED
0
repos/unordered/include/boost/unordered/detail
repos/unordered/include/boost/unordered/detail/foa/restore_wshadow.hpp
/* Copyright 2023 Joaquin M Lopez Munoz. * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) * * See https://www.boost.org/libs/unordered for library home page. */ #define BOOST_UNORDERED_DETAIL_RESTORE_WSHADOW #include <boost/unordered/detail/foa/ignore_wshadow.hpp> #undef BOOST_UNORDERED_DETAIL_RESTORE_WSHADOW
0
repos/unordered/include/boost/unordered/detail
repos/unordered/include/boost/unordered/detail/foa/reentrancy_check.hpp
/* Copyright 2023 Joaquin M Lopez Munoz.
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * See https://www.boost.org/libs/unordered for library home page.
 */

#ifndef BOOST_UNORDERED_DETAIL_FOA_REENTRANCY_CHECK_HPP
#define BOOST_UNORDERED_DETAIL_FOA_REENTRANCY_CHECK_HPP

#include <boost/assert.hpp>
#include <utility>

#if !defined(BOOST_UNORDERED_DISABLE_REENTRANCY_CHECK)&& \
    !defined(BOOST_ASSERT_IS_VOID)
#define BOOST_UNORDERED_REENTRANCY_CHECK
#endif

namespace boost{
namespace unordered{
namespace detail{
namespace foa{

#if defined(BOOST_UNORDERED_REENTRANCY_CHECK)

/* RAII node in a thread-local intrusive stack of "containers currently
 * being operated on" (identified by address px). Constructing a second
 * entry_trace for an address already on the current thread's stack trips
 * a BOOST_ASSERT, catching reentrant calls into the same container. A
 * null px produces an inert trace (used by reentrancy_bichecked when both
 * addresses coincide). */
class entry_trace
{
public:
  entry_trace(const void* px_):px{px_}
  {
    if(px){
      BOOST_ASSERT_MSG(!find(px),"reentrancy not allowed");
      header()=this;
    }
  }

  /* not used but VS in pre-C++17 mode needs to see it for RVO */
  entry_trace(const entry_trace&);

  ~entry_trace(){clear();}

  /* pops this trace off the stack early; safe to call twice */
  void clear()
  {
    if(px){
      header()=next;
      px=nullptr;
    }
  }

private:
  /* top of the calling thread's stack of active traces */
  static entry_trace*& header()
  {
    thread_local entry_trace *pe=nullptr;
    return pe;
  }

  /* true if px is already on this thread's stack */
  static bool find(const void* px)
  {
    for(auto pe=header();pe;pe=pe->next){
      if(pe->px==px)return true;
    }
    return false;
  }

  const void *px;
  /* captures the previous stack top at construction time */
  entry_trace *next=header();
};

/* Couples an entry_trace with a lock guard so that the reentrancy record
 * lives exactly as long as the lock is held; unlock() releases the lock
 * first and then retires the trace. */
template<typename LockGuard>
struct reentrancy_checked
{
  template<typename... Args>
  reentrancy_checked(const void* px,Args&&... args):
    tr{px},lck{std::forward<Args>(args)...}{}

  void unlock()
  {
    lck.unlock();
    tr.clear();
  }

  entry_trace tr;
  LockGuard   lck;
};

/* Two-container variant (used by binary operations); when both addresses
 * are the same, the second trace is made inert to avoid a false positive. */
template<typename LockGuard>
struct reentrancy_bichecked
{
  template<typename... Args>
  reentrancy_bichecked(const void* px,const void* py,Args&&... args):
    tr1{px},tr2{py!=px?py:nullptr},lck{std::forward<Args>(args)...}{}

  void unlock()
  {
    lck.unlock();
    tr2.clear();
    tr1.clear();
  }

  entry_trace tr1,tr2;
  LockGuard   lck;
};

#else

/* Checks disabled: these pass-through shims keep the same interface with
 * zero overhead. */
template<typename LockGuard>
struct reentrancy_checked
{
  template<typename... Args>
  reentrancy_checked(const void*,Args&&... args):
    lck{std::forward<Args>(args)...}{}

  void unlock(){lck.unlock();}

  LockGuard lck;
};

template<typename LockGuard>
struct reentrancy_bichecked
{
  template<typename... Args>
  reentrancy_bichecked(const void*,const void*,Args&&... args):
    lck{std::forward<Args>(args)...}{}

  void unlock(){lck.unlock();}

  LockGuard lck;
};

#endif

} /* namespace foa */
} /* namespace detail */
} /* namespace unordered */
} /* namespace boost */

#endif
0
repos/unordered/include/boost/unordered/detail
repos/unordered/include/boost/unordered/detail/foa/concurrent_table.hpp
/* Fast open-addressing concurrent hash table. * * Copyright 2023-2024 Joaquin M Lopez Munoz. * Copyright 2024 Braden Ganetsky. * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) * * See https://www.boost.org/libs/unordered for library home page. */ #ifndef BOOST_UNORDERED_DETAIL_FOA_CONCURRENT_TABLE_HPP #define BOOST_UNORDERED_DETAIL_FOA_CONCURRENT_TABLE_HPP #include <atomic> #include <boost/assert.hpp> #include <boost/config.hpp> #include <boost/core/ignore_unused.hpp> #include <boost/core/no_exceptions_support.hpp> #include <boost/core/serialization.hpp> #include <boost/cstdint.hpp> #include <boost/mp11/tuple.hpp> #include <boost/throw_exception.hpp> #include <boost/unordered/detail/archive_constructed.hpp> #include <boost/unordered/detail/bad_archive_exception.hpp> #include <boost/unordered/detail/foa/core.hpp> #include <boost/unordered/detail/foa/reentrancy_check.hpp> #include <boost/unordered/detail/foa/rw_spinlock.hpp> #include <boost/unordered/detail/foa/tuple_rotate_right.hpp> #include <boost/unordered/detail/serialization_version.hpp> #include <boost/unordered/detail/static_assert.hpp> #include <boost/unordered/detail/type_traits.hpp> #include <cstddef> #include <functional> #include <initializer_list> #include <iterator> #include <memory> #include <new> #include <type_traits> #include <tuple> #include <utility> #if !defined(BOOST_UNORDERED_DISABLE_PARALLEL_ALGORITHMS) #if defined(BOOST_UNORDERED_ENABLE_PARALLEL_ALGORITHMS)|| \ !defined(BOOST_NO_CXX17_HDR_EXECUTION) #define BOOST_UNORDERED_PARALLEL_ALGORITHMS #endif #endif #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) #include <algorithm> #include <execution> #endif namespace boost{ namespace unordered{ namespace detail{ #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) template<typename ExecutionPolicy> using is_execution_policy=std::is_execution_policy< typename std::remove_cv< typename 
std::remove_reference<ExecutionPolicy>::type >::type >; #else template<typename ExecutionPolicy> using is_execution_policy=std::false_type; #endif namespace foa{ static constexpr std::size_t cacheline_size=64; template<typename T,std::size_t N> class cache_aligned_array { public: cache_aligned_array(){for(std::size_t n=0;n<N;)::new (data(n++)) T();} ~cache_aligned_array(){for(auto n=N;n>0;)data(n--)->~T();} cache_aligned_array(const cache_aligned_array&)=delete; cache_aligned_array& operator=(const cache_aligned_array&)=delete; T& operator[](std::size_t pos)noexcept{return *data(pos);} private: static constexpr std::size_t element_offset= (sizeof(T)+cacheline_size-1)/cacheline_size*cacheline_size; BOOST_UNORDERED_STATIC_ASSERT(alignof(T)<=cacheline_size); T* data(std::size_t pos)noexcept { return reinterpret_cast<T*>( (reinterpret_cast<uintptr_t>(&buf)+cacheline_size-1)/ cacheline_size*cacheline_size +pos*element_offset); } unsigned char buf[element_offset*N+cacheline_size-1]; }; template<typename Mutex,std::size_t N> class multimutex { public: constexpr std::size_t size()const noexcept{return N;} Mutex& operator[](std::size_t pos)noexcept { BOOST_ASSERT(pos<N); return mutexes[pos]; } void lock()noexcept{for(std::size_t n=0;n<N;)mutexes[n++].lock();} void unlock()noexcept{for(auto n=N;n>0;)mutexes[--n].unlock();} private: cache_aligned_array<Mutex,N> mutexes; }; /* std::shared_lock is C++14 */ template<typename Mutex> class shared_lock { public: shared_lock(Mutex& m_)noexcept:m(m_){m.lock_shared();} ~shared_lock()noexcept{if(owns)m.unlock_shared();} /* not used but VS in pre-C++17 mode needs to see it for RVO */ shared_lock(const shared_lock&); void lock(){BOOST_ASSERT(!owns);m.lock_shared();owns=true;} void unlock(){BOOST_ASSERT(owns);m.unlock_shared();owns=false;} private: Mutex &m; bool owns=true; }; /* VS in pre-C++17 mode can't implement RVO for std::lock_guard due to * its copy constructor being deleted. 
*/ template<typename Mutex> class lock_guard { public: lock_guard(Mutex& m_)noexcept:m(m_){m.lock();} ~lock_guard()noexcept{m.unlock();} /* not used but VS in pre-C++17 mode needs to see it for RVO */ lock_guard(const lock_guard&); private: Mutex &m; }; /* inspired by boost/multi_index/detail/scoped_bilock.hpp */ template<typename Mutex> class scoped_bilock { public: scoped_bilock(Mutex& m1,Mutex& m2)noexcept { bool mutex_lt=std::less<Mutex*>{}(&m1,&m2); pm1=mutex_lt?&m1:&m2; pm1->lock(); if(&m1==&m2){ pm2=nullptr; } else{ pm2=mutex_lt?&m2:&m1; pm2->lock(); } } /* not used but VS in pre-C++17 mode needs to see it for RVO */ scoped_bilock(const scoped_bilock&); ~scoped_bilock()noexcept { if(pm2)pm2->unlock(); pm1->unlock(); } private: Mutex *pm1,*pm2; }; /* use atomics for group metadata storage */ template<typename Integral> struct atomic_integral { operator Integral()const{return n.load(std::memory_order_relaxed);} void operator=(Integral m){n.store(m,std::memory_order_relaxed);} void operator|=(Integral m){n.fetch_or(m,std::memory_order_relaxed);} void operator&=(Integral m){n.fetch_and(m,std::memory_order_relaxed);} atomic_integral& operator=(atomic_integral const& rhs) { n.store(rhs.n.load(std::memory_order_relaxed),std::memory_order_relaxed); return *this; } std::atomic<Integral> n; }; /* Group-level concurrency protection. It provides a rw mutex plus an * atomic insertion counter for optimistic insertion (see * unprotected_norehash_emplace_or_visit). 
*/ struct group_access { using mutex_type=rw_spinlock; using shared_lock_guard=shared_lock<mutex_type>; using exclusive_lock_guard=lock_guard<mutex_type>; using insert_counter_type=std::atomic<boost::uint32_t>; shared_lock_guard shared_access(){return shared_lock_guard{m};} exclusive_lock_guard exclusive_access(){return exclusive_lock_guard{m};} insert_counter_type& insert_counter(){return cnt;} private: mutex_type m; insert_counter_type cnt{0}; }; template<std::size_t Size> group_access* dummy_group_accesses() { /* Default group_access array to provide to empty containers without * incurring dynamic allocation. Mutexes won't actually ever be used, * (no successful reduced hash match) and insertion counters won't ever * be incremented (insertions won't succeed as capacity()==0). */ static group_access accesses[Size]; return accesses; } /* subclasses table_arrays to add an additional group_access array */ template<typename Value,typename Group,typename SizePolicy,typename Allocator> struct concurrent_table_arrays:table_arrays<Value,Group,SizePolicy,Allocator> { using group_access_allocator_type= typename boost::allocator_rebind<Allocator,group_access>::type; using group_access_pointer= typename boost::allocator_pointer<group_access_allocator_type>::type; using super=table_arrays<Value,Group,SizePolicy,Allocator>; using allocator_type=typename super::allocator_type; concurrent_table_arrays(const super& arrays,group_access_pointer pga): super{arrays},group_accesses_{pga}{} group_access* group_accesses()const noexcept{ return boost::to_address(group_accesses_); } static concurrent_table_arrays new_(allocator_type al,std::size_t n) { super x{super::new_(al,n)}; BOOST_TRY{ return new_group_access(group_access_allocator_type(al),x); } BOOST_CATCH(...){ super::delete_(al,x); BOOST_RETHROW } BOOST_CATCH_END } static void set_group_access( group_access_allocator_type al,concurrent_table_arrays& arrays) { set_group_access( 
al,arrays,std::is_same<group_access*,group_access_pointer>{}); } static void set_group_access( group_access_allocator_type al, concurrent_table_arrays& arrays, std::false_type /* fancy pointers */) { arrays.group_accesses_= boost::allocator_allocate(al,arrays.groups_size_mask+1); for(std::size_t i=0;i<arrays.groups_size_mask+1;++i){ ::new (arrays.group_accesses()+i) group_access(); } } static void set_group_access( group_access_allocator_type al, concurrent_table_arrays& arrays, std::true_type /* optimize when elements() is null */) { if(!arrays.elements()){ arrays.group_accesses_= dummy_group_accesses<SizePolicy::min_size()>(); } else { set_group_access(al,arrays,std::false_type{}); } } static concurrent_table_arrays new_group_access( group_access_allocator_type al,const super& x) { concurrent_table_arrays arrays{x,nullptr}; set_group_access(al,arrays); return arrays; } static void delete_(allocator_type al,concurrent_table_arrays& arrays)noexcept { delete_group_access(group_access_allocator_type(al),arrays); super::delete_(al,arrays); } static void delete_group_access( group_access_allocator_type al,concurrent_table_arrays& arrays)noexcept { if(arrays.elements()){ boost::allocator_deallocate( al,arrays.group_accesses_,arrays.groups_size_mask+1); } } group_access_pointer group_accesses_; }; struct atomic_size_control { static constexpr auto atomic_size_t_size=sizeof(std::atomic<std::size_t>); BOOST_UNORDERED_STATIC_ASSERT(atomic_size_t_size<cacheline_size); atomic_size_control(std::size_t ml_,std::size_t size_): pad0_{},ml{ml_},pad1_{},size{size_}{} atomic_size_control(const atomic_size_control& x): pad0_{},ml{x.ml.load()},pad1_{},size{x.size.load()}{} /* padding to avoid false sharing internally and with sorrounding data */ unsigned char pad0_[cacheline_size-atomic_size_t_size]; std::atomic<std::size_t> ml; unsigned char pad1_[cacheline_size-atomic_size_t_size]; std::atomic<std::size_t> size; }; /* std::swap can't be used on non-assignable atomics */ inline void 
swap_atomic_size_t(std::atomic<std::size_t>& x,std::atomic<std::size_t>& y) { std::size_t tmp=x; x=static_cast<std::size_t>(y); y=tmp; } inline void swap(atomic_size_control& x,atomic_size_control& y) { swap_atomic_size_t(x.ml,y.ml); swap_atomic_size_t(x.size,y.size); } /* foa::concurrent_table serves as the foundation for end-user concurrent * hash containers. * * The exposed interface (completed by the wrapping containers) is not that * of a regular container (in fact, it does not model Container as understood * by the C++ standard): * * - Iterators are not provided as they are not suitable for concurrent * scenarios. * - As a consequence, composite operations with regular containers * (like, for instance, looking up an element and modifying it), must * be provided natively without any intervening iterator/accesor. * Visitation is a core concept in this design, either on its own (eg. * visit(k) locates the element with key k *and* accesses it) or as part * of a native composite operation (eg. try_emplace_or_visit). Visitation * is constant or mutating depending on whether the used table function is * const or not. * - The API provides member functions for all the meaningful composite * operations of the form "X (and|or) Y", where X, Y are one of the * primitives FIND, ACCESS, INSERT or ERASE. * - Parallel versions of [c]visit_all(f) and erase_if(f) are provided based * on C++17 stdlib parallel algorithms. * * Consult boost::concurrent_flat_(map|set) docs for the full API reference. * Heterogeneous lookup is suported by default, that is, without checking for * any ::is_transparent typedefs --this checking is done by the wrapping * containers. * * Thread-safe concurrency is implemented using a two-level lock system: * * - A first container-level lock is implemented with an array of * rw spinlocks acting as a single rw mutex with very little * cache-coherence traffic on read (each thread is assigned a different * spinlock in the array). 
Container-level write locking is only used for * rehashing and other container-wide operations (assignment, swap, etc.) * - Each group of slots has an associated rw spinlock. A thread holds * at most one group lock at any given time. Lookup is implemented in * a (groupwise) lock-free manner until a reduced hash match is found, in * which case the relevant group is locked and the slot is double-checked * for occupancy and compared with the key. * - Each group has also an associated so-called insertion counter used for * the following optimistic insertion algorithm: * - The value of the insertion counter for the initial group in the probe * sequence is locally recorded (let's call this value c0). * - Lookup is as described above. If lookup finds no equivalent element, * search for an available slot for insertion successively locks/unlocks * each group in the probing sequence. * - When an available slot is located, it is preemptively occupied (its * reduced hash value is set) and the insertion counter is atomically * incremented: if no other thread has incremented the counter during the * whole operation (which is checked by comparing with c0), then we're * good to go and complete the insertion, otherwise we roll back and * start over. 
*/ template<typename,typename,typename,typename> class table; /* concurrent/non-concurrent interop */ template <typename TypePolicy,typename Hash,typename Pred,typename Allocator> using concurrent_table_core_impl=table_core< TypePolicy,group15<atomic_integral>,concurrent_table_arrays, atomic_size_control,Hash,Pred,Allocator>; #include <boost/unordered/detail/foa/ignore_wshadow.hpp> #if defined(BOOST_MSVC) #pragma warning(push) #pragma warning(disable:4714) /* marked as __forceinline not inlined */ #endif template<typename TypePolicy,typename Hash,typename Pred,typename Allocator> class concurrent_table: concurrent_table_core_impl<TypePolicy,Hash,Pred,Allocator> { using super=concurrent_table_core_impl<TypePolicy,Hash,Pred,Allocator>; using type_policy=typename super::type_policy; using group_type=typename super::group_type; using super::N; using prober=typename super::prober; using arrays_type=typename super::arrays_type; using size_ctrl_type=typename super::size_ctrl_type; using compatible_nonconcurrent_table=table<TypePolicy,Hash,Pred,Allocator>; friend compatible_nonconcurrent_table; public: using key_type=typename super::key_type; using init_type=typename super::init_type; using value_type=typename super::value_type; using element_type=typename super::element_type; using hasher=typename super::hasher; using key_equal=typename super::key_equal; using allocator_type=typename super::allocator_type; using size_type=typename super::size_type; static constexpr std::size_t bulk_visit_size=16; private: template<typename Value,typename T> using enable_if_is_value_type=typename std::enable_if< !std::is_same<init_type,value_type>::value&& std::is_same<Value,value_type>::value, T >::type; public: concurrent_table( std::size_t n=default_bucket_count,const Hash& h_=Hash(), const Pred& pred_=Pred(),const Allocator& al_=Allocator()): super{n,h_,pred_,al_} {} concurrent_table(const concurrent_table& x): concurrent_table(x,x.exclusive_access()){} 
concurrent_table(concurrent_table&& x): concurrent_table(std::move(x),x.exclusive_access()){} concurrent_table(const concurrent_table& x,const Allocator& al_): concurrent_table(x,al_,x.exclusive_access()){} concurrent_table(concurrent_table&& x,const Allocator& al_): concurrent_table(std::move(x),al_,x.exclusive_access()){} template<typename ArraysType> concurrent_table( compatible_nonconcurrent_table&& x, arrays_holder<ArraysType,Allocator>&& ah): super{ std::move(x.h()),std::move(x.pred()),std::move(x.al()), [&x]{return arrays_type::new_group_access( x.al(),typename arrays_type::super{ x.arrays.groups_size_index,x.arrays.groups_size_mask, to_pointer<typename arrays_type::group_type_pointer>( reinterpret_cast<group_type*>(x.arrays.groups())), x.arrays.elements_});}, size_ctrl_type{x.size_ctrl.ml,x.size_ctrl.size}} { x.arrays=ah.release(); x.size_ctrl.ml=x.initial_max_load(); x.size_ctrl.size=0; } concurrent_table(compatible_nonconcurrent_table&& x): concurrent_table(std::move(x),x.make_empty_arrays()) {} ~concurrent_table()=default; concurrent_table& operator=(const concurrent_table& x) { auto lck=exclusive_access(*this,x); super::operator=(x); return *this; } concurrent_table& operator=(concurrent_table&& x)noexcept( noexcept(std::declval<super&>() = std::declval<super&&>())) { auto lck=exclusive_access(*this,x); super::operator=(std::move(x)); return *this; } concurrent_table& operator=(std::initializer_list<value_type> il) { auto lck=exclusive_access(); super::clear(); super::noshrink_reserve(il.size()); for (auto const& v : il) { this->unprotected_emplace(v); } return *this; } allocator_type get_allocator()const noexcept { auto lck=shared_access(); return super::get_allocator(); } template<typename Key,typename F> BOOST_FORCEINLINE std::size_t visit(const Key& x,F&& f) { return visit_impl(group_exclusive{},x,std::forward<F>(f)); } template<typename Key,typename F> BOOST_FORCEINLINE std::size_t visit(const Key& x,F&& f)const { return 
visit_impl(group_shared{},x,std::forward<F>(f)); } template<typename Key,typename F> BOOST_FORCEINLINE std::size_t cvisit(const Key& x,F&& f)const { return visit(x,std::forward<F>(f)); } template<typename FwdIterator,typename F> BOOST_FORCEINLINE std::size_t visit(FwdIterator first,FwdIterator last,F&& f) { return bulk_visit_impl(group_exclusive{},first,last,std::forward<F>(f)); } template<typename FwdIterator,typename F> BOOST_FORCEINLINE std::size_t visit(FwdIterator first,FwdIterator last,F&& f)const { return bulk_visit_impl(group_shared{},first,last,std::forward<F>(f)); } template<typename FwdIterator,typename F> BOOST_FORCEINLINE std::size_t cvisit(FwdIterator first,FwdIterator last,F&& f)const { return visit(first,last,std::forward<F>(f)); } template<typename F> std::size_t visit_all(F&& f) { return visit_all_impl(group_exclusive{},std::forward<F>(f)); } template<typename F> std::size_t visit_all(F&& f)const { return visit_all_impl(group_shared{},std::forward<F>(f)); } template<typename F> std::size_t cvisit_all(F&& f)const { return visit_all(std::forward<F>(f)); } #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) template<typename ExecutionPolicy,typename F> void visit_all(ExecutionPolicy&& policy,F&& f) { visit_all_impl( group_exclusive{}, std::forward<ExecutionPolicy>(policy),std::forward<F>(f)); } template<typename ExecutionPolicy,typename F> void visit_all(ExecutionPolicy&& policy,F&& f)const { visit_all_impl( group_shared{}, std::forward<ExecutionPolicy>(policy),std::forward<F>(f)); } template<typename ExecutionPolicy,typename F> void cvisit_all(ExecutionPolicy&& policy,F&& f)const { visit_all(std::forward<ExecutionPolicy>(policy),std::forward<F>(f)); } #endif template<typename F> bool visit_while(F&& f) { return visit_while_impl(group_exclusive{},std::forward<F>(f)); } template<typename F> bool visit_while(F&& f)const { return visit_while_impl(group_shared{},std::forward<F>(f)); } template<typename F> bool cvisit_while(F&& f)const { return 
visit_while(std::forward<F>(f)); } #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) template<typename ExecutionPolicy,typename F> bool visit_while(ExecutionPolicy&& policy,F&& f) { return visit_while_impl( group_exclusive{}, std::forward<ExecutionPolicy>(policy),std::forward<F>(f)); } template<typename ExecutionPolicy,typename F> bool visit_while(ExecutionPolicy&& policy,F&& f)const { return visit_while_impl( group_shared{}, std::forward<ExecutionPolicy>(policy),std::forward<F>(f)); } template<typename ExecutionPolicy,typename F> bool cvisit_while(ExecutionPolicy&& policy,F&& f)const { return visit_while( std::forward<ExecutionPolicy>(policy),std::forward<F>(f)); } #endif bool empty()const noexcept{return size()==0;} std::size_t size()const noexcept { auto lck=shared_access(); return unprotected_size(); } using super::max_size; template<typename... Args> BOOST_FORCEINLINE bool emplace(Args&&... args) { return construct_and_emplace(std::forward<Args>(args)...); } /* Optimization for value_type and init_type, to avoid constructing twice */ template<typename Value> BOOST_FORCEINLINE auto emplace(Value&& x)->typename std::enable_if< detail::is_similar_to_any<Value,value_type,init_type>::value,bool>::type { return emplace_impl(std::forward<Value>(x)); } /* Optimizations for maps for (k,v) to avoid eagerly constructing value */ template <typename K, typename V> BOOST_FORCEINLINE auto emplace(K&& k, V&& v) -> typename std::enable_if<is_emplace_kv_able<concurrent_table, K>::value, bool>::type { alloc_cted_or_fwded_key_type<type_policy, Allocator, K&&> x( this->al(), std::forward<K>(k)); return emplace_impl( try_emplace_args_t{}, x.move_or_fwd(), std::forward<V>(v)); } BOOST_FORCEINLINE bool insert(const init_type& x){return emplace_impl(x);} BOOST_FORCEINLINE bool insert(init_type&& x){return emplace_impl(std::move(x));} /* template<typename=void> tilts call ambiguities in favor of init_type */ template<typename=void> BOOST_FORCEINLINE bool insert(const value_type& 
x){return emplace_impl(x);} template<typename=void> BOOST_FORCEINLINE bool insert(value_type&& x){return emplace_impl(std::move(x));} template<typename Key,typename... Args> BOOST_FORCEINLINE bool try_emplace(Key&& x,Args&&... args) { return emplace_impl( try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...); } template<typename Key,typename... Args> BOOST_FORCEINLINE bool try_emplace_or_visit(Key&& x,Args&&... args) { return emplace_or_visit_flast( group_exclusive{}, try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...); } template<typename Key,typename... Args> BOOST_FORCEINLINE bool try_emplace_or_cvisit(Key&& x,Args&&... args) { return emplace_or_visit_flast( group_shared{}, try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...); } template<typename... Args> BOOST_FORCEINLINE bool emplace_or_visit(Args&&... args) { return construct_and_emplace_or_visit_flast( group_exclusive{},std::forward<Args>(args)...); } template<typename... Args> BOOST_FORCEINLINE bool emplace_or_cvisit(Args&&... 
args) { return construct_and_emplace_or_visit_flast( group_shared{},std::forward<Args>(args)...); } template<typename F> BOOST_FORCEINLINE bool insert_or_visit(const init_type& x,F&& f) { return emplace_or_visit_impl(group_exclusive{},std::forward<F>(f),x); } template<typename F> BOOST_FORCEINLINE bool insert_or_cvisit(const init_type& x,F&& f) { return emplace_or_visit_impl(group_shared{},std::forward<F>(f),x); } template<typename F> BOOST_FORCEINLINE bool insert_or_visit(init_type&& x,F&& f) { return emplace_or_visit_impl( group_exclusive{},std::forward<F>(f),std::move(x)); } template<typename F> BOOST_FORCEINLINE bool insert_or_cvisit(init_type&& x,F&& f) { return emplace_or_visit_impl( group_shared{},std::forward<F>(f),std::move(x)); } /* SFINAE tilts call ambiguities in favor of init_type */ template<typename Value,typename F> BOOST_FORCEINLINE auto insert_or_visit(const Value& x,F&& f) ->enable_if_is_value_type<Value,bool> { return emplace_or_visit_impl(group_exclusive{},std::forward<F>(f),x); } template<typename Value,typename F> BOOST_FORCEINLINE auto insert_or_cvisit(const Value& x,F&& f) ->enable_if_is_value_type<Value,bool> { return emplace_or_visit_impl(group_shared{},std::forward<F>(f),x); } template<typename Value,typename F> BOOST_FORCEINLINE auto insert_or_visit(Value&& x,F&& f) ->enable_if_is_value_type<Value,bool> { return emplace_or_visit_impl( group_exclusive{},std::forward<F>(f),std::move(x)); } template<typename Value,typename F> BOOST_FORCEINLINE auto insert_or_cvisit(Value&& x,F&& f) ->enable_if_is_value_type<Value,bool> { return emplace_or_visit_impl( group_shared{},std::forward<F>(f),std::move(x)); } template<typename Key> BOOST_FORCEINLINE std::size_t erase(const Key& x) { return erase_if(x,[](const value_type&){return true;}); } template<typename Key,typename F> BOOST_FORCEINLINE auto erase_if(const Key& x,F&& f)->typename std::enable_if< !is_execution_policy<Key>::value,std::size_t>::type { auto lck=shared_access(); auto 
hash=this->hash_for(x); std::size_t res=0; unprotected_internal_visit( group_exclusive{},x,this->position_for(hash),hash, [&,this](group_type* pg,unsigned int n,element_type* p) { if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){ super::erase(pg,n,p); res=1; } }); return res; } template<typename F> std::size_t erase_if(F&& f) { auto lck=shared_access(); std::size_t res=0; for_all_elements( group_exclusive{}, [&,this](group_type* pg,unsigned int n,element_type* p){ if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){ super::erase(pg,n,p); ++res; } }); return res; } #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) template<typename ExecutionPolicy,typename F> auto erase_if(ExecutionPolicy&& policy,F&& f)->typename std::enable_if< is_execution_policy<ExecutionPolicy>::value,void>::type { auto lck=shared_access(); for_all_elements( group_exclusive{},std::forward<ExecutionPolicy>(policy), [&,this](group_type* pg,unsigned int n,element_type* p){ if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){ super::erase(pg,n,p); } }); } #endif void swap(concurrent_table& x) noexcept(noexcept(std::declval<super&>().swap(std::declval<super&>()))) { auto lck=exclusive_access(*this,x); super::swap(x); } void clear()noexcept { auto lck=exclusive_access(); super::clear(); } // TODO: should we accept different allocator too? 
template<typename Hash2,typename Pred2> size_type merge(concurrent_table<TypePolicy,Hash2,Pred2,Allocator>& x) { using merge_table_type=concurrent_table<TypePolicy,Hash2,Pred2,Allocator>; using super2=typename merge_table_type::super; // for clang boost::ignore_unused<super2>(); auto lck=exclusive_access(*this,x); size_type s=super::size(); x.super2::for_all_elements( /* super2::for_all_elements -> unprotected */ [&,this](group_type* pg,unsigned int n,element_type* p){ typename merge_table_type::erase_on_exit e{x,pg,n,p}; if(!unprotected_emplace(type_policy::move(*p)))e.rollback(); }); return size_type{super::size()-s}; } template<typename Hash2,typename Pred2> void merge(concurrent_table<TypePolicy,Hash2,Pred2,Allocator>&& x){merge(x);} hasher hash_function()const { auto lck=shared_access(); return super::hash_function(); } key_equal key_eq()const { auto lck=shared_access(); return super::key_eq(); } template<typename Key> BOOST_FORCEINLINE std::size_t count(Key&& x)const { return (std::size_t)contains(std::forward<Key>(x)); } template<typename Key> BOOST_FORCEINLINE bool contains(Key&& x)const { return visit(std::forward<Key>(x),[](const value_type&){})!=0; } std::size_t capacity()const noexcept { auto lck=shared_access(); return super::capacity(); } float load_factor()const noexcept { auto lck=shared_access(); if(super::capacity()==0)return 0; else return float(unprotected_size())/ float(super::capacity()); } using super::max_load_factor; std::size_t max_load()const noexcept { auto lck=shared_access(); return super::max_load(); } void rehash(std::size_t n) { auto lck=exclusive_access(); super::rehash(n); } void reserve(std::size_t n) { auto lck=exclusive_access(); super::reserve(n); } template<typename Predicate> friend std::size_t erase_if(concurrent_table& x,Predicate&& pr) { return x.erase_if(std::forward<Predicate>(pr)); } friend bool operator==(const concurrent_table& x,const concurrent_table& y) { auto lck=exclusive_access(x,y); return static_cast<const 
super&>(x)==static_cast<const super&>(y); } friend bool operator!=(const concurrent_table& x,const concurrent_table& y) { return !(x==y); } private: template<typename,typename,typename,typename> friend class concurrent_table; using mutex_type=rw_spinlock; using multimutex_type=multimutex<mutex_type,128>; // TODO: adapt 128 to the machine using shared_lock_guard=reentrancy_checked<shared_lock<mutex_type>>; using exclusive_lock_guard=reentrancy_checked<lock_guard<multimutex_type>>; using exclusive_bilock_guard= reentrancy_bichecked<scoped_bilock<multimutex_type>>; using group_shared_lock_guard=typename group_access::shared_lock_guard; using group_exclusive_lock_guard=typename group_access::exclusive_lock_guard; using group_insert_counter_type=typename group_access::insert_counter_type; concurrent_table(const concurrent_table& x,exclusive_lock_guard): super{x}{} concurrent_table(concurrent_table&& x,exclusive_lock_guard): super{std::move(x)}{} concurrent_table( const concurrent_table& x,const Allocator& al_,exclusive_lock_guard): super{x,al_}{} concurrent_table( concurrent_table&& x,const Allocator& al_,exclusive_lock_guard): super{std::move(x),al_}{} inline shared_lock_guard shared_access()const { thread_local auto id=(++thread_counter)%mutexes.size(); return shared_lock_guard{this,mutexes[id]}; } inline exclusive_lock_guard exclusive_access()const { return exclusive_lock_guard{this,mutexes}; } static inline exclusive_bilock_guard exclusive_access( const concurrent_table& x,const concurrent_table& y) { return {&x,&y,x.mutexes,y.mutexes}; } template<typename Hash2,typename Pred2> static inline exclusive_bilock_guard exclusive_access( const concurrent_table& x, const concurrent_table<TypePolicy,Hash2,Pred2,Allocator>& y) { return {&x,&y,x.mutexes,y.mutexes}; } /* Tag-dispatched shared/exclusive group access */ using group_shared=std::false_type; using group_exclusive=std::true_type; inline group_shared_lock_guard access(group_shared,std::size_t pos)const { return 
this->arrays.group_accesses()[pos].shared_access(); } inline group_exclusive_lock_guard access( group_exclusive,std::size_t pos)const { return this->arrays.group_accesses()[pos].exclusive_access(); } inline group_insert_counter_type& insert_counter(std::size_t pos)const { return this->arrays.group_accesses()[pos].insert_counter(); } /* Const casts value_type& according to the level of group access for * safe passing to visitation functions. When type_policy is set-like, * access is always const regardless of group access. */ static inline const value_type& cast_for(group_shared,value_type& x){return x;} static inline typename std::conditional< std::is_same<key_type,value_type>::value, const value_type&, value_type& >::type cast_for(group_exclusive,value_type& x){return x;} struct erase_on_exit { erase_on_exit( concurrent_table& x_, group_type* pg_,unsigned int pos_,element_type* p_): x(x_),pg(pg_),pos(pos_),p(p_){} ~erase_on_exit(){if(!rollback_)x.super::erase(pg,pos,p);} void rollback(){rollback_=true;} concurrent_table &x; group_type *pg; unsigned int pos; element_type *p; bool rollback_=false; }; template<typename GroupAccessMode,typename Key,typename F> BOOST_FORCEINLINE std::size_t visit_impl( GroupAccessMode access_mode,const Key& x,F&& f)const { auto lck=shared_access(); auto hash=this->hash_for(x); return unprotected_visit( access_mode,x,this->position_for(hash),hash,std::forward<F>(f)); } template<typename GroupAccessMode,typename FwdIterator,typename F> BOOST_FORCEINLINE std::size_t bulk_visit_impl( GroupAccessMode access_mode,FwdIterator first,FwdIterator last,F&& f)const { auto lck=shared_access(); std::size_t res=0; auto n=static_cast<std::size_t>(std::distance(first,last)); while(n){ auto m=n<2*bulk_visit_size?n:bulk_visit_size; res+=unprotected_bulk_visit(access_mode,first,m,std::forward<F>(f)); n-=m; std::advance( first, static_cast< typename std::iterator_traits<FwdIterator>::difference_type>(m)); } return res; } template<typename 
GroupAccessMode,typename F> std::size_t visit_all_impl(GroupAccessMode access_mode,F&& f)const { auto lck=shared_access(); std::size_t res=0; for_all_elements(access_mode,[&](element_type* p){ f(cast_for(access_mode,type_policy::value_from(*p))); ++res; }); return res; } #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) template<typename GroupAccessMode,typename ExecutionPolicy,typename F> void visit_all_impl( GroupAccessMode access_mode,ExecutionPolicy&& policy,F&& f)const { auto lck=shared_access(); for_all_elements( access_mode,std::forward<ExecutionPolicy>(policy), [&](element_type* p){ f(cast_for(access_mode,type_policy::value_from(*p))); }); } #endif template<typename GroupAccessMode,typename F> bool visit_while_impl(GroupAccessMode access_mode,F&& f)const { auto lck=shared_access(); return for_all_elements_while(access_mode,[&](element_type* p){ return f(cast_for(access_mode,type_policy::value_from(*p))); }); } #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) template<typename GroupAccessMode,typename ExecutionPolicy,typename F> bool visit_while_impl( GroupAccessMode access_mode,ExecutionPolicy&& policy,F&& f)const { auto lck=shared_access(); return for_all_elements_while( access_mode,std::forward<ExecutionPolicy>(policy), [&](element_type* p){ return f(cast_for(access_mode,type_policy::value_from(*p))); }); } #endif template<typename GroupAccessMode,typename Key,typename F> BOOST_FORCEINLINE std::size_t unprotected_visit( GroupAccessMode access_mode, const Key& x,std::size_t pos0,std::size_t hash,F&& f)const { return unprotected_internal_visit( access_mode,x,pos0,hash, [&](group_type*,unsigned int,element_type* p) {f(cast_for(access_mode,type_policy::value_from(*p)));}); } #if defined(BOOST_MSVC) /* warning: forcing value to bool 'true' or 'false' in bool(pred()...) 
*/ #pragma warning(push) #pragma warning(disable:4800) #endif template<typename GroupAccessMode,typename Key,typename F> BOOST_FORCEINLINE std::size_t unprotected_internal_visit( GroupAccessMode access_mode, const Key& x,std::size_t pos0,std::size_t hash,F&& f)const { prober pb(pos0); do{ auto pos=pb.get(); auto pg=this->arrays.groups()+pos; auto mask=pg->match(hash); if(mask){ auto p=this->arrays.elements()+pos*N; BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N); auto lck=access(access_mode,pos); do{ auto n=unchecked_countr_zero(mask); if(BOOST_LIKELY( pg->is_occupied(n)&&bool(this->pred()(x,this->key_from(p[n]))))){ f(pg,n,p+n); return 1; } mask&=mask-1; }while(mask); } if(BOOST_LIKELY(pg->is_not_overflowed(hash))){ return 0; } } while(BOOST_LIKELY(pb.next(this->arrays.groups_size_mask))); return 0; } template<typename GroupAccessMode,typename FwdIterator,typename F> BOOST_FORCEINLINE std::size_t unprotected_bulk_visit( GroupAccessMode access_mode,FwdIterator first,std::size_t m,F&& f)const { BOOST_ASSERT(m<2*bulk_visit_size); std::size_t res=0, hashes[2*bulk_visit_size-1], positions[2*bulk_visit_size-1]; int masks[2*bulk_visit_size-1]; auto it=first; for(auto i=m;i--;++it){ auto hash=hashes[i]=this->hash_for(*it); auto pos=positions[i]=this->position_for(hash); BOOST_UNORDERED_PREFETCH(this->arrays.groups()+pos); } for(auto i=m;i--;){ auto hash=hashes[i]; auto pos=positions[i]; auto mask=masks[i]=(this->arrays.groups()+pos)->match(hash); if(mask){ BOOST_UNORDERED_PREFETCH(this->arrays.group_accesses()+pos); BOOST_UNORDERED_PREFETCH( this->arrays.elements()+pos*N+unchecked_countr_zero(mask)); } } it=first; for(auto i=m;i--;++it){ auto pos=positions[i]; prober pb(pos); auto pg=this->arrays.groups()+pos; auto mask=masks[i]; element_type *p; if(!mask)goto post_mask; p=this->arrays.elements()+pos*N; for(;;){ { auto lck=access(access_mode,pos); do{ auto n=unchecked_countr_zero(mask); if(BOOST_LIKELY( pg->is_occupied(n)&& bool(this->pred()(*it,this->key_from(p[n]))))){ 
f(cast_for(access_mode,type_policy::value_from(p[n]))); ++res; goto next_key; } mask&=mask-1; }while(mask); } post_mask: do{ if(BOOST_LIKELY(pg->is_not_overflowed(hashes[i]))|| BOOST_UNLIKELY(!pb.next(this->arrays.groups_size_mask))){ goto next_key; } pos=pb.get(); pg=this->arrays.groups()+pos; mask=pg->match(hashes[i]); }while(!mask); p=this->arrays.elements()+pos*N; BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N); } next_key:; } return res; } #if defined(BOOST_MSVC) #pragma warning(pop) /* C4800 */ #endif std::size_t unprotected_size()const { std::size_t m=this->size_ctrl.ml; std::size_t s=this->size_ctrl.size; return s<=m?s:m; } template<typename... Args> BOOST_FORCEINLINE bool construct_and_emplace(Args&&... args) { return construct_and_emplace_or_visit( group_shared{},[](const value_type&){},std::forward<Args>(args)...); } struct call_construct_and_emplace_or_visit { template<typename... Args> BOOST_FORCEINLINE bool operator()( concurrent_table* this_,Args&&... args)const { return this_->construct_and_emplace_or_visit( std::forward<Args>(args)...); } }; template<typename GroupAccessMode,typename... Args> BOOST_FORCEINLINE bool construct_and_emplace_or_visit_flast( GroupAccessMode access_mode,Args&&... args) { return mp11::tuple_apply( call_construct_and_emplace_or_visit{}, std::tuple_cat( std::make_tuple(this,access_mode), tuple_rotate_right(std::forward_as_tuple(std::forward<Args>(args)...)) ) ); } template<typename GroupAccessMode,typename F,typename... Args> BOOST_FORCEINLINE bool construct_and_emplace_or_visit( GroupAccessMode access_mode,F&& f,Args&&... 
args) { auto lck=shared_access(); alloc_cted_insert_type<type_policy,Allocator,Args...> x( this->al(),std::forward<Args>(args)...); int res=unprotected_norehash_emplace_or_visit( access_mode,std::forward<F>(f),type_policy::move(x.value())); if(BOOST_LIKELY(res>=0))return res!=0; lck.unlock(); rehash_if_full(); return noinline_emplace_or_visit( access_mode,std::forward<F>(f),type_policy::move(x.value())); } template<typename... Args> BOOST_FORCEINLINE bool emplace_impl(Args&&... args) { return emplace_or_visit_impl( group_shared{},[](const value_type&){},std::forward<Args>(args)...); } template<typename GroupAccessMode,typename F,typename... Args> BOOST_NOINLINE bool noinline_emplace_or_visit( GroupAccessMode access_mode,F&& f,Args&&... args) { return emplace_or_visit_impl( access_mode,std::forward<F>(f),std::forward<Args>(args)...); } struct call_emplace_or_visit_impl { template<typename... Args> BOOST_FORCEINLINE bool operator()( concurrent_table* this_,Args&&... args)const { return this_->emplace_or_visit_impl(std::forward<Args>(args)...); } }; template<typename GroupAccessMode,typename... Args> BOOST_FORCEINLINE bool emplace_or_visit_flast( GroupAccessMode access_mode,Args&&... args) { return mp11::tuple_apply( call_emplace_or_visit_impl{}, std::tuple_cat( std::make_tuple(this,access_mode), tuple_rotate_right(std::forward_as_tuple(std::forward<Args>(args)...)) ) ); } template<typename GroupAccessMode,typename F,typename... Args> BOOST_FORCEINLINE bool emplace_or_visit_impl( GroupAccessMode access_mode,F&& f,Args&&... args) { for(;;){ { auto lck=shared_access(); int res=unprotected_norehash_emplace_or_visit( access_mode,std::forward<F>(f),std::forward<Args>(args)...); if(BOOST_LIKELY(res>=0))return res!=0; } rehash_if_full(); } } template<typename... Args> BOOST_FORCEINLINE bool unprotected_emplace(Args&&... 
args) { const auto &k=this->key_from(std::forward<Args>(args)...); auto hash=this->hash_for(k); auto pos0=this->position_for(hash); if(this->find(k,pos0,hash))return false; if(BOOST_LIKELY(this->size_ctrl.size<this->size_ctrl.ml)){ this->unchecked_emplace_at(pos0,hash,std::forward<Args>(args)...); } else{ this->unchecked_emplace_with_rehash(hash,std::forward<Args>(args)...); } return true; } struct reserve_size { reserve_size(concurrent_table& x_):x(x_) { size_=++x.size_ctrl.size; } ~reserve_size() { if(!commit_)--x.size_ctrl.size; } bool succeeded()const{return size_<=x.size_ctrl.ml;} void commit(){commit_=true;} concurrent_table &x; std::size_t size_; bool commit_=false; }; struct reserve_slot { reserve_slot(group_type* pg_,std::size_t pos_,std::size_t hash): pg{pg_},pos{pos_} { pg->set(pos,hash); } ~reserve_slot() { if(!commit_)pg->reset(pos); } void commit(){commit_=true;} group_type *pg; std::size_t pos; bool commit_=false; }; template<typename GroupAccessMode,typename F,typename... Args> BOOST_FORCEINLINE int unprotected_norehash_emplace_or_visit( GroupAccessMode access_mode,F&& f,Args&&... 
args) { const auto &k=this->key_from(std::forward<Args>(args)...); auto hash=this->hash_for(k); auto pos0=this->position_for(hash); for(;;){ startover: boost::uint32_t counter=insert_counter(pos0); if(unprotected_visit( access_mode,k,pos0,hash,std::forward<F>(f)))return 0; reserve_size rsize(*this); if(BOOST_LIKELY(rsize.succeeded())){ for(prober pb(pos0);;pb.next(this->arrays.groups_size_mask)){ auto pos=pb.get(); auto pg=this->arrays.groups()+pos; auto lck=access(group_exclusive{},pos); auto mask=pg->match_available(); if(BOOST_LIKELY(mask!=0)){ auto n=unchecked_countr_zero(mask); reserve_slot rslot{pg,n,hash}; if(BOOST_UNLIKELY(insert_counter(pos0)++!=counter)){ /* other thread inserted from pos0, need to start over */ goto startover; } auto p=this->arrays.elements()+pos*N+n; this->construct_element(p,std::forward<Args>(args)...); rslot.commit(); rsize.commit(); return 1; } pg->mark_overflow(hash); } } else return -1; } } void rehash_if_full() { auto lck=exclusive_access(); if(this->size_ctrl.size==this->size_ctrl.ml){ this->unchecked_rehash_for_growth(); } } template<typename GroupAccessMode,typename F> auto for_all_elements(GroupAccessMode access_mode,F f)const ->decltype(f(nullptr),void()) { for_all_elements( access_mode,[&](group_type*,unsigned int,element_type* p){f(p);}); } template<typename GroupAccessMode,typename F> auto for_all_elements(GroupAccessMode access_mode,F f)const ->decltype(f(nullptr,0,nullptr),void()) { for_all_elements_while( access_mode,[&](group_type* pg,unsigned int n,element_type* p) {f(pg,n,p);return true;}); } template<typename GroupAccessMode,typename F> auto for_all_elements_while(GroupAccessMode access_mode,F f)const ->decltype(f(nullptr),bool()) { return for_all_elements_while( access_mode,[&](group_type*,unsigned int,element_type* p){return f(p);}); } template<typename GroupAccessMode,typename F> auto for_all_elements_while(GroupAccessMode access_mode,F f)const ->decltype(f(nullptr,0,nullptr),bool()) { auto 
p=this->arrays.elements(); if(p){ for(auto pg=this->arrays.groups(),last=pg+this->arrays.groups_size_mask+1; pg!=last;++pg,p+=N){ auto lck=access(access_mode,(std::size_t)(pg-this->arrays.groups())); auto mask=this->match_really_occupied(pg,last); while(mask){ auto n=unchecked_countr_zero(mask); if(!f(pg,n,p+n))return false; mask&=mask-1; } } } return true; } #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) template<typename GroupAccessMode,typename ExecutionPolicy,typename F> auto for_all_elements( GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const ->decltype(f(nullptr),void()) { for_all_elements( access_mode,std::forward<ExecutionPolicy>(policy), [&](group_type*,unsigned int,element_type* p){f(p);}); } template<typename GroupAccessMode,typename ExecutionPolicy,typename F> auto for_all_elements( GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const ->decltype(f(nullptr,0,nullptr),void()) { if(!this->arrays.elements())return; auto first=this->arrays.groups(), last=first+this->arrays.groups_size_mask+1; std::for_each(std::forward<ExecutionPolicy>(policy),first,last, [&,this](group_type& g){ auto pos=static_cast<std::size_t>(&g-first); auto p=this->arrays.elements()+pos*N; auto lck=access(access_mode,pos); auto mask=this->match_really_occupied(&g,last); while(mask){ auto n=unchecked_countr_zero(mask); f(&g,n,p+n); mask&=mask-1; } } ); } template<typename GroupAccessMode,typename ExecutionPolicy,typename F> bool for_all_elements_while( GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const { if(!this->arrays.elements())return true; auto first=this->arrays.groups(), last=first+this->arrays.groups_size_mask+1; return std::all_of(std::forward<ExecutionPolicy>(policy),first,last, [&,this](group_type& g){ auto pos=static_cast<std::size_t>(&g-first); auto p=this->arrays.elements()+pos*N; auto lck=access(access_mode,pos); auto mask=this->match_really_occupied(&g,last); while(mask){ auto n=unchecked_countr_zero(mask); if(!f(p+n))return false; 
mask&=mask-1; } return true; } ); } #endif friend class boost::serialization::access; template<typename Archive> void serialize(Archive& ar,unsigned int version) { core::split_member(ar,*this,version); } template<typename Archive> void save(Archive& ar,unsigned int version)const { save( ar,version, std::integral_constant<bool,std::is_same<key_type,value_type>::value>{}); } template<typename Archive> void save(Archive& ar,unsigned int,std::true_type /* set */)const { auto lck=exclusive_access(); const std::size_t s=super::size(); const serialization_version<value_type> value_version; ar<<core::make_nvp("count",s); ar<<core::make_nvp("value_version",value_version); super::for_all_elements([&,this](element_type* p){ auto& x=type_policy::value_from(*p); core::save_construct_data_adl(ar,std::addressof(x),value_version); ar<<serialization::make_nvp("item",x); }); } template<typename Archive> void save(Archive& ar,unsigned int,std::false_type /* map */)const { using raw_key_type=typename std::remove_const<key_type>::type; using raw_mapped_type=typename std::remove_const< typename TypePolicy::mapped_type>::type; auto lck=exclusive_access(); const std::size_t s=super::size(); const serialization_version<raw_key_type> key_version; const serialization_version<raw_mapped_type> mapped_version; ar<<core::make_nvp("count",s); ar<<core::make_nvp("key_version",key_version); ar<<core::make_nvp("mapped_version",mapped_version); super::for_all_elements([&,this](element_type* p){ /* To remain lib-independent from Boost.Serialization and not rely on * the user having included the serialization code for std::pair * (boost/serialization/utility.hpp), we serialize the key and the * mapped value separately. 
*/ auto& x=type_policy::value_from(*p); core::save_construct_data_adl( ar,std::addressof(x.first),key_version); ar<<serialization::make_nvp("key",x.first); core::save_construct_data_adl( ar,std::addressof(x.second),mapped_version); ar<<serialization::make_nvp("mapped",x.second); }); } template<typename Archive> void load(Archive& ar,unsigned int version) { load( ar,version, std::integral_constant<bool,std::is_same<key_type,value_type>::value>{}); } template<typename Archive> void load(Archive& ar,unsigned int,std::true_type /* set */) { auto lck=exclusive_access(); std::size_t s; serialization_version<value_type> value_version; ar>>core::make_nvp("count",s); ar>>core::make_nvp("value_version",value_version); super::clear(); super::reserve(s); for(std::size_t n=0;n<s;++n){ archive_constructed<value_type> value("item",ar,value_version); auto& x=value.get(); auto hash=this->hash_for(x); auto pos0=this->position_for(hash); if(this->find(x,pos0,hash))throw_exception(bad_archive_exception()); auto loc=this->unchecked_emplace_at(pos0,hash,std::move(x)); ar.reset_object_address(std::addressof(*loc.p),std::addressof(x)); } } template<typename Archive> void load(Archive& ar,unsigned int,std::false_type /* map */) { using raw_key_type=typename std::remove_const<key_type>::type; using raw_mapped_type=typename std::remove_const< typename TypePolicy::mapped_type>::type; auto lck=exclusive_access(); std::size_t s; serialization_version<raw_key_type> key_version; serialization_version<raw_mapped_type> mapped_version; ar>>core::make_nvp("count",s); ar>>core::make_nvp("key_version",key_version); ar>>core::make_nvp("mapped_version",mapped_version); super::clear(); super::reserve(s); for(std::size_t n=0;n<s;++n){ archive_constructed<raw_key_type> key("key",ar,key_version); archive_constructed<raw_mapped_type> mapped("mapped",ar,mapped_version); auto& k=key.get(); auto& m=mapped.get(); auto hash=this->hash_for(k); auto pos0=this->position_for(hash); 
if(this->find(k,pos0,hash))throw_exception(bad_archive_exception()); auto loc=this->unchecked_emplace_at(pos0,hash,std::move(k),std::move(m)); ar.reset_object_address(std::addressof(loc.p->first),std::addressof(k)); ar.reset_object_address(std::addressof(loc.p->second),std::addressof(m)); } } static std::atomic<std::size_t> thread_counter; mutable multimutex_type mutexes; }; template<typename T,typename H,typename P,typename A> std::atomic<std::size_t> concurrent_table<T,H,P,A>::thread_counter={}; #if defined(BOOST_MSVC) #pragma warning(pop) /* C4714 */ #endif #include <boost/unordered/detail/foa/restore_wshadow.hpp> } /* namespace foa */ } /* namespace detail */ } /* namespace unordered */ } /* namespace boost */ #endif
0
repos/unordered/include/boost/unordered/detail
repos/unordered/include/boost/unordered/detail/foa/table.hpp
/* Fast open-addressing hash table. * * Copyright 2022-2023 Joaquin M Lopez Munoz. * Copyright 2023 Christian Mazakas. * Copyright 2024 Braden Ganetsky. * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) * * See https://www.boost.org/libs/unordered for library home page. */ #ifndef BOOST_UNORDERED_DETAIL_FOA_TABLE_HPP #define BOOST_UNORDERED_DETAIL_FOA_TABLE_HPP #include <boost/assert.hpp> #include <boost/config.hpp> #include <boost/config/workaround.hpp> #include <boost/core/serialization.hpp> #include <boost/unordered/detail/foa/core.hpp> #include <boost/unordered/detail/serialize_tracked_address.hpp> #include <boost/unordered/detail/type_traits.hpp> #include <cstddef> #include <iterator> #include <memory> #include <type_traits> #include <utility> namespace boost{ namespace unordered{ namespace detail{ namespace foa{ /* use plain integrals for group metadata storage */ template<typename Integral> struct plain_integral { operator Integral()const{return n;} void operator=(Integral m){n=m;} #if BOOST_WORKAROUND(BOOST_GCC,>=50000 && BOOST_GCC<60000) void operator|=(Integral m){n=static_cast<Integral>(n|m);} void operator&=(Integral m){n=static_cast<Integral>(n&m);} #else void operator|=(Integral m){n|=m;} void operator&=(Integral m){n&=m;} #endif Integral n; }; struct plain_size_control { std::size_t ml; std::size_t size; }; template<typename,typename,typename,typename> class table; /* table_iterator keeps two pointers: * * - A pointer p to the element slot. * - A pointer pc to the n-th byte of the associated group metadata, where n * is the position of the element in the group. * * A simpler solution would have been to keep a pointer p to the element, a * pointer pg to the group, and the position n, but that would increase * sizeof(table_iterator) by 4/8 bytes. 
In order to make this compact * representation feasible, it is required that group objects are aligned * to their size, so that we can recover pg and n as * * - n = pc%sizeof(group) * - pg = pc-n * * (for explanatory purposes pg and pc are treated above as if they were memory * addresses rather than pointers). * * p = nullptr is conventionally used to mark end() iterators. */ /* internal conversion from const_iterator to iterator */ struct const_iterator_cast_tag{}; template<typename TypePolicy,typename GroupPtr,bool Const> class table_iterator { using group_pointer_traits=boost::pointer_traits<GroupPtr>; using type_policy=TypePolicy; using table_element_type=typename type_policy::element_type; using group_type=typename group_pointer_traits::element_type; using table_element_pointer= typename group_pointer_traits::template rebind<table_element_type>; using char_pointer= typename group_pointer_traits::template rebind<unsigned char>; static constexpr auto N=group_type::N; static constexpr auto regular_layout=group_type::regular_layout; public: using difference_type=std::ptrdiff_t; using value_type=typename type_policy::value_type; using pointer= typename std::conditional<Const,value_type const*,value_type*>::type; using reference= typename std::conditional<Const,value_type const&,value_type&>::type; using iterator_category=std::forward_iterator_tag; using element_type= typename std::conditional<Const,value_type const,value_type>::type; table_iterator():pc_{nullptr},p_{nullptr}{}; template<bool Const2,typename std::enable_if<!Const2>::type* =nullptr> table_iterator(const table_iterator<TypePolicy,GroupPtr,Const2>& x): pc_{x.pc_},p_{x.p_}{} table_iterator( const_iterator_cast_tag, const table_iterator<TypePolicy,GroupPtr,true>& x): pc_{x.pc_},p_{x.p_}{} inline reference operator*()const noexcept {return type_policy::value_from(*p());} inline pointer operator->()const noexcept {return std::addressof(type_policy::value_from(*p()));} inline table_iterator& 
operator++()noexcept{increment();return *this;} inline table_iterator operator++(int)noexcept {auto x=*this;increment();return x;} friend inline bool operator==( const table_iterator& x,const table_iterator& y) {return x.p()==y.p();} friend inline bool operator!=( const table_iterator& x,const table_iterator& y) {return !(x==y);} private: template<typename,typename,bool> friend class table_iterator; template<typename> friend class table_erase_return_type; template<typename,typename,typename,typename> friend class table; table_iterator(group_type* pg,std::size_t n,const table_element_type* ptet): pc_{to_pointer<char_pointer>( reinterpret_cast<unsigned char*>(const_cast<group_type*>(pg))+n)}, p_{to_pointer<table_element_pointer>(const_cast<table_element_type*>(ptet))} {} unsigned char* pc()const noexcept{return boost::to_address(pc_);} table_element_type* p()const noexcept{return boost::to_address(p_);} inline void increment()noexcept { BOOST_ASSERT(p()!=nullptr); increment(std::integral_constant<bool,regular_layout>{}); } inline void increment(std::true_type /* regular layout */)noexcept { using diff_type= typename boost::pointer_traits<char_pointer>::difference_type; for(;;){ ++p_; if(reinterpret_cast<uintptr_t>(pc())%sizeof(group_type)==N-1){ pc_+=static_cast<diff_type>(sizeof(group_type)-(N-1)); break; } ++pc_; if(!group_type::is_occupied(pc()))continue; if(BOOST_UNLIKELY(group_type::is_sentinel(pc())))p_=nullptr; return; } for(;;){ int mask=reinterpret_cast<group_type*>(pc())->match_occupied(); if(mask!=0){ auto n=unchecked_countr_zero(mask); if(BOOST_UNLIKELY(reinterpret_cast<group_type*>(pc())->is_sentinel(n))){ p_=nullptr; } else{ pc_+=static_cast<diff_type>(n); p_+=static_cast<diff_type>(n); } return; } pc_+=static_cast<diff_type>(sizeof(group_type)); p_+=static_cast<diff_type>(N); } } inline void increment(std::false_type /* interleaved */)noexcept { using diff_type= typename boost::pointer_traits<char_pointer>::difference_type; std::size_t 
n0=reinterpret_cast<uintptr_t>(pc())%sizeof(group_type); pc_-=static_cast<diff_type>(n0); int mask=( reinterpret_cast<group_type*>(pc())->match_occupied()>>(n0+1))<<(n0+1); if(!mask){ do{ pc_+=sizeof(group_type); p_+=N; } while((mask=reinterpret_cast<group_type*>(pc())->match_occupied())==0); } auto n=unchecked_countr_zero(mask); if(BOOST_UNLIKELY(reinterpret_cast<group_type*>(pc())->is_sentinel(n))){ p_=nullptr; } else{ pc_+=static_cast<diff_type>(n); p_-=static_cast<diff_type>(n0); p_+=static_cast<diff_type>(n); } } template<typename Archive> friend void serialization_track(Archive& ar,const table_iterator& x) { if(x.p()){ track_address(ar,x.pc_); track_address(ar,x.p_); } } friend class boost::serialization::access; template<typename Archive> void serialize(Archive& ar,unsigned int) { if(!p())pc_=nullptr; serialize_tracked_address(ar,pc_); serialize_tracked_address(ar,p_); } char_pointer pc_=nullptr; table_element_pointer p_=nullptr; }; /* Returned by table::erase([const_]iterator) to avoid iterator increment * if discarded. 
*/ template<typename Iterator> class table_erase_return_type; template<typename TypePolicy,typename GroupPtr,bool Const> class table_erase_return_type<table_iterator<TypePolicy,GroupPtr,Const>> { using iterator=table_iterator<TypePolicy,GroupPtr,Const>; using const_iterator=table_iterator<TypePolicy,GroupPtr,true>; public: /* can't delete it because VS in pre-C++17 mode needs to see it for RVO */ table_erase_return_type(const table_erase_return_type&); operator iterator()const noexcept { auto it=pos; it.increment(); /* valid even if *it was erased */ return iterator(const_iterator_cast_tag{},it); } template< bool dependent_value=false, typename std::enable_if<!Const||dependent_value>::type* =nullptr > operator const_iterator()const noexcept{return this->operator iterator();} private: template<typename,typename,typename,typename> friend class table; table_erase_return_type(const_iterator pos_):pos{pos_}{} table_erase_return_type& operator=(const table_erase_return_type&)=delete; const_iterator pos; }; /* foa::table interface departs in a number of ways from that of C++ unordered * associative containers because it's not for end-user consumption * (boost::unordered_(flat|node)_(map|set) wrappers complete it as * appropriate). * * The table supports two main modes of operation: flat and node-based. In the * flat case, buckets directly store elements. For node-based, buckets store * pointers to individually heap-allocated elements. * * For both flat and node-based: * * - begin() is not O(1). * - No bucket API. * - Load factor is fixed and can't be set by the user. * * For flat only: * * - value_type must be moveable. * - Pointer stability is not kept under rehashing. * - No extract API. * * try_emplace, erase and find support heterogeneous lookup by default, * that is, without checking for any ::is_transparent typedefs --the * checking is done by boost::unordered_(flat|node)_(map|set). 
*/

template<typename,typename,typename,typename>
class concurrent_table; /* concurrent/non-concurrent interop */

/* Concrete table_core instantiation shared by table and concurrent_table:
 * 15-slot SIMD-probed groups, plain (non-atomic) metadata and size control.
 */
template <typename TypePolicy,typename Hash,typename Pred,typename Allocator>
using table_core_impl=
  table_core<TypePolicy,group15<plain_integral>,table_arrays,
  plain_size_control,Hash,Pred,Allocator>;

#include <boost/unordered/detail/foa/ignore_wshadow.hpp>

#if defined(BOOST_MSVC)
#pragma warning(push)
#pragma warning(disable:4714) /* marked as __forceinline not inlined */
#endif

/* Non-concurrent open-addressing table. All storage/probing logic lives in
 * table_core (the private base); this class adds the iterator-based API and
 * the interop constructors that take over a concurrent_table's arrays.
 */
template<typename TypePolicy,typename Hash,typename Pred,typename Allocator>
class table:table_core_impl<TypePolicy,Hash,Pred,Allocator>
{
  using super=table_core_impl<TypePolicy,Hash,Pred,Allocator>;
  using type_policy=typename super::type_policy;
  using group_type=typename super::group_type;
  using super::N;
  using prober=typename super::prober;
  using arrays_type=typename super::arrays_type;
  using size_ctrl_type=typename super::size_ctrl_type;
  using locator=typename super::locator;
  using compatible_concurrent_table=
    concurrent_table<TypePolicy,Hash,Pred,Allocator>;
  /* group pointer rebound from the allocator's pointer type, so fancy
   * pointers are supported */
  using group_type_pointer=typename boost::pointer_traits<
    typename boost::allocator_pointer<Allocator>::type
  >::template rebind<group_type>;

  friend compatible_concurrent_table;

public:
  using key_type=typename super::key_type;
  using init_type=typename super::init_type;
  using value_type=typename super::value_type;
  using element_type=typename super::element_type;

private:
  /* sets (key_type==value_type) only expose const iterators */
  static constexpr bool has_mutable_iterator=
    !std::is_same<key_type,value_type>::value;

public:
  using hasher=typename super::hasher;
  using key_equal=typename super::key_equal;
  using allocator_type=typename super::allocator_type;
  using pointer=typename super::pointer;
  using const_pointer=typename super::const_pointer;
  using reference=typename super::reference;
  using const_reference=typename super::const_reference;
  using size_type=typename super::size_type;
  using difference_type=typename super::difference_type;
  using const_iterator=table_iterator<type_policy,group_type_pointer,true>;
  using iterator=typename std::conditional<
    has_mutable_iterator,
    table_iterator<type_policy,group_type_pointer,false>,
    const_iterator>::type;
  using erase_return_type=table_erase_return_type<iterator>;

  table(
    std::size_t n=default_bucket_count,const Hash& h_=Hash(),
    const Pred& pred_=Pred(),const Allocator& al_=Allocator()):
    super{n,h_,pred_,al_}
  {}

  table(const table& x)=default;
  table(table&& x)=default;
  table(const table& x,const Allocator& al_):super{x,al_}{}
  table(table&& x,const Allocator& al_):super{std::move(x),al_}{}
  /* takes over a concurrent_table's storage under its exclusive lock */
  table(compatible_concurrent_table&& x):
    table(std::move(x),x.exclusive_access()){}
  ~table()=default;

  table& operator=(const table& x)=default;
  table& operator=(table&& x)=default;
  using super::get_allocator;

  /* Not O(1): positions on the first slot and advances past it if slot 0 of
   * group 0 is not occupied (bit 0 of match_occupied()).
   */
  iterator begin()noexcept
  {
    iterator it{this->arrays.groups(),0,this->arrays.elements()};
    if(this->arrays.elements()&&
       !(this->arrays.groups()[0].match_occupied()&0x1))++it;
    return it;
  }

  const_iterator begin()const noexcept
    {return const_cast<table*>(this)->begin();}
  iterator end()noexcept{return {};}
  const_iterator end()const noexcept{return const_cast<table*>(this)->end();}
  const_iterator cbegin()const noexcept{return begin();}
  const_iterator cend()const noexcept{return end();}

  using super::empty;
  using super::size;
  using super::max_size;

  template<typename... Args>
  BOOST_FORCEINLINE std::pair<iterator,bool> emplace(Args&&... args)
  {
    alloc_cted_insert_type<type_policy,Allocator,Args...> x(
      this->al(),std::forward<Args>(args)...);
    return emplace_impl(type_policy::move(x.value()));
  }

  /* Optimization for value_type and init_type, to avoid constructing twice */
  template <typename T>
  BOOST_FORCEINLINE typename std::enable_if<
    detail::is_similar_to_any<T, value_type, init_type>::value,
    std::pair<iterator, bool> >::type
  emplace(T&& x)
  {
    return emplace_impl(std::forward<T>(x));
  }

  /* Optimizations for maps for (k,v) to avoid eagerly constructing value */
  template <typename K, typename V>
  BOOST_FORCEINLINE
    typename std::enable_if<is_emplace_kv_able<table, K>::value,
      std::pair<iterator, bool> >::type
  emplace(K&& k, V&& v)
  {
    alloc_cted_or_fwded_key_type<type_policy, Allocator, K&&> x(
      this->al(), std::forward<K>(k));
    return emplace_impl(
      try_emplace_args_t{}, x.move_or_fwd(), std::forward<V>(v));
  }

  template<typename Key,typename... Args>
  BOOST_FORCEINLINE std::pair<iterator,bool> try_emplace(
    Key&& x,Args&&... args)
  {
    return emplace_impl(
      try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
  }

  BOOST_FORCEINLINE std::pair<iterator,bool>
  insert(const init_type& x){return emplace_impl(x);}

  BOOST_FORCEINLINE std::pair<iterator,bool>
  insert(init_type&& x){return emplace_impl(std::move(x));}

  /* template<typename=void> tilts call ambiguities in favor of init_type */

  template<typename=void>
  BOOST_FORCEINLINE std::pair<iterator,bool>
  insert(const value_type& x){return emplace_impl(x);}

  template<typename=void>
  BOOST_FORCEINLINE std::pair<iterator,bool>
  insert(value_type&& x){return emplace_impl(std::move(x));}

  /* node-based containers: insert a previously extracted element */
  template<typename T=element_type>
  BOOST_FORCEINLINE typename std::enable_if<
    !std::is_same<T,value_type>::value,
    std::pair<iterator,bool>
  >::type
  insert(element_type&& x){return emplace_impl(std::move(x));}

  /* only meaningful when iterator differs from const_iterator;
   * dependent_value keeps the condition type-dependent */
  template<
    bool dependent_value=false,
    typename std::enable_if<
      has_mutable_iterator||dependent_value>::type* =nullptr
  >
  erase_return_type erase(iterator pos)noexcept
  {return erase(const_iterator(pos));}

  /* Returns a proxy convertible to the next iterator, so the successor is
   * only computed if the caller uses the result.
   */
  BOOST_FORCEINLINE
  erase_return_type erase(const_iterator pos)noexcept
  {
    super::erase(pos.pc(),pos.p());
    return {pos};
  }

  /* heterogeneous erase-by-key; SFINAE keeps iterators out of this overload */
  template<typename Key>
  BOOST_FORCEINLINE
  auto erase(Key&& x) -> typename std::enable_if<
    !std::is_convertible<Key,iterator>::value&&
    !std::is_convertible<Key,const_iterator>::value, std::size_t>::type
  {
    auto it=find(x);
    if(it!=end()){
      erase(it);
      return 1;
    }
    else return 0;
  }

  void swap(table& x)
    noexcept(noexcept(std::declval<super&>().swap(std::declval<super&>())))
  {
    super::swap(x);
  }

  using super::clear;

  /* moves the element out, then erase_on_exit erases the slot on return */
  element_type extract(const_iterator pos)
  {
    BOOST_ASSERT(pos!=end());
    erase_on_exit e{*this,pos};
    (void)e;
    return std::move(*pos.p());
  }

  // TODO: should we accept different allocator too?
  /* elements that collide with an existing key stay in x (rollback) */
  template<typename Hash2,typename Pred2>
  void merge(table<TypePolicy,Hash2,Pred2,Allocator>& x)
  {
    x.for_all_elements([&,this](group_type* pg,unsigned int n,element_type* p){
      erase_on_exit e{x,{pg,n,p}};
      if(!emplace_impl(type_policy::move(*p)).second)e.rollback();
    });
  }

  template<typename Hash2,typename Pred2>
  void merge(table<TypePolicy,Hash2,Pred2,Allocator>&& x){merge(x);}

  using super::hash_function;
  using super::key_eq;

  template<typename Key>
  BOOST_FORCEINLINE iterator find(const Key& x)
  {
    return make_iterator(super::find(x));
  }

  template<typename Key>
  BOOST_FORCEINLINE const_iterator find(const Key& x)const
  {
    return const_cast<table*>(this)->find(x);
  }

  using super::capacity;
  using super::load_factor;
  using super::max_load_factor;
  using super::max_load;
  using super::rehash;
  using super::reserve;

  template<typename Predicate>
  friend std::size_t erase_if(table& x,Predicate& pr)
  {
    /* sets pass elements to the predicate as const */
    using value_reference=typename std::conditional<
      std::is_same<key_type,value_type>::value,
      const_reference,
      reference
    >::type;

    std::size_t s=x.size();
    x.for_all_elements(
      [&](group_type* pg,unsigned int n,element_type* p){
        if(pr(const_cast<value_reference>(type_policy::value_from(*p)))){
          x.super::erase(pg,n,p);
        }
      });
    return std::size_t(s-x.size());
  }

  friend bool operator==(const table& x,const table& y)
  {
    return static_cast<const super&>(x)==static_cast<const super&>(y);
  }

  friend bool operator!=(const table& x,const table& y){return !(x==y);}

private:
  /* Steals the arrays of a locked concurrent_table and hands it the empty
   * replacement arrays in ah; x is left valid and empty.
   */
  template<typename ArraysType>
  table(compatible_concurrent_table&& x,arrays_holder<ArraysType,Allocator>&& ah):
    super{
      std::move(x.h()),std::move(x.pred()),std::move(x.al()),
      [&x]{return arrays_type{
        x.arrays.groups_size_index,x.arrays.groups_size_mask,
        to_pointer<group_type_pointer>(
          reinterpret_cast<group_type*>(x.arrays.groups())),
        x.arrays.elements_};},
      size_ctrl_type{x.size_ctrl.ml,x.size_ctrl.size}}
  {
    compatible_concurrent_table::arrays_type::delete_group_access(x.al(),x.arrays);
    x.arrays=ah.release();
    x.size_ctrl.ml=x.initial_max_load();
    x.size_ctrl.size=0;
  }

  /* the lock guard parameter only enforces that x is exclusively held */
  template<typename ExclusiveLockGuard>
  table(compatible_concurrent_table&& x,ExclusiveLockGuard):
    table(std::move(x),x.make_empty_arrays())
  {}

  /* scope guard: erases the recorded position unless rollback() is called */
  struct erase_on_exit
  {
    erase_on_exit(table& x_,const_iterator it_):x(x_),it(it_){}
    ~erase_on_exit(){if(!rollback_)x.erase(it);}

    void rollback(){rollback_=true;}

    table&         x;
    const_iterator it;
    bool           rollback_=false;
  };

  static inline iterator make_iterator(const locator& l)noexcept
  {
    return {l.pg,l.n,l.p};
  }

  /* Common insertion path: find; if absent, emplace in place when below the
   * max load, otherwise rehash first.
   */
  template<typename... Args>
  BOOST_FORCEINLINE std::pair<iterator,bool> emplace_impl(Args&&... args)
  {
    const auto &k=this->key_from(std::forward<Args>(args)...);
    auto hash=this->hash_for(k);
    auto pos0=this->position_for(hash);
    auto loc=super::find(k,pos0,hash);

    if(loc){
      return {make_iterator(loc),false};
    }
    if(BOOST_LIKELY(this->size_ctrl.size<this->size_ctrl.ml)){
      return {
        make_iterator(
          this->unchecked_emplace_at(pos0,hash,std::forward<Args>(args)...)),
        true
      };
    }
    else{
      return {
        make_iterator(
          this->unchecked_emplace_with_rehash(
            hash,std::forward<Args>(args)...)),
        true
      };
    }
  }
};

#if defined(BOOST_MSVC)
#pragma warning(pop) /* C4714 */
#endif

#include <boost/unordered/detail/foa/restore_wshadow.hpp>

} /* namespace foa */
} /* namespace detail */
} /* namespace unordered */
} /* namespace boost */

#endif
0
repos/unordered
repos/unordered/.drone/drone.sh
#!/bin/bash

# Copyright 2022 Peter Dimov
# Distributed under the Boost Software License, Version 1.0.
# https://www.boost.org/LICENSE_1_0.txt

# Drone CI driver: checks out a Boost superproject tree, copies this library
# into it, installs its dependencies and runs its tests with b2.
# Expects LIBRARY, TOOLSET, COMPILER and optionally CXXSTD/TARGET/sanitizer
# variables in the environment.

# fail on first error, echo commands
set -ex

export PATH=~/.local/bin:/usr/local/bin:$PATH

# default test target unless the pipeline overrides TARGET
: ${TARGET:="libs/$LIBRARY/test"}

DRONE_BUILD_DIR=$(pwd)

# test against the Boost branch matching the branch under test
BOOST_BRANCH=develop
if [ "$DRONE_BRANCH" = "master" ]; then BOOST_BRANCH=master; fi

cd ..
git clone -b $BOOST_BRANCH --depth 1 https://github.com/boostorg/boost.git boost-root
cd boost-root

git submodule update --init tools/boostdep

# overlay the checked-out library sources onto the superproject
cp -r $DRONE_BUILD_DIR/* libs/$LIBRARY

# fetch only the submodules this library depends on
python tools/boostdep/depinst/depinst.py $LIBRARY

./bootstrap.sh
./b2 -d0 headers

# point b2 at the requested compiler binary
echo "using $TOOLSET : : $COMPILER ;" > ~/user-config.jam

# sanitizer/address-model/stdlib options are only passed when the
# corresponding variable is set (${VAR:+...} expansion)
./b2 -j3 $TARGET toolset=$TOOLSET cxxstd=$CXXSTD variant=debug,release ${ADDRMD:+address-model=$ADDRMD} ${STDLIB:+stdlib=$STDLIB} ${UBSAN:+undefined-sanitizer=norecover debug-symbols=on} ${ASAN:+address-sanitizer=norecover debug-symbols=on} ${TSAN:+thread-sanitizer=norecover debug-symbols=on} ${LINKFLAGS:+linkflags=$LINKFLAGS}
0
repos/unordered
repos/unordered/.drone/drone.bat
@REM Copyright 2022 Peter Dimov
@REM Distributed under the Boost Software License, Version 1.0.
@REM https://www.boost.org/LICENSE_1_0.txt

@REM Windows Drone CI driver: clones the Boost superproject, overlays this
@REM library, installs dependencies and runs its tests with b2.
@REM Expects the library name as %1 and TOOLSET (plus optional CXXSTD,
@REM ADDRMD, CXXFLAGS) in the environment.

@ECHO ON

set LIBRARY=%1

set DRONE_BUILD_DIR=%CD%

@REM test against the Boost branch matching the branch under test
set BOOST_BRANCH=develop
if "%DRONE_BRANCH%" == "master" set BOOST_BRANCH=master

cd ..
git clone -b %BOOST_BRANCH% --depth 1 https://github.com/boostorg/boost.git boost-root
cd boost-root

git submodule update --init tools/boostdep

@REM overlay the checked-out library sources onto the superproject
xcopy /s /e /q %DRONE_BUILD_DIR% libs\%LIBRARY%\

@REM fetch only the submodules this library depends on
python tools/boostdep/depinst/depinst.py %LIBRARY%

cmd /c bootstrap

b2 -d0 headers

@REM turn optional variables into b2 options only when they are set
if not "%CXXSTD%" == "" set CXXSTD=cxxstd=%CXXSTD%
if not "%ADDRMD%" == "" set ADDRMD=address-model=%ADDRMD%
if not "%CXXFLAGS%" == "" set CXXFLAGS=cxxflags=%CXXFLAGS%

b2 --abbreviate-paths -j3 libs/%LIBRARY%/test toolset=%TOOLSET% %CXXSTD% %ADDRMD% %CXXFLAGS% variant=debug,release embed-manifest-via=linker
0
repos/unordered
repos/unordered/examples/case_insensitive_test.cpp
// Copyright 2006-2009 Daniel James.
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// Tests for the case-insensitive hash/equality predicates in
// case_insensitive.hpp: keys differing only in case must map to the same
// entry, and the first-inserted spelling of the key must be preserved.

#include "./case_insensitive.hpp"
#include <boost/core/lightweight_test.hpp>
#include <boost/unordered_map.hpp>

struct word_info
{
  int tag;

  explicit word_info(int t = 0) : tag(t) {}
};

// narrow-string (std::string) case-insensitivity checks
void test1()
{
  boost::unordered_map<std::string, word_info, hash_examples::ihash,
    hash_examples::iequal_to>
    idictionary;

  BOOST_TEST(idictionary.empty());

  idictionary["one"] = word_info(1);
  BOOST_TEST(idictionary.size() == 1);
  // "ONE" and "one" must resolve to the same entry
  BOOST_TEST(idictionary.find("ONE") != idictionary.end() &&
             idictionary.find("ONE") == idictionary.find("one"));

  // insert() must not replace the existing equivalent key
  idictionary.insert(std::make_pair("ONE", word_info(2)));
  BOOST_TEST(idictionary.size() == 1);
  BOOST_TEST(idictionary.find("ONE") != idictionary.end() &&
             idictionary.find("ONE")->first == "one" &&
             idictionary.find("ONE")->second.tag == 1);

  // operator[] assignment updates the value but keeps the original key
  idictionary["One"] = word_info(3);
  BOOST_TEST(idictionary.size() == 1);
  BOOST_TEST(idictionary.find("ONE") != idictionary.end() &&
             idictionary.find("ONE")->first == "one" &&
             idictionary.find("ONE")->second.tag == 3);

  idictionary["two"] = word_info(4);
  BOOST_TEST(idictionary.size() == 2);
  BOOST_TEST(idictionary.find("two") != idictionary.end() &&
             idictionary.find("TWO")->first == "two" &&
             idictionary.find("Two")->second.tag == 4);
}

// same checks with wide strings (std::wstring)
void test2()
{
  boost::unordered_map<std::wstring, word_info, hash_examples::ihash,
    hash_examples::iequal_to>
    idictionary;

  BOOST_TEST(idictionary.empty());

  idictionary[L"one"] = word_info(1);
  BOOST_TEST(idictionary.size() == 1);
  BOOST_TEST(idictionary.find(L"ONE") != idictionary.end() &&
             idictionary.find(L"ONE") == idictionary.find(L"one"));

  idictionary.insert(std::make_pair(L"ONE", word_info(2)));
  BOOST_TEST(idictionary.size() == 1);
  BOOST_TEST(idictionary.find(L"ONE") != idictionary.end() &&
             idictionary.find(L"ONE")->first == L"one" &&
             idictionary.find(L"ONE")->second.tag == 1);

  idictionary[L"One"] = word_info(3);
  BOOST_TEST(idictionary.size() == 1);
  BOOST_TEST(idictionary.find(L"ONE") != idictionary.end() &&
             idictionary.find(L"ONE")->first == L"one" &&
             idictionary.find(L"ONE")->second.tag == 3);

  idictionary[L"two"] = word_info(4);
  BOOST_TEST(idictionary.size() == 2);
  BOOST_TEST(idictionary.find(L"two") != idictionary.end() &&
             idictionary.find(L"TWO")->first == L"two" &&
             idictionary.find(L"Two")->second.tag == 4);
}

int main()
{
  test1();
  test2();
  return boost::report_errors();
}
0
repos/unordered
repos/unordered/examples/fnv1.hpp
// Copyright 2008-2009 Daniel James.
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// This code is also released into the public domain.

// Algorithm from: http://www.isthe.com/chongo/tech/comp/fnv/

// Include guard added: this is a header and may be included from more than
// one translation unit of the examples.
#if !defined(BOOST_HASH_EXAMPLES_FNV1_HEADER)
#define BOOST_HASH_EXAMPLES_FNV1_HEADER

#include <cstddef>
#include <string>

namespace hash
{
    // FNV-1: for each octet, hash = (hash * prime) ^ octet, starting from
    // the offset basis. Parameterized on the prime/basis so the same code
    // serves the 32/64/... bit variants listed below.
    template <std::size_t FnvPrime, std::size_t OffsetBasis>
    struct basic_fnv_1
    {
        // Returns the FNV-1 hash of `text`. An empty string hashes to
        // OffsetBasis.
        std::size_t operator()(std::string const& text) const
        {
            std::size_t hash = OffsetBasis;
            for(std::string::const_iterator it = text.begin(), end = text.end();
                    it != end; ++it)
            {
                hash *= FnvPrime;
                // Mix in the raw octet. The cast matters: plain `char` may be
                // signed, and sign extension would XOR a large all-ones mask
                // into the hash for bytes >= 0x80, diverging from the
                // reference FNV algorithm.
                hash ^= static_cast<unsigned char>(*it);
            }

            return hash;
        }
    };

    // FNV-1a: same as FNV-1 with the multiply and xor swapped, which gives
    // better avalanche behavior on short inputs.
    template <std::size_t FnvPrime, std::size_t OffsetBasis>
    struct basic_fnv_1a
    {
        // Returns the FNV-1a hash of `text`. An empty string hashes to
        // OffsetBasis.
        std::size_t operator()(std::string const& text) const
        {
            std::size_t hash = OffsetBasis;
            for(std::string::const_iterator it = text.begin(), end = text.end();
                    it != end; ++it)
            {
                // see note in basic_fnv_1 about the unsigned char cast
                hash ^= static_cast<unsigned char>(*it);
                hash *= FnvPrime;
            }

            return hash;
        }
    };

    // For 32 bit machines:
    const std::size_t fnv_prime = 16777619u;
    const std::size_t fnv_offset_basis = 2166136261u;

    // For 64 bit machines:
    // const std::size_t fnv_prime = 1099511628211u;
    // const std::size_t fnv_offset_basis = 14695981039346656037u;

    // For 128 bit machines:
    // const std::size_t fnv_prime = 309485009821345068724781401u;
    // const std::size_t fnv_offset_basis =
    //     275519064689413815358837431229664493455u;

    // For 256 bit machines:
    // const std::size_t fnv_prime =
    //     374144419156711147060143317175368453031918731002211u;
    // const std::size_t fnv_offset_basis =
    //     100029257958052580907070968620625704837092796014241193945225284501741471925557u;

    typedef basic_fnv_1<fnv_prime, fnv_offset_basis> fnv_1;
    typedef basic_fnv_1a<fnv_prime, fnv_offset_basis> fnv_1a;
}

#endif
0
repos/unordered
repos/unordered/examples/case_insensitive.hpp
// Copyright 2006-2009 Daniel James. // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // This file implements a locale aware case insenstive equality predicate and // hash function. Unfortunately it still falls short of full // internationalization as it only deals with a single character at a time // (some languages have tricky cases where the characters in an upper case // string don't have a one-to-one correspondence with the lower case version of // the text, eg. ) #if !defined(BOOST_HASH_EXAMPLES_CASE_INSENSITIVE_HEADER) #define BOOST_HASH_EXAMPLES_CASE_INSENSITIVE_HEADER #include <boost/algorithm/string/predicate.hpp> #include <boost/container_hash/hash.hpp> namespace hash_examples { struct iequal_to { iequal_to() {} explicit iequal_to(std::locale const& l) : locale_(l) {} template <typename String1, typename String2> bool operator()(String1 const& x1, String2 const& x2) const { return boost::algorithm::iequals(x1, x2, locale_); } private: std::locale locale_; }; struct ihash { ihash() {} explicit ihash(std::locale const& l) : locale_(l) {} template <typename String> std::size_t operator()(String const& x) const { std::size_t seed = 0; for(typename String::const_iterator it = x.begin(); it != x.end(); ++it) { boost::hash_combine(seed, std::toupper(*it, locale_)); } return seed; } private: std::locale locale_; }; } #endif
0
repos/unordered
repos/unordered/benchmark/uint32.cpp
// Copyright 2021 Peter Dimov.
// Copyright 2023 Joaquin M Lopez Munoz.
// Distributed under the Boost Software License, Version 1.0.
// https://www.boost.org/LICENSE_1_0.txt

// Benchmark: insert/lookup/iterate/erase of uint32->uint32 pairs over
// several map implementations, with three key patterns (consecutive,
// random, byte-reversed consecutive) and a counting allocator to report
// memory usage.

#define _SILENCE_CXX17_OLD_ALLOCATOR_MEMBERS_DEPRECATION_WARNING
#define _SILENCE_CXX20_CISO646_REMOVED_WARNING

#include <boost/unordered_map.hpp>
#include <boost/unordered/unordered_node_map.hpp>
#include <boost/unordered/unordered_flat_map.hpp>
#include <boost/endian/conversion.hpp>
#include <boost/core/detail/splitmix64.hpp>
#include <boost/config.hpp>
#ifdef HAVE_ABSEIL
# include "absl/container/node_hash_map.h"
# include "absl/container/flat_hash_map.h"
#endif
#ifdef HAVE_ANKERL_UNORDERED_DENSE
# include "ankerl/unordered_dense.h"
#endif
#include <unordered_map>
#include <vector>
#include <memory>
#include <cstdint>
#include <iostream>
#include <iomanip>
#include <chrono>

using namespace std::chrono_literals;

// prints elapsed time since t1 and resets t1 to now
static void print_time( std::chrono::steady_clock::time_point & t1,
    char const* label, std::uint32_t s, std::size_t size )
{
    auto t2 = std::chrono::steady_clock::now();
    std::cout << label << ": " << ( t2 - t1 ) / 1ms << " ms (s=" << s
        << ", size=" << size << ")\n";
    t1 = t2;
}

constexpr unsigned N = 2'000'000;
constexpr int K = 10;

// key sequences: consecutive, random, byte-reversed consecutive
static std::vector< std::uint32_t > indices1, indices2, indices3;

static void init_indices()
{
    indices1.push_back( 0 );
    for( unsigned i = 1; i <= N*2; ++i ) { indices1.push_back( i ); }

    indices2.push_back( 0 );
    {
        boost::detail::splitmix64 rng;
        for( unsigned i = 1; i <= N*2; ++i )
        {
            indices2.push_back( static_cast<std::uint32_t>( rng() ) );
        }
    }

    indices3.push_back( 0 );
    for( unsigned i = 1; i <= N*2; ++i )
    {
        indices3.push_back(
            boost::endian::endian_reverse( static_cast<std::uint32_t>( i ) ) );
    }
}

// inserts the first N keys of each pattern
template<class Map> BOOST_NOINLINE void test_insert( Map& map,
    std::chrono::steady_clock::time_point & t1 )
{
    for( unsigned i = 1; i <= N; ++i ) { map.insert( { indices1[ i ], i } ); }
    print_time( t1, "Consecutive insert", 0, map.size() );

    for( unsigned i = 1; i <= N; ++i ) { map.insert( { indices2[ i ], i } ); }
    print_time( t1, "Random insert", 0, map.size() );

    for( unsigned i = 1; i <= N; ++i ) { map.insert( { indices3[ i ], i } ); }
    print_time( t1, "Consecutive reversed insert", 0, map.size() );

    std::cout << std::endl;
}

// looks up 2N keys of each pattern, K times; about half miss
template<class Map> BOOST_NOINLINE void test_lookup( Map& map,
    std::chrono::steady_clock::time_point & t1 )
{
    std::uint32_t s;

    s = 0;
    for( int j = 0; j < K; ++j )
    {
        for( unsigned i = 1; i <= N * 2; ++i )
        {
            auto it = map.find( indices1[ i ] );
            if( it != map.end() ) s += it->second;
        }
    }
    print_time( t1, "Consecutive lookup", s, map.size() );

    s = 0;
    for( int j = 0; j < K; ++j )
    {
        for( unsigned i = 1; i <= N * 2; ++i )
        {
            auto it = map.find( indices2[ i ] );
            if( it != map.end() ) s += it->second;
        }
    }
    print_time( t1, "Random lookup", s, map.size() );

    s = 0;
    for( int j = 0; j < K; ++j )
    {
        for( unsigned i = 1; i <= N * 2; ++i )
        {
            auto it = map.find( indices3[ i ] );
            if( it != map.end() ) s += it->second;
        }
    }
    print_time( t1, "Consecutive reversed lookup", s, map.size() );

    std::cout << std::endl;
}

// erases odd-valued elements while iterating; handles both void-returning
// and iterator-returning erase(it) signatures
template<class Map> BOOST_NOINLINE void test_iteration( Map& map,
    std::chrono::steady_clock::time_point & t1 )
{
    auto it = map.begin();

    while( it != map.end() )
    {
        if( it->second & 1 )
        {
            if constexpr( std::is_void_v< decltype( map.erase( it ) ) > )
            {
                map.erase( it++ );
            }
            else
            {
                it = map.erase( it );
            }
        }
        else
        {
            ++it;
        }
    }

    print_time( t1, "Iterate and erase odd elements", 0, map.size() );

    std::cout << std::endl;
}

// erases by key, N keys of each pattern
template<class Map> BOOST_NOINLINE void test_erase( Map& map,
    std::chrono::steady_clock::time_point & t1 )
{
    for( unsigned i = 1; i <= N; ++i ) { map.erase( indices1[ i ] ); }
    print_time( t1, "Consecutive erase", 0, map.size() );

    for( unsigned i = 1; i <= N; ++i ) { map.erase( indices2[ i ] ); }
    print_time( t1, "Random erase", 0, map.size() );

    for( unsigned i = 1; i <= N; ++i ) { map.erase( indices3[ i ] ); }
    print_time( t1, "Consecutive reversed erase", 0, map.size() );

    std::cout << std::endl;
}

// counting allocator

static std::size_t s_alloc_bytes = 0;
static std::size_t s_alloc_count = 0;

// stateless allocator that tracks live bytes/allocations in the globals above
template<class T> struct allocator
{
    using value_type = T;

    allocator() = default;

    template<class U> allocator( allocator<U> const & ) noexcept
    {
    }

    template<class U> bool operator==( allocator<U> const & ) const noexcept
    {
        return true;
    }

    template<class U> bool operator!=( allocator<U> const& ) const noexcept
    {
        return false;
    }

    T* allocate( std::size_t n ) const
    {
        s_alloc_bytes += n * sizeof(T);
        s_alloc_count++;
        return std::allocator<T>().allocate( n );
    }

    void deallocate( T* p, std::size_t n ) const noexcept
    {
        s_alloc_bytes -= n * sizeof(T);
        s_alloc_count--;
        std::allocator<T>().deallocate( p, n );
    }
};

//

// one result row for the final summary table
struct record
{
    std::string label_;
    long long time_;
    std::size_t bytes_;
    std::size_t count_;
};

static std::vector<record> times;

// runs the full benchmark sequence for one map type and records the totals
template<template<class...> class Map> BOOST_NOINLINE void test( char const* label )
{
    std::cout << label << ":\n\n";

    s_alloc_bytes = 0;
    s_alloc_count = 0;

    Map<std::uint32_t, std::uint32_t> map;

    auto t0 = std::chrono::steady_clock::now();
    auto t1 = t0;

    test_insert( map, t1 );

    std::cout << "Memory: " << s_alloc_bytes << " bytes in " << s_alloc_count
        << " allocations\n\n";

    record rec = { label, 0, s_alloc_bytes, s_alloc_count };

    test_lookup( map, t1 );
    test_iteration( map, t1 );
    test_lookup( map, t1 );
    test_erase( map, t1 );

    auto tN = std::chrono::steady_clock::now();
    std::cout << "Total: " << ( tN - t0 ) / 1ms << " ms\n\n";

    rec.time_ = ( tN - t0 ) / 1ms;
    times.push_back( rec );
}

// aliases using the counting allocator

template<class K, class V> using allocator_for = ::allocator< std::pair<K const, V> >;

template<class K, class V> using std_unordered_map =
    std::unordered_map<K, V, std::hash<K>, std::equal_to<K>, allocator_for<K, V>>;

template<class K, class V> using boost_unordered_map =
    boost::unordered_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>;

template<class K, class V> using boost_unordered_node_map =
    boost::unordered_node_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>;

template<class K, class V> using boost_unordered_flat_map =
    boost::unordered_flat_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>;

#ifdef HAVE_ABSEIL

template<class K, class V> using absl_node_hash_map =
    absl::node_hash_map<K, V, absl::container_internal::hash_default_hash<K>,
    absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>;

template<class K, class V> using absl_flat_hash_map =
    absl::flat_hash_map<K, V, absl::container_internal::hash_default_hash<K>,
    absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>;

#endif

#ifdef HAVE_ANKERL_UNORDERED_DENSE

template<class K, class V> using ankerl_unordered_dense_map =
    ankerl::unordered_dense::map<K, V, ankerl::unordered_dense::hash<K>,
    std::equal_to<K>, ::allocator< std::pair<K, V> >>;

#endif

int main()
{
    init_indices();

    test<std_unordered_map>( "std::unordered_map" );
    test<boost_unordered_map>( "boost::unordered_map" );
    test<boost_unordered_node_map>( "boost::unordered_node_map" );
    test<boost_unordered_flat_map>( "boost::unordered_flat_map" );

#ifdef HAVE_ANKERL_UNORDERED_DENSE
    test<ankerl_unordered_dense_map>( "ankerl::unordered_dense::map" );
#endif

#ifdef HAVE_ABSEIL
    test<absl_node_hash_map>( "absl::node_hash_map" );
    test<absl_flat_hash_map>( "absl::flat_hash_map" );
#endif

    std::cout << "---\n\n";

    // summary table
    for( auto const& x: times )
    {
        std::cout << std::setw( 30 ) << ( x.label_ + ": " ) << std::setw( 5 )
            << x.time_ << " ms, " << std::setw( 9 ) << x.bytes_ << " bytes in "
            << x.count_ << " allocations\n";
    }
}

#ifdef HAVE_ABSEIL
# include "absl/container/internal/raw_hash_set.cc"
# include "absl/hash/internal/hash.cc"
# include "absl/hash/internal/low_level_hash.cc"
# include "absl/hash/internal/city.cc"
#endif
0
repos/unordered
repos/unordered/benchmark/word_size.cpp
// Copyright 2021, 2022 Peter Dimov.
// Copyright 2023 Joaquin M Lopez Munoz.
// Distributed under the Boost Software License, Version 1.0.
// https://www.boost.org/LICENSE_1_0.txt

// Benchmark: counts word lengths of an enwik8/enwik9 text corpus in a
// size_t->size_t map, over several map implementations, with a counting
// allocator to report memory usage.

#define _SILENCE_CXX17_OLD_ALLOCATOR_MEMBERS_DEPRECATION_WARNING
#define _SILENCE_CXX20_CISO646_REMOVED_WARNING

#include <boost/unordered_map.hpp>
#include <boost/unordered/unordered_node_map.hpp>
#include <boost/unordered/unordered_flat_map.hpp>
#include <boost/regex.hpp>
#ifdef HAVE_ABSEIL
# include "absl/container/node_hash_map.h"
# include "absl/container/flat_hash_map.h"
#endif
#ifdef HAVE_ANKERL_UNORDERED_DENSE
# include "ankerl/unordered_dense.h"
#endif
#include <unordered_map>
#include <vector>
#include <memory>
#include <cstdint>
#include <iostream>
#include <iomanip>
#include <chrono>
#include <fstream>

using namespace std::chrono_literals;

// prints elapsed time since t1 and resets t1 to now
static void print_time( std::chrono::steady_clock::time_point & t1,
    char const* label, std::size_t s, std::size_t size )
{
    auto t2 = std::chrono::steady_clock::now();
    std::cout << label << ": " << ( t2 - t1 ) / 1ms << " ms (s=" << s
        << ", size=" << size << ")\n";
    t1 = t2;
}

static std::vector<std::string> words;

// reads the corpus from the current directory and tokenizes it into words
static void init_words()
{
#if SIZE_MAX > UINT32_MAX
    char const* fn = "enwik9"; // http://mattmahoney.net/dc/textdata
#else
    char const* fn = "enwik8"; // ditto
#endif

    auto t1 = std::chrono::steady_clock::now();

    std::ifstream is( fn );
    std::string in( std::istreambuf_iterator<char>( is ),
        std::istreambuf_iterator<char>{} );

    boost::regex re( "[a-zA-Z]+");
    boost::sregex_token_iterator it( in.begin(), in.end(), re, 0 ), end;

    words.assign( it, end );

    auto t2 = std::chrono::steady_clock::now();

    std::cout << fn << ": " << words.size() << " words, "
        << ( t2 - t1 ) / 1ms << " ms\n\n";
}

// histogram of word lengths via operator[]
template<class Map> BOOST_NOINLINE void test_word_size( Map& map,
    std::chrono::steady_clock::time_point & t1 )
{
    for( auto const& word: words )
    {
        ++map[ word.size() ];
    }

    print_time( t1, "Word size count", 0, map.size() );

    std::cout << std::endl;
}

// full traversal summing the counts
template<class Map> BOOST_NOINLINE void test_iteration( Map& map,
    std::chrono::steady_clock::time_point & t1 )
{
    std::size_t s = 0;

    for( auto const& x: map )
    {
        s += x.second;
    }

    print_time( t1, "Iterate and sum counts", s, map.size() );

    std::cout << std::endl;
}

// counting allocator

static std::size_t s_alloc_bytes = 0;
static std::size_t s_alloc_count = 0;

// stateless allocator that tracks live bytes/allocations in the globals above
template<class T> struct allocator
{
    using value_type = T;

    allocator() = default;

    template<class U> allocator( allocator<U> const & ) noexcept
    {
    }

    template<class U> bool operator==( allocator<U> const & ) const noexcept
    {
        return true;
    }

    template<class U> bool operator!=( allocator<U> const& ) const noexcept
    {
        return false;
    }

    T* allocate( std::size_t n ) const
    {
        s_alloc_bytes += n * sizeof(T);
        s_alloc_count++;
        return std::allocator<T>().allocate( n );
    }

    void deallocate( T* p, std::size_t n ) const noexcept
    {
        s_alloc_bytes -= n * sizeof(T);
        s_alloc_count--;
        std::allocator<T>().deallocate( p, n );
    }
};

//

// one result row for the final summary table
struct record
{
    std::string label_;
    long long time_;
    std::size_t bytes_;
    std::size_t count_;
};

static std::vector<record> times;

// runs the benchmark sequence for one map type and records the totals
template<template<class...> class Map> BOOST_NOINLINE void test( char const* label )
{
    std::cout << label << ":\n\n";

    s_alloc_bytes = 0;
    s_alloc_count = 0;

    Map<std::size_t, std::size_t> map;

    auto t0 = std::chrono::steady_clock::now();
    auto t1 = t0;

    test_word_size( map, t1 );

    std::cout << "Memory: " << s_alloc_bytes << " bytes in " << s_alloc_count
        << " allocations\n\n";

    record rec = { label, 0, s_alloc_bytes, s_alloc_count };

    test_iteration( map, t1 );

    auto tN = std::chrono::steady_clock::now();
    std::cout << "Total: " << ( tN - t0 ) / 1ms << " ms\n\n";

    rec.time_ = ( tN - t0 ) / 1ms;
    times.push_back( rec );
}

// aliases using the counting allocator

template<class K, class V> using allocator_for = ::allocator< std::pair<K const, V> >;

template<class K, class V> using std_unordered_map =
    std::unordered_map<K, V, std::hash<K>, std::equal_to<K>, allocator_for<K, V>>;

template<class K, class V> using boost_unordered_map =
    boost::unordered_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>;

template<class K, class V> using boost_unordered_node_map =
    boost::unordered_node_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>;

template<class K, class V> using boost_unordered_flat_map =
    boost::unordered_flat_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>;

#ifdef HAVE_ABSEIL

template<class K, class V> using absl_node_hash_map =
    absl::node_hash_map<K, V, absl::container_internal::hash_default_hash<K>,
    absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>;

template<class K, class V> using absl_flat_hash_map =
    absl::flat_hash_map<K, V, absl::container_internal::hash_default_hash<K>,
    absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>;

#endif

#ifdef HAVE_ANKERL_UNORDERED_DENSE

template<class K, class V> using ankerl_unordered_dense_map =
    ankerl::unordered_dense::map<K, V, ankerl::unordered_dense::hash<K>,
    std::equal_to<K>, ::allocator< std::pair<K, V> >>;

#endif

int main()
{
    init_words();

    test<std_unordered_map>( "std::unordered_map" );
    test<boost_unordered_map>( "boost::unordered_map" );
    test<boost_unordered_node_map>( "boost::unordered_node_map" );
    test<boost_unordered_flat_map>( "boost::unordered_flat_map" );

#ifdef HAVE_ANKERL_UNORDERED_DENSE
    test<ankerl_unordered_dense_map>( "ankerl::unordered_dense::map" );
#endif

#ifdef HAVE_ABSEIL
    test<absl_node_hash_map>( "absl::node_hash_map" );
    test<absl_flat_hash_map>( "absl::flat_hash_map" );
#endif

    std::cout << "---\n\n";

    // summary table
    for( auto const& x: times )
    {
        std::cout << std::setw( 30 ) << ( x.label_ + ": " ) << std::setw( 5 )
            << x.time_ << " ms, " << std::setw( 9 ) << x.bytes_ << " bytes in "
            << x.count_ << " allocations\n";
    }
}

#ifdef HAVE_ABSEIL
# include "absl/container/internal/raw_hash_set.cc"
# include "absl/hash/internal/hash.cc"
# include "absl/hash/internal/low_level_hash.cc"
# include "absl/hash/internal/city.cc"
#endif
0
repos/unordered
repos/unordered/benchmark/string_view.cpp
// Copyright 2021 Peter Dimov. // Copyright 2023 Joaquin M Lopez Munoz. // Distributed under the Boost Software License, Version 1.0. // https://www.boost.org/LICENSE_1_0.txt #define _SILENCE_CXX17_OLD_ALLOCATOR_MEMBERS_DEPRECATION_WARNING #define _SILENCE_CXX20_CISO646_REMOVED_WARNING #include <boost/unordered_map.hpp> #include <boost/unordered/unordered_node_map.hpp> #include <boost/unordered/unordered_flat_map.hpp> #include <boost/core/detail/splitmix64.hpp> #include <boost/config.hpp> #ifdef HAVE_ABSEIL # include "absl/container/node_hash_map.h" # include "absl/container/flat_hash_map.h" #endif #ifdef HAVE_ANKERL_UNORDERED_DENSE # include "ankerl/unordered_dense.h" #endif #include <unordered_map> #include <string_view> #include <vector> #include <memory> #include <cstdint> #include <iostream> #include <iomanip> #include <chrono> using namespace std::chrono_literals; static void print_time( std::chrono::steady_clock::time_point & t1, char const* label, std::uint32_t s, std::size_t size ) { auto t2 = std::chrono::steady_clock::now(); std::cout << label << ": " << ( t2 - t1 ) / 1ms << " ms (s=" << s << ", size=" << size << ")\n"; t1 = t2; } constexpr unsigned N = 2'000'000; constexpr int K = 10; static std::vector<std::string> indices1, indices2; static std::string make_index( unsigned x ) { char buffer[ 64 ]; std::snprintf( buffer, sizeof(buffer), "pfx_%u_sfx", x ); return buffer; } static std::string make_random_index( unsigned x ) { char buffer[ 64 ]; std::snprintf( buffer, sizeof(buffer), "pfx_%0*d_%u_sfx", x % 8 + 1, 0, x ); return buffer; } static void init_indices() { indices1.reserve( N*2+1 ); indices1.push_back( make_index( 0 ) ); for( unsigned i = 1; i <= N*2; ++i ) { indices1.push_back( make_index( i ) ); } indices2.reserve( N*2+1 ); indices2.push_back( make_index( 0 ) ); { boost::detail::splitmix64 rng; for( unsigned i = 1; i <= N*2; ++i ) { indices2.push_back( make_random_index( static_cast<std::uint32_t>( rng() ) ) ); } } } template<class Map> 
BOOST_NOINLINE void test_insert( Map& map, std::chrono::steady_clock::time_point & t1 ) { for( unsigned i = 1; i <= N; ++i ) { map.insert( { indices1[ i ], i } ); } print_time( t1, "Consecutive insert", 0, map.size() ); for( unsigned i = 1; i <= N; ++i ) { map.insert( { indices2[ i ], i } ); } print_time( t1, "Random insert", 0, map.size() ); std::cout << std::endl; } template<class Map> BOOST_NOINLINE void test_lookup( Map& map, std::chrono::steady_clock::time_point & t1 ) { std::uint32_t s; s = 0; for( int j = 0; j < K; ++j ) { for( unsigned i = 1; i <= N * 2; ++i ) { auto it = map.find( indices1[ i ] ); if( it != map.end() ) s += it->second; } } print_time( t1, "Consecutive lookup", s, map.size() ); s = 0; for( int j = 0; j < K; ++j ) { for( unsigned i = 1; i <= N * 2; ++i ) { auto it = map.find( indices2[ i ] ); if( it != map.end() ) s += it->second; } } print_time( t1, "Random lookup", s, map.size() ); std::cout << std::endl; } template<class Map> BOOST_NOINLINE void test_iteration( Map& map, std::chrono::steady_clock::time_point & t1 ) { auto it = map.begin(); while( it != map.end() ) { if( it->second & 1 ) { if constexpr( std::is_void_v< decltype( map.erase( it ) ) > ) { map.erase( it++ ); } else { it = map.erase( it ); } } else { ++it; } } print_time( t1, "Iterate and erase odd elements", 0, map.size() ); std::cout << std::endl; } template<class Map> BOOST_NOINLINE void test_erase( Map& map, std::chrono::steady_clock::time_point & t1 ) { for( unsigned i = 1; i <= N; ++i ) { map.erase( indices1[ i ] ); } print_time( t1, "Consecutive erase", 0, map.size() ); for( unsigned i = 1; i <= N; ++i ) { map.erase( indices2[ i ] ); } print_time( t1, "Random erase", 0, map.size() ); std::cout << std::endl; } // counting allocator static std::size_t s_alloc_bytes = 0; static std::size_t s_alloc_count = 0; template<class T> struct allocator { using value_type = T; allocator() = default; template<class U> allocator( allocator<U> const & ) noexcept { } template<class U> 
bool operator==( allocator<U> const & ) const noexcept { return true; } template<class U> bool operator!=( allocator<U> const& ) const noexcept { return false; } T* allocate( std::size_t n ) const { s_alloc_bytes += n * sizeof(T); s_alloc_count++; return std::allocator<T>().allocate( n ); } void deallocate( T* p, std::size_t n ) const noexcept { s_alloc_bytes -= n * sizeof(T); s_alloc_count--; std::allocator<T>().deallocate( p, n ); } }; // struct record { std::string label_; long long time_; std::size_t bytes_; std::size_t count_; }; static std::vector<record> times; template<template<class...> class Map> BOOST_NOINLINE void test( char const* label ) { std::cout << label << ":\n\n"; s_alloc_bytes = 0; s_alloc_count = 0; Map<std::string_view, std::uint32_t> map; auto t0 = std::chrono::steady_clock::now(); auto t1 = t0; test_insert( map, t1 ); std::cout << "Memory: " << s_alloc_bytes << " bytes in " << s_alloc_count << " allocations\n\n"; record rec = { label, 0, s_alloc_bytes, s_alloc_count }; test_lookup( map, t1 ); test_iteration( map, t1 ); test_lookup( map, t1 ); test_erase( map, t1 ); auto tN = std::chrono::steady_clock::now(); std::cout << "Total: " << ( tN - t0 ) / 1ms << " ms\n\n"; rec.time_ = ( tN - t0 ) / 1ms; times.push_back( rec ); } // aliases using the counting allocator template<class K, class V> using allocator_for = ::allocator< std::pair<K const, V> >; template<class K, class V> using std_unordered_map = std::unordered_map<K, V, std::hash<K>, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_map = boost::unordered_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_node_map = boost::unordered_node_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_flat_map = boost::unordered_flat_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>; #ifdef HAVE_ABSEIL template<class K, 
class V> using absl_node_hash_map = absl::node_hash_map<K, V, absl::container_internal::hash_default_hash<K>, absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>; template<class K, class V> using absl_flat_hash_map = absl::flat_hash_map<K, V, absl::container_internal::hash_default_hash<K>, absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>; #endif #ifdef HAVE_ANKERL_UNORDERED_DENSE template<class K, class V> using ankerl_unordered_dense_map = ankerl::unordered_dense::map<K, V, ankerl::unordered_dense::hash<K>, std::equal_to<K>, ::allocator< std::pair<K, V> >>; #endif // fnv1a_hash template<int Bits> struct fnv1a_hash_impl; template<> struct fnv1a_hash_impl<32> { std::size_t operator()( std::string_view const& s ) const { std::size_t h = 0x811C9DC5u; char const * first = s.data(); char const * last = first + s.size(); for( ; first != last; ++first ) { h ^= static_cast<unsigned char>( *first ); h *= 0x01000193ul; } return h; } }; template<> struct fnv1a_hash_impl<64> { std::size_t operator()( std::string_view const& s ) const { std::size_t h = 0xCBF29CE484222325ull; char const * first = s.data(); char const * last = first + s.size(); for( ; first != last; ++first ) { h ^= static_cast<unsigned char>( *first ); h *= 0x00000100000001B3ull; } return h; } }; struct fnv1a_hash: fnv1a_hash_impl< std::numeric_limits<std::size_t>::digits > { using is_avalanching = void; }; template<class K, class V> using std_unordered_map_fnv1a = std::unordered_map<K, V, fnv1a_hash, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_map_fnv1a = boost::unordered_map<K, V, fnv1a_hash, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_node_map_fnv1a = boost::unordered_node_map<K, V, fnv1a_hash, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_flat_map_fnv1a = boost::unordered_flat_map<K, V, fnv1a_hash, std::equal_to<K>, allocator_for<K, V>>; #ifdef 
HAVE_ABSEIL template<class K, class V> using absl_node_hash_map_fnv1a = absl::node_hash_map<K, V, fnv1a_hash, absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>; template<class K, class V> using absl_flat_hash_map_fnv1a = absl::flat_hash_map<K, V, fnv1a_hash, absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>; #endif #ifdef HAVE_ANKERL_UNORDERED_DENSE template<class K, class V> using ankerl_unordered_dense_map_fnv1a = ankerl::unordered_dense::map<K, V, fnv1a_hash, std::equal_to<K>, ::allocator< std::pair<K, V> >>; #endif // int main() { init_indices(); test<std_unordered_map>( "std::unordered_map" ); test<boost_unordered_map>( "boost::unordered_map" ); test<boost_unordered_node_map>( "boost::unordered_node_map" ); test<boost_unordered_flat_map>( "boost::unordered_flat_map" ); #ifdef HAVE_ANKERL_UNORDERED_DENSE test<ankerl_unordered_dense_map>( "ankerl::unordered_dense::map" ); #endif #ifdef HAVE_ABSEIL test<absl_node_hash_map>( "absl::node_hash_map" ); test<absl_flat_hash_map>( "absl::flat_hash_map" ); #endif test<std_unordered_map_fnv1a>( "std::unordered_map, FNV-1a" ); test<boost_unordered_map_fnv1a>( "boost::unordered_map, FNV-1a" ); test<boost_unordered_node_map_fnv1a>( "boost::unordered_node_map, FNV-1a" ); test<boost_unordered_flat_map_fnv1a>( "boost::unordered_flat_map, FNV-1a" ); #ifdef HAVE_ANKERL_UNORDERED_DENSE test<ankerl_unordered_dense_map_fnv1a>( "ankerl::unordered_dense::map, FNV-1a" ); #endif #ifdef HAVE_ABSEIL test<absl_node_hash_map_fnv1a>( "absl::node_hash_map, FNV-1a" ); test<absl_flat_hash_map_fnv1a>( "absl::flat_hash_map, FNV-1a" ); #endif std::cout << "---\n\n"; for( auto const& x: times ) { std::cout << std::setw( 38 ) << ( x.label_ + ": " ) << std::setw( 5 ) << x.time_ << " ms, " << std::setw( 9 ) << x.bytes_ << " bytes in " << x.count_ << " allocations\n"; } } #ifdef HAVE_ABSEIL # include "absl/container/internal/raw_hash_set.cc" # include "absl/hash/internal/hash.cc" # include 
"absl/hash/internal/low_level_hash.cc" # include "absl/hash/internal/city.cc" #endif
0
repos/unordered
repos/unordered/benchmark/uuid.cpp
// Copyright 2021, 2022 Peter Dimov. // Copyright 2023 Joaquin M Lopez Munoz. // Distributed under the Boost Software License, Version 1.0. // https://www.boost.org/LICENSE_1_0.txt #define _SILENCE_CXX17_OLD_ALLOCATOR_MEMBERS_DEPRECATION_WARNING #define _SILENCE_CXX20_CISO646_REMOVED_WARNING #include <boost/unordered_map.hpp> #include <boost/unordered/unordered_node_map.hpp> #include <boost/unordered/unordered_flat_map.hpp> #include <boost/endian/conversion.hpp> #include <boost/core/detail/splitmix64.hpp> #include <boost/container_hash/hash.hpp> #include <boost/config.hpp> #ifdef HAVE_ABSEIL # include "absl/container/node_hash_map.h" # include "absl/container/flat_hash_map.h" #endif #ifdef HAVE_ANKERL_UNORDERED_DENSE # include "ankerl/unordered_dense.h" #endif #include <unordered_map> #include <vector> #include <memory> #include <cstdint> #include <iostream> #include <iomanip> #include <chrono> #include <cstring> using namespace std::chrono_literals; static void print_time( std::chrono::steady_clock::time_point & t1, char const* label, std::uint64_t s, std::size_t size ) { auto t2 = std::chrono::steady_clock::now(); std::cout << label << ": " << ( t2 - t1 ) / 1ms << " ms (s=" << s << ", size=" << size << ")\n"; t1 = t2; } constexpr unsigned N = 2'000'000; constexpr int K = 10; struct uuid { unsigned char data[ 16 ]; uuid(): data() { } uuid( std::uint64_t low, std::uint64_t high ) noexcept { boost::endian::store_little_u64( data + 0, low ); boost::endian::store_little_u64( data + 8, high ); } inline friend std::size_t hash_value( uuid const& u ) noexcept { std::uint64_t low = boost::endian::load_little_u64( u.data + 0 ); std::uint64_t high = boost::endian::load_little_u64( u.data + 8 ); std::size_t r = 0; boost::hash_combine( r, low ); boost::hash_combine( r, high ); return r; } inline friend bool operator==( uuid const& u1, uuid const& u2 ) noexcept { return std::memcmp( u1.data, u2.data, 16 ) == 0; } }; namespace std { template<> struct hash< ::uuid > { 
std::size_t operator()( uuid const& u ) const noexcept { return hash_value( u ); } }; } // namespace std static std::vector< uuid > indices1, indices2, indices3; static void init_indices() { indices1.push_back( {} ); for( unsigned i = 1; i <= N*2; ++i ) { indices1.push_back( { i, 0 } ); } indices2.push_back( {} ); { boost::detail::splitmix64 rng; for( unsigned i = 1; i <= N*2; ++i ) { indices2.push_back( { rng(), rng() } ); } } indices3.push_back( {} ); for( unsigned i = 1; i <= N*2; ++i ) { uuid k( i, 0 ); std::reverse( k.data + 0, k.data + 16 ); indices3.push_back( k ); } } template<class Map> BOOST_NOINLINE void test_insert( Map& map, std::chrono::steady_clock::time_point & t1 ) { for( unsigned i = 1; i <= N; ++i ) { map.insert( { indices1[ i ], i } ); } print_time( t1, "Consecutive insert", 0, map.size() ); for( unsigned i = 1; i <= N; ++i ) { map.insert( { indices2[ i ], i } ); } print_time( t1, "Random insert", 0, map.size() ); for( unsigned i = 1; i <= N; ++i ) { map.insert( { indices3[ i ], i } ); } print_time( t1, "Consecutive reversed insert", 0, map.size() ); std::cout << std::endl; } template<class Map> BOOST_NOINLINE void test_lookup( Map& map, std::chrono::steady_clock::time_point & t1 ) { std::uint64_t s; s = 0; for( int j = 0; j < K; ++j ) { for( unsigned i = 1; i <= N * 2; ++i ) { auto it = map.find( indices1[ i ] ); if( it != map.end() ) s += it->second; } } print_time( t1, "Consecutive lookup", s, map.size() ); s = 0; for( int j = 0; j < K; ++j ) { for( unsigned i = 1; i <= N * 2; ++i ) { auto it = map.find( indices2[ i ] ); if( it != map.end() ) s += it->second; } } print_time( t1, "Random lookup", s, map.size() ); s = 0; for( int j = 0; j < K; ++j ) { for( unsigned i = 1; i <= N * 2; ++i ) { auto it = map.find( indices3[ i ] ); if( it != map.end() ) s += it->second; } } print_time( t1, "Consecutive reversed lookup", s, map.size() ); std::cout << std::endl; } template<class Map> BOOST_NOINLINE void test_iteration( Map& map, 
std::chrono::steady_clock::time_point & t1 ) { auto it = map.begin(); while( it != map.end() ) { if( it->second & 1 ) { if constexpr( std::is_void_v< decltype( map.erase( it ) ) > ) { map.erase( it++ ); } else { it = map.erase( it ); } } else { ++it; } } print_time( t1, "Iterate and erase odd elements", 0, map.size() ); std::cout << std::endl; } template<class Map> BOOST_NOINLINE void test_erase( Map& map, std::chrono::steady_clock::time_point & t1 ) { for( unsigned i = 1; i <= N; ++i ) { map.erase( indices1[ i ] ); } print_time( t1, "Consecutive erase", 0, map.size() ); for( unsigned i = 1; i <= N; ++i ) { map.erase( indices2[ i ] ); } print_time( t1, "Random erase", 0, map.size() ); for( unsigned i = 1; i <= N; ++i ) { map.erase( indices3[ i ] ); } print_time( t1, "Consecutive reversed erase", 0, map.size() ); std::cout << std::endl; } // counting allocator static std::size_t s_alloc_bytes = 0; static std::size_t s_alloc_count = 0; template<class T> struct allocator { using value_type = T; allocator() = default; template<class U> allocator( allocator<U> const & ) noexcept { } template<class U> bool operator==( allocator<U> const & ) const noexcept { return true; } template<class U> bool operator!=( allocator<U> const& ) const noexcept { return false; } T* allocate( std::size_t n ) const { s_alloc_bytes += n * sizeof(T); s_alloc_count++; return std::allocator<T>().allocate( n ); } void deallocate( T* p, std::size_t n ) const noexcept { s_alloc_bytes -= n * sizeof(T); s_alloc_count--; std::allocator<T>().deallocate( p, n ); } }; // struct record { std::string label_; long long time_; std::size_t bytes_; std::size_t count_; }; static std::vector<record> times; template<template<class...> class Map> BOOST_NOINLINE void test( char const* label ) { std::cout << label << ":\n\n"; s_alloc_bytes = 0; s_alloc_count = 0; Map<uuid, std::uint64_t> map; auto t0 = std::chrono::steady_clock::now(); auto t1 = t0; test_insert( map, t1 ); std::cout << "Memory: " << s_alloc_bytes << 
" bytes in " << s_alloc_count << " allocations\n\n"; record rec = { label, 0, s_alloc_bytes, s_alloc_count }; test_lookup( map, t1 ); test_iteration( map, t1 ); test_lookup( map, t1 ); test_erase( map, t1 ); auto tN = std::chrono::steady_clock::now(); std::cout << "Total: " << ( tN - t0 ) / 1ms << " ms\n\n"; rec.time_ = ( tN - t0 ) / 1ms; times.push_back( rec ); } // aliases using the counting allocator template<class K, class V> using allocator_for = ::allocator< std::pair<K const, V> >; template<class K, class V> using std_unordered_map = std::unordered_map<K, V, std::hash<K>, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_map = boost::unordered_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_node_map = boost::unordered_node_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_flat_map = boost::unordered_flat_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>; #ifdef HAVE_ABSEIL template<class K, class V> using absl_node_hash_map = absl::node_hash_map<K, V, absl::container_internal::hash_default_hash<K>, absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>; template<class K, class V> using absl_flat_hash_map = absl::flat_hash_map<K, V, absl::container_internal::hash_default_hash<K>, absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>; #endif #ifdef HAVE_ANKERL_UNORDERED_DENSE template<class K, class V> using ankerl_unordered_dense_map = ankerl::unordered_dense::map<K, V, ankerl::unordered_dense::hash<K>, std::equal_to<K>, ::allocator< std::pair<K, V> >>; #endif int main() { init_indices(); test<std_unordered_map>( "std::unordered_map" ); test<boost_unordered_map>( "boost::unordered_map" ); test<boost_unordered_node_map>( "boost::unordered_node_map" ); test<boost_unordered_flat_map>( "boost::unordered_flat_map" ); #ifdef HAVE_ANKERL_UNORDERED_DENSE 
test<ankerl_unordered_dense_map>( "ankerl::unordered_dense::map" ); #endif #ifdef HAVE_ABSEIL test<absl_node_hash_map>( "absl::node_hash_map" ); test<absl_flat_hash_map>( "absl::flat_hash_map" ); #endif std::cout << "---\n\n"; for( auto const& x: times ) { std::cout << std::setw( 30 ) << ( x.label_ + ": " ) << std::setw( 5 ) << x.time_ << " ms, " << std::setw( 9 ) << x.bytes_ << " bytes in " << x.count_ << " allocations\n"; } } #ifdef HAVE_ABSEIL # include "absl/container/internal/raw_hash_set.cc" # include "absl/hash/internal/hash.cc" # include "absl/hash/internal/low_level_hash.cc" # include "absl/hash/internal/city.cc" #endif
0
repos/unordered
repos/unordered/benchmark/word_count.cpp
// Copyright 2021, 2022 Peter Dimov. // Copyright 2023 Joaquin M Lopez Munoz. // Distributed under the Boost Software License, Version 1.0. // https://www.boost.org/LICENSE_1_0.txt #define _SILENCE_CXX17_OLD_ALLOCATOR_MEMBERS_DEPRECATION_WARNING #define _SILENCE_CXX20_CISO646_REMOVED_WARNING #include <boost/unordered_map.hpp> #include <boost/unordered/unordered_node_map.hpp> #include <boost/unordered/unordered_flat_map.hpp> #include <boost/regex.hpp> #ifdef HAVE_ABSEIL # include "absl/container/node_hash_map.h" # include "absl/container/flat_hash_map.h" #endif #ifdef HAVE_ANKERL_UNORDERED_DENSE # include "ankerl/unordered_dense.h" #endif #include <unordered_map> #include <vector> #include <memory> #include <cstdint> #include <iostream> #include <iomanip> #include <chrono> #include <fstream> #include <string_view> #include <string> using namespace std::chrono_literals; static void print_time( std::chrono::steady_clock::time_point & t1, char const* label, std::size_t s, std::size_t size ) { auto t2 = std::chrono::steady_clock::now(); std::cout << label << ": " << ( t2 - t1 ) / 1ms << " ms (s=" << s << ", size=" << size << ")\n"; t1 = t2; } static std::vector<std::string> words; static void init_words() { #if SIZE_MAX > UINT32_MAX char const* fn = "enwik9"; // http://mattmahoney.net/dc/textdata #else char const* fn = "enwik8"; // ditto #endif auto t1 = std::chrono::steady_clock::now(); std::ifstream is( fn ); std::string in( std::istreambuf_iterator<char>( is ), std::istreambuf_iterator<char>{} ); boost::regex re( "[a-zA-Z]+"); boost::sregex_token_iterator it( in.begin(), in.end(), re, 0 ), end; words.assign( it, end ); auto t2 = std::chrono::steady_clock::now(); std::cout << fn << ": " << words.size() << " words, " << ( t2 - t1 ) / 1ms << " ms\n\n"; } template<class Map> BOOST_NOINLINE void test_word_count( Map& map, std::chrono::steady_clock::time_point & t1 ) { std::size_t s = 0; for( auto const& word: words ) { ++map[ word ]; ++s; } print_time( t1, "Word count", 
s, map.size() ); std::cout << std::endl; } template<class Map> BOOST_NOINLINE void test_contains( Map& map, std::chrono::steady_clock::time_point & t1 ) { std::size_t s = 0; for( auto const& word: words ) { std::string_view w2( word ); w2.remove_prefix( 1 ); s += map.contains( w2 ); } print_time( t1, "Contains", s, map.size() ); std::cout << std::endl; } template<class Map> BOOST_NOINLINE void test_count( Map& map, std::chrono::steady_clock::time_point & t1 ) { std::size_t s = 0; for( auto const& word: words ) { std::string_view w2( word ); w2.remove_prefix( 1 ); s += map.count( w2 ); } print_time( t1, "Count", s, map.size() ); std::cout << std::endl; } template<class Map> BOOST_NOINLINE void test_iteration( Map& map, std::chrono::steady_clock::time_point & t1 ) { std::size_t max = 0; std::string_view word; for( auto const& x: map ) { if( x.second > max ) { word = x.first; max = x.second; } } print_time( t1, "Iterate and find max element", max, map.size() ); std::cout << std::endl; } // counting allocator static std::size_t s_alloc_bytes = 0; static std::size_t s_alloc_count = 0; template<class T> struct allocator { using value_type = T; allocator() = default; template<class U> allocator( allocator<U> const & ) noexcept { } template<class U> bool operator==( allocator<U> const & ) const noexcept { return true; } template<class U> bool operator!=( allocator<U> const& ) const noexcept { return false; } T* allocate( std::size_t n ) const { s_alloc_bytes += n * sizeof(T); s_alloc_count++; return std::allocator<T>().allocate( n ); } void deallocate( T* p, std::size_t n ) const noexcept { s_alloc_bytes -= n * sizeof(T); s_alloc_count--; std::allocator<T>().deallocate( p, n ); } }; // struct record { std::string label_; long long time_; std::size_t bytes_; std::size_t count_; }; static std::vector<record> times; template<template<class...> class Map> BOOST_NOINLINE void test( char const* label ) { std::cout << label << ":\n\n"; s_alloc_bytes = 0; s_alloc_count = 0; 
Map<std::string_view, std::size_t> map; auto t0 = std::chrono::steady_clock::now(); auto t1 = t0; test_word_count( map, t1 ); std::cout << "Memory: " << s_alloc_bytes << " bytes in " << s_alloc_count << " allocations\n\n"; record rec = { label, 0, s_alloc_bytes, s_alloc_count }; test_contains( map, t1 ); test_count( map, t1 ); test_iteration( map, t1 ); auto tN = std::chrono::steady_clock::now(); std::cout << "Total: " << ( tN - t0 ) / 1ms << " ms\n\n"; rec.time_ = ( tN - t0 ) / 1ms; times.push_back( rec ); } // aliases using the counting allocator template<class K, class V> using allocator_for = ::allocator< std::pair<K const, V> >; template<class K, class V> using std_unordered_map = std::unordered_map<K, V, std::hash<K>, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_map = boost::unordered_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_node_map = boost::unordered_node_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_flat_map = boost::unordered_flat_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>; #ifdef HAVE_ABSEIL template<class K, class V> using absl_node_hash_map = absl::node_hash_map<K, V, absl::container_internal::hash_default_hash<K>, absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>; template<class K, class V> using absl_flat_hash_map = absl::flat_hash_map<K, V, absl::container_internal::hash_default_hash<K>, absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>; #endif #ifdef HAVE_ANKERL_UNORDERED_DENSE template<class K, class V> using ankerl_unordered_dense_map = ankerl::unordered_dense::map<K, V, ankerl::unordered_dense::hash<K>, std::equal_to<K>, ::allocator< std::pair<K, V> >>; #endif // fnv1a_hash template<int Bits> struct fnv1a_hash_impl; template<> struct fnv1a_hash_impl<32> { std::size_t operator()( std::string_view const& s ) 
const { std::size_t h = 0x811C9DC5u; char const * first = s.data(); char const * last = first + s.size(); for( ; first != last; ++first ) { h ^= static_cast<unsigned char>( *first ); h *= 0x01000193ul; } return h; } }; template<> struct fnv1a_hash_impl<64> { std::size_t operator()( std::string_view const& s ) const { std::size_t h = 0xCBF29CE484222325ull; char const * first = s.data(); char const * last = first + s.size(); for( ; first != last; ++first ) { h ^= static_cast<unsigned char>( *first ); h *= 0x00000100000001B3ull; } return h; } }; struct fnv1a_hash: fnv1a_hash_impl< std::numeric_limits<std::size_t>::digits > { using is_avalanching = void; }; template<class K, class V> using std_unordered_map_fnv1a = std::unordered_map<K, V, fnv1a_hash, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_map_fnv1a = boost::unordered_map<K, V, fnv1a_hash, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_node_map_fnv1a = boost::unordered_node_map<K, V, fnv1a_hash, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_flat_map_fnv1a = boost::unordered_flat_map<K, V, fnv1a_hash, std::equal_to<K>, allocator_for<K, V>>; #ifdef HAVE_ABSEIL template<class K, class V> using absl_node_hash_map_fnv1a = absl::node_hash_map<K, V, fnv1a_hash, absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>; template<class K, class V> using absl_flat_hash_map_fnv1a = absl::flat_hash_map<K, V, fnv1a_hash, absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>; #endif #ifdef HAVE_ANKERL_UNORDERED_DENSE template<class K, class V> using ankerl_unordered_dense_map_fnv1a = ankerl::unordered_dense::map<K, V, fnv1a_hash, std::equal_to<K>, ::allocator< std::pair<K, V> >>; #endif // int main() { init_words(); test<std_unordered_map>( "std::unordered_map" ); test<boost_unordered_map>( "boost::unordered_map" ); test<boost_unordered_node_map>( "boost::unordered_node_map" ); 
test<boost_unordered_flat_map>( "boost::unordered_flat_map" ); #ifdef HAVE_ANKERL_UNORDERED_DENSE test<ankerl_unordered_dense_map>( "ankerl::unordered_dense::map" ); #endif #ifdef HAVE_ABSEIL test<absl_node_hash_map>( "absl::node_hash_map" ); test<absl_flat_hash_map>( "absl::flat_hash_map" ); #endif test<std_unordered_map_fnv1a>( "std::unordered_map, FNV-1a" ); test<boost_unordered_map_fnv1a>( "boost::unordered_map, FNV-1a" ); test<boost_unordered_node_map_fnv1a>( "boost::unordered_node_map, FNV-1a" ); test<boost_unordered_flat_map_fnv1a>( "boost::unordered_flat_map, FNV-1a" ); #ifdef HAVE_ANKERL_UNORDERED_DENSE test<ankerl_unordered_dense_map_fnv1a>( "ankerl::unordered_dense::map, FNV-1a" ); #endif #ifdef HAVE_ABSEIL test<absl_node_hash_map_fnv1a>( "absl::node_hash_map, FNV-1a" ); test<absl_flat_hash_map_fnv1a>( "absl::flat_hash_map, FNV-1a" ); #endif std::cout << "---\n\n"; for( auto const& x: times ) { std::cout << std::setw( 38 ) << ( x.label_ + ": " ) << std::setw( 5 ) << x.time_ << " ms, " << std::setw( 9 ) << x.bytes_ << " bytes in " << x.count_ << " allocations\n"; } } #ifdef HAVE_ABSEIL # include "absl/container/internal/raw_hash_set.cc" # include "absl/hash/internal/hash.cc" # include "absl/hash/internal/low_level_hash.cc" # include "absl/hash/internal/city.cc" #endif
0
repos/unordered
repos/unordered/benchmark/uint64.cpp
// Copyright 2021 Peter Dimov. // Copyright 2023 Joaquin M Lopez Munoz. // Distributed under the Boost Software License, Version 1.0. // https://www.boost.org/LICENSE_1_0.txt #define _SILENCE_CXX17_OLD_ALLOCATOR_MEMBERS_DEPRECATION_WARNING #define _SILENCE_CXX20_CISO646_REMOVED_WARNING #include <boost/unordered_map.hpp> #include <boost/unordered/unordered_node_map.hpp> #include <boost/unordered/unordered_flat_map.hpp> #include <boost/endian/conversion.hpp> #include <boost/core/detail/splitmix64.hpp> #include <boost/config.hpp> #ifdef HAVE_ABSEIL # include "absl/container/node_hash_map.h" # include "absl/container/flat_hash_map.h" #endif #ifdef HAVE_ANKERL_UNORDERED_DENSE # include "ankerl/unordered_dense.h" #endif #include <unordered_map> #include <vector> #include <memory> #include <cstdint> #include <iostream> #include <iomanip> #include <chrono> using namespace std::chrono_literals; static void print_time( std::chrono::steady_clock::time_point & t1, char const* label, std::uint64_t s, std::size_t size ) { auto t2 = std::chrono::steady_clock::now(); std::cout << label << ": " << ( t2 - t1 ) / 1ms << " ms (s=" << s << ", size=" << size << ")\n"; t1 = t2; } constexpr unsigned N = 2'000'000; constexpr int K = 10; static std::vector< std::uint64_t > indices1, indices2, indices3; static void init_indices() { indices1.push_back( 0 ); for( unsigned i = 1; i <= N*2; ++i ) { indices1.push_back( i ); } indices2.push_back( 0 ); { boost::detail::splitmix64 rng; for( unsigned i = 1; i <= N*2; ++i ) { indices2.push_back( rng() ); } } indices3.push_back( 0 ); for( unsigned i = 1; i <= N*2; ++i ) { indices3.push_back( boost::endian::endian_reverse( static_cast<std::uint64_t>( i ) ) ); } } template<class Map> BOOST_NOINLINE void test_insert( Map& map, std::chrono::steady_clock::time_point & t1 ) { for( unsigned i = 1; i <= N; ++i ) { map.insert( { indices1[ i ], i } ); } print_time( t1, "Consecutive insert", 0, map.size() ); for( unsigned i = 1; i <= N; ++i ) { map.insert( { 
indices2[ i ], i } ); } print_time( t1, "Random insert", 0, map.size() ); for( unsigned i = 1; i <= N; ++i ) { map.insert( { indices3[ i ], i } ); } print_time( t1, "Consecutive reversed insert", 0, map.size() ); std::cout << std::endl; } template<class Map> BOOST_NOINLINE void test_lookup( Map& map, std::chrono::steady_clock::time_point & t1 ) { std::uint64_t s; s = 0; for( int j = 0; j < K; ++j ) { for( unsigned i = 1; i <= N * 2; ++i ) { auto it = map.find( indices1[ i ] ); if( it != map.end() ) s += it->second; } } print_time( t1, "Consecutive lookup", s, map.size() ); s = 0; for( int j = 0; j < K; ++j ) { for( unsigned i = 1; i <= N * 2; ++i ) { auto it = map.find( indices2[ i ] ); if( it != map.end() ) s += it->second; } } print_time( t1, "Random lookup", s, map.size() ); s = 0; for( int j = 0; j < K; ++j ) { for( unsigned i = 1; i <= N * 2; ++i ) { auto it = map.find( indices3[ i ] ); if( it != map.end() ) s += it->second; } } print_time( t1, "Consecutive reversed lookup", s, map.size() ); std::cout << std::endl; } template<class Map> BOOST_NOINLINE void test_iteration( Map& map, std::chrono::steady_clock::time_point & t1 ) { auto it = map.begin(); while( it != map.end() ) { if( it->second & 1 ) { if constexpr( std::is_void_v< decltype( map.erase( it ) ) > ) { map.erase( it++ ); } else { it = map.erase( it ); } } else { ++it; } } print_time( t1, "Iterate and erase odd elements", 0, map.size() ); std::cout << std::endl; } template<class Map> BOOST_NOINLINE void test_erase( Map& map, std::chrono::steady_clock::time_point & t1 ) { for( unsigned i = 1; i <= N; ++i ) { map.erase( indices1[ i ] ); } print_time( t1, "Consecutive erase", 0, map.size() ); for( unsigned i = 1; i <= N; ++i ) { map.erase( indices2[ i ] ); } print_time( t1, "Random erase", 0, map.size() ); for( unsigned i = 1; i <= N; ++i ) { map.erase( indices3[ i ] ); } print_time( t1, "Consecutive reversed erase", 0, map.size() ); std::cout << std::endl; } // counting allocator static std::size_t 
s_alloc_bytes = 0; static std::size_t s_alloc_count = 0; template<class T> struct allocator { using value_type = T; allocator() = default; template<class U> allocator( allocator<U> const & ) noexcept { } template<class U> bool operator==( allocator<U> const & ) const noexcept { return true; } template<class U> bool operator!=( allocator<U> const& ) const noexcept { return false; } T* allocate( std::size_t n ) const { s_alloc_bytes += n * sizeof(T); s_alloc_count++; return std::allocator<T>().allocate( n ); } void deallocate( T* p, std::size_t n ) const noexcept { s_alloc_bytes -= n * sizeof(T); s_alloc_count--; std::allocator<T>().deallocate( p, n ); } }; // struct record { std::string label_; long long time_; std::size_t bytes_; std::size_t count_; }; static std::vector<record> times; template<template<class...> class Map> BOOST_NOINLINE void test( char const* label ) { std::cout << label << ":\n\n"; s_alloc_bytes = 0; s_alloc_count = 0; Map<std::uint64_t, std::uint64_t> map; auto t0 = std::chrono::steady_clock::now(); auto t1 = t0; test_insert( map, t1 ); std::cout << "Memory: " << s_alloc_bytes << " bytes in " << s_alloc_count << " allocations\n\n"; record rec = { label, 0, s_alloc_bytes, s_alloc_count }; test_lookup( map, t1 ); test_iteration( map, t1 ); test_lookup( map, t1 ); test_erase( map, t1 ); auto tN = std::chrono::steady_clock::now(); std::cout << "Total: " << ( tN - t0 ) / 1ms << " ms\n\n"; rec.time_ = ( tN - t0 ) / 1ms; times.push_back( rec ); } // aliases using the counting allocator template<class K, class V> using allocator_for = ::allocator< std::pair<K const, V> >; template<class K, class V> using std_unordered_map = std::unordered_map<K, V, std::hash<K>, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_map = boost::unordered_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_node_map = boost::unordered_node_map<K, V, boost::hash<K>, 
std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_flat_map = boost::unordered_flat_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>; #ifdef HAVE_ABSEIL template<class K, class V> using absl_node_hash_map = absl::node_hash_map<K, V, absl::container_internal::hash_default_hash<K>, absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>; template<class K, class V> using absl_flat_hash_map = absl::flat_hash_map<K, V, absl::container_internal::hash_default_hash<K>, absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>; #endif #ifdef HAVE_ANKERL_UNORDERED_DENSE template<class K, class V> using ankerl_unordered_dense_map = ankerl::unordered_dense::map<K, V, ankerl::unordered_dense::hash<K>, std::equal_to<K>, ::allocator< std::pair<K, V> >>; #endif int main() { init_indices(); #if defined(BOOST_LIBSTDCXX_VERSION) && __SIZE_WIDTH__ == 32 // Pathological behavior: // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104945 #else test<std_unordered_map>( "std::unordered_map" ); #endif test<boost_unordered_map>( "boost::unordered_map" ); test<boost_unordered_node_map>( "boost::unordered_node_map" ); test<boost_unordered_flat_map>( "boost::unordered_flat_map" ); #ifdef HAVE_ANKERL_UNORDERED_DENSE test<ankerl_unordered_dense_map>( "ankerl::unordered_dense::map" ); #endif #ifdef HAVE_ABSEIL test<absl_node_hash_map>( "absl::node_hash_map" ); test<absl_flat_hash_map>( "absl::flat_hash_map" ); #endif std::cout << "---\n\n"; for( auto const& x: times ) { std::cout << std::setw( 30 ) << ( x.label_ + ": " ) << std::setw( 5 ) << x.time_ << " ms, " << std::setw( 9 ) << x.bytes_ << " bytes in " << x.count_ << " allocations\n"; } } #ifdef HAVE_ABSEIL # include "absl/container/internal/raw_hash_set.cc" # include "absl/hash/internal/hash.cc" # include "absl/hash/internal/low_level_hash.cc" # include "absl/hash/internal/city.cc" #endif
0
repos/unordered
repos/unordered/benchmark/string.cpp
// Copyright 2021 Peter Dimov. // Copyright 2023 Joaquin M Lopez Munoz. // Distributed under the Boost Software License, Version 1.0. // https://www.boost.org/LICENSE_1_0.txt #define _SILENCE_CXX17_OLD_ALLOCATOR_MEMBERS_DEPRECATION_WARNING #define _SILENCE_CXX20_CISO646_REMOVED_WARNING #include <boost/unordered_map.hpp> #include <boost/unordered/unordered_node_map.hpp> #include <boost/unordered/unordered_flat_map.hpp> #include <boost/core/detail/splitmix64.hpp> #include <boost/config.hpp> #ifdef HAVE_ABSEIL # include "absl/container/node_hash_map.h" # include "absl/container/flat_hash_map.h" #endif #ifdef HAVE_ANKERL_UNORDERED_DENSE # include "ankerl/unordered_dense.h" #endif #include <unordered_map> #include <vector> #include <memory> #include <cstdint> #include <iostream> #include <iomanip> #include <chrono> using namespace std::chrono_literals; static void print_time( std::chrono::steady_clock::time_point & t1, char const* label, std::uint32_t s, std::size_t size ) { auto t2 = std::chrono::steady_clock::now(); std::cout << label << ": " << ( t2 - t1 ) / 1ms << " ms (s=" << s << ", size=" << size << ")\n"; t1 = t2; } constexpr unsigned N = 2'000'000; constexpr int K = 10; static std::vector<std::string> indices1, indices2; static std::string make_index( unsigned x ) { char buffer[ 64 ]; std::snprintf( buffer, sizeof(buffer), "pfx_%u_sfx", x ); return buffer; } static std::string make_random_index( unsigned x ) { char buffer[ 64 ]; std::snprintf( buffer, sizeof(buffer), "pfx_%0*d_%u_sfx", x % 8 + 1, 0, x ); return buffer; } static void init_indices() { indices1.reserve( N*2+1 ); indices1.push_back( make_index( 0 ) ); for( unsigned i = 1; i <= N*2; ++i ) { indices1.push_back( make_index( i ) ); } indices2.reserve( N*2+1 ); indices2.push_back( make_index( 0 ) ); { boost::detail::splitmix64 rng; for( unsigned i = 1; i <= N*2; ++i ) { indices2.push_back( make_random_index( static_cast<std::uint32_t>( rng() ) ) ); } } } template<class Map> BOOST_NOINLINE void 
test_insert( Map& map, std::chrono::steady_clock::time_point & t1 ) { for( unsigned i = 1; i <= N; ++i ) { map.insert( { indices1[ i ], i } ); } print_time( t1, "Consecutive insert", 0, map.size() ); for( unsigned i = 1; i <= N; ++i ) { map.insert( { indices2[ i ], i } ); } print_time( t1, "Random insert", 0, map.size() ); std::cout << std::endl; } template<class Map> BOOST_NOINLINE void test_lookup( Map& map, std::chrono::steady_clock::time_point & t1 ) { std::uint32_t s; s = 0; for( int j = 0; j < K; ++j ) { for( unsigned i = 1; i <= N * 2; ++i ) { auto it = map.find( indices1[ i ] ); if( it != map.end() ) s += it->second; } } print_time( t1, "Consecutive lookup", s, map.size() ); s = 0; for( int j = 0; j < K; ++j ) { for( unsigned i = 1; i <= N * 2; ++i ) { auto it = map.find( indices2[ i ] ); if( it != map.end() ) s += it->second; } } print_time( t1, "Random lookup", s, map.size() ); std::cout << std::endl; } template<class Map> BOOST_NOINLINE void test_iteration( Map& map, std::chrono::steady_clock::time_point & t1 ) { auto it = map.begin(); while( it != map.end() ) { if( it->second & 1 ) { if constexpr( std::is_void_v< decltype( map.erase( it ) ) > ) { map.erase( it++ ); } else { it = map.erase( it ); } } else { ++it; } } print_time( t1, "Iterate and erase odd elements", 0, map.size() ); std::cout << std::endl; } template<class Map> BOOST_NOINLINE void test_erase( Map& map, std::chrono::steady_clock::time_point & t1 ) { for( unsigned i = 1; i <= N; ++i ) { map.erase( indices1[ i ] ); } print_time( t1, "Consecutive erase", 0, map.size() ); for( unsigned i = 1; i <= N; ++i ) { map.erase( indices2[ i ] ); } print_time( t1, "Random erase", 0, map.size() ); std::cout << std::endl; } // counting allocator static std::size_t s_alloc_bytes = 0; static std::size_t s_alloc_count = 0; template<class T> struct allocator { using value_type = T; allocator() = default; template<class U> allocator( allocator<U> const & ) noexcept { } template<class U> bool operator==( 
allocator<U> const & ) const noexcept { return true; } template<class U> bool operator!=( allocator<U> const& ) const noexcept { return false; } T* allocate( std::size_t n ) const { s_alloc_bytes += n * sizeof(T); s_alloc_count++; return std::allocator<T>().allocate( n ); } void deallocate( T* p, std::size_t n ) const noexcept { s_alloc_bytes -= n * sizeof(T); s_alloc_count--; std::allocator<T>().deallocate( p, n ); } }; // struct record { std::string label_; long long time_; std::size_t bytes_; std::size_t count_; }; static std::vector<record> times; template<template<class...> class Map> BOOST_NOINLINE void test( char const* label ) { std::cout << label << ":\n\n"; s_alloc_bytes = 0; s_alloc_count = 0; Map<std::string, std::uint32_t> map; auto t0 = std::chrono::steady_clock::now(); auto t1 = t0; test_insert( map, t1 ); std::cout << "Memory: " << s_alloc_bytes << " bytes in " << s_alloc_count << " allocations\n\n"; record rec = { label, 0, s_alloc_bytes, s_alloc_count }; test_lookup( map, t1 ); test_iteration( map, t1 ); test_lookup( map, t1 ); test_erase( map, t1 ); auto tN = std::chrono::steady_clock::now(); std::cout << "Total: " << ( tN - t0 ) / 1ms << " ms\n\n"; rec.time_ = ( tN - t0 ) / 1ms; times.push_back( rec ); } // aliases using the counting allocator template<class K, class V> using allocator_for = ::allocator< std::pair<K const, V> >; template<class K, class V> using std_unordered_map = std::unordered_map<K, V, std::hash<K>, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_map = boost::unordered_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_node_map = boost::unordered_node_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_flat_map = boost::unordered_flat_map<K, V, boost::hash<K>, std::equal_to<K>, allocator_for<K, V>>; #ifdef HAVE_ABSEIL template<class K, class V> using 
absl_node_hash_map = absl::node_hash_map<K, V, absl::container_internal::hash_default_hash<K>, absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>; template<class K, class V> using absl_flat_hash_map = absl::flat_hash_map<K, V, absl::container_internal::hash_default_hash<K>, absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>; #endif #ifdef HAVE_ANKERL_UNORDERED_DENSE template<class K, class V> using ankerl_unordered_dense_map = ankerl::unordered_dense::map<K, V, ankerl::unordered_dense::hash<K>, std::equal_to<K>, ::allocator< std::pair<K, V> >>; #endif // fnv1a_hash template<int Bits> struct fnv1a_hash_impl; template<> struct fnv1a_hash_impl<32> { std::size_t operator()( std::string const& s ) const { std::size_t h = 0x811C9DC5u; char const * first = s.data(); char const * last = first + s.size(); for( ; first != last; ++first ) { h ^= static_cast<unsigned char>( *first ); h *= 0x01000193ul; } return h; } }; template<> struct fnv1a_hash_impl<64> { std::size_t operator()( std::string const& s ) const { std::size_t h = 0xCBF29CE484222325ull; char const * first = s.data(); char const * last = first + s.size(); for( ; first != last; ++first ) { h ^= static_cast<unsigned char>( *first ); h *= 0x00000100000001B3ull; } return h; } }; struct fnv1a_hash: fnv1a_hash_impl< std::numeric_limits<std::size_t>::digits > { using is_avalanching = void; }; template<class K, class V> using std_unordered_map_fnv1a = std::unordered_map<K, V, fnv1a_hash, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_map_fnv1a = boost::unordered_map<K, V, fnv1a_hash, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_node_map_fnv1a = boost::unordered_node_map<K, V, fnv1a_hash, std::equal_to<K>, allocator_for<K, V>>; template<class K, class V> using boost_unordered_flat_map_fnv1a = boost::unordered_flat_map<K, V, fnv1a_hash, std::equal_to<K>, allocator_for<K, V>>; #ifdef HAVE_ABSEIL 
template<class K, class V> using absl_node_hash_map_fnv1a = absl::node_hash_map<K, V, fnv1a_hash, absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>; template<class K, class V> using absl_flat_hash_map_fnv1a = absl::flat_hash_map<K, V, fnv1a_hash, absl::container_internal::hash_default_eq<K>, allocator_for<K, V>>; #endif #ifdef HAVE_ANKERL_UNORDERED_DENSE template<class K, class V> using ankerl_unordered_dense_map_fnv1a = ankerl::unordered_dense::map<K, V, fnv1a_hash, std::equal_to<K>, ::allocator< std::pair<K, V> >>; #endif // int main() { init_indices(); test<std_unordered_map>( "std::unordered_map" ); test<boost_unordered_map>( "boost::unordered_map" ); test<boost_unordered_node_map>( "boost::unordered_node_map" ); test<boost_unordered_flat_map>( "boost::unordered_flat_map" ); #ifdef HAVE_ANKERL_UNORDERED_DENSE test<ankerl_unordered_dense_map>( "ankerl::unordered_dense::map" ); #endif #ifdef HAVE_ABSEIL test<absl_node_hash_map>( "absl::node_hash_map" ); test<absl_flat_hash_map>( "absl::flat_hash_map" ); #endif test<std_unordered_map_fnv1a>( "std::unordered_map, FNV-1a" ); test<boost_unordered_map_fnv1a>( "boost::unordered_map, FNV-1a" ); test<boost_unordered_node_map_fnv1a>( "boost::unordered_node_map, FNV-1a" ); test<boost_unordered_flat_map_fnv1a>( "boost::unordered_flat_map, FNV-1a" ); #ifdef HAVE_ANKERL_UNORDERED_DENSE test<ankerl_unordered_dense_map_fnv1a>( "ankerl::unordered_dense::map, FNV-1a" ); #endif #ifdef HAVE_ABSEIL test<absl_node_hash_map_fnv1a>( "absl::node_hash_map, FNV-1a" ); test<absl_flat_hash_map_fnv1a>( "absl::flat_hash_map, FNV-1a" ); #endif std::cout << "---\n\n"; for( auto const& x: times ) { std::cout << std::setw( 38 ) << ( x.label_ + ": " ) << std::setw( 5 ) << x.time_ << " ms, " << std::setw( 9 ) << x.bytes_ << " bytes in " << x.count_ << " allocations\n"; } } #ifdef HAVE_ABSEIL # include "absl/container/internal/raw_hash_set.cc" # include "absl/hash/internal/hash.cc" # include "absl/hash/internal/low_level_hash.cc" # 
include "absl/hash/internal/city.cc" #endif
0
repos/unordered
repos/unordered/meta/explicit-failures-markup.xml
<?xml version="1.0" encoding="utf-8"?> <!-- Copyright 2017-2018 Daniel James Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) --> <explicit-failures-markup> <!-- unordered --> <library name="unordered"> <mark-expected-failures> <test name="unnecessary_copy_tests"/> <toolset name="borland-*"/> <toolset name="sun-*"/> <note author="Daniel James"> This tests whether inserting elements creates as few copies as I think is possible. If this fails it just means that the container might be a little inefficient. </note> </mark-expected-failures> <mark-expected-failures> <test name="compile_map_unordered_allocator"/> <toolset name="msvc-7.1"/> <note author="Daniel James"> This test fail because it's using unordered's internal allocator traits, which doesn't work on Visual C++ 7.1. It normally uses the one from Boost.Container by default. </note> </mark-expected-failures> <mark-expected-failures> <test name="noexcept_tests"/> <toolset name="gcc-4.3c+"/> <note author="Daniel James"> boost::is_nothrow_move_constructible and boost::is_nothrow_move_assignable don't seem to work on this compiler. I'd hope that anyone wanting noexcept support would use a more recent compiler anyway. </note> </mark-expected-failures> </library> </explicit-failures-markup>
0
repos/unordered
repos/unordered/meta/libraries.json
{ "key": "unordered", "name": "Unordered", "authors": [ "Daniel James" ], "maintainers": [ "Christian Mazakas <christian.mazakas -at- gmail.com>", "Joaquín M López Muñoz <joaquin.lopezmunoz -at- gmail.com>" ], "description": "Unordered associative containers.", "category": [ "Containers" ], "cxxstd": "11" }
0
repos/unordered
repos/unordered/test/quick.cpp
// Copyright 2022 Peter Dimov // Distributed under the Boost Software License, Version 1.0. // https://www.boost.org/LICENSE_1_0.txt #include <boost/unordered_map.hpp> #include <boost/core/lightweight_test.hpp> #include <string> int main() { boost::unordered_map<std::string, int> map; map[ "2" ] = 2; BOOST_TEST_EQ( map[ "1" ], 0 ); BOOST_TEST_EQ( map[ "2" ], 2 ); return boost::report_errors(); }
0
repos/unordered
repos/unordered/test/CMakeLists.txt
# Copyright 2018, 2019 Peter Dimov # Distributed under the Boost Software License, Version 1.0. # See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt include(BoostTestJamfile OPTIONAL RESULT_VARIABLE HAVE_BOOST_TEST) if(HAVE_BOOST_TEST) set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) set(BOOST_TEST_LINK_LIBRARIES Boost::unordered Boost::core Boost::concept_check Boost::tuple) function(fca_tests) boost_test(PREFIX boost_unordered ${ARGN}) endfunction() function(foa_tests) boost_test(PREFIX boost_unordered_foa COMPILE_DEFINITIONS BOOST_UNORDERED_FOA_TESTS ${ARGN}) endfunction() function(cfoa_tests) boost_test(PREFIX boost_unordered_cfoa LINK_LIBRARIES Boost::compat Boost::iterator Threads::Threads ${ARGN}) endfunction() # FCA tests fca_tests(SOURCES unordered/prime_fmod_tests.cpp) fca_tests(SOURCES unordered/fwd_set_test.cpp) fca_tests(SOURCES unordered/fwd_map_test.cpp) fca_tests(SOURCES unordered/allocator_traits.cpp) fca_tests(SOURCES unordered/minimal_allocator.cpp) fca_tests(SOURCES unordered/compile_set.cpp) fca_tests(SOURCES unordered/compile_map.cpp) fca_tests(SOURCES unordered/noexcept_tests.cpp) fca_tests(SOURCES unordered/link_test_1.cpp unordered/link_test_2.cpp) fca_tests(SOURCES unordered/incomplete_test.cpp) fca_tests(SOURCES unordered/simple_tests.cpp) fca_tests(SOURCES unordered/equivalent_keys_tests.cpp) fca_tests(SOURCES unordered/constructor_tests.cpp) fca_tests(SOURCES unordered/copy_tests.cpp) fca_tests(SOURCES unordered/move_tests.cpp) fca_tests(SOURCES unordered/post_move_tests.cpp) fca_tests(SOURCES unordered/assign_tests.cpp) fca_tests(SOURCES unordered/insert_tests.cpp) fca_tests(SOURCES unordered/insert_stable_tests.cpp) fca_tests(SOURCES unordered/insert_hint_tests.cpp) fca_tests(SOURCES unordered/emplace_tests.cpp) fca_tests(SOURCES unordered/unnecessary_copy_tests.cpp) fca_tests(SOURCES unordered/erase_tests.cpp COMPILE_DEFINITIONS BOOST_UNORDERED_SUPPRESS_DEPRECATED) 
fca_tests(SOURCES unordered/erase_equiv_tests.cpp) fca_tests(SOURCES unordered/extract_tests.cpp) fca_tests(SOURCES unordered/node_handle_tests.cpp) fca_tests(SOURCES unordered/merge_tests.cpp) fca_tests(SOURCES unordered/find_tests.cpp) fca_tests(SOURCES unordered/at_tests.cpp) fca_tests(SOURCES unordered/bucket_tests.cpp) fca_tests(SOURCES unordered/load_factor_tests.cpp) fca_tests(SOURCES unordered/rehash_tests.cpp) fca_tests(SOURCES unordered/equality_tests.cpp) fca_tests(SOURCES unordered/swap_tests.cpp) fca_tests(SOURCES unordered/deduction_tests.cpp) fca_tests(SOURCES unordered/scoped_allocator.cpp) fca_tests(SOURCES unordered/transparent_tests.cpp) fca_tests(SOURCES unordered/reserve_tests.cpp) fca_tests(SOURCES unordered/contains_tests.cpp) fca_tests(SOURCES unordered/erase_if.cpp) fca_tests(SOURCES unordered/scary_tests.cpp) fca_tests(SOURCES exception/constructor_exception_tests.cpp) fca_tests(SOURCES exception/copy_exception_tests.cpp) fca_tests(SOURCES exception/assign_exception_tests.cpp) fca_tests(SOURCES exception/move_assign_exception_tests.cpp) fca_tests(SOURCES exception/insert_exception_tests.cpp) fca_tests(SOURCES exception/erase_exception_tests.cpp) fca_tests(SOURCES exception/rehash_exception_tests.cpp) fca_tests(SOURCES exception/swap_exception_tests.cpp COMPILE_DEFINITIONS BOOST_UNORDERED_SWAP_METHOD=2) fca_tests(SOURCES exception/merge_exception_tests.cpp) fca_tests(SOURCES exception/less_tests.cpp) fca_tests(SOURCES unordered/narrow_cast_tests.cpp) fca_tests(SOURCES quick.cpp) fca_tests(TYPE compile-fail NAME insert_node_type_fail_map COMPILE_DEFINITIONS UNORDERED_TEST_MAP SOURCES unordered/insert_node_type_fail.cpp) fca_tests(TYPE compile-fail NAME insert_node_type_fail_multimap COMPILE_DEFINITIONS UNORDERED_TEST_MULTIMAP SOURCES unordered/insert_node_type_fail.cpp) fca_tests(TYPE compile-fail NAME insert_node_type_fail_set COMPILE_DEFINITIONS UNORDERED_TEST_SET SOURCES unordered/insert_node_type_fail.cpp) fca_tests(TYPE compile-fail 
NAME insert_node_type_fail_multiset COMPILE_DEFINITIONS UNORDERED_TEST_MULTISET SOURCES unordered/insert_node_type_fail.cpp) # FOA tests foa_tests(SOURCES unordered/fwd_set_test.cpp) foa_tests(SOURCES unordered/fwd_map_test.cpp) foa_tests(SOURCES unordered/compile_set.cpp) foa_tests(SOURCES unordered/compile_map.cpp) foa_tests(SOURCES unordered/noexcept_tests.cpp) foa_tests(SOURCES unordered/incomplete_test.cpp) foa_tests(SOURCES unordered/simple_tests.cpp) foa_tests(SOURCES unordered/equivalent_keys_tests.cpp) foa_tests(SOURCES unordered/constructor_tests.cpp) foa_tests(SOURCES unordered/copy_tests.cpp) foa_tests(SOURCES unordered/move_tests.cpp) foa_tests(SOURCES unordered/post_move_tests.cpp) foa_tests(SOURCES unordered/assign_tests.cpp) foa_tests(SOURCES unordered/insert_tests.cpp) foa_tests(SOURCES unordered/insert_hint_tests.cpp) foa_tests(SOURCES unordered/emplace_tests.cpp) foa_tests(SOURCES unordered/erase_tests.cpp) foa_tests(SOURCES unordered/merge_tests.cpp) foa_tests(SOURCES unordered/find_tests.cpp) foa_tests(SOURCES unordered/at_tests.cpp) foa_tests(SOURCES unordered/load_factor_tests.cpp) foa_tests(SOURCES unordered/rehash_tests.cpp) foa_tests(SOURCES unordered/equality_tests.cpp) foa_tests(SOURCES unordered/swap_tests.cpp) foa_tests(SOURCES unordered/transparent_tests.cpp) foa_tests(SOURCES unordered/reserve_tests.cpp) foa_tests(SOURCES unordered/contains_tests.cpp) foa_tests(SOURCES unordered/erase_if.cpp) foa_tests(SOURCES unordered/scary_tests.cpp) foa_tests(SOURCES unordered/init_type_insert_tests.cpp) foa_tests(SOURCES unordered/max_load_tests.cpp) foa_tests(SOURCES unordered/extract_tests.cpp) foa_tests(SOURCES unordered/node_handle_tests.cpp) foa_tests(SOURCES unordered/uses_allocator.cpp) foa_tests(SOURCES unordered/link_test_1.cpp unordered/link_test_2.cpp ) foa_tests(SOURCES unordered/scoped_allocator.cpp) foa_tests(SOURCES unordered/hash_is_avalanching_test.cpp) foa_tests(SOURCES exception/constructor_exception_tests.cpp) 
foa_tests(SOURCES exception/copy_exception_tests.cpp) foa_tests(SOURCES exception/assign_exception_tests.cpp) foa_tests(SOURCES exception/move_assign_exception_tests.cpp) foa_tests(SOURCES exception/insert_exception_tests.cpp) foa_tests(SOURCES exception/erase_exception_tests.cpp) foa_tests(SOURCES exception/rehash_exception_tests.cpp) foa_tests(SOURCES exception/swap_exception_tests.cpp) foa_tests(SOURCES exception/merge_exception_tests.cpp) # CFOA tests cfoa_tests(SOURCES cfoa/insert_tests.cpp) cfoa_tests(SOURCES cfoa/erase_tests.cpp) cfoa_tests(SOURCES cfoa/try_emplace_tests.cpp) cfoa_tests(SOURCES cfoa/emplace_tests.cpp) cfoa_tests(SOURCES cfoa/visit_tests.cpp) cfoa_tests(SOURCES cfoa/constructor_tests.cpp) cfoa_tests(SOURCES cfoa/assign_tests.cpp) cfoa_tests(SOURCES cfoa/clear_tests.cpp) cfoa_tests(SOURCES cfoa/swap_tests.cpp) cfoa_tests(SOURCES cfoa/merge_tests.cpp) cfoa_tests(SOURCES cfoa/rehash_tests.cpp) cfoa_tests(SOURCES cfoa/equality_tests.cpp) cfoa_tests(SOURCES cfoa/fwd_tests.cpp) cfoa_tests(SOURCES cfoa/exception_insert_tests.cpp) cfoa_tests(SOURCES cfoa/exception_erase_tests.cpp) cfoa_tests(SOURCES cfoa/exception_constructor_tests.cpp) cfoa_tests(SOURCES cfoa/exception_assign_tests.cpp) cfoa_tests(SOURCES cfoa/exception_merge_tests.cpp) cfoa_tests(SOURCES cfoa/rw_spinlock_test.cpp) cfoa_tests(SOURCES cfoa/rw_spinlock_test2.cpp) cfoa_tests(SOURCES cfoa/rw_spinlock_test3.cpp) cfoa_tests(SOURCES cfoa/rw_spinlock_test4.cpp) cfoa_tests(SOURCES cfoa/rw_spinlock_test5.cpp) cfoa_tests(SOURCES cfoa/rw_spinlock_test6.cpp) cfoa_tests(SOURCES cfoa/rw_spinlock_test7.cpp) cfoa_tests(SOURCES cfoa/rw_spinlock_test8.cpp) endif()
0
repos/unordered/test
repos/unordered/test/objects/fwd.hpp
// Copyright 2006-2009 Daniel James. // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #if !defined(BOOST_UNORDERED_TEST_OBJECTS_FWD_HEADER) #define BOOST_UNORDERED_TEST_OBJECTS_FWD_HEADER namespace test { class object; class hash; class less; class equal_to; template <class T> class allocator; } #endif
0
repos/unordered/test
repos/unordered/test/objects/cxx11_allocator.hpp
// Copyright 2006-2011 Daniel James. // Copyright 2022 Christian Mazakas // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #if !defined(BOOST_UNORDERED_TEST_CXX11_ALLOCATOR_HEADER) #define BOOST_UNORDERED_TEST_CXX11_ALLOCATOR_HEADER #include <boost/config.hpp> #include <boost/limits.hpp> #include <cstddef> #include "../helpers/fwd.hpp" #include "../helpers/memory.hpp" namespace test { struct allocator_false { enum { is_select_on_copy = 0, is_propagate_on_swap = 0, is_propagate_on_assign = 0, is_propagate_on_move = 0, cxx11_construct = 0 }; }; struct allocator_flags_all { enum { is_select_on_copy = 1, is_propagate_on_swap = 1, is_propagate_on_assign = 1, is_propagate_on_move = 1, cxx11_construct = 1 }; }; struct select_copy : allocator_false { enum { is_select_on_copy = 1 }; }; struct propagate_swap : allocator_false { enum { is_propagate_on_swap = 1 }; }; struct propagate_assign : allocator_false { enum { is_propagate_on_assign = 1 }; }; struct propagate_move : allocator_false { enum { is_propagate_on_move = 1 }; }; struct no_select_copy : allocator_flags_all { enum { is_select_on_copy = 0 }; }; struct no_propagate_swap : allocator_flags_all { enum { is_propagate_on_swap = 0 }; }; struct no_propagate_assign : allocator_flags_all { enum { is_propagate_on_assign = 0 }; }; struct no_propagate_move : allocator_flags_all { enum { is_propagate_on_move = 0 }; }; template <typename Flag> struct swap_allocator_base { struct propagate_on_container_swap { enum { value = Flag::is_propagate_on_swap }; }; }; template <typename Flag> struct assign_allocator_base { struct propagate_on_container_copy_assignment { enum { value = Flag::is_propagate_on_assign }; }; }; template <typename Flag> struct move_allocator_base { struct propagate_on_container_move_assignment { enum { value = Flag::is_propagate_on_move }; }; }; namespace { // boostinspect:nounnamed bool 
force_equal_allocator_value = false; } struct force_equal_allocator { bool old_value_; explicit force_equal_allocator(bool value) : old_value_(force_equal_allocator_value) { force_equal_allocator_value = value; } ~force_equal_allocator() { force_equal_allocator_value = old_value_; } }; template <typename T> struct cxx11_allocator_base { int tag_; int selected_; typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; typedef T* pointer; typedef T const* const_pointer; typedef T& reference; typedef T const& const_reference; typedef T value_type; explicit cxx11_allocator_base(int t) : tag_(t), selected_(0) { detail::tracker.allocator_ref(); } template <typename Y> cxx11_allocator_base(cxx11_allocator_base<Y> const& x) : tag_(x.tag_), selected_(x.selected_) { detail::tracker.allocator_ref(); } cxx11_allocator_base(cxx11_allocator_base const& x) : tag_(x.tag_), selected_(x.selected_) { detail::tracker.allocator_ref(); } ~cxx11_allocator_base() { detail::tracker.allocator_unref(); } cxx11_allocator_base& operator=(cxx11_allocator_base const& x) = default; pointer address(reference r) { return pointer(&r); } const_pointer address(const_reference r) { return const_pointer(&r); } pointer allocate(size_type n) { pointer ptr(static_cast<T*>(::operator new(n * sizeof(T)))); detail::tracker.track_allocate((void*)ptr, n, sizeof(T), tag_); return ptr; } pointer allocate(size_type n, void const*) { pointer ptr(static_cast<T*>(::operator new(n * sizeof(T)))); detail::tracker.track_allocate((void*)ptr, n, sizeof(T), tag_); return ptr; } void deallocate(pointer p, size_type n) { // Only checking tags when propagating swap. // Note that tags will be tested // properly in the normal allocator. detail::tracker.track_deallocate( (void*)p, n, sizeof(T), tag_, !force_equal_allocator_value); ::operator delete((void*)p); } template <class U, typename... Args> void construct(U* p, Args&&... 
args) { detail::tracker.track_construct((void*)p, sizeof(U), tag_); new (p) U(std::forward<Args>(args)...); } template <class U> void destroy(U* p) { detail::tracker.track_destroy((void*)p, sizeof(U), tag_); p->~U(); } size_type max_size() const { return (std::numeric_limits<size_type>::max)(); } }; template <typename T, typename Flags = propagate_swap, typename Enable = void> struct cxx11_allocator; template <typename T, typename Flags> struct cxx11_allocator<T, Flags, typename boost::disable_if_c<Flags::is_select_on_copy>::type> : public cxx11_allocator_base<T>, public swap_allocator_base<Flags>, public assign_allocator_base<Flags>, public move_allocator_base<Flags>, Flags { #if BOOST_WORKAROUND(BOOST_GCC_VERSION, < 402000) template <typename U> struct rebind { typedef cxx11_allocator<U, Flags> other; }; #endif explicit cxx11_allocator(int t = 0) : cxx11_allocator_base<T>(t) {} template <typename Y> cxx11_allocator(cxx11_allocator<Y, Flags> const& x) : cxx11_allocator_base<T>(x) { } cxx11_allocator(cxx11_allocator const& x) : cxx11_allocator_base<T>(x) {} cxx11_allocator& operator=(cxx11_allocator const& x) = default; // When not propagating swap, allocators are always equal // to avoid undefined behaviour. 
bool operator==(cxx11_allocator const& x) const { return force_equal_allocator_value || (this->tag_ == x.tag_); } bool operator!=(cxx11_allocator const& x) const { return !(*this == x); } }; template <typename T, typename Flags> struct cxx11_allocator<T, Flags, typename boost::enable_if_c<Flags::is_select_on_copy>::type> : public cxx11_allocator_base<T>, public swap_allocator_base<Flags>, public assign_allocator_base<Flags>, public move_allocator_base<Flags>, Flags { cxx11_allocator select_on_container_copy_construction() const { cxx11_allocator tmp(*this); ++tmp.selected_; return tmp; } #if BOOST_WORKAROUND(BOOST_GCC_VERSION, < 402000) template <typename U> struct rebind { typedef cxx11_allocator<U, Flags> other; }; #endif explicit cxx11_allocator(int t = 0) : cxx11_allocator_base<T>(t) {} template <typename Y> cxx11_allocator(cxx11_allocator<Y, Flags> const& x) : cxx11_allocator_base<T>(x) { } cxx11_allocator(cxx11_allocator const& x) : cxx11_allocator_base<T>(x) {} cxx11_allocator& operator=(cxx11_allocator const& x) = default; // When not propagating swap, allocators are always equal // to avoid undefined behaviour. bool operator==(cxx11_allocator const& x) const { return force_equal_allocator_value || (this->tag_ == x.tag_); } bool operator!=(cxx11_allocator const& x) const { return !(*this == x); } }; template <typename T, typename Flags> bool equivalent_impl(cxx11_allocator<T, Flags> const& x, cxx11_allocator<T, Flags> const& y, test::derived_type) { return x.tag_ == y.tag_; } // Function to check how many times an allocator has been selected, // return 0 for other allocators. struct convert_from_anything { template <typename T> convert_from_anything(T const&) {} }; inline int selected_count(convert_from_anything) { return 0; } template <typename T, typename Flags> int selected_count(cxx11_allocator<T, Flags> const& x) { return x.selected_; } } #endif
0
repos/unordered/test
repos/unordered/test/objects/exception.hpp
// Copyright 2006-2009 Daniel James.
// Copyright 2022 Christian Mazakas
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// Mock element, hash, equality and allocator types for the containers'
// exception-safety tests. Nearly every operation a container may invoke is
// wrapped in UNORDERED_SCOPE { UNORDERED_EPOINT(...) } so the harness can
// force an exception at that exact point; the relative order of epoints,
// member writes and tracker calls is therefore significant and must not be
// rearranged.

#if !defined(BOOST_UNORDERED_EXCEPTION_TEST_OBJECTS_HEADER)
#define BOOST_UNORDERED_EXCEPTION_TEST_OBJECTS_HEADER

#include "../helpers/exception_test.hpp"
#include "../helpers/count.hpp"
#include "../helpers/fwd.hpp"
#include "../helpers/generators.hpp"
#include "../helpers/memory.hpp"
#include "./fwd.hpp"
#include <boost/limits.hpp>
#include <cstddef>
#include <new>

namespace test {
  namespace exception {
    class object;
    class hash;
    class equal_to;
    template <class T> class allocator;
    object generate(object const*, random_generator);
    std::pair<object, object> generate(
      std::pair<object, object> const*, random_generator);

    // Local stand-ins for integral true/false types (used for the
    // propagate_on_container_* typedefs below).
    struct true_type
    {
      enum
      {
        value = true
      };
    };

    struct false_type
    {
      enum
      {
        value = false
      };
    };

    // Element type stored in the containers under test. Instance counting
    // comes from counted_object; tags are set to -1 on destruction so stale
    // reads are detectable.
    class object : private counted_object
    {
    public:
      int tag1_, tag2_;

      explicit object() : tag1_(0), tag2_(0)
      {
        UNORDERED_SCOPE(object::object())
        {
          UNORDERED_EPOINT("Mock object default constructor.")
        }
      }

      explicit object(int t1, int t2 = 0) : tag1_(t1), tag2_(t2)
      {
        UNORDERED_SCOPE(object::object(int))
        {
          UNORDERED_EPOINT("Mock object constructor by value.")
        }
      }

      object(object const& x)
          : counted_object(x), tag1_(x.tag1_), tag2_(x.tag2_)
      {
        UNORDERED_SCOPE(object::object(object))
        {
          UNORDERED_EPOINT("Mock object copy constructor.")
        }
      }

      ~object()
      {
        tag1_ = -1;
        tag2_ = -1;
      }

      object& operator=(object const& x)
      {
        UNORDERED_SCOPE(object::operator=(object))
        {
          // Epoint deliberately sits between the two member writes so a
          // throw leaves the object half-assigned.
          tag1_ = x.tag1_;
          UNORDERED_EPOINT("Mock object assign operator 1.")
          tag2_ = x.tag2_;
          // UNORDERED_EPOINT("Mock object assign operator 2.");
        }
        return *this;
      }

      friend bool operator==(object const& x1, object const& x2)
      {
        UNORDERED_SCOPE(operator==(object, object))
        {
          UNORDERED_EPOINT("Mock object equality operator.")
        }
        return x1.tag1_ == x2.tag1_ && x1.tag2_ == x2.tag2_;
      }

      friend bool operator!=(object const& x1, object const& x2)
      {
        UNORDERED_SCOPE(operator!=(object, object))
        {
          UNORDERED_EPOINT("Mock object inequality operator.")
        }
        return !(x1.tag1_ == x2.tag1_ && x1.tag2_ == x2.tag2_);
      }

      // None of the last few functions are used by the unordered associative
      // containers - so there aren't any exception points.
      friend bool operator<(object const& x1, object const& x2)
      {
        return x1.tag1_ < x2.tag1_ ||
               (x1.tag1_ == x2.tag1_ && x1.tag2_ < x2.tag2_);
      }

      friend object generate(object const*, random_generator g)
      {
        int* x = 0;
        return object(::test::generate(x, g), ::test::generate(x, g));
      }

      friend std::ostream& operator<<(std::ostream& out, object const& o)
      {
        return out << "(" << o.tag1_ << "," << o.tag2_ << ")";
      }
    };

    std::pair<object, object> generate(
      std::pair<object, object> const*, random_generator g)
    {
      int* x = 0;
      return std::make_pair(
        object(::test::generate(x, g), ::test::generate(x, g)),
        object(::test::generate(x, g), ::test::generate(x, g)));
    }

    // Hash functor; tag_ selects which object field(s) feed the hash, so
    // tests can create deliberate collisions.
    class hash
    {
      int tag_;

    public:
      hash(int t = 0) : tag_(t)
      {
        UNORDERED_SCOPE(hash::object())
        {
          UNORDERED_EPOINT("Mock hash default constructor.")
        }
      }

      hash(hash const& x) : tag_(x.tag_)
      {
        UNORDERED_SCOPE(hash::hash(hash))
        {
          UNORDERED_EPOINT("Mock hash copy constructor.")
        }
      }

      hash& operator=(hash const& x)
      {
        UNORDERED_SCOPE(hash::operator=(hash))
        {
          UNORDERED_EPOINT("Mock hash assign operator 1.")
          tag_ = x.tag_;
          UNORDERED_EPOINT("Mock hash assign operator 2.")
        }
        return *this;
      }

      std::size_t operator()(object const& x) const
      {
        UNORDERED_SCOPE(hash::operator()(object))
        {
          UNORDERED_EPOINT("Mock hash function.")
        }
        return hash_impl(x);
      }

      std::size_t operator()(std::pair<object, object> const& x) const
      {
        UNORDERED_SCOPE(hash::operator()(std::pair<object, object>))
        {
          UNORDERED_EPOINT("Mock hash pair function.")
        }
        return hash_impl(x.first) * 193ul + hash_impl(x.second) * 97ul + 29ul;
      }

      std::size_t hash_impl(object const& x) const
      {
        unsigned result;
        switch (tag_) {
        case 1:
          result = static_cast<unsigned>(x.tag1_);
          break;
        case 2:
          result = static_cast<unsigned>(x.tag2_);
          break;
        default:
          result =
            static_cast<unsigned>(x.tag1_) + static_cast<unsigned>(x.tag2_);
        }
        return result;
      }

      friend bool operator==(hash const& x1, hash const& x2)
      {
        UNORDERED_SCOPE(operator==(hash, hash))
        {
          UNORDERED_EPOINT("Mock hash equality function.")
        }
        return x1.tag_ == x2.tag_;
      }

      friend bool operator!=(hash const& x1, hash const& x2)
      {
        UNORDERED_SCOPE(hash::operator!=(hash, hash))
        {
          UNORDERED_EPOINT("Mock hash inequality function.")
        }
        return x1.tag_ != x2.tag_;
      }

#if defined(BOOST_UNORDERED_FOA_TESTS)
      friend void swap(hash&, hash&) noexcept;
#endif
    };

#if defined(BOOST_UNORDERED_FOA_TESTS)
    void swap(hash& lhs, hash& rhs) noexcept
    {
      int tag = lhs.tag_;
      lhs.tag_ = rhs.tag_;
      rhs.tag_ = tag;
    }
#endif

    // Ordering functor matching equal_to's notion of equality (used by
    // create_compare below). Not exercised by the containers, so no epoints.
    class less
    {
      int tag_;

    public:
      less(int t = 0) : tag_(t) {}
      less(less const& x) : tag_(x.tag_) {}

      bool operator()(object const& x1, object const& x2) const
      {
        return less_impl(x1, x2);
      }

      bool operator()(std::pair<object, object> const& x1,
        std::pair<object, object> const& x2) const
      {
        if (less_impl(x1.first, x2.first)) {
          return true;
        }
        if (less_impl(x2.first, x1.first)) {
          return false;
        }
        return less_impl(x1.second, x2.second);
      }

      bool less_impl(object const& x1, object const& x2) const
      {
        switch (tag_) {
        case 1:
          return x1.tag1_ < x2.tag1_;
        case 2:
          return x1.tag2_ < x2.tag2_;
        default:
          return x1 < x2;
        }
      }

      friend bool operator==(less const& x1, less const& x2)
      {
        return x1.tag_ == x2.tag_;
      }

      friend bool operator!=(less const& x1, less const& x2)
      {
        return x1.tag_ != x2.tag_;
      }
    };

    // Equality functor; tag_ selects which object field(s) are compared,
    // mirroring hash::hash_impl.
    class equal_to
    {
      int tag_;

    public:
      equal_to(int t = 0) : tag_(t)
      {
        UNORDERED_SCOPE(equal_to::equal_to())
        {
          UNORDERED_EPOINT("Mock equal_to default constructor.")
        }
      }

      equal_to(equal_to const& x) : tag_(x.tag_)
      {
        UNORDERED_SCOPE(equal_to::equal_to(equal_to))
        {
          UNORDERED_EPOINT("Mock equal_to copy constructor.")
        }
      }

      equal_to& operator=(equal_to const& x)
      {
        UNORDERED_SCOPE(equal_to::operator=(equal_to))
        {
          UNORDERED_EPOINT("Mock equal_to assign operator 1.")
          tag_ = x.tag_;
          UNORDERED_EPOINT("Mock equal_to assign operator 2.")
        }
        return *this;
      }

      bool operator()(object const& x1, object const& x2) const
      {
        UNORDERED_SCOPE(equal_to::operator()(object, object))
        {
          UNORDERED_EPOINT("Mock equal_to function.")
        }
        return equal_impl(x1, x2);
      }

      bool operator()(std::pair<object, object> const& x1,
        std::pair<object, object> const& x2) const
      {
        UNORDERED_SCOPE(equal_to::operator()(
          std::pair<object, object>, std::pair<object, object>))
        {
          UNORDERED_EPOINT("Mock equal_to function.")
        }
        return equal_impl(x1.first, x2.first) &&
               equal_impl(x1.second, x2.second);
      }

      bool equal_impl(object const& x1, object const& x2) const
      {
        switch (tag_) {
        case 1:
          return x1.tag1_ == x2.tag1_;
        case 2:
          return x1.tag2_ == x2.tag2_;
        default:
          return x1 == x2;
        }
      }

      friend bool operator==(equal_to const& x1, equal_to const& x2)
      {
        UNORDERED_SCOPE(operator==(equal_to, equal_to))
        {
          UNORDERED_EPOINT("Mock equal_to equality function.")
        }
        return x1.tag_ == x2.tag_;
      }

      friend bool operator!=(equal_to const& x1, equal_to const& x2)
      {
        UNORDERED_SCOPE(operator!=(equal_to, equal_to))
        {
          UNORDERED_EPOINT("Mock equal_to inequality function.")
        }
        return x1.tag_ != x2.tag_;
      }

      // Build the matching ordering functor (same tag, same field choice).
      friend less create_compare(equal_to x) { return less(x.tag_); }

#if defined(BOOST_UNORDERED_FOA_TESTS)
      friend void swap(equal_to&, equal_to&) noexcept;
#endif
    };

#if defined(BOOST_UNORDERED_FOA_TESTS)
    void swap(equal_to& lhs, equal_to& rhs) noexcept
    {
      int tag = lhs.tag_;
      lhs.tag_ = rhs.tag_;
      rhs.tag_ = tag;
    }
#endif

    // Propagating allocator (all three propagate_on_container_* are true)
    // with exception points and full allocation/construction tracking via
    // test::detail::tracker.
    template <class T> class allocator
    {
    public:
      int tag_;
      typedef std::size_t size_type;
      typedef std::ptrdiff_t difference_type;
      typedef T* pointer;
      typedef T const* const_pointer;
      typedef T& reference;
      typedef T const& const_reference;
      typedef T value_type;

      template <class U> struct rebind
      {
        typedef allocator<U> other;
      };

      explicit allocator(int t = 0) : tag_(t)
      {
        UNORDERED_SCOPE(allocator::allocator())
        {
          UNORDERED_EPOINT("Mock allocator default constructor.")
        }
        test::detail::tracker.allocator_ref();
      }

      template <class Y> allocator(allocator<Y> const& x) : tag_(x.tag_)
      {
        test::detail::tracker.allocator_ref();
      }

      allocator(allocator const& x) : tag_(x.tag_)
      {
        test::detail::tracker.allocator_ref();
      }

      ~allocator() { test::detail::tracker.allocator_unref(); }

      allocator& operator=(allocator const& x)
      {
        tag_ = x.tag_;
        return *this;
      }

      // If address throws, then it can't be used in erase or the
      // destructor, which is very limiting. I need to check up on
      // this.

      pointer address(reference r)
      {
        // UNORDERED_SCOPE(allocator::address(reference)) {
        //   UNORDERED_EPOINT("Mock allocator address function.");
        // }
        return pointer(&r);
      }

      const_pointer address(const_reference r)
      {
        // UNORDERED_SCOPE(allocator::address(const_reference)) {
        //   UNORDERED_EPOINT("Mock allocator const address function.");
        // }
        return const_pointer(&r);
      }

      pointer allocate(size_type n)
      {
        T* ptr = 0;
        UNORDERED_SCOPE(allocator::allocate(size_type))
        {
          UNORDERED_EPOINT("Mock allocator allocate function.")

          using namespace std;
          ptr = (T*)malloc(n * sizeof(T));
          if (!ptr)
            throw std::bad_alloc();
        }
        test::detail::tracker.track_allocate((void*)ptr, n, sizeof(T), tag_);
        return pointer(ptr);

        // return pointer(static_cast<T*>(::operator new(n * sizeof(T))));
      }

      pointer allocate(size_type n, void const*)
      {
        T* ptr = 0;
        UNORDERED_SCOPE(allocator::allocate(size_type, const_pointer))
        {
          UNORDERED_EPOINT("Mock allocator allocate function.")

          using namespace std;
          ptr = (T*)malloc(n * sizeof(T));
          if (!ptr)
            throw std::bad_alloc();
        }
        test::detail::tracker.track_allocate((void*)ptr, n, sizeof(T), tag_);
        return pointer(ptr);

        // return pointer(static_cast<T*>(::operator new(n * sizeof(T))));
      }

      void deallocate(pointer p, size_type n)
      {
        //::operator delete((void*) p);
        if (p) {
          test::detail::tracker.track_deallocate((void*)p, n, sizeof(T), tag_);
          using namespace std;
          free(p);
        }
      }

      template <class U, class... Args> void construct(U* p, Args&&... args)
      {
        UNORDERED_SCOPE(allocator::construct(U*, Args&&...))
        {
          UNORDERED_EPOINT("Mock allocator construct function.")
          new (p) U(std::forward<Args>(args)...);
        }
        test::detail::tracker.track_construct((void*)p, sizeof(U), tag_);
      }

      template <class U> void destroy(U* p)
      {
        test::detail::tracker.track_destroy((void*)p, sizeof(U), tag_);
        p->~U();
      }

      size_type max_size() const
      {
        UNORDERED_SCOPE(allocator::construct(pointer, T))
        {
          UNORDERED_EPOINT("Mock allocator max_size function.")
        }
        return (std::numeric_limits<std::size_t>::max)();
      }

      typedef true_type propagate_on_container_copy_assignment;
      typedef true_type propagate_on_container_move_assignment;
      typedef true_type propagate_on_container_swap;
    };

    template <class T> void swap(allocator<T>& x, allocator<T>& y)
    {
      std::swap(x.tag_, y.tag_);
    }

    // It's pretty much impossible to write a compliant swap when these
    // two can throw. So they don't.

    template <class T>
    inline bool operator==(allocator<T> const& x, allocator<T> const& y)
    {
      // UNORDERED_SCOPE(operator==(allocator, allocator)) {
      //   UNORDERED_EPOINT("Mock allocator equality operator.");
      // }
      return x.tag_ == y.tag_;
    }

    template <class T>
    inline bool operator!=(allocator<T> const& x, allocator<T> const& y)
    {
      // UNORDERED_SCOPE(operator!=(allocator, allocator)) {
      //   UNORDERED_EPOINT("Mock allocator inequality operator.");
      // }
      return x.tag_ != y.tag_;
    }

    // Non-propagating counterpart of allocator<T> (all three
    // propagate_on_container_* are false); note that copy assignment
    // deliberately does not copy tag_.
    template <class T> class allocator2
    {
    public:
      int tag_;
      typedef std::size_t size_type;
      typedef std::ptrdiff_t difference_type;
      typedef T* pointer;
      typedef T const* const_pointer;
      typedef T& reference;
      typedef T const& const_reference;
      typedef T value_type;

      template <class U> struct rebind
      {
        typedef allocator2<U> other;
      };

      explicit allocator2(int t = 0) : tag_(t)
      {
        UNORDERED_SCOPE(allocator2::allocator2())
        {
          UNORDERED_EPOINT("Mock allocator2 default constructor.")
        }
        test::detail::tracker.allocator_ref();
      }

      // Converting constructor from the propagating allocator.
      allocator2(allocator<T> const& x) : tag_(x.tag_)
      {
        test::detail::tracker.allocator_ref();
      }

      template <class Y> allocator2(allocator2<Y> const& x) : tag_(x.tag_)
      {
        test::detail::tracker.allocator_ref();
      }

      allocator2(allocator2 const& x) : tag_(x.tag_)
      {
        test::detail::tracker.allocator_ref();
      }

      ~allocator2() { test::detail::tracker.allocator_unref(); }

      allocator2& operator=(allocator2 const&) { return *this; }

      // If address throws, then it can't be used in erase or the
      // destructor, which is very limiting. I need to check up on
      // this.

      pointer address(reference r)
      {
        // UNORDERED_SCOPE(allocator2::address(reference)) {
        //   UNORDERED_EPOINT("Mock allocator2 address function.");
        // }
        return pointer(&r);
      }

      const_pointer address(const_reference r)
      {
        // UNORDERED_SCOPE(allocator2::address(const_reference)) {
        //   UNORDERED_EPOINT("Mock allocator2 const address function.");
        // }
        return const_pointer(&r);
      }

      pointer allocate(size_type n)
      {
        T* ptr = 0;
        UNORDERED_SCOPE(allocator2::allocate(size_type))
        {
          UNORDERED_EPOINT("Mock allocator2 allocate function.")

          using namespace std;
          ptr = (T*)malloc(n * sizeof(T));
          if (!ptr)
            throw std::bad_alloc();
        }
        test::detail::tracker.track_allocate((void*)ptr, n, sizeof(T), tag_);
        return pointer(ptr);

        // return pointer(static_cast<T*>(::operator new(n * sizeof(T))));
      }

      pointer allocate(size_type n, void const*)
      {
        T* ptr = 0;
        UNORDERED_SCOPE(allocator2::allocate(size_type, const_pointer))
        {
          UNORDERED_EPOINT("Mock allocator2 allocate function.")

          using namespace std;
          ptr = (T*)malloc(n * sizeof(T));
          if (!ptr)
            throw std::bad_alloc();
        }
        test::detail::tracker.track_allocate((void*)ptr, n, sizeof(T), tag_);
        return pointer(ptr);

        // return pointer(static_cast<T*>(::operator new(n * sizeof(T))));
      }

      void deallocate(pointer p, size_type n)
      {
        //::operator delete((void*) p);
        if (p) {
          test::detail::tracker.track_deallocate((void*)p, n, sizeof(T), tag_);
          using namespace std;
          free(p);
        }
      }

      template <class U, class... Args> void construct(U* p, Args&&... args)
      {
        UNORDERED_SCOPE(allocator2::construct(U*, Args&&...))
        {
          UNORDERED_EPOINT("Mock allocator2 construct function.")
          new (p) U(std::forward<Args>(args)...);
        }
        test::detail::tracker.track_construct((void*)p, sizeof(U), tag_);
      }

      template <class U> void destroy(U* p)
      {
        test::detail::tracker.track_destroy((void*)p, sizeof(U), tag_);
        p->~U();
      }

      size_type max_size() const
      {
        UNORDERED_SCOPE(allocator2::construct(pointer, T))
        {
          UNORDERED_EPOINT("Mock allocator2 max_size function.")
        }
        return (std::numeric_limits<std::size_t>::max)();
      }

      typedef false_type propagate_on_container_copy_assignment;
      typedef false_type propagate_on_container_move_assignment;
      typedef false_type propagate_on_container_swap;
    };

    template <class T> void swap(allocator2<T>& x, allocator2<T>& y)
    {
      std::swap(x.tag_, y.tag_);
    }

    // It's pretty much impossible to write a compliant swap when these
    // two can throw. So they don't.

    template <class T>
    inline bool operator==(allocator2<T> const& x, allocator2<T> const& y)
    {
      // UNORDERED_SCOPE(operator==(allocator2, allocator2)) {
      //   UNORDERED_EPOINT("Mock allocator2 equality operator.");
      // }
      return x.tag_ == y.tag_;
    }

    template <class T>
    inline bool operator!=(allocator2<T> const& x, allocator2<T> const& y)
    {
      // UNORDERED_SCOPE(operator!=(allocator2, allocator2)) {
      //   UNORDERED_EPOINT("Mock allocator2 inequality operator.");
      // }
      return x.tag_ != y.tag_;
    }
  }
}

namespace test {
  template <typename X> struct equals_to_compare;
  template <> struct equals_to_compare<test::exception::equal_to>
  {
    typedef test::exception::less type;
  };
}

// Workaround for ADL deficient compilers
#if defined(BOOST_NO_ARGUMENT_DEPENDENT_LOOKUP)
namespace test {
  test::exception::object generate(
    test::exception::object const* x, random_generator g)
  {
    return test::exception::generate(x, g);
  }

  std::pair<test::exception::object, test::exception::object> generate(
    std::pair<test::exception::object, test::exception::object> const* x,
    random_generator g)
  {
    return test::exception::generate(x, g);
  }
}
#endif

#endif
0
repos/unordered/test
repos/unordered/test/objects/minimal.hpp
// Copyright 2006-2009 Daniel James. // Copyright 2022 Christian Mazakas // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // Define some minimal classes which provide the bare minimum concepts to // test that the containers don't rely on something that they shouldn't. // They are not intended to be good examples of how to implement the concepts. #if !defined(BOOST_UNORDERED_OBJECTS_MINIMAL_HEADER) #define BOOST_UNORDERED_OBJECTS_MINIMAL_HEADER #include <boost/core/addressof.hpp> #include <boost/core/lightweight_test.hpp> #include <boost/core/pointer_traits.hpp> #include <cstddef> #include <utility> #if defined(BOOST_MSVC) #pragma warning(push) #pragma warning(disable : 4100) // unreferenced formal parameter #endif #if !BOOST_WORKAROUND(BOOST_MSVC, == 1500) #define BOOST_UNORDERED_CHECK_ADDR_OPERATOR_NOT_USED 1 #else #define BOOST_UNORDERED_CHECK_ADDR_OPERATOR_NOT_USED 0 #endif namespace test { namespace minimal { class destructible; class copy_constructible; class copy_constructible_equality_comparable; class default_assignable; class assignable; struct ampersand_operator_used { ampersand_operator_used() { BOOST_TEST(false); } }; template <class T> class hash; template <class T> class equal_to; template <class T> class ptr; template <class T> class const_ptr; template <class T> class allocator; template <class T> class cxx11_allocator; struct constructor_param { operator int() const { return 0; } }; class destructible { public: destructible(constructor_param const&) {} ~destructible() {} void dummy_member() const {} private: destructible(destructible const&); destructible& operator=(destructible const&); }; class copy_constructible { public: copy_constructible(constructor_param const&) {} copy_constructible(copy_constructible const&) {} ~copy_constructible() {} void dummy_member() const {} private: copy_constructible& operator=(copy_constructible const&); 
copy_constructible() {} }; class copy_constructible_equality_comparable { public: copy_constructible_equality_comparable(constructor_param const&) {} copy_constructible_equality_comparable( copy_constructible_equality_comparable const&) { } ~copy_constructible_equality_comparable() {} void dummy_member() const {} private: copy_constructible_equality_comparable& operator=( copy_constructible_equality_comparable const&); copy_constructible_equality_comparable() {} #if BOOST_UNORDERED_CHECK_ADDR_OPERATOR_NOT_USED ampersand_operator_used operator&() const { return ampersand_operator_used(); } #endif }; bool operator==(copy_constructible_equality_comparable, copy_constructible_equality_comparable) { return true; } bool operator!=(copy_constructible_equality_comparable, copy_constructible_equality_comparable) { return false; } class default_assignable { public: default_assignable(constructor_param const&) {} default_assignable() {} default_assignable(default_assignable const&) {} default_assignable& operator=(default_assignable const&) { return *this; } ~default_assignable() {} void dummy_member() const {} #if BOOST_UNORDERED_CHECK_ADDR_OPERATOR_NOT_USED ampersand_operator_used operator&() const { return ampersand_operator_used(); } #endif }; class assignable { public: assignable(constructor_param const&) {} assignable(assignable const&) {} assignable& operator=(assignable const&) { return *this; } ~assignable() {} void dummy_member() const {} private: assignable() {} #if BOOST_UNORDERED_CHECK_ADDR_OPERATOR_NOT_USED ampersand_operator_used operator&() const { return ampersand_operator_used(); } #endif }; struct movable_init { }; class movable1 { public: movable1(constructor_param const&) {} movable1() {} explicit movable1(movable_init) {} movable1(movable1 const&) = delete; movable1& operator=(movable1 const&) = delete; movable1(movable1&&) {} movable1& operator=(movable1&&) { return *this; } ~movable1() {} void dummy_member() const {} }; class movable2 { public: 
movable2(constructor_param const&) {} explicit movable2(movable_init) {} movable2(movable2&&) {} ~movable2() {} movable2& operator=(movable2&&) { return *this; } void dummy_member() const {} private: movable2() {} movable2(movable2 const&); movable2& operator=(movable2 const&); }; template <class T> class hash { public: hash(constructor_param const&) {} hash() {} hash(hash const&) {} hash& operator=(hash const&) { return *this; } ~hash() {} #if defined(BOOST_UNORDERED_FOA_TESTS) hash(hash&&) = default; hash& operator=(hash&&) = default; #endif std::size_t operator()(T const&) const { return 0; } #if BOOST_UNORDERED_CHECK_ADDR_OPERATOR_NOT_USED ampersand_operator_used operator&() const { return ampersand_operator_used(); } #endif }; template <class T> class equal_to { public: equal_to(constructor_param const&) {} equal_to() {} equal_to(equal_to const&) {} equal_to& operator=(equal_to const&) { return *this; } ~equal_to() {} #if defined(BOOST_UNORDERED_FOA_TESTS) equal_to(equal_to&&) = default; equal_to& operator=(equal_to&&) = default; #endif bool operator()(T const&, T const&) const { return true; } #if BOOST_UNORDERED_CHECK_ADDR_OPERATOR_NOT_USED ampersand_operator_used operator&() const { return ampersand_operator_used(); } #endif }; template <class T> class ptr; template <class T> class const_ptr; struct void_ptr { #if !defined(BOOST_NO_MEMBER_TEMPLATE_FRIENDS) template <typename T> friend class ptr; private: #endif void* ptr_; public: void_ptr() : ptr_(0) {} template <typename T> explicit void_ptr(ptr<T> const& x) : ptr_(x.ptr_) {} // I'm not using the safe bool idiom because the containers should be // able to cope with bool conversions. 
operator bool() const { return !!ptr_; } bool operator==(void_ptr const& x) const { return ptr_ == x.ptr_; } bool operator!=(void_ptr const& x) const { return ptr_ != x.ptr_; } }; class void_const_ptr { #if !defined(BOOST_NO_MEMBER_TEMPLATE_FRIENDS) template <typename T> friend class const_ptr; private: #endif void* ptr_; public: void_const_ptr() : ptr_(0) {} template <typename T> explicit void_const_ptr(const_ptr<T> const& x) : ptr_(x.ptr_) { } // I'm not using the safe bool idiom because the containers should be // able to cope with bool conversions. operator bool() const { return !!ptr_; } bool operator==(void_const_ptr const& x) const { return ptr_ == x.ptr_; } bool operator!=(void_const_ptr const& x) const { return ptr_ != x.ptr_; } }; template <class T> class ptr { friend class allocator<T>; friend class const_ptr<T>; friend struct void_ptr; T* ptr_; ptr(T* x) : ptr_(x) {} public: ptr() : ptr_(0) {} ptr(std::nullptr_t) : ptr_(0) {} explicit ptr(void_ptr const& x) : ptr_((T*)x.ptr_) {} T& operator*() const { return *ptr_; } T* operator->() const { return ptr_; } ptr& operator++() { ++ptr_; return *this; } ptr operator++(int) { ptr tmp(*this); ++ptr_; return tmp; } ptr operator+(std::ptrdiff_t s) const { return ptr<T>(ptr_ + s); } friend ptr operator+(std::ptrdiff_t s, ptr p) { return ptr<T>(s + p.ptr_); } ptr& operator+=(std::ptrdiff_t s) { ptr_ += s; return *this; } ptr& operator-=(std::ptrdiff_t s) { ptr_ -= s; return *this; } std::ptrdiff_t operator-(ptr p) const { return ptr_ - p.ptr_; } ptr operator-(std::ptrdiff_t s) const { return ptr(ptr_ - s); } T& operator[](std::ptrdiff_t s) const { return ptr_[s]; } bool operator!() const { return !ptr_; } static ptr pointer_to(T& p) { return ptr(std::addressof(p)); } // I'm not using the safe bool idiom because the containers should be // able to cope with bool conversions. 
operator bool() const { return !!ptr_; } bool operator==(ptr const& x) const { return ptr_ == x.ptr_; } bool operator!=(ptr const& x) const { return ptr_ != x.ptr_; } bool operator==(std::nullptr_t) const { return ptr_ == nullptr; } bool operator!=(std::nullptr_t) const { return ptr_ != nullptr; } bool operator<(ptr const& x) const { return ptr_ < x.ptr_; } bool operator>(ptr const& x) const { return ptr_ > x.ptr_; } bool operator<=(ptr const& x) const { return ptr_ <= x.ptr_; } bool operator>=(ptr const& x) const { return ptr_ >= x.ptr_; } #if BOOST_UNORDERED_CHECK_ADDR_OPERATOR_NOT_USED ampersand_operator_used operator&() const { return ampersand_operator_used(); } #endif }; template <class T> class const_ptr { friend class allocator<T>; friend struct const_void_ptr; T const* ptr_; const_ptr(T const* ptr) : ptr_(ptr) {} public: const_ptr() : ptr_(0) {} const_ptr(ptr<T> const& x) : ptr_(x.ptr_) {} explicit const_ptr(void_const_ptr const& x) : ptr_((T const*)x.ptr_) {} T const& operator*() const { return *ptr_; } T const* operator->() const { return ptr_; } const_ptr& operator++() { ++ptr_; return *this; } const_ptr operator++(int) { const_ptr tmp(*this); ++ptr_; return tmp; } const_ptr operator+(std::ptrdiff_t s) const { return const_ptr(ptr_ + s); } friend const_ptr operator+(std::ptrdiff_t s, const_ptr p) { return ptr<T>(s + p.ptr_); } T const& operator[](int s) const { return ptr_[s]; } bool operator!() const { return !ptr_; } operator bool() const { return !!ptr_; } bool operator==(const_ptr const& x) const { return ptr_ == x.ptr_; } bool operator!=(const_ptr const& x) const { return ptr_ != x.ptr_; } bool operator<(const_ptr const& x) const { return ptr_ < x.ptr_; } bool operator>(const_ptr const& x) const { return ptr_ > x.ptr_; } bool operator<=(const_ptr const& x) const { return ptr_ <= x.ptr_; } bool operator>=(const_ptr const& x) const { return ptr_ >= x.ptr_; } #if BOOST_UNORDERED_CHECK_ADDR_OPERATOR_NOT_USED ampersand_operator_used operator&() const { 
return ampersand_operator_used(); } #endif }; template <class T> class allocator { public: typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; typedef void_ptr void_pointer; typedef void_const_ptr const_void_pointer; typedef ptr<T> pointer; typedef const_ptr<T> const_pointer; typedef T& reference; typedef T const& const_reference; typedef T value_type; template <class U> struct rebind { typedef allocator<U> other; }; allocator() {} template <class Y> allocator(allocator<Y> const&) {} allocator(allocator const&) {} ~allocator() {} pointer address(reference r) { return pointer(&r); } const_pointer address(const_reference r) { return const_pointer(&r); } pointer allocate(size_type n) { return pointer(static_cast<T*>(::operator new(n * sizeof(T)))); } template <class Y> pointer allocate(size_type n, const_ptr<Y>) { return pointer(static_cast<T*>(::operator new(n * sizeof(T)))); } void deallocate(pointer p, size_type) { ::operator delete((void*)p.ptr_); } template <class U, class... Args> void construct(U* p, Args&&... 
args) { new ((void*)p) U(std::forward<Args>(args)...); } template <class U> void destroy(U* p) { p->~U(); } size_type max_size() const { return 1000; } #if defined(BOOST_NO_ARGUMENT_DEPENDENT_LOOKUP) || \ BOOST_WORKAROUND(BOOST_MSVC, <= 1300) public: allocator& operator=(allocator const&) { return *this; } #else private: allocator& operator=(allocator const&); #endif #if BOOST_UNORDERED_CHECK_ADDR_OPERATOR_NOT_USED ampersand_operator_used operator&() const { return ampersand_operator_used(); } #endif }; template <class T> class allocator<T const> { public: typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; typedef void_ptr void_pointer; typedef void_const_ptr const_void_pointer; // Maybe these two should be const_ptr<T> typedef ptr<T const> pointer; typedef const_ptr<T const> const_pointer; typedef T const& reference; typedef T const& const_reference; typedef T const value_type; template <class U> struct rebind { typedef allocator<U> other; }; allocator() {} template <class Y> allocator(allocator<Y> const&) {} allocator(allocator const&) {} ~allocator() {} const_pointer address(const_reference r) { return const_pointer(&r); } pointer allocate(size_type n) { return pointer(static_cast<T const*>(::operator new(n * sizeof(T)))); } template <class Y> pointer allocate(size_type n, const_ptr<Y>) { return pointer(static_cast<T const*>(::operator new(n * sizeof(T)))); } void deallocate(pointer p, size_type) { ::operator delete((void*)p.ptr_); } template <class U, class... Args> void construct(U* p, Args&&... 
args) { new (p) U(std::forward<Args>(args)...); } template <class U> void destroy(U* p) { p->~U(); } size_type max_size() const { return 1000; } #if defined(BOOST_NO_ARGUMENT_DEPENDENT_LOOKUP) || \ BOOST_WORKAROUND(BOOST_MSVC, <= 1300) public: allocator& operator=(allocator const&) { return *this; } #else private: allocator& operator=(allocator const&); #endif #if BOOST_UNORDERED_CHECK_ADDR_OPERATOR_NOT_USED ampersand_operator_used operator&() const { return ampersand_operator_used(); } #endif }; template <class T> inline bool operator==(allocator<T> const&, allocator<T> const&) { return true; } template <class T> inline bool operator!=(allocator<T> const&, allocator<T> const&) { return false; } template <class T> void swap(allocator<T>&, allocator<T>&) {} // C++11 allocator // // Not a fully minimal C++11 allocator, just what I support. Hopefully will // cut down further in the future. template <class T> class cxx11_allocator { public: typedef T value_type; // template <class U> struct rebind { typedef cxx11_allocator<U> other; }; cxx11_allocator() {} template <class Y> cxx11_allocator(cxx11_allocator<Y> const&) {} cxx11_allocator(cxx11_allocator const&) {} ~cxx11_allocator() {} T* address(T& r) { return &r; } T const* address(T const& r) { return &r; } T* allocate(std::size_t n) { return static_cast<T*>(::operator new(n * sizeof(T))); } template <class Y> T* allocate(std::size_t n, const_ptr<Y>) { return static_cast<T*>(::operator new(n * sizeof(T))); } void deallocate(T* p, std::size_t) { ::operator delete((void*)p); } template <class U, class... Args> void construct(U* p, Args&&... 
args) { new ((void*)p) U(std::forward<Args>(args)...); } template <class U> void destroy(U* p) { p->~U(); } std::size_t max_size() const { return 1000u; } }; template <class T> inline bool operator==(cxx11_allocator<T> const&, cxx11_allocator<T> const&) { return true; } template <class T> inline bool operator!=(cxx11_allocator<T> const&, cxx11_allocator<T> const&) { return false; } template <class T> void swap(cxx11_allocator<T>&, cxx11_allocator<T>&) {} } } #if defined(BOOST_NO_ARGUMENT_DEPENDENT_LOOKUP) namespace boost { #else namespace test { namespace minimal { #endif std::size_t hash_value(test::minimal::copy_constructible_equality_comparable) { return 1; } #if !defined(BOOST_NO_ARGUMENT_DEPENDENT_LOOKUP) } } #else } #endif #if defined(BOOST_MSVC) #pragma warning(pop) #endif namespace boost { template <> struct pointer_traits< ::test::minimal::void_ptr> { template <class U> struct rebind_to { typedef ::test::minimal::ptr<U> type; }; template<class U> using rebind=typename rebind_to<U>::type; }; } #endif
0
repos/unordered/test
repos/unordered/test/objects/test.hpp
// Copyright 2006-2009 Daniel James. // Copyright 2022 Christian Mazakas // Copyright 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #if !defined(BOOST_UNORDERED_TEST_OBJECTS_HEADER) #define BOOST_UNORDERED_TEST_OBJECTS_HEADER #include "../helpers/count.hpp" #include "../helpers/fwd.hpp" #include "../helpers/memory.hpp" #include <boost/config.hpp> #include <boost/core/serialization.hpp> #include <boost/limits.hpp> #include <cstddef> template <class T> struct allocator1; template <class T> struct allocator2; namespace test { // Note that the default hash function will work for any equal_to (but not // very well). class object; class movable; class implicitly_convertible; class hash; class less; class equal_to; template <class T> class allocator1; template <class T> class allocator2; object generate(object const*, random_generator); movable generate(movable const*, random_generator); implicitly_convertible generate( implicitly_convertible const*, random_generator); inline void ignore_variable(void const*) {} class object : private counted_object { friend class hash; friend class equal_to; friend class less; int tag1_, tag2_; public: explicit object(int t1 = 0, int t2 = 0) : tag1_(t1), tag2_(t2) {} ~object() { tag1_ = -1; tag2_ = -1; } friend bool operator==(object const& x1, object const& x2) { return x1.tag1_ == x2.tag1_ && x1.tag2_ == x2.tag2_; } friend bool operator!=(object const& x1, object const& x2) { return x1.tag1_ != x2.tag1_ || x1.tag2_ != x2.tag2_; } friend bool operator<(object const& x1, object const& x2) { return x1.tag1_ < x2.tag1_ || (x1.tag1_ == x2.tag1_ && x1.tag2_ < x2.tag2_); } friend object generate(object const*, random_generator g) { int* x = 0; return object(generate(x, g), generate(x, g)); } friend std::ostream& operator<<(std::ostream& out, object const& o) { return out << "(" << o.tag1_ << "," << o.tag2_ << 
")"; } template<typename Archive> void serialize(Archive& ar,unsigned int) { ar & boost::core::make_nvp("tag1", tag1_); ar & boost::core::make_nvp("tag2", tag2_); } }; class movable : private counted_object { friend class hash; friend class equal_to; friend class less; int tag1_, tag2_; public: explicit movable(int t1 = 0, int t2 = 0) : tag1_(t1), tag2_(t2) {} movable(movable const& x) : counted_object(x), tag1_(x.tag1_), tag2_(x.tag2_) { BOOST_TEST(x.tag1_ != -1); } movable(movable&& x) : counted_object(x), tag1_(x.tag1_), tag2_(x.tag2_) { BOOST_TEST(x.tag1_ != -1); x.tag1_ = -1; x.tag2_ = -1; } movable& operator=(movable const& x) // Copy assignment { BOOST_TEST(x.tag1_ != -1); tag1_ = x.tag1_; tag2_ = x.tag2_; return *this; } movable& operator=(movable&& x) // Move assignment { BOOST_TEST(x.tag1_ != -1); tag1_ = x.tag1_; tag2_ = x.tag2_; x.tag1_ = -1; x.tag2_ = -1; return *this; } ~movable() { tag1_ = -1; tag2_ = -1; } friend bool operator==(movable const& x1, movable const& x2) { BOOST_TEST(x1.tag1_ != -1 && x2.tag1_ != -1); return x1.tag1_ == x2.tag1_ && x1.tag2_ == x2.tag2_; } friend bool operator!=(movable const& x1, movable const& x2) { BOOST_TEST(x1.tag1_ != -1 && x2.tag1_ != -1); return x1.tag1_ != x2.tag1_ || x1.tag2_ != x2.tag2_; } friend bool operator<(movable const& x1, movable const& x2) { BOOST_TEST(x1.tag1_ != -1 && x2.tag1_ != -1); return x1.tag1_ < x2.tag1_ || (x1.tag1_ == x2.tag1_ && x1.tag2_ < x2.tag2_); } friend movable generate(movable const*, random_generator g) { int* x = 0; return movable(generate(x, g), generate(x, g)); } friend std::ostream& operator<<(std::ostream& out, movable const& o) { return out << "(" << o.tag1_ << "," << o.tag2_ << ")"; } }; class implicitly_convertible : private counted_object { int tag1_, tag2_; public: explicit implicitly_convertible(int t1 = 0, int t2 = 0) : tag1_(t1), tag2_(t2) { } operator object() const { return object(tag1_, tag2_); } operator movable() const { return movable(tag1_, tag2_); } friend 
implicitly_convertible generate( implicitly_convertible const*, random_generator g) { int* x = 0; return implicitly_convertible(generate(x, g), generate(x, g)); } friend std::ostream& operator<<( std::ostream& out, implicitly_convertible const& o) { return out << "(" << o.tag1_ << "," << o.tag2_ << ")"; } }; // Note: This is a deliberately bad hash function. class hash BOOST_FINAL { int type_; public: hash() : type_(0) {} explicit hash(int t) : type_(t) {} std::size_t operator()(object const& x) const { unsigned result; switch (type_) { case 1: result = static_cast<unsigned>(x.tag1_); break; case 2: result = static_cast<unsigned>(x.tag2_); break; default: result = static_cast<unsigned>(x.tag1_) + static_cast<unsigned>(x.tag2_); } return result; } std::size_t operator()(movable const& x) const { unsigned result; switch (type_) { case 1: result = static_cast<unsigned>(x.tag1_); break; case 2: result = static_cast<unsigned>(x.tag2_); break; default: result = static_cast<unsigned>(x.tag1_) + static_cast<unsigned>(x.tag2_); } return result; } std::size_t operator()(int x) const { unsigned result; switch (type_) { case 1: result = static_cast<unsigned>(x); break; case 2: result = static_cast<unsigned>(x) * 7; break; default: result = static_cast<unsigned>(x) * 256; } return result; } friend bool operator==(hash const& x1, hash const& x2) { return x1.type_ == x2.type_; } friend bool operator!=(hash const& x1, hash const& x2) { return x1.type_ != x2.type_; } }; std::size_t hash_value(test::object const& x) { return hash()(x); } std::size_t hash_value(test::movable const& x) { return hash()(x); } class less { int type_; public: explicit less(int t = 0) : type_(t) {} bool operator()(object const& x1, object const& x2) const { switch (type_) { case 1: return x1.tag1_ < x2.tag1_; case 2: return x1.tag2_ < x2.tag2_; default: return x1 < x2; } } bool operator()(movable const& x1, movable const& x2) const { switch (type_) { case 1: return x1.tag1_ < x2.tag1_; case 2: return 
x1.tag2_ < x2.tag2_; default: return x1 < x2; } } bool operator()(int x1, int x2) const { return x1 < x2; } friend bool operator==(less const& x1, less const& x2) { return x1.type_ == x2.type_; } }; class equal_to BOOST_FINAL { int type_; public: equal_to() : type_(0) {} explicit equal_to(int t) : type_(t) {} bool operator()(object const& x1, object const& x2) const { switch (type_) { case 1: return x1.tag1_ == x2.tag1_; case 2: return x1.tag2_ == x2.tag2_; default: return x1 == x2; } } bool operator()(movable const& x1, movable const& x2) const { switch (type_) { case 1: return x1.tag1_ == x2.tag1_; case 2: return x1.tag2_ == x2.tag2_; default: return x1 == x2; } } bool operator()(int x1, int x2) const { return x1 == x2; } friend bool operator==(equal_to const& x1, equal_to const& x2) { return x1.type_ == x2.type_; } friend bool operator!=(equal_to const& x1, equal_to const& x2) { return x1.type_ != x2.type_; } friend less create_compare(equal_to x) { return less(x.type_); } }; // allocator1 only has the old fashioned 'construct' method and has // a few less typedefs. allocator2 uses a custom pointer class. 
template <class T> class allocator1 { public: int tag_; typedef T value_type; template <class U> struct rebind { typedef allocator1<U> other; }; allocator1() : tag_(0) { detail::tracker.allocator_ref(); } explicit allocator1(int t) : tag_(t) { detail::tracker.allocator_ref(); } template <class Y> allocator1(allocator1<Y> const& x) : tag_(x.tag_) { detail::tracker.allocator_ref(); } allocator1(allocator1 const& x) : tag_(x.tag_) { detail::tracker.allocator_ref(); } ~allocator1() { detail::tracker.allocator_unref(); } T* allocate(std::size_t n) { T* ptr(static_cast<T*>(::operator new(n * sizeof(T)))); detail::tracker.track_allocate((void*)ptr, n, sizeof(T), tag_); return ptr; } T* allocate(std::size_t n, void const*) { T* ptr(static_cast<T*>(::operator new(n * sizeof(T)))); detail::tracker.track_allocate((void*)ptr, n, sizeof(T), tag_); return ptr; } void deallocate(T* p, std::size_t n) { detail::tracker.track_deallocate((void*)p, n, sizeof(T), tag_); ::operator delete((void*)p); } template <typename U, typename... Args> void construct(U* p, Args&&... args) { detail::tracker.track_construct((void*)p, sizeof(U), tag_); new (p) U(std::forward<Args>(args)...); } template <typename U> void destroy(U* p) { detail::tracker.track_destroy((void*)p, sizeof(U), tag_); p->~U(); // Work around MSVC buggy unused parameter warning. 
ignore_variable(&p); } bool operator==(allocator1 const& x) const { return tag_ == x.tag_; } bool operator!=(allocator1 const& x) const { return tag_ != x.tag_; } enum { is_select_on_copy = false, is_propagate_on_swap = false, is_propagate_on_assign = false, is_propagate_on_move = false }; }; template <class T> class ptr; template <class T> class const_ptr; struct void_ptr { #if !defined(BOOST_NO_MEMBER_TEMPLATE_FRIENDS) template <typename T> friend class ptr; private: #endif void* ptr_; public: void_ptr() : ptr_(0) {} template <typename T> explicit void_ptr(ptr<T> const& x) : ptr_(x.ptr_) {} // I'm not using the safe bool idiom because the containers should be // able to cope with bool conversions. operator bool() const { return !!ptr_; } bool operator==(void_ptr const& x) const { return ptr_ == x.ptr_; } bool operator!=(void_ptr const& x) const { return ptr_ != x.ptr_; } }; class void_const_ptr { #if !defined(BOOST_NO_MEMBER_TEMPLATE_FRIENDS) template <typename T> friend class const_ptr; private: #endif void* ptr_; public: void_const_ptr() : ptr_(0) {} template <typename T> explicit void_const_ptr(const_ptr<T> const& x) : ptr_(x.ptr_) { } // I'm not using the safe bool idiom because the containers should be // able to cope with bool conversions. 
operator bool() const { return !!ptr_; } bool operator==(void_const_ptr const& x) const { return ptr_ == x.ptr_; } bool operator!=(void_const_ptr const& x) const { return ptr_ != x.ptr_; } }; template <class T> class ptr { friend struct ::allocator1<T>; friend struct ::allocator2<T>; friend class allocator2<T>; friend class const_ptr<T>; friend struct void_ptr; T* ptr_; ptr(T* x) : ptr_(x) {} public: ptr() : ptr_(0) {} ptr(std::nullptr_t) : ptr_(nullptr) {} explicit ptr(void_ptr const& x) : ptr_((T*)x.ptr_) {} T& operator*() const { return *ptr_; } T* operator->() const { return ptr_; } ptr& operator++() { ++ptr_; return *this; } ptr operator++(int) { ptr tmp(*this); ++ptr_; return tmp; } ptr operator+(std::ptrdiff_t s) const { return ptr<T>(ptr_ + s); } friend ptr operator+(std::ptrdiff_t s, ptr p) { return ptr<T>(s + p.ptr_); } std::ptrdiff_t operator-(ptr p) const { return ptr_ - p.ptr_; } ptr operator-(std::ptrdiff_t s) const { return ptr(ptr_ - s); } ptr& operator+=(std::ptrdiff_t s) { ptr_ += s; return *this; } ptr& operator-=(std::ptrdiff_t s) { ptr_ -= s; return *this; } T& operator[](std::ptrdiff_t s) const { return ptr_[s]; } bool operator!() const { return !ptr_; } static ptr pointer_to(T& p) { return ptr(&p); } // I'm not using the safe bool idiom because the containers should be // able to cope with bool conversions. 
operator bool() const { return !!ptr_; } bool operator==(ptr const& x) const { return ptr_ == x.ptr_; } bool operator!=(ptr const& x) const { return ptr_ != x.ptr_; } bool operator<(ptr const& x) const { return ptr_ < x.ptr_; } bool operator>(ptr const& x) const { return ptr_ > x.ptr_; } bool operator<=(ptr const& x) const { return ptr_ <= x.ptr_; } bool operator>=(ptr const& x) const { return ptr_ >= x.ptr_; } }; template <class T> class const_ptr { friend class allocator2<T>; friend struct const_void_ptr; T const* ptr_; const_ptr(T const* ptr) : ptr_(ptr) {} public: const_ptr() : ptr_(0) {} const_ptr(ptr<T> const& x) : ptr_(x.ptr_) {} explicit const_ptr(void_const_ptr const& x) : ptr_((T const*)x.ptr_) {} T const& operator*() const { return *ptr_; } T const* operator->() const { return ptr_; } const_ptr& operator++() { ++ptr_; return *this; } const_ptr operator++(int) { const_ptr tmp(*this); ++ptr_; return tmp; } const_ptr operator+(std::ptrdiff_t s) const { return const_ptr(ptr_ + s); } friend const_ptr operator+(std::ptrdiff_t s, const_ptr p) { return ptr<T>(s + p.ptr_); } T const& operator[](int s) const { return ptr_[s]; } bool operator!() const { return !ptr_; } operator bool() const { return !!ptr_; } bool operator==(const_ptr const& x) const { return ptr_ == x.ptr_; } bool operator!=(const_ptr const& x) const { return ptr_ != x.ptr_; } bool operator<(const_ptr const& x) const { return ptr_ < x.ptr_; } bool operator>(const_ptr const& x) const { return ptr_ > x.ptr_; } bool operator<=(const_ptr const& x) const { return ptr_ <= x.ptr_; } bool operator>=(const_ptr const& x) const { return ptr_ >= x.ptr_; } }; template <class T> class allocator2 { #ifdef BOOST_NO_MEMBER_TEMPLATE_FRIENDS public: #else template <class> friend class allocator2; #endif int tag_; public: typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; typedef void_ptr void_pointer; typedef void_const_ptr const_void_pointer; typedef ptr<T> pointer; typedef const_ptr<T> 
const_pointer; typedef T& reference; typedef T const& const_reference; typedef T value_type; template <class U> struct rebind { typedef allocator2<U> other; }; allocator2() : tag_(0) { detail::tracker.allocator_ref(); } explicit allocator2(int t) : tag_(t) { detail::tracker.allocator_ref(); } template <class Y> allocator2(allocator2<Y> const& x) : tag_(x.tag_) { detail::tracker.allocator_ref(); } allocator2(allocator2 const& x) : tag_(x.tag_) { detail::tracker.allocator_ref(); } ~allocator2() { detail::tracker.allocator_unref(); } pointer address(reference r) { return pointer(&r); } const_pointer address(const_reference r) { return const_pointer(&r); } pointer allocate(size_type n) { pointer p(static_cast<T*>(::operator new(n * sizeof(T)))); detail::tracker.track_allocate((void*)p.ptr_, n, sizeof(T), tag_); return p; } pointer allocate(size_type n, void const*) { pointer ptr(static_cast<T*>(::operator new(n * sizeof(T)))); detail::tracker.track_allocate((void*)ptr, n, sizeof(T), tag_); return ptr; } void deallocate(pointer p, size_type n) { detail::tracker.track_deallocate((void*)p.ptr_, n, sizeof(T), tag_); ::operator delete((void*)p.ptr_); } template <class U, class... Args> void construct(U* p, Args&&... 
args) { detail::tracker.track_construct((void*)p, sizeof(U), tag_); new (p) U(std::forward<Args>(args)...); } template <class U> void destroy(U* p) { detail::tracker.track_destroy((void*)p, sizeof(U), tag_); p->~U(); } size_type max_size() const { return (std::numeric_limits<size_type>::max)(); } bool operator==(allocator2 const& x) const { return tag_ == x.tag_; } bool operator!=(allocator2 const& x) const { return tag_ != x.tag_; } enum { is_select_on_copy = false, is_propagate_on_swap = false, is_propagate_on_assign = false, is_propagate_on_move = false }; }; template <class T> bool equivalent_impl( allocator1<T> const& x, allocator1<T> const& y, test::derived_type) { return x == y; } template <class T> bool equivalent_impl( allocator2<T> const& x, allocator2<T> const& y, test::derived_type) { return x == y; } } namespace boost { template <> struct pointer_traits< ::test::void_ptr> { template <class U> struct rebind_to { typedef ::test::ptr<U> type; }; template<class U> using rebind=typename rebind_to<U>::type; }; } // namespace boost #endif
0
repos/unordered/test
repos/unordered/test/exception/less_tests.cpp
// Copyright 2023 Christian Mazakas // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "./containers.hpp" #include "../helpers/helpers.hpp" #include "../helpers/invariants.hpp" #include "../helpers/random_values.hpp" #include "../helpers/strong.hpp" #include "../helpers/tracker.hpp" #include <vector> UNORDERED_AUTO_TEST (less_osx_regression) { DISABLE_EXCEPTIONS; typedef test_pair_set::value_type value_type; typedef test::exception::object object; std::vector<value_type> v; v.push_back(value_type(object(12, 98), object(88, 13))); v.push_back(value_type(object(24, 71), object(62, 84))); v.push_back(value_type(object(30, 0), object(5, 73))); v.push_back(value_type(object(34, 64), object(79, 58))); v.push_back(value_type(object(36, 95), object(64, 23))); v.push_back(value_type(object(42, 89), object(68, 44))); v.push_back(value_type(object(42, 26), object(93, 64))); v.push_back(value_type(object(86, 86), object(16, 62))); v.push_back(value_type(object(86, 86), object(75, 23))); v.push_back(value_type(object(92, 37), object(41, 90))); BOOST_TEST_EQ(v.size(), 10u); std::set<value_type, test::exception::less> s; s.insert(v.begin(), v.end()); BOOST_TEST_EQ(s.size(), v.size()); test::ordered<test_pair_set> tracker; test_pair_set x; for (std::vector<value_type>::iterator it = v.begin(); it != v.end(); ++it) { x.insert(*it); } tracker.insert(v.begin(), v.end()); tracker.compare(x); } RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/exception/swap_exception_tests.cpp
// Copyright 2006-2009 Daniel James. // Copyright 2022 Christian Mazakas. // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #define BOOST_ENABLE_ASSERT_HANDLER #include <boost/assert.hpp> #include "./containers.hpp" #include "../helpers/invariants.hpp" #include "../helpers/random_values.hpp" #include "../helpers/tracker.hpp" #include "../objects/test.hpp" #include <sstream> namespace boost { void assertion_failed( char const* expr, char const* function, char const* file, long line) { std::stringstream ss; ss << expr << "\nin " << function << " failed at : " << file << ", line " << line; throw std::runtime_error(ss.str()); } void assertion_failed_msg(char const* expr, char const* msg, char const* function, char const* file, long line) { std::stringstream ss; ss << expr << "\nin " << function << " failed at : " << file << ", line " << line << "\n" << msg; throw std::runtime_error(ss.str()); } } // namespace boost #if defined(BOOST_MSVC) #pragma warning(disable : 4512) // assignment operator could not be generated #endif test::seed_t initialize_seed(9387); template <class T> struct self_swap_base : public test::exception_base { test::random_values<T> values; self_swap_base(std::size_t count = 0) : values(count, test::limited_range) {} typedef T data_type; T init() const { return T(values.begin(), values.end()); } void run(T& x) const { x.swap(x); DISABLE_EXCEPTIONS; test::check_container(x, this->values); test::check_equivalent_keys(x); } void check BOOST_PREVENT_MACRO_SUBSTITUTION(T const& x) const { (void)x; BOOST_ERROR("An exception leaked when it should not have. 
Allocator " "equality assertion must precede all other ops"); } }; template <class T> struct self_swap_test1 : self_swap_base<T> { }; template <class T> struct self_swap_test2 : self_swap_base<T> { self_swap_test2() : self_swap_base<T>(100) {} }; template <class T> struct swap_base : public test::exception_base { const test::random_values<T> x_values, y_values; const T initial_x, initial_y; typedef typename T::hasher hasher; typedef typename T::key_equal key_equal; typedef typename T::allocator_type allocator_type; swap_base(unsigned int count1, unsigned int count2, int tag1, int tag2) : x_values(count1, test::limited_range), y_values(count2, test::limited_range), initial_x(x_values.begin(), x_values.end(), 0, hasher(tag1), key_equal(tag1), allocator_type(tag1)), initial_y(y_values.begin(), y_values.end(), 0, hasher(tag2), key_equal(tag2), allocator_type(T::allocator_type::propagate_on_container_swap::value ? tag2 : tag1)) { } struct data_type { data_type(T const& x_, T const& y_) : x(x_), y(y_) {} T x, y; }; data_type init() const { return data_type(initial_x, initial_y); } void run(data_type& d) const { try { d.x.swap(d.y); } catch (std::runtime_error&) { } DISABLE_EXCEPTIONS; test::check_container(d.x, this->y_values); test::check_equivalent_keys(d.x); test::check_container(d.y, this->x_values); test::check_equivalent_keys(d.y); } void check BOOST_PREVENT_MACRO_SUBSTITUTION(data_type const& d) const { std::string scope(test::scope); // TODO: In C++11 exceptions are only allowed in the swap function. 
BOOST_TEST(scope == "hash::hash(hash)" || scope == "hash::operator=(hash)" || scope == "equal_to::equal_to(equal_to)" || scope == "equal_to::operator=(equal_to)"); test::check_equivalent_keys(d.x); test::check_equivalent_keys(d.y); } }; template <class T> struct swap_test1 : swap_base<T> { swap_test1() : swap_base<T>(0, 0, 0, 0) {} }; template <class T> struct swap_test2 : swap_base<T> { swap_test2() : swap_base<T>(60, 0, 0, 0) {} }; template <class T> struct swap_test3 : swap_base<T> { swap_test3() : swap_base<T>(0, 60, 0, 0) {} }; template <class T> struct swap_test4 : swap_base<T> { swap_test4() : swap_base<T>(10, 10, 1, 2) {} }; template <class T> struct unequal_alloc_swap_base : public test::exception_base { const test::random_values<T> x_values, y_values; const T initial_x, initial_y; typedef typename T::hasher hasher; typedef typename T::key_equal key_equal; typedef typename T::allocator_type allocator_type; unequal_alloc_swap_base(unsigned int count1, unsigned int count2) : x_values(count1, test::limited_range), y_values(count2, test::limited_range), initial_x(x_values.begin(), x_values.end(), 0, allocator_type(1337)), initial_y(y_values.begin(), y_values.end(), 0, allocator_type(7331)) { } struct data_type { data_type(T const& x_, T const& y_) : x(x_), y(y_) {} T x, y; }; data_type init() const { return data_type(initial_x, initial_y); } void run(data_type& d) const { bool assert_threw = false; BOOST_TEST(d.x.get_allocator() != d.y.get_allocator()); try { d.x.swap(d.y); } catch (std::runtime_error&) { assert_threw = true; } DISABLE_EXCEPTIONS; BOOST_TEST(assert_threw); test::check_container(d.x, this->x_values); test::check_equivalent_keys(d.x); test::check_container(d.y, this->y_values); test::check_equivalent_keys(d.y); } void check BOOST_PREVENT_MACRO_SUBSTITUTION(data_type const& d) const { std::string scope(test::scope); // TODO: In C++11 exceptions are only allowed in the swap function. 
BOOST_TEST(scope == "hash::hash(hash)" || scope == "hash::operator=(hash)" || scope == "equal_to::equal_to(equal_to)" || scope == "equal_to::operator=(equal_to)"); test::check_equivalent_keys(d.x); test::check_equivalent_keys(d.y); } }; template <class T> struct unequal_alloc_swap_test1 : unequal_alloc_swap_base<T> { unequal_alloc_swap_test1() : unequal_alloc_swap_base<T>(0, 0) {} }; template <class T> struct unequal_alloc_swap_test2 : unequal_alloc_swap_base<T> { unequal_alloc_swap_test2() : unequal_alloc_swap_base<T>(0, 10) {} }; template <class T> struct unequal_alloc_swap_test3 : unequal_alloc_swap_base<T> { unequal_alloc_swap_test3() : unequal_alloc_swap_base<T>(10, 0) {} }; template <class T> struct unequal_alloc_swap_test4 : unequal_alloc_swap_base<T> { unequal_alloc_swap_test4() : unequal_alloc_swap_base<T>(10, 10) {} }; #if defined(BOOST_UNORDERED_FOA_TESTS) using unordered_flat_set = boost::unordered_flat_set<int, boost::hash<int>, std::equal_to<int>, test::allocator1<int> >; using unordered_flat_map = boost::unordered_flat_map<int, int, boost::hash<int>, std::equal_to<int>, test::allocator1<std::pair<int const, int> > >; using unordered_node_set = boost::unordered_node_set<int, boost::hash<int>, std::equal_to<int>, test::allocator1<int> >; using unordered_node_map = boost::unordered_node_map<int, int, boost::hash<int>, std::equal_to<int>, test::allocator1<std::pair<int const, int> > >; #define SWAP_CONTAINER_SEQ \ (unordered_flat_set)(unordered_flat_map) \ (unordered_node_set)(unordered_node_map) #else typedef boost::unordered_set<int, boost::hash<int>, std::equal_to<int>, test::allocator1<int> > unordered_set; typedef boost::unordered_map<int, int, boost::hash<int>, std::equal_to<int>, test::allocator1<std::pair<int const, int> > > unordered_map; typedef boost::unordered_multiset<int, boost::hash<int>, std::equal_to<int>, test::allocator1<int> > unordered_multiset; typedef boost::unordered_multimap<int, int, boost::hash<int>, std::equal_to<int>, 
test::allocator1<std::pair<int const, int> > > unordered_multimap; #define SWAP_CONTAINER_SEQ \ (unordered_set)(unordered_map)(unordered_multiset)(unordered_multimap) #endif // FOA containers deliberately choose to not offer the strong exception // guarantee so we can't reliably test what happens if swapping one of the data // members throws // // clang-format off #if !defined(BOOST_UNORDERED_FOA_TESTS) EXCEPTION_TESTS( (self_swap_test1)(self_swap_test2) (swap_test1)(swap_test2)(swap_test3)(swap_test4), CONTAINER_SEQ) #endif // want to prove that when assertions are defined as throwing operations that we // uphold invariants EXCEPTION_TESTS( (unequal_alloc_swap_test1)(unequal_alloc_swap_test2) (unequal_alloc_swap_test3)(unequal_alloc_swap_test4), SWAP_CONTAINER_SEQ) // clang-format on RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/exception/constructor_exception_tests.cpp
// Copyright 2006-2009 Daniel James. // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "./containers.hpp" #include "../helpers/input_iterator.hpp" #include "../helpers/invariants.hpp" #include "../helpers/random_values.hpp" #include "../helpers/tracker.hpp" template <typename T> inline void avoid_unused_warning(T const&) {} test::seed_t initialize_seed(91274); struct objects { test::exception::object obj; test::exception::hash hash; test::exception::equal_to equal_to; test::exception::allocator<test::exception::object> allocator; }; template <class T> struct construct_test1 : public objects, test::exception_base { void run() const { T x; DISABLE_EXCEPTIONS; BOOST_TEST(x.empty()); test::check_equivalent_keys(x); } }; template <class T> struct construct_test2 : public objects, test::exception_base { void run() const { T x(300); DISABLE_EXCEPTIONS; BOOST_TEST(x.empty()); test::check_equivalent_keys(x); } }; template <class T> struct construct_test3 : public objects, test::exception_base { void run() const { T x(0, hash); DISABLE_EXCEPTIONS; BOOST_TEST(x.empty()); test::check_equivalent_keys(x); } }; template <class T> struct construct_test4 : public objects, test::exception_base { void run() const { T x(0, hash, equal_to); DISABLE_EXCEPTIONS; BOOST_TEST(x.empty()); test::check_equivalent_keys(x); } }; template <class T> struct construct_test5 : public objects, test::exception_base { void run() const { T x(50, hash, equal_to, allocator); DISABLE_EXCEPTIONS; BOOST_TEST(x.empty()); test::check_equivalent_keys(x); } }; template <class T> struct construct_test6 : public objects, test::exception_base { void run() const { T x(allocator); DISABLE_EXCEPTIONS; BOOST_TEST(x.empty()); test::check_equivalent_keys(x); } }; template <class T> struct range : public test::exception_base { test::random_values<T> values; range() : values(5, test::limited_range) {} 
range(unsigned int count) : values(count, test::limited_range) {} }; template <class T> struct range_construct_test1 : public range<T>, objects { void run() const { T x(this->values.begin(), this->values.end()); DISABLE_EXCEPTIONS; test::check_container(x, this->values); test::check_equivalent_keys(x); } }; template <class T> struct range_construct_test2 : public range<T>, objects { void run() const { T x(this->values.begin(), this->values.end(), 0); DISABLE_EXCEPTIONS; test::check_container(x, this->values); test::check_equivalent_keys(x); } }; template <class T> struct range_construct_test3 : public range<T>, objects { void run() const { T x(this->values.begin(), this->values.end(), 0, hash); DISABLE_EXCEPTIONS; test::check_container(x, this->values); test::check_equivalent_keys(x); } }; template <class T> struct range_construct_test4 : public range<T>, objects { void run() const { T x(this->values.begin(), this->values.end(), 100, hash, equal_to); DISABLE_EXCEPTIONS; test::check_container(x, this->values); test::check_equivalent_keys(x); } }; // Need to run at least one test with a fairly large number // of objects in case it triggers a rehash. 
template <class T> struct range_construct_test5 : public range<T>, objects { range_construct_test5() : range<T>(60) {} void run() const { T x(this->values.begin(), this->values.end(), 0, hash, equal_to, allocator); DISABLE_EXCEPTIONS; test::check_container(x, this->values); test::check_equivalent_keys(x); } }; template <class T> struct input_range_construct_test : public range<T>, objects { input_range_construct_test() : range<T>(60) {} void run() const { typename test::random_values<T>::const_iterator begin = this->values.begin(), end = this->values.end(); T x(test::input_iterator(begin), test::input_iterator(end), 0, hash, equal_to, allocator); DISABLE_EXCEPTIONS; test::check_container(x, this->values); test::check_equivalent_keys(x); } }; template <class T> struct copy_range_construct_test : public range<T>, objects { copy_range_construct_test() : range<T>(60) {} void run() const { T x(test::copy_iterator(this->values.begin()), test::copy_iterator(this->values.end()), 0, hash, equal_to, allocator); DISABLE_EXCEPTIONS; test::check_container(x, this->values); test::check_equivalent_keys(x); } }; // clang-format off EXCEPTION_TESTS( (construct_test1)(construct_test2)(construct_test3)(construct_test4) (construct_test5)(construct_test6)(range_construct_test1) (range_construct_test2)(range_construct_test3)(range_construct_test4) (range_construct_test5)(input_range_construct_test) (copy_range_construct_test), CONTAINER_SEQ) // clang-format on RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/exception/move_assign_exception_tests.cpp
// Copyright 2006-2009 Daniel James.
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// Exception-safety tests for container move assignment (x1 = std::move(y1))
// over the containers in CONTAINER_SEQ (see ./containers.hpp). The
// EXCEPTION_TESTS harness re-runs run() repeatedly, injecting an exception at
// successive exception points, and calls check() after each injected failure.

#include "./containers.hpp"
#include "../helpers/invariants.hpp"
#include "../helpers/random_values.hpp"
#include "../helpers/tracker.hpp"

#if defined(BOOST_MSVC)
#pragma warning( \
  disable : 4512) // move_assignment operator could not be generated
#endif

test::seed_t initialize_seed(12847);

// Base fixture: two containers x and y built with independently tagged
// hasher/key_equal/allocator (tag1 for x, tag2 for y). The test move-assigns
// a copy of y onto a copy of x.
template <class T> struct move_assign_base : public test::exception_base
{
  test::random_values<T> x_values, y_values; // expected contents of x and y
  T x, y;                                    // the containers under test
  int t1, t2;                                // tags used to construct x and y

  typedef typename T::hasher hasher;
  typedef typename T::key_equal key_equal;
  typedef typename T::allocator_type allocator_type;

  move_assign_base(int tag1, int tag2, float mlf1 = 1.0, float mlf2 = 1.0)
      : x_values(), y_values(),
        x(0, hasher(tag1), key_equal(tag1), allocator_type(tag1)),
        y(0, hasher(tag2), key_equal(tag2), allocator_type(tag2)), t1(tag1),
        t2(tag2)
  {
    x.max_load_factor(mlf1);
    y.max_load_factor(mlf2);
  }

  typedef T data_type;
  T init() const { return T(x); }

  // Executed under exception injection. The copy of y is made with
  // exceptions disabled so that only the move assignment itself is
  // exercised by the injected failures.
  void run(T& x1) const
  {
    test::exceptions_enable disable_exceptions(false);
    T y1 = y;
    disable_exceptions.release();

    x1 = std::move(y1);

    DISABLE_EXCEPTIONS;
    test::check_container(x1, y_values);
    test::check_equivalent_keys(x1);
  }

  // Invariant check after an injected exception: whichever hash function the
  // container ended up with, its key_equal must carry the matching tag (hash
  // and key_equal are always transferred as a pair).
  void check BOOST_PREVENT_MACRO_SUBSTITUTION(T const& x1) const
  {
    test::check_equivalent_keys(x1);

    if (x1.hash_function() == hasher(t1)) {
      BOOST_TEST(x1.key_eq() == key_equal(t1));
    }
    if (x1.hash_function() == hasher(t2)) {
      BOOST_TEST(x1.key_eq() == key_equal(t2));
    }
    if (x1.key_eq() == key_equal(t1)) {
      BOOST_TEST(x1.hash_function() == hasher(t1));
    }
    if (x1.key_eq() == key_equal(t2)) {
      BOOST_TEST(x1.hash_function() == hasher(t2));
    }

    // If the container is empty at the point of the exception, the
    // internal structure is hidden, this exposes it, at the cost of
    // messing up the data.
    if (x_values.size()) {
      T& x2 = const_cast<T&>(x1);
      x2.emplace(*x_values.begin());
      test::check_equivalent_keys(x2);
    }
  }
};

// Fixture that pre-fills x with count1 values and y with count2 values.
template <class T> struct move_assign_values : move_assign_base<T>
{
  move_assign_values(unsigned int count1, unsigned int count2, int tag1,
    int tag2, float mlf1 = 1.0, float mlf2 = 1.0)
      : move_assign_base<T>(tag1, tag2, mlf1, mlf2)
  {
    this->x_values.fill(count1, test::limited_range);
    this->y_values.fill(count2, test::limited_range);
    this->x.insert(this->x_values.begin(), this->x_values.end());
    this->y.insert(this->y_values.begin(), this->y_values.end());
  }
};

// empty = move(empty)
template <class T> struct move_assign_test1 : move_assign_values<T>
{
  move_assign_test1() : move_assign_values<T>(0, 0, 0, 0) {}
};

// non-empty = move(empty)
template <class T> struct move_assign_test2 : move_assign_values<T>
{
  move_assign_test2() : move_assign_values<T>(60, 0, 0, 0) {}
};

// empty = move(non-empty)
template <class T> struct move_assign_test3 : move_assign_values<T>
{
  move_assign_test3() : move_assign_values<T>(0, 60, 0, 0) {}
};

// non-empty = move(non-empty), with differently tagged
// hashers/equality/allocators on either side
template <class T> struct move_assign_test4 : move_assign_values<T>
{
  move_assign_test4() : move_assign_values<T>(10, 10, 1, 2) {}
};

// As test4, but the source is much larger than the target.
template <class T> struct move_assign_test4a : move_assign_values<T>
{
  move_assign_test4a() : move_assign_values<T>(10, 100, 1, 2) {}
};

// Differing max load factors (0.1 forces many buckets on the source side).
template <class T> struct move_assign_test5 : move_assign_values<T>
{
  move_assign_test5() : move_assign_values<T>(5, 60, 0, 0, 1.0f, 0.1f) {}
};

// Every value is inserted twice so the containers hold equivalent keys
// (relevant to the multi-containers in CONTAINER_SEQ).
template <class T> struct equivalent_test1 : move_assign_base<T>
{
  equivalent_test1() : move_assign_base<T>(0, 0)
  {
    test::random_values<T> x_values2(10, test::limited_range);
    this->x_values.insert(x_values2.begin(), x_values2.end());
    this->x_values.insert(x_values2.begin(), x_values2.end());
    test::random_values<T> y_values2(10, test::limited_range);
    this->y_values.insert(y_values2.begin(), y_values2.end());
    this->y_values.insert(y_values2.begin(), y_values2.end());
    this->x.insert(this->x_values.begin(), this->x_values.end());
    this->y.insert(this->y_values.begin(), this->y_values.end());
  }
};

// clang-format off
EXCEPTION_TESTS(
  (move_assign_test1)(move_assign_test2)(move_assign_test3)
  (move_assign_test4)(move_assign_test4a)(move_assign_test5)
  (equivalent_test1),
  CONTAINER_SEQ)
// clang-format on

RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/exception/insert_exception_tests.cpp
// Copyright 2006-2009 Daniel James.
// Copyright 2022-2023 Christian Mazakas.
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// Exception-safety tests for the single-element and range insert/emplace
// operations. Each "inserter" function object below wraps one insertion API;
// the generic test drivers run them under the exception-injection harness
// (ENABLE_EXCEPTIONS / EXCEPTION_LOOP, defined in the test helpers).

#include "./containers.hpp"
#include "../helpers/helpers.hpp"
#include "../helpers/invariants.hpp"
#include "../helpers/random_values.hpp"
#include "../helpers/strong.hpp"
#include "../helpers/tracker.hpp"
#include <boost/tuple/tuple.hpp>
#include <cmath>
#include <string>

test::seed_t initialize_seed(747373);

// Fill in a container so that it's about to rehash
template <typename T> void rehash_prep(T& x)
{
  using namespace std;
  typedef typename T::size_type size_type;

  x.max_load_factor(0.25);
  size_type bucket_count = x.bucket_count();
  // One fewer than the element count that would trigger a rehash.
  size_type initial_elements = static_cast<size_type>(
    ceil((double)bucket_count * (double)x.max_load_factor()) - 1);
  test::random_values<T> v(initial_elements);
  x.insert(v.begin(), v.end());
  // Bucket count unchanged => no rehash has happened yet.
  BOOST_TEST(bucket_count == x.bucket_count());
}

// Overload to generate inserters that need type information.
template <typename Inserter, typename T>
Inserter generate(Inserter inserter, T&)
{
  return inserter;
}

// Get the iterator returned from an insert/emplace.
template <typename T> T get_iterator(T const& x) { return x; }

template <typename T> T get_iterator(std::pair<T, bool> const& x)
{
  return x.first;
}

// Generic insert exception test for typical single element inserts..
template <typename T, typename Inserter, typename Values>
void insert_exception_test_impl(T x, Inserter insert, Values const& v)
{
  test::strong<T> strong;
  test::ordered<T> tracker;
  tracker.insert(x.begin(), x.end());

  try {
    ENABLE_EXCEPTIONS;
    for (typename Values::const_iterator it = v.begin(); it != v.end(); ++it) {
      // Snapshot the container state before each insertion so the strong
      // exception-safety guarantee can be checked if this insert throws.
      strong.store(x, test::detail::tracker.count_allocations);
      insert(x, it);
    }
  } catch (...) {
    test::check_equivalent_keys(x);
    insert.exception_check(x, strong);
    throw;
  }

  // No exception this pass: the container must hold exactly the tracked
  // contents.
  test::check_equivalent_keys(x);
  insert.track(tracker, v.begin(), v.end());
  tracker.compare(x);
}

// Simple insert exception test
template <typename T, typename Inserter>
void insert_exception_test(T*, Inserter insert, test::random_generator gen)
{
  for (int i = 0; i < 5; ++i) {
    test::random_values<T> v(10, gen);
    T x;
    EXCEPTION_LOOP(insert_exception_test_impl(x, generate(insert, x), v))
  }
}

// Insert into a container which is about to hit its max load, so that it
// rehashes.
template <typename T, typename Inserter>
void insert_rehash_exception_test(
  T*, Inserter insert, test::random_generator gen)
{
  for (int i = 0; i < 5; ++i) {
    T x(1);
    rehash_prep(x);
    test::random_values<T> v2(5, gen);
    EXCEPTION_LOOP(insert_exception_test_impl(x, generate(insert, x), v2))
  }
}

// Various methods for inserting a single element

struct inserter_base
{
  // The strong guarantee is only checked when the exception did not come
  // from the hash function.
  template <typename T> void exception_check(T& x, test::strong<T>& strong)
  {
    std::string scope(test::scope);
    if (scope.find("hash::operator()") == std::string::npos)
      strong.test(x, test::detail::tracker.count_allocations);
  }

  template <typename T, typename Iterator>
  void track(T& tracker, Iterator begin, Iterator end)
  {
    tracker.insert(begin, end);
  }
};

// insert(value)
struct insert_lvalue_type : inserter_base
{
  template <typename T, typename Iterator> void operator()(T& x, Iterator it)
  {
    x.insert(*it);
  }
} insert_lvalue;

// insert(hint, value) with begin() as the hint
struct insert_lvalue_begin_type : inserter_base
{
  template <typename T, typename Iterator> void operator()(T& x, Iterator it)
  {
    x.insert(x.begin(), *it);
  }
} insert_lvalue_begin;

// insert(hint, value) with end() as the hint
struct insert_lvalue_end_type : inserter_base
{
  template <typename T, typename Iterator> void operator()(T& x, Iterator it)
  {
    x.insert(x.end(), *it);
  }
} insert_lvalue_end;

// insert(hint, value), reusing the iterator returned by the previous insert
// as the next hint.
template <typename T> struct insert_lvalue_pos_type_impl : inserter_base
{
  typename T::iterator pos;

  insert_lvalue_pos_type_impl(T& x) : pos(x.begin()) {}

  template <typename Iterator> void operator()(T& x, Iterator it)
  {
    pos = get_iterator(x.insert(pos, *it));
  }
};

struct insert_lvalue_pos_type
{
  // Stateful inserter: requires the container type, so it hooks into the
  // generate() customization point above.
  template <typename T>
  friend insert_lvalue_pos_type_impl<T> generate(insert_lvalue_pos_type, T& x)
  {
    return insert_lvalue_pos_type_impl<T>(x);
  }
} insert_lvalue_pos;

// insert(first, last) with a single-element range
struct insert_single_item_range_type : inserter_base
{
  template <typename T, typename Iterator> void operator()(T& x, Iterator it)
  {
    x.insert(it, test::next(it));
  }
} insert_single_item_range;

// emplace(value)
struct emplace_lvalue_type : inserter_base
{
  template <typename T, typename Iterator> void operator()(T& x, Iterator it)
  {
    x.emplace(*it);
  }
} emplace_lvalue;

// emplace_hint(begin(), value)
struct emplace_lvalue_begin_type : inserter_base
{
  template <typename T, typename Iterator> void operator()(T& x, Iterator it)
  {
    x.emplace_hint(x.begin(), *it);
  }
} emplace_lvalue_begin;

// emplace_hint(end(), value)
struct emplace_lvalue_end_type : inserter_base
{
  template <typename T, typename Iterator> void operator()(T& x, Iterator it)
  {
    x.emplace_hint(x.end(), *it);
  }
} emplace_lvalue_end;

// emplace_hint with the previously returned iterator as the hint
template <typename T> struct emplace_lvalue_pos_type_impl : inserter_base
{
  typename T::iterator pos;

  emplace_lvalue_pos_type_impl(T& x) : pos(x.begin()) {}

  template <typename Iterator> void operator()(T& x, Iterator it)
  {
    pos = get_iterator(x.emplace_hint(pos, *it));
  }
};

struct emplace_lvalue_pos_type
{
  template <typename T>
  friend emplace_lvalue_pos_type_impl<T> generate(
    emplace_lvalue_pos_type, T& x)
  {
    return emplace_lvalue_pos_type_impl<T>(x);
  }
} emplace_lvalue_pos;

// Run the exception tests in various combinations.

using test::default_generator;
using test::limited_range;
using test::generate_collisions;

#ifdef BOOST_UNORDERED_FOA_TESTS
test_set* test_set_;
test_map* test_map_;
test_node_set* test_node_set_;
test_node_map* test_node_map_;

// clang-format off
UNORDERED_TEST(insert_exception_test,
  ((test_set_)(test_map_)(test_node_set_)(test_node_map_))
  ((insert_lvalue)(insert_lvalue_begin)(insert_lvalue_end)
   (insert_lvalue_pos)(insert_single_item_range)
   (emplace_lvalue)(emplace_lvalue_begin)(emplace_lvalue_end)
   (emplace_lvalue_pos)
  )
  ((default_generator)(limited_range)(generate_collisions))
)

UNORDERED_TEST(insert_rehash_exception_test,
  ((test_set_)(test_map_)(test_node_set_)(test_node_map_))
  ((insert_lvalue)(insert_lvalue_begin)(insert_lvalue_end)
   (insert_lvalue_pos)(insert_single_item_range)
   (emplace_lvalue)(emplace_lvalue_begin)(emplace_lvalue_end)
   (emplace_lvalue_pos)
  )
  ((default_generator)(limited_range)(generate_collisions))
)
// clang-format on
#else
test_set* test_set_;
test_multiset* test_multiset_;
test_map* test_map_;
test_multimap* test_multimap_;

// clang-format off
UNORDERED_TEST(insert_exception_test,
  ((test_set_)(test_multiset_)(test_map_)(test_multimap_))
  ((insert_lvalue)(insert_lvalue_begin)(insert_lvalue_end)
   (insert_lvalue_pos)(insert_single_item_range)
   (emplace_lvalue)(emplace_lvalue_begin)(emplace_lvalue_end)
   (emplace_lvalue_pos)
  )
  ((default_generator)(limited_range)(generate_collisions))
)

UNORDERED_TEST(insert_rehash_exception_test,
  ((test_set_)(test_multiset_)(test_map_)(test_multimap_))
  ((insert_lvalue)(insert_lvalue_begin)(insert_lvalue_end)
   (insert_lvalue_pos)(insert_single_item_range)
   (emplace_lvalue)(emplace_lvalue_begin)(emplace_lvalue_end)
   (emplace_lvalue_pos)
  )
  ((default_generator)(limited_range)(generate_collisions))
)
// clang-format on
#endif

// Repeat insert tests with pairs

// emplace(piecewise_construct, key-tuple, mapped-tuple)
struct pair_emplace_type : inserter_base
{
  template <typename T, typename Iterator> void operator()(T& x, Iterator it)
  {
#ifdef BOOST_UNORDERED_FOA_TESTS
    x.emplace(std::piecewise_construct, std::make_tuple(it->first),
      std::make_tuple(it->second));
#else
    x.emplace(boost::unordered::piecewise_construct,
      boost::make_tuple(it->first), boost::make_tuple(it->second));
#endif
  }
} pair_emplace;

// emplace_hint(begin(), piecewise_construct, ...) where the mapped value is
// built from its two tag arguments.
struct pair_emplace2_type : inserter_base
{
  template <typename T, typename Iterator> void operator()(T& x, Iterator it)
  {
#ifdef BOOST_UNORDERED_FOA_TESTS
    x.emplace_hint(x.begin(), std::piecewise_construct,
      std::make_tuple(it->first),
      std::make_tuple(it->second.tag1_, it->second.tag2_));
#else
    x.emplace_hint(x.begin(), boost::unordered::piecewise_construct,
      boost::make_tuple(it->first),
      boost::make_tuple(it->second.tag1_, it->second.tag2_));
#endif
  }
} pair_emplace2;

#ifdef BOOST_UNORDERED_FOA_TESTS
test_pair_set* test_pair_set_;
test_pair_node_set* test_pair_node_set_;

// clang-format off
UNORDERED_TEST(insert_exception_test,
  ((test_pair_set_)(test_map_)(test_pair_node_set_)(test_node_map_))
  ((pair_emplace)(pair_emplace2))
  ((default_generator)(limited_range)(generate_collisions))
)
UNORDERED_TEST(insert_rehash_exception_test,
  ((test_pair_set_)(test_map_)(test_pair_node_set_)(test_node_map_))
  ((pair_emplace)(pair_emplace2))
  ((default_generator)(limited_range)(generate_collisions))
)
// clang-format on
#else
test_pair_set* test_pair_set_;
test_pair_multiset* test_pair_multiset_;

// clang-format off
UNORDERED_TEST(insert_exception_test,
  ((test_pair_set_)(test_pair_multiset_)(test_map_)(test_multimap_))
  ((pair_emplace)(pair_emplace2))
  ((default_generator)(limited_range)(generate_collisions))
)
UNORDERED_TEST(insert_rehash_exception_test,
  ((test_pair_set_)(test_pair_multiset_)(test_map_)(test_multimap_))
  ((pair_emplace)(pair_emplace2))
  ((default_generator)(limited_range)(generate_collisions))
)
// clang-format on
#endif

// Test inserting using operator[]

// try_emplace(key, mapped)
struct try_emplace_type : inserter_base
{
  template <typename T, typename Iterator> void operator()(T& x, Iterator it)
  {
    x.try_emplace(it->first, it->second);
  }
} try_emplace;

// try_emplace(key, arg1, arg2) constructing the mapped value in place
struct try_emplace2_type : inserter_base
{
  template <typename T, typename Iterator> void operator()(T& x, Iterator it)
  {
    x.try_emplace(it->first, it->second.tag1_, it->second.tag2_);
  }
} try_emplace2;

// Map-specific inserters can overwrite an existing mapped value, so both
// the tracking and the strong-guarantee check differ from inserter_base.
struct map_inserter_base
{
  template <typename T> void exception_check(T& x, test::strong<T>& strong)
  {
    std::string scope(test::scope);
    // No strong guarantee expected when the exception came from the hash
    // function or from assignment to the mapped value.
    if (scope.find("hash::operator()") == std::string::npos &&
        scope.find("::operator=") == std::string::npos)
      strong.test(x, test::detail::tracker.count_allocations);
  }

  template <typename T, typename Iterator>
  void track(T& tracker, Iterator begin, Iterator end)
  {
    // Later values for the same key overwrite earlier ones.
    for (; begin != end; ++begin) {
      tracker[begin->first] = begin->second;
    }
  }
};

// x[key] = value
struct map_insert_operator_type : map_inserter_base
{
  template <typename T, typename Iterator> void operator()(T& x, Iterator it)
  {
    x[it->first] = it->second;
  }
} map_insert_operator;

// insert_or_assign(key, value)
struct map_insert_or_assign_type : map_inserter_base
{
  template <typename T, typename Iterator> void operator()(T& x, Iterator it)
  {
    x.insert_or_assign(it->first, it->second);
  }
} map_insert_or_assign;

#ifdef BOOST_UNORDERED_FOA_TESTS
// clang-format off
UNORDERED_TEST(insert_exception_test,
  ((test_map_)(test_node_map_))
  ((try_emplace)(try_emplace2)(map_insert_operator)(map_insert_or_assign))
  ((default_generator)(limited_range)(generate_collisions))
)
UNORDERED_TEST(insert_rehash_exception_test,
  ((test_map_)(test_node_map_))
  ((try_emplace)(try_emplace2)(map_insert_operator)(map_insert_or_assign))
  ((default_generator)(limited_range)(generate_collisions))
)
// clang-format on
#else
// clang-format off
UNORDERED_TEST(insert_exception_test,
  ((test_map_))
  ((try_emplace)(try_emplace2)(map_insert_operator)(map_insert_or_assign))
  ((default_generator)(limited_range)(generate_collisions))
)
UNORDERED_TEST(insert_rehash_exception_test,
  ((test_map_))
  ((try_emplace)(try_emplace2)(map_insert_operator)(map_insert_or_assign))
  ((default_generator)(limited_range)(generate_collisions))
)
// clang-format on
#endif

// Range insert tests

template <typename T, typename Values>
void insert_range_exception_test_impl(T x, Values const& v)
{
  test::ordered<T> tracker;
  tracker.insert(x.begin(), x.end());

  try {
    ENABLE_EXCEPTIONS;
    x.insert(v.begin(), v.end());
  } catch (...) {
    test::check_equivalent_keys(x);
    throw;
  }

  test::check_equivalent_keys(x);
  tracker.insert(v.begin(), v.end());
  tracker.compare(x);
}

template <typename T>
void insert_range_exception_test(T*, test::random_generator gen)
{
  for (int i = 0; i < 5; ++i) {
    test::random_values<T> v(10, gen);
    T x;
    EXCEPTION_LOOP(insert_range_exception_test_impl(x, v))
  }
}

// Range insert into a container that is about to rehash.
template <typename T>
void insert_range_rehash_exception_test(T*, test::random_generator gen)
{
  for (int i = 0; i < 5; ++i) {
    T x(1);
    rehash_prep(x);
    test::random_values<T> v2(5, gen);
    EXCEPTION_LOOP(insert_range_exception_test_impl(x, v2))
  }
}

#ifdef BOOST_UNORDERED_FOA_TESTS
// clang-format off
UNORDERED_TEST(insert_range_exception_test,
  ((test_set_)(test_map_)(test_node_set_)(test_node_map_))
  ((default_generator)(limited_range)(generate_collisions))
)
UNORDERED_TEST(insert_range_rehash_exception_test,
  ((test_set_)(test_map_)(test_node_set_)(test_node_map_))
  ((default_generator)(limited_range)(generate_collisions))
)
// clang-format on
#else
// clang-format off
UNORDERED_TEST(insert_range_exception_test,
  ((test_set_)(test_multiset_)(test_map_)(test_multimap_))
  ((default_generator)(limited_range)(generate_collisions))
)
UNORDERED_TEST(insert_range_rehash_exception_test,
  ((test_set_)(test_multiset_)(test_map_)(test_multimap_))
  ((default_generator)(limited_range)(generate_collisions))
)
// clang-format on
#endif

RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/exception/merge_exception_tests.cpp
// Copyright 2017-2018 Daniel James.
// Copyright 2022-2023 Christian Mazakas.
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// Exception-safety tests for x.merge(y) across same- and (in the non-FOA
// build) mixed-uniqueness container combinations.

#include "../helpers/exception_test.hpp"
#include "../helpers/invariants.hpp"
#include "../helpers/metafunctions.hpp"
#include "../helpers/random_values.hpp"
#include "./containers.hpp"

// Merge y into x under exception injection and verify the invariants that
// must hold whether or not an exception fired.
template <typename T1, typename T2> void merge_exception_test(T1 x, T2 y)
{
  std::size_t size = x.size() + y.size();

  try {
    ENABLE_EXCEPTIONS;
    x.merge(y);
  } catch (...) {
    test::check_equivalent_keys(x);
    test::check_equivalent_keys(y);
    throw;
  }

  // Not a full check, just want to make sure the merge completed.
  BOOST_TEST(size == x.size() + y.size());
  if (y.size()) {
    // Elements may only be left behind in y when the target has unique
    // keys, and then each leftover key must already exist in x.
    BOOST_TEST(test::has_unique_keys<T1>::value);
    for (typename T2::iterator it = y.begin(); it != y.end(); ++it) {
      BOOST_TEST(x.find(test::get_key<T2>(*it)) != x.end());
    }
  }
  test::check_equivalent_keys(x);
  test::check_equivalent_keys(y);
}

// Driver: count12 and tag12 each pack two 8-bit values — the high byte for
// container 1 and the low byte for container 2.
template <typename T1, typename T2>
void merge_exception_test(T1 const*, T2 const*, std::size_t count12,
  int tag12, test::random_generator gen1, test::random_generator gen2)
{
  std::size_t count1 = count12 / 256;
  std::size_t count2 = count12 % 256;
  int tag1 = tag12 / 256;
  int tag2 = tag12 % 256;
  test::random_values<T1> v1(count1, gen1);
  test::random_values<T2> v2(count2, gen2);
  T1 x(v1.begin(), v1.end(), 0, test::exception::hash(tag1),
    test::exception::equal_to(tag1));
  T2 y(v2.begin(), v2.end(), 0, test::exception::hash(tag2),
    test::exception::equal_to(tag2));
  EXCEPTION_LOOP(merge_exception_test(x, y))
}

using test::default_generator;
using test::generate_collisions;
using test::limited_range;

#ifdef BOOST_UNORDERED_FOA_TESTS
boost::unordered_flat_set<test::exception::object, test::exception::hash,
  test::exception::equal_to,
  test::exception::allocator<test::exception::object> >* test_set_;
boost::unordered_flat_map<test::exception::object, test::exception::object,
  test::exception::hash, test::exception::equal_to,
  test::exception::allocator2<test::exception::object> >* test_map_;
boost::unordered_node_set<test::exception::object, test::exception::hash,
  test::exception::equal_to,
  test::exception::allocator<test::exception::object> >* test_node_set_;
boost::unordered_node_map<test::exception::object, test::exception::object,
  test::exception::hash, test::exception::equal_to,
  test::exception::allocator2<test::exception::object> >* test_node_map_;

// clang-format off
UNORDERED_MULTI_TEST(set_merge, merge_exception_test,
  ((test_set_))
  ((test_set_))
  ((0x0000)(0x6400)(0x0064)(0x0a64)(0x3232))
  ((0x0000)(0x0001)(0x0102))
  ((default_generator)(limited_range))
  ((default_generator)(limited_range))
)
UNORDERED_MULTI_TEST(map_merge, merge_exception_test,
  ((test_map_))
  ((test_map_))
  ((0x0000)(0x6400)(0x0064)(0x0a64)(0x3232))
  ((0x0101)(0x0200)(0x0201))
  ((default_generator)(limited_range))
  ((default_generator)(limited_range))
)
// Run fewer generate_collisions tests, as they're slow.
UNORDERED_MULTI_TEST(set_merge_collisions, merge_exception_test,
  ((test_set_))
  ((test_set_))
  ((0x0a0a))
  ((0x0202)(0x0100)(0x0201))
  ((generate_collisions))
  ((generate_collisions))
)
UNORDERED_MULTI_TEST(map_merge_collisions, merge_exception_test,
  ((test_map_))
  ((test_map_))
  ((0x0a0a))
  ((0x0000)(0x0002)(0x0102))
  ((generate_collisions))
  ((generate_collisions))
)
UNORDERED_MULTI_TEST(node_set_merge, merge_exception_test,
  ((test_node_set_))
  ((test_node_set_))
  (/* (0x0000)(0x6400) */(0x0064)/* (0x0a64)(0x3232) */)
  ((0x0000)(0x0001)(0x0102))
  ((default_generator)(limited_range))
  ((default_generator)(limited_range))
)
UNORDERED_MULTI_TEST(node_map_merge, merge_exception_test,
  ((test_node_map_))
  ((test_node_map_))
  ((0x0000)(0x6400)(0x0064)(0x0a64)(0x3232))
  ((0x0101)(0x0200)(0x0201))
  ((default_generator)(limited_range))
  ((default_generator)(limited_range))
)
// Run fewer generate_collisions tests, as they're slow.
UNORDERED_MULTI_TEST(node_set_merge_collisions, merge_exception_test,
  ((test_node_set_))
  ((test_node_set_))
  ((0x0a0a))
  ((0x0202)(0x0100)(0x0201))
  ((generate_collisions))
  ((generate_collisions))
)
UNORDERED_MULTI_TEST(node_map_merge_collisions, merge_exception_test,
  ((test_node_map_))
  ((test_node_map_))
  ((0x0a0a))
  ((0x0000)(0x0002)(0x0102))
  ((generate_collisions))
  ((generate_collisions))
)
// clang-format on
#else
boost::unordered_set<test::exception::object, test::exception::hash,
  test::exception::equal_to,
  test::exception::allocator<test::exception::object> >* test_set_;
boost::unordered_multiset<test::exception::object, test::exception::hash,
  test::exception::equal_to,
  test::exception::allocator<test::exception::object> >* test_multiset_;
boost::unordered_map<test::exception::object, test::exception::object,
  test::exception::hash, test::exception::equal_to,
  test::exception::allocator2<test::exception::object> >* test_map_;
boost::unordered_multimap<test::exception::object, test::exception::object,
  test::exception::hash, test::exception::equal_to,
  test::exception::allocator2<test::exception::object> >* test_multimap_;

// clang-format off
UNORDERED_MULTI_TEST(set_merge, merge_exception_test,
  ((test_set_)(test_multiset_))
  ((test_set_)(test_multiset_))
  ((0x0000)(0x6400)(0x0064)(0x0a64)(0x3232))
  ((0x0000)(0x0001)(0x0102))
  ((default_generator)(limited_range))
  ((default_generator)(limited_range))
)
UNORDERED_MULTI_TEST(map_merge, merge_exception_test,
  ((test_map_)(test_multimap_))
  ((test_map_)(test_multimap_))
  ((0x0000)(0x6400)(0x0064)(0x0a64)(0x3232))
  ((0x0101)(0x0200)(0x0201))
  ((default_generator)(limited_range))
  ((default_generator)(limited_range))
)
// Run fewer generate_collisions tests, as they're slow.
UNORDERED_MULTI_TEST(set_merge_collisions, merge_exception_test,
  ((test_set_)(test_multiset_))
  ((test_set_)(test_multiset_))
  ((0x0a0a))
  ((0x0202)(0x0100)(0x0201))
  ((generate_collisions))
  ((generate_collisions))
)
UNORDERED_MULTI_TEST(map_merge_collisions, merge_exception_test,
  ((test_map_)(test_multimap_))
  ((test_map_)(test_multimap_))
  ((0x0a0a))
  ((0x0000)(0x0002)(0x0102))
  ((generate_collisions))
  ((generate_collisions))
)
// clang-format on
#endif

RUN_TESTS_QUIET()
0
repos/unordered/test
repos/unordered/test/exception/assign_exception_tests.cpp
// Copyright 2006-2009 Daniel James.
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// Exception-safety tests for copy assignment (x1 = y), including deliberate
// self-assignment, over the containers in CONTAINER_SEQ.

#include "./containers.hpp"
#include "../helpers/invariants.hpp"
#include "../helpers/random_values.hpp"
#include "../helpers/tracker.hpp"

#if defined(BOOST_MSVC)
#pragma warning(disable : 4512) // assignment operator could not be generated
#endif

// The self-assignment below (x = x) is intentional; silence clang's warning.
#if defined(__clang__) && defined(__has_warning)
#if __has_warning("-Wself-assign-overloaded")
#pragma clang diagnostic ignored "-Wself-assign-overloaded"
#endif
#endif

test::seed_t initialize_seed(12847);

// Self-assignment must leave the container's contents untouched even when
// exceptions are injected.
template <class T> struct self_assign_base : public test::exception_base
{
  test::random_values<T> values;
  self_assign_base(std::size_t count = 0) : values(count, test::limited_range)
  {
  }

  typedef T data_type;
  T init() const { return T(values.begin(), values.end()); }
  void run(T& x) const
  {
    x = x; // intentional self-assignment
    DISABLE_EXCEPTIONS;
    test::check_container(x, values);
    test::check_equivalent_keys(x);
  }
  void check BOOST_PREVENT_MACRO_SUBSTITUTION(T const& x) const
  {
    test::check_equivalent_keys(x);
  }
};

// self-assignment of an empty container
template <class T> struct self_assign_test1 : self_assign_base<T>
{
};

// self-assignment of a populated container
template <class T> struct self_assign_test2 : self_assign_base<T>
{
  self_assign_test2() : self_assign_base<T>(100) {}
};

// Base fixture for x1 = y: x and y carry independently tagged
// hasher/key_equal/allocator (tag1 for x, tag2 for y).
template <class T> struct assign_base : public test::exception_base
{
  test::random_values<T> x_values, y_values; // expected contents of x and y
  T x, y;                                    // the containers under test
  int t1;                                    // tag used to construct x
  int t2;                                    // tag used to construct y

  typedef typename T::hasher hasher;
  typedef typename T::key_equal key_equal;
  typedef typename T::allocator_type allocator_type;

  assign_base(int tag1, int tag2, float mlf1 = 1.0, float mlf2 = 1.0)
      : x_values(), y_values(),
        x(0, hasher(tag1), key_equal(tag1), allocator_type(tag1)),
        y(0, hasher(tag2), key_equal(tag2), allocator_type(tag2)), t1(tag1),
        t2(tag2)
  {
    x.max_load_factor(mlf1);
    y.max_load_factor(mlf2);
  }

  typedef T data_type;
  T init() const { return T(x); }
  void run(T& x1) const
  {
    x1 = y;
    DISABLE_EXCEPTIONS;
    test::check_container(x1, y_values);
    test::check_equivalent_keys(x1);
  }

  // Invariant check after an injected exception: whichever hash function
  // the container ended up with, its key_equal must carry the matching tag.
  void check BOOST_PREVENT_MACRO_SUBSTITUTION(T const& x1) const
  {
    test::check_equivalent_keys(x1);

    if (x1.hash_function() == hasher(t1)) {
      BOOST_TEST(x1.key_eq() == key_equal(t1));
    }
    if (x1.hash_function() == hasher(t2)) {
      BOOST_TEST(x1.key_eq() == key_equal(t2));
    }
    if (x1.key_eq() == key_equal(t1)) {
      BOOST_TEST(x1.hash_function() == hasher(t1));
    }
    if (x1.key_eq() == key_equal(t2)) {
      BOOST_TEST(x1.hash_function() == hasher(t2));
    }

    // If the container is empty at the point of the exception, the
    // internal structure is hidden, this exposes it, at the cost of
    // messing up the data.
    if (x_values.size()) {
      T& x2 = const_cast<T&>(x1);
      x2.emplace(*x_values.begin());
      test::check_equivalent_keys(x2);
    }
  }
};

// Fixture that pre-fills x with count1 values and y with count2 values,
// generated by gen.
template <class T> struct assign_values : assign_base<T>
{
  assign_values(unsigned int count1, unsigned int count2, int tag1, int tag2,
    test::random_generator gen = test::default_generator, float mlf1 = 1.0,
    float mlf2 = 1.0)
      : assign_base<T>(tag1, tag2, mlf1, mlf2)
  {
    this->x_values.fill(count1, gen);
    this->y_values.fill(count2, gen);
    this->x.insert(this->x_values.begin(), this->x_values.end());
    this->y.insert(this->y_values.begin(), this->y_values.end());
  }
};

// empty = empty
template <class T> struct assign_test1 : assign_values<T>
{
  assign_test1() : assign_values<T>(0, 0, 0, 0) {}
};

// non-empty = empty
template <class T> struct assign_test2 : assign_values<T>
{
  assign_test2() : assign_values<T>(60, 0, 0, 0) {}
};

template <class T> struct assign_test2a : assign_values<T>
{
  assign_test2a() : assign_values<T>(60, 0, 0, 0, test::limited_range) {}
};

// empty = non-empty
template <class T> struct assign_test3 : assign_values<T>
{
  assign_test3() : assign_values<T>(0, 60, 0, 0) {}
};

template <class T> struct assign_test3a : assign_values<T>
{
  assign_test3a() : assign_values<T>(0, 60, 0, 0, test::limited_range) {}
};

// non-empty = non-empty with differently tagged function objects/allocators
template <class T> struct assign_test4 : assign_values<T>
{
  assign_test4() : assign_values<T>(10, 10, 1, 2) {}
};

template <class T> struct assign_test4a : assign_values<T>
{
  assign_test4a() : assign_values<T>(10, 100, 1, 2) {}
};

template <class T> struct assign_test4b : assign_values<T>
{
  assign_test4b() : assign_values<T>(10, 100, 1, 2, test::limited_range) {}
};

// differing max load factors
template <class T> struct assign_test5 : assign_values<T>
{
  assign_test5()
      : assign_values<T>(5, 60, 0, 0, test::default_generator, 1.0f, 0.1f)
  {
  }
};

// Every value is inserted twice so the containers hold equivalent keys
// (relevant to the multi-containers in CONTAINER_SEQ).
template <class T> struct equivalent_test1 : assign_base<T>
{
  equivalent_test1() : assign_base<T>(0, 0)
  {
    test::random_values<T> x_values2(10);
    this->x_values.insert(x_values2.begin(), x_values2.end());
    this->x_values.insert(x_values2.begin(), x_values2.end());
    test::random_values<T> y_values2(10);
    this->y_values.insert(y_values2.begin(), y_values2.end());
    this->y_values.insert(y_values2.begin(), y_values2.end());
    this->x.insert(this->x_values.begin(), this->x_values.end());
    this->y.insert(this->y_values.begin(), this->y_values.end());
  }
};

// clang-format off
EXCEPTION_TESTS_REPEAT(5,
  (self_assign_test1)(self_assign_test2)
  (assign_test1)(assign_test2)(assign_test2a)
  (assign_test3)(assign_test3a)
  (assign_test4)(assign_test4a)(assign_test4b)
  (assign_test5)
  (equivalent_test1),
  CONTAINER_SEQ)
// clang-format on

RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/exception/containers.hpp
// Copyright 2006-2009 Daniel James.
// Copyright 2022 Christian Mazakas.
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// Container typedefs shared by the exception tests. All containers use the
// instrumented hash/equal_to/allocator types from ../objects/exception.hpp,
// which throw at harness-controlled exception points. The two allocator
// templates (allocator/allocator2) are deliberately spread across the
// typedefs — presumably to exercise both variants; confirm against
// ../objects/exception.hpp. NOTE(review): no include guard — appears to be
// intended for a single include per test translation unit.

#include "../helpers/unordered.hpp"
#include "../objects/exception.hpp"

#ifdef BOOST_UNORDERED_FOA_TESTS
// Open-addressing (flat) and node-based FOA containers.
typedef boost::unordered_flat_set<test::exception::object,
  test::exception::hash, test::exception::equal_to,
  test::exception::allocator<test::exception::object> >
  test_set;
typedef boost::unordered_flat_map<test::exception::object,
  test::exception::object, test::exception::hash, test::exception::equal_to,
  test::exception::allocator2<test::exception::object> >
  test_map;
typedef boost::unordered_flat_set<
  std::pair<test::exception::object, test::exception::object>,
  test::exception::hash, test::exception::equal_to,
  test::exception::allocator<test::exception::object> >
  test_pair_set;
typedef boost::unordered_node_set<test::exception::object,
  test::exception::hash, test::exception::equal_to,
  test::exception::allocator<test::exception::object> >
  test_node_set;
typedef boost::unordered_node_map<test::exception::object,
  test::exception::object, test::exception::hash, test::exception::equal_to,
  test::exception::allocator2<test::exception::object> >
  test_node_map;
typedef boost::unordered_node_set<
  std::pair<test::exception::object, test::exception::object>,
  test::exception::hash, test::exception::equal_to,
  test::exception::allocator<test::exception::object> >
  test_pair_node_set;

// Container sequences consumed by the EXCEPTION_TESTS/UNORDERED_TEST macros.
#define CONTAINER_SEQ (test_set)(test_map)(test_node_set)(test_node_map)
#define CONTAINER_PAIR_SEQ \
  (test_pair_set)(test_map)(test_pair_node_set)(test_node_map)
#else
// Classic closed-addressing containers (unique- and multi-key variants).
typedef boost::unordered_set<test::exception::object, test::exception::hash,
  test::exception::equal_to,
  test::exception::allocator<test::exception::object> >
  test_set;
typedef boost::unordered_multiset<test::exception::object,
  test::exception::hash, test::exception::equal_to,
  test::exception::allocator2<test::exception::object> >
  test_multiset;
typedef boost::unordered_map<test::exception::object,
  test::exception::object, test::exception::hash, test::exception::equal_to,
  test::exception::allocator2<test::exception::object> >
  test_map;
typedef boost::unordered_multimap<test::exception::object,
  test::exception::object, test::exception::hash, test::exception::equal_to,
  test::exception::allocator<test::exception::object> >
  test_multimap;
typedef boost::unordered_set<
  std::pair<test::exception::object, test::exception::object>,
  test::exception::hash, test::exception::equal_to,
  test::exception::allocator<test::exception::object> >
  test_pair_set;
typedef boost::unordered_multiset<
  std::pair<test::exception::object, test::exception::object>,
  test::exception::hash, test::exception::equal_to,
  test::exception::allocator2<test::exception::object> >
  test_pair_multiset;

// Container sequences consumed by the EXCEPTION_TESTS/UNORDERED_TEST macros.
#define CONTAINER_SEQ (test_set)(test_multiset)(test_map)(test_multimap)
#define CONTAINER_PAIR_SEQ \
  (test_pair_set)(test_pair_multiset)(test_map)(test_multimap)
#endif
0
repos/unordered/test
repos/unordered/test/exception/rehash_exception_tests.cpp
// Copyright 2006-2009 Daniel James.
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// Exception-safety tests for rehash(), varying the initial element count and
// the initial bucket-count hint.

#include "./containers.hpp"
#include "../helpers/invariants.hpp"
#include "../helpers/random_values.hpp"
#include "../helpers/strong.hpp"
#include "../helpers/tracker.hpp"
#include <string>

test::seed_t initialize_seed(3298597);

// Base fixture: a container holding `count` random values, constructed with
// bucket-count hint `n_`.
template <class T> struct rehash_test_base : public test::exception_base
{
  test::random_values<T> values; // expected contents
  unsigned int n;                // bucket-count hint passed at construction

  rehash_test_base(unsigned int count = 100, unsigned int n_ = 0)
      : values(count, test::limited_range), n(n_)
  {
  }

  typedef T data_type;
  typedef test::strong<T> strong_type;

  data_type init() const
  {
    T x(values.begin(), values.end(), n);
    return x;
  }

  // After an injected exception: the strong guarantee is only checked when
  // the exception did not come from the hash/equality functions.
  void check BOOST_PREVENT_MACRO_SUBSTITUTION(
    T const& x, strong_type const& strong) const
  {
    std::string scope(test::scope);

    if (scope.find("hash::operator()") == std::string::npos &&
        scope.find("equal_to::operator()") == std::string::npos &&
        scope != "operator==(object, object)")
      strong.test(x);

    test::check_equivalent_keys(x);
  }
};

// empty container, rehash(0)
template <class T> struct rehash_test0 : rehash_test_base<T>
{
  rehash_test0() : rehash_test_base<T>(0) {}

  void run(T& x) const
  {
    x.rehash(0);
    DISABLE_EXCEPTIONS;
    test::check_container(x, this->values);
    test::check_equivalent_keys(x);
  }
};

// empty container, rehash grows the table
template <class T> struct rehash_test1 : rehash_test_base<T>
{
  rehash_test1() : rehash_test_base<T>(0) {}

  void run(T& x) const
  {
    x.rehash(200);
    DISABLE_EXCEPTIONS;
    test::check_container(x, this->values);
    test::check_equivalent_keys(x);
  }
};

// empty container built with a large bucket hint, rehash(0)
template <class T> struct rehash_test2 : rehash_test_base<T>
{
  rehash_test2() : rehash_test_base<T>(0, 200) {}

  void run(T& x) const
  {
    x.rehash(0);
    DISABLE_EXCEPTIONS;
    test::check_container(x, this->values);
    test::check_equivalent_keys(x);
  }
};

// small container, rehash grows the table
template <class T> struct rehash_test3 : rehash_test_base<T>
{
  rehash_test3() : rehash_test_base<T>(10, 0) {}

  void run(T& x) const
  {
    x.rehash(200);
    DISABLE_EXCEPTIONS;
    test::check_container(x, this->values);
    test::check_equivalent_keys(x);
  }
};

// small container with a large bucket hint, rehash(0)
template <class T> struct rehash_test4 : rehash_test_base<T>
{
  rehash_test4() : rehash_test_base<T>(10, 200) {}

  void run(T& x) const
  {
    x.rehash(0);
    DISABLE_EXCEPTIONS;
    test::check_container(x, this->values);
    test::check_equivalent_keys(x);
  }
};

// large container with a small bucket hint, rehash(0)
template <class T> struct rehash_test5 : rehash_test_base<T>
{
  rehash_test5() : rehash_test_base<T>(200, 10) {}

  void run(T& x) const
  {
    x.rehash(0);
    DISABLE_EXCEPTIONS;
    test::check_container(x, this->values);
    test::check_equivalent_keys(x);
  }
};

// clang-format off
EXCEPTION_TESTS(
  (rehash_test0)(rehash_test1)(rehash_test2)(rehash_test3)(rehash_test4)
  (rehash_test5),
  CONTAINER_SEQ)
// clang-format on

RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/exception/copy_exception_tests.cpp
// Copyright 2006-2009 Daniel James. // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "./containers.hpp" #include "../helpers/invariants.hpp" #include "../helpers/random_values.hpp" #include "../helpers/tracker.hpp" template <typename T> inline void avoid_unused_warning(T const&) {} test::seed_t initialize_seed(73041); template <class T> struct copy_test1 : public test::exception_base { T x; void run() const { T y(x); DISABLE_EXCEPTIONS; BOOST_TEST(y.empty()); test::check_equivalent_keys(y); } }; template <class T> struct copy_test2 : public test::exception_base { test::random_values<T> values; T x; copy_test2() : values(5, test::limited_range), x(values.begin(), values.end()) { } void run() const { T y(x); DISABLE_EXCEPTIONS; test::check_container(y, this->values); test::check_equivalent_keys(y); } }; template <class T> struct copy_test3 : public test::exception_base { test::random_values<T> values; T x; copy_test3() : values(100), x(values.begin(), values.end()) {} void run() const { T y(x); DISABLE_EXCEPTIONS; test::check_container(y, this->values); test::check_equivalent_keys(y); } }; template <class T> struct copy_test3a : public test::exception_base { test::random_values<T> values; T x; copy_test3a() : values(100, test::limited_range), x(values.begin(), values.end()) { } void run() const { T y(x); DISABLE_EXCEPTIONS; test::check_container(y, this->values); test::check_equivalent_keys(y); } }; template <class T> struct copy_with_allocator_test : public test::exception_base { test::random_values<T> values; T x; test::exception::allocator<test::exception::object> allocator; copy_with_allocator_test() : values(100), x(values.begin(), values.end()) {} void run() const { T y(x, allocator); DISABLE_EXCEPTIONS; test::check_container(y, this->values); test::check_equivalent_keys(y); } }; // clang-format off EXCEPTION_TESTS( 
(copy_test1)(copy_test2)(copy_test3)(copy_test3a)(copy_with_allocator_test), CONTAINER_SEQ) // clang-format on RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/exception/erase_exception_tests.cpp
// Copyright 2006-2009 Daniel James. // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "./containers.hpp" #include "../helpers/helpers.hpp" #include "../helpers/invariants.hpp" #include "../helpers/random_values.hpp" test::seed_t initialize_seed(835193); template <class T> struct erase_test_base : public test::exception_base { test::random_values<T> values; erase_test_base(unsigned int count = 5) : values(count, test::limited_range) { } typedef T data_type; data_type init() const { return T(values.begin(), values.end()); } void check BOOST_PREVENT_MACRO_SUBSTITUTION(T const& x) const { std::string scope(test::scope); BOOST_TEST(scope.find("hash::") != std::string::npos || scope.find("equal_to::") != std::string::npos || scope == "operator==(object, object)"); test::check_equivalent_keys(x); } }; template <class T> struct erase_by_key_test1 : public erase_test_base<T> { void run(T& x) const { typedef typename test::random_values<T>::const_iterator iterator; for (iterator it = this->values.begin(), end = this->values.end(); it != end; ++it) { x.erase(test::get_key<T>(*it)); } DISABLE_EXCEPTIONS; BOOST_TEST(x.empty()); test::check_equivalent_keys(x); } }; EXCEPTION_TESTS((erase_by_key_test1), CONTAINER_SEQ) RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/cfoa/rw_spinlock_test2.cpp
// Copyright 2023 Peter Dimov // Distributed under the Boost Software License, Version 1.0. // https://www.boost.org/LICENSE_1_0.txt #include <boost/unordered/detail/foa/rw_spinlock.hpp> #include <boost/core/lightweight_test.hpp> #include <mutex> using boost::unordered::detail::foa::rw_spinlock; static rw_spinlock sp; static rw_spinlock sp2; int main() { BOOST_TEST( sp.try_lock() ); BOOST_TEST( !sp.try_lock() ); BOOST_TEST( sp2.try_lock() ); BOOST_TEST( !sp.try_lock() ); BOOST_TEST( !sp2.try_lock() ); sp.unlock(); sp2.unlock(); sp.lock(); BOOST_TEST( !sp.try_lock() ); sp2.lock(); BOOST_TEST( !sp.try_lock() ); BOOST_TEST( !sp2.try_lock() ); sp.unlock(); sp2.unlock(); { std::lock_guard<rw_spinlock> lock( sp ); BOOST_TEST( !sp.try_lock() ); std::lock_guard<rw_spinlock> lock2( sp2 ); BOOST_TEST( !sp.try_lock() ); BOOST_TEST( !sp2.try_lock() ); } return boost::report_errors(); }
0
repos/unordered/test
repos/unordered/test/cfoa/rw_spinlock_test.cpp
// Copyright 2023 Peter Dimov // Distributed under the Boost Software License, Version 1.0. // https://www.boost.org/LICENSE_1_0.txt #include <boost/unordered/detail/foa/rw_spinlock.hpp> #include <mutex> using boost::unordered::detail::foa::rw_spinlock; // Sanity check only static rw_spinlock sp; static rw_spinlock sp2; int main() { sp.lock(); sp2.lock(); sp.unlock(); sp2.unlock(); { std::lock_guard<rw_spinlock> lock( sp ); std::lock_guard<rw_spinlock> lock2( sp2 ); } }
0
repos/unordered/test
repos/unordered/test/cfoa/erase_tests.cpp
// Copyright (C) 2023 Christian Mazakas // Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "helpers.hpp" #include <boost/unordered/concurrent_flat_map.hpp> #include <boost/unordered/concurrent_flat_set.hpp> #include <boost/core/ignore_unused.hpp> namespace { test::seed_t initialize_seed(3292023); struct lvalue_eraser_type { template <class T, class X> void operator()(std::vector<T>& values, X& x) { static constexpr auto value_type_cardinality = value_cardinality<typename X::value_type>::value; std::atomic<std::uint64_t> num_erased{0}; auto const old_size = x.size(); auto const old_dc = +raii::default_constructor; auto const old_cc = +raii::copy_constructor; auto const old_mc = +raii::move_constructor; auto const old_d = +raii::destructor; BOOST_TEST_EQ(raii::default_constructor + raii::copy_constructor + raii::move_constructor, raii::destructor + value_type_cardinality * x.size()); thread_runner(values, [&values, &num_erased, &x](boost::span<T>) { for (auto const& v : values) { auto count = x.erase(get_key(v)); num_erased += count; BOOST_TEST_LE(count, 1u); BOOST_TEST_GE(count, 0u); } }); BOOST_TEST_EQ(raii::default_constructor, old_dc); BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); BOOST_TEST_EQ(raii::destructor, old_d + value_type_cardinality * old_size); BOOST_TEST_EQ(x.size(), 0u); BOOST_TEST(x.empty()); BOOST_TEST_EQ(num_erased, old_size); } } lvalue_eraser; struct transp_lvalue_eraser_type { template <class T, class X> void operator()(std::vector<T>& values, X& x) { static constexpr auto value_type_cardinality = value_cardinality<typename X::value_type>::value; std::atomic<std::uint64_t> num_erased{0}; auto const old_size = x.size(); auto const old_dc = +raii::default_constructor; auto const old_cc = +raii::copy_constructor; auto const old_mc = 
+raii::move_constructor; auto const old_d = +raii::destructor; BOOST_TEST_EQ(raii::default_constructor + raii::copy_constructor + raii::move_constructor, raii::destructor + value_type_cardinality * x.size()); thread_runner(values, [&num_erased, &x](boost::span<T> s) { for (auto const& v : s) { auto count = x.erase(get_key(v).x_); num_erased += count; BOOST_TEST_LE(count, 1u); BOOST_TEST_GE(count, 0u); } }); BOOST_TEST_EQ(raii::default_constructor, old_dc); BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); BOOST_TEST_EQ( raii::destructor, old_d + value_type_cardinality * num_erased); BOOST_TEST_EQ(x.size(), 0u); BOOST_TEST(x.empty()); BOOST_TEST_EQ(num_erased, old_size); } } transp_lvalue_eraser; struct lvalue_eraser_if_type { template <class T, class X> void operator()(std::vector<T>& values, X& x) { using value_type = typename X::value_type; static constexpr auto value_type_cardinality = value_cardinality<value_type>::value; // concurrent_flat_set visit is always const access using arg_type = typename std::conditional< std::is_same<typename X::key_type, typename X::value_type>::value, typename X::value_type const, typename X::value_type >::type; std::atomic<std::uint64_t> num_erased{0}; auto const old_size = x.size(); auto const old_dc = +raii::default_constructor; auto const old_cc = +raii::copy_constructor; auto const old_mc = +raii::move_constructor; auto const old_d = +raii::destructor; auto max = 0; x.visit_all([&max](value_type const& v) { if (get_value(v).x_ > max) { max = get_value(v).x_; } }); auto threshold = max / 2; auto expected_erasures = 0u; x.visit_all([&expected_erasures, threshold](value_type const& v) { if (get_value(v).x_ > threshold) { ++expected_erasures; } }); thread_runner(values, [&num_erased, &x, threshold](boost::span<T> s) { for (auto const& v : s) { auto count = x.erase_if(get_key(v), [threshold](arg_type& w) { return get_value(w).x_ > threshold; }); num_erased += count; BOOST_TEST_LE(count, 
1u); BOOST_TEST_GE(count, 0u); } }); BOOST_TEST_EQ(num_erased, expected_erasures); BOOST_TEST_EQ(x.size(), old_size - num_erased); BOOST_TEST_EQ(raii::default_constructor, old_dc); BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); BOOST_TEST_EQ( raii::destructor, old_d + value_type_cardinality * num_erased); } } lvalue_eraser_if; struct transp_lvalue_eraser_if_type { template <class T, class X> void operator()(std::vector<T>& values, X& x) { using value_type = typename X::value_type; static constexpr auto value_type_cardinality = value_cardinality<value_type>::value; // concurrent_flat_set visit is always const access using arg_type = typename std::conditional< std::is_same<typename X::key_type, typename X::value_type>::value, typename X::value_type const, typename X::value_type >::type; std::atomic<std::uint64_t> num_erased{0}; auto const old_size = x.size(); auto const old_dc = +raii::default_constructor; auto const old_cc = +raii::copy_constructor; auto const old_mc = +raii::move_constructor; auto const old_d = +raii::destructor; auto max = 0; x.visit_all([&max](value_type const& v) { if (get_value(v).x_ > max) { max = get_value(v).x_; } }); auto threshold = max / 2; auto expected_erasures = 0u; x.visit_all([&expected_erasures, threshold](value_type const& v) { if (get_value(v).x_ > threshold) { ++expected_erasures; } }); thread_runner(values, [&num_erased, &x, threshold](boost::span<T> s) { for (auto const& v : s) { auto count = x.erase_if(get_key(v).x_, [threshold](arg_type& w) { return get_value(w).x_ > threshold; }); num_erased += count; BOOST_TEST_LE(count, 1u); BOOST_TEST_GE(count, 0u); } }); BOOST_TEST_EQ(num_erased, expected_erasures); BOOST_TEST_EQ(x.size(), old_size - num_erased); BOOST_TEST_EQ(raii::default_constructor, old_dc); BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); BOOST_TEST_EQ( raii::destructor, old_d + value_type_cardinality * num_erased); } } 
transp_lvalue_eraser_if; struct erase_if_type { template <class T, class X> void operator()(std::vector<T>& values, X& x) { using value_type = typename X::value_type; static constexpr auto value_type_cardinality = value_cardinality<value_type>::value; // concurrent_flat_set visit is always const access using arg_type = typename std::conditional< std::is_same<typename X::key_type, typename X::value_type>::value, typename X::value_type const, typename X::value_type >::type; std::atomic<std::uint64_t> num_erased{0}; auto const old_size = x.size(); auto const old_dc = +raii::default_constructor; auto const old_cc = +raii::copy_constructor; auto const old_mc = +raii::move_constructor; auto const old_d = +raii::destructor; auto max = 0; x.visit_all([&max](value_type const& v) { if (get_value(v).x_ > max) { max = get_value(v).x_; } }); auto threshold = max / 2; auto expected_erasures = 0u; x.visit_all([&expected_erasures, threshold](value_type const& v) { if (get_value(v).x_ > threshold) { ++expected_erasures; } }); thread_runner( values, [&num_erased, &x, threshold](boost::span<T> /* s */) { for (std::size_t i = 0; i < 128; ++i) { auto count = x.erase_if( [threshold](arg_type& v) { return get_value(v).x_ > threshold; }); num_erased += count; } }); BOOST_TEST_EQ(num_erased, expected_erasures); BOOST_TEST_EQ(x.size(), old_size - num_erased); BOOST_TEST_EQ(raii::default_constructor, old_dc); BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); BOOST_TEST_EQ( raii::destructor, old_d + value_type_cardinality * num_erased); } } erase_if; struct free_fn_erase_if_type { template <class T, class X> void operator()(std::vector<T>& values, X& x) { using value_type = typename X::value_type; static constexpr auto value_type_cardinality = value_cardinality<value_type>::value; // concurrent_flat_set visit is always const access using arg_type = typename std::conditional< std::is_same<typename X::key_type, typename X::value_type>::value, typename 
X::value_type const, typename X::value_type >::type; std::atomic<std::uint64_t> num_erased{0}; auto const old_size = x.size(); auto const old_dc = +raii::default_constructor; auto const old_cc = +raii::copy_constructor; auto const old_mc = +raii::move_constructor; auto const old_d = +raii::destructor; auto max = 0; x.visit_all([&max](value_type const& v) { if (get_value(v).x_ > max) { max = get_value(v).x_; } }); auto threshold = max / 2; auto expected_erasures = 0u; x.visit_all([&expected_erasures, threshold](value_type const& v) { if (get_value(v).x_ > threshold) { ++expected_erasures; } }); thread_runner( values, [&num_erased, &x, threshold](boost::span<T> /* s */) { for (std::size_t i = 0; i < 128; ++i) { auto count = boost::unordered::erase_if(x, [threshold](arg_type& v) { return get_value(v).x_ > threshold; }); num_erased += count; } }); BOOST_TEST_EQ(num_erased, expected_erasures); BOOST_TEST_EQ(x.size(), old_size - num_erased); BOOST_TEST_EQ(raii::default_constructor, old_dc); BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); BOOST_TEST_EQ( raii::destructor, old_d + value_type_cardinality * num_erased); } } free_fn_erase_if; struct erase_if_exec_policy_type { template <class T, class X> void operator()(std::vector<T>& values, X& x) { #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) using value_type = typename X::value_type; static constexpr auto value_type_cardinality = value_cardinality<value_type>::value; // concurrent_flat_set visit is always const access using arg_type = typename std::conditional< std::is_same<typename X::key_type, typename X::value_type>::value, typename X::value_type const, typename X::value_type >::type; std::atomic<std::uint64_t> num_invokes{0}; auto const old_size = x.size(); auto const old_dc = +raii::default_constructor; auto const old_cc = +raii::copy_constructor; auto const old_mc = +raii::move_constructor; auto const old_d = +raii::destructor; auto max = 0; 
x.visit_all([&max](value_type const& v) { if (get_value(v).x_ > max) { max = get_value(v).x_; } }); auto threshold = max / 2; auto expected_erasures = 0u; x.visit_all([&expected_erasures, threshold](value_type const& v) { if (get_value(v).x_ > threshold) { ++expected_erasures; } }); thread_runner(values, [&num_invokes, &x, threshold](boost::span<T> s) { (void)s; x.erase_if( std::execution::par, [&num_invokes, threshold](arg_type& v) { ++num_invokes; return get_value(v).x_ > threshold; }); }); BOOST_TEST_GE(+num_invokes, old_size); BOOST_TEST_LE(+num_invokes, old_size * num_threads); BOOST_TEST_EQ(raii::default_constructor, old_dc); BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); BOOST_TEST_EQ( raii::destructor, old_d + value_type_cardinality * expected_erasures); #else (void)values; (void)x; #endif } } erase_if_exec_policy; template <class X, class GF, class F> void erase(X*, GF gen_factory, F eraser, test::random_generator rg) { auto gen = gen_factory.template get<X>(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); auto reference_cont = reference_container<X>(values.begin(), values.end()); raii::reset_counts(); { X x; x.insert(values.begin(), values.end()); BOOST_TEST_EQ(x.size(), reference_cont.size()); test_fuzzy_matches_reference(x, reference_cont, rg); eraser(values, x); test_fuzzy_matches_reference(x, reference_cont, rg); } check_raii_counts(); } boost::unordered::concurrent_flat_map<raii, raii>* map; boost::unordered::concurrent_flat_set<raii>* set; boost::unordered::concurrent_flat_map<raii, raii, transp_hash, transp_key_equal>* transparent_map; boost::unordered::concurrent_flat_set<raii, transp_hash, transp_key_equal>* transparent_set; } // namespace using test::default_generator; using test::limited_range; using test::sequential; // clang-format off UNORDERED_TEST( erase, ((map)(set)) ((value_type_generator_factory)(init_type_generator_factory)) 
((lvalue_eraser)(lvalue_eraser_if)(erase_if)(free_fn_erase_if)(erase_if_exec_policy)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( erase, ((transparent_map)(transparent_set)) ((value_type_generator_factory)(init_type_generator_factory)) ((transp_lvalue_eraser)(transp_lvalue_eraser_if)(erase_if_exec_policy)) ((default_generator)(sequential)(limited_range))) // clang-format on RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/cfoa/fwd_tests.cpp
// Copyright (C) 2023 Christian Mazakas // Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "helpers.hpp" #include <boost/config/workaround.hpp> #include <boost/unordered/concurrent_flat_map_fwd.hpp> #include <boost/unordered/concurrent_flat_set_fwd.hpp> #include <limits> test::seed_t initialize_seed{32304628}; using test::default_generator; using test::limited_range; using test::sequential; template <class T> void swap_call(boost::unordered::concurrent_flat_map<T, T>& x1, boost::unordered::concurrent_flat_map<T, T>& x2) { swap(x1, x2); } template <class T> bool equal_call(boost::unordered::concurrent_flat_map<T, T>& x1, boost::unordered::concurrent_flat_map<T, T>& x2) { return x1 == x2; } template <class T> bool unequal_call(boost::unordered::concurrent_flat_map<T, T>& x1, boost::unordered::concurrent_flat_map<T, T>& x2) { return x1 != x2; } template <class T> void swap_call(boost::unordered::concurrent_flat_set<T>& x1, boost::unordered::concurrent_flat_set<T>& x2) { swap(x1, x2); } template <class T> bool equal_call(boost::unordered::concurrent_flat_set<T>& x1, boost::unordered::concurrent_flat_set<T>& x2) { return x1 == x2; } template <class T> bool unequal_call(boost::unordered::concurrent_flat_set<T>& x1, boost::unordered::concurrent_flat_set<T>& x2) { return x1 != x2; } #include <boost/unordered/concurrent_flat_map.hpp> #include <boost/unordered/concurrent_flat_set.hpp> using map_type = boost::unordered::concurrent_flat_map<int, int>; using set_type = boost::unordered::concurrent_flat_map<int, int>; map_type* test_map; set_type* test_set; template <typename X> void fwd_swap_call(X*) { #if !defined(BOOST_CLANG_VERSION) || \ BOOST_WORKAROUND(BOOST_CLANG_VERSION, < 30700) || \ BOOST_WORKAROUND(BOOST_CLANG_VERSION, >= 30800) // clang-3.7 seems to have a codegen bug here so we workaround it X x1, x2; 
swap_call(x1, x2); #endif } template <typename X> void fwd_equal_call(X*) { X x1, x2; BOOST_TEST(equal_call(x1, x2)); } template <typename X> void fwd_unequal_call(X*) { X x1, x2; BOOST_TEST_NOT(unequal_call(x1, x2)); } // this isn't the best place for this test but it's better than introducing a // new file template <typename X> void max_size(X*) { X x1; BOOST_TEST_EQ( x1.max_size(), std::numeric_limits<typename X::size_type>::max()); } // clang-format off UNORDERED_TEST( fwd_swap_call, ((test_map)(test_set))) UNORDERED_TEST( fwd_equal_call, ((test_map)(test_set))) UNORDERED_TEST( fwd_unequal_call, ((test_map)(test_set))) UNORDERED_TEST( max_size, ((test_map)(test_set))) // clang-format on RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/cfoa/rw_spinlock_test3.cpp
// Copyright 2023 Peter Dimov // Distributed under the Boost Software License, Version 1.0. // https://www.boost.org/LICENSE_1_0.txt #include <boost/unordered/detail/foa/rw_spinlock.hpp> #include <boost/core/lightweight_test.hpp> #include <mutex> #include <thread> #include <cstdio> using boost::unordered::detail::foa::rw_spinlock; static int count = 0; static rw_spinlock sp; void f( int k, int n ) { std::printf( "Thread %d started.\n", k ); for( int i = 0; i < n; ++i ) { std::lock_guard<rw_spinlock> lock( sp ); ++count; } std::printf( "Thread %d finished.\n", k ); } int main() { int const N = 1000000; // iterations int const M = 8; // threads std::thread th[ M ]; for( int i = 0; i < M; ++i ) { th[ i ] = std::thread( f, i, N ); } for( int i = 0; i < M; ++i ) { th[ i ].join(); } BOOST_TEST_EQ( count, N * M ); return boost::report_errors(); }
0
repos/unordered/test
repos/unordered/test/cfoa/exception_assign_tests.cpp
// Copyright (C) 2023 Christian Mazakas // Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "exception_helpers.hpp" #include <boost/unordered/concurrent_flat_map.hpp> #include <boost/unordered/concurrent_flat_set.hpp> using hasher = stateful_hash; using key_equal = stateful_key_equal; using map_type = boost::unordered::concurrent_flat_map<raii, raii, hasher, key_equal, stateful_allocator<std::pair<raii const, raii> > >; using set_type = boost::unordered::concurrent_flat_set<raii, hasher, key_equal, stateful_allocator<raii> >; map_type* test_map; set_type* test_set; std::initializer_list<map_type::value_type> map_init_list{ {raii{0}, raii{0}}, {raii{1}, raii{1}}, {raii{2}, raii{2}}, {raii{3}, raii{3}}, {raii{4}, raii{4}}, {raii{5}, raii{5}}, {raii{6}, raii{6}}, {raii{6}, raii{6}}, {raii{7}, raii{7}}, {raii{8}, raii{8}}, {raii{9}, raii{9}}, {raii{10}, raii{10}}, {raii{9}, raii{9}}, {raii{8}, raii{8}}, {raii{7}, raii{7}}, {raii{6}, raii{6}}, {raii{5}, raii{5}}, {raii{4}, raii{4}}, {raii{3}, raii{3}}, {raii{2}, raii{2}}, {raii{1}, raii{1}}, {raii{0}, raii{0}}, }; std::initializer_list<set_type::value_type> set_init_list{ raii{0}, raii{1}, raii{2}, raii{3}, raii{4}, raii{5}, raii{6}, raii{6}, raii{7}, raii{8}, raii{9}, raii{10}, raii{9}, raii{8}, raii{7}, raii{6}, raii{5}, raii{4}, raii{3}, raii{2}, raii{1}, raii{0}, }; auto test_map_and_init_list=std::make_pair(test_map,map_init_list); auto test_set_and_init_list=std::make_pair(test_set,set_init_list); namespace { test::seed_t initialize_seed(1794114520); template <class X, class GF> void copy_assign(X*, GF gen_factory, test::random_generator rg) { using allocator_type = typename X::allocator_type; auto gen = gen_factory.template get<X>(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); { raii::reset_counts(); unsigned num_throws = 0; auto begin 
= values.begin(); auto mid = values.begin() + static_cast<std::ptrdiff_t>(values.size() / 2); auto end = values.end(); auto reference_cont = reference_container<X>(begin, mid); X x( begin, mid, values.size(), hasher(1), key_equal(2), allocator_type(3)); X y( mid, end, values.size(), hasher(2), key_equal(1), allocator_type(4)); BOOST_TEST(!y.empty()); enable_exceptions(); for (std::size_t i = 0; i < 2 * alloc_throw_threshold; ++i) { try { y = x; } catch (...) { ++num_throws; } } disable_exceptions(); BOOST_TEST_GT(num_throws, 0u); test_fuzzy_matches_reference(y, reference_cont, rg); } check_raii_counts(); } template <class X, class GF> void move_assign(X*, GF gen_factory, test::random_generator rg) { using allocator_type = typename X::allocator_type; auto gen = gen_factory.template get<X>(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); { raii::reset_counts(); unsigned num_throws = 0; auto begin = values.begin(); auto mid = values.begin() + static_cast<std::ptrdiff_t>(values.size() / 2); auto end = values.end(); auto reference_cont = reference_container<X>(begin, mid); BOOST_TEST( !boost::allocator_is_always_equal<allocator_type>::type::value); BOOST_TEST(!boost::allocator_propagate_on_container_move_assignment< allocator_type>::type::value); for (std::size_t i = 0; i < 2 * alloc_throw_threshold; ++i) { disable_exceptions(); X x(begin, mid, values.size(), hasher(1), key_equal(2), allocator_type(3)); X y( mid, end, values.size(), hasher(2), key_equal(1), allocator_type(4)); enable_exceptions(); try { y = std::move(x); } catch (...) 
{ ++num_throws; } disable_exceptions(); test_fuzzy_matches_reference(y, reference_cont, rg); } BOOST_TEST_GT(num_throws, 0u); } check_raii_counts(); } template <class X, class IL> void intializer_list_assign(std::pair<X*, IL> p) { using allocator_type = typename X::allocator_type; auto init_list = p.second; { raii::reset_counts(); unsigned num_throws = 0; for (std::size_t i = 0; i < throw_threshold; ++i) { X x(0, hasher(1), key_equal(2), allocator_type(3)); enable_exceptions(); try { x = init_list; } catch (...) { ++num_throws; } disable_exceptions(); } BOOST_TEST_GT(num_throws, 0u); check_raii_counts(); } } } // namespace using test::default_generator; using test::limited_range; using test::sequential; // clang-format off UNORDERED_TEST( copy_assign, ((test_map)(test_set)) ((exception_value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( move_assign, ((test_map)(test_set)) ((exception_value_type_generator_factory)) ((default_generator)(sequential))) UNORDERED_TEST( intializer_list_assign, ((test_map_and_init_list)(test_set_and_init_list))) // clang-format on RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/cfoa/constructor_tests.cpp
// Copyright (C) 2023 Christian Mazakas
// Copyright (C) 2023 Joaquin M Lopez Munoz
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// Constructor tests for boost::unordered::concurrent_flat_map/set: every
// constructor overload is exercised, several of them concurrently via
// thread_runner, and RAII counters verify element copy/move bookkeeping.

#include "helpers.hpp"

#include <boost/unordered/concurrent_flat_map.hpp>
#include <boost/unordered/concurrent_flat_set.hpp>

test::seed_t initialize_seed(4122023);

using test::default_generator;
using test::limited_range;
using test::sequential;

// Minimal stateful allocator whose select_on_container_copy_construction()
// returns a *different* allocator (x_ + 1), so soccc behavior is observable.
template <class T> struct soccc_allocator
{
  int x_ = -1;

  using value_type = T;

  soccc_allocator() = default;
  soccc_allocator(soccc_allocator const&) = default;
  soccc_allocator(soccc_allocator&&) = default;

  soccc_allocator(int const x) : x_{x} {}

  template <class U>
  soccc_allocator(soccc_allocator<U> const& rhs) : x_{rhs.x_}
  {
  }

  T* allocate(std::size_t n)
  {
    return static_cast<T*>(::operator new(n * sizeof(T)));
  }

  void deallocate(T* p, std::size_t) { ::operator delete(p); }

  // deliberately not a copy: copies made via soccc are distinguishable
  soccc_allocator select_on_container_copy_construction() const
  {
    return {x_ + 1};
  }

  bool operator==(soccc_allocator const& rhs) const { return x_ == rhs.x_; }
  bool operator!=(soccc_allocator const& rhs) const { return x_ != rhs.x_; }
};

using hasher = stateful_hash;
using key_equal = stateful_key_equal;

using map_type = boost::unordered::concurrent_flat_map<raii, raii, hasher,
  key_equal, stateful_allocator<std::pair<raii const, raii> > >;

using set_type = boost::unordered::concurrent_flat_set<raii, hasher, key_equal,
  stateful_allocator<raii> >;

// only used as type tags by UNORDERED_TEST; never dereferenced
map_type* test_map;
set_type* test_set;

// 22 entries with duplicate keys; only 11 distinct keys (0..10), which the
// initializer_list tests below assert as the resulting size
std::initializer_list<map_type::value_type> map_init_list{
  {raii{0}, raii{0}},
  {raii{1}, raii{1}},
  {raii{2}, raii{2}},
  {raii{3}, raii{3}},
  {raii{4}, raii{4}},
  {raii{5}, raii{5}},
  {raii{6}, raii{6}},
  {raii{6}, raii{6}},
  {raii{7}, raii{7}},
  {raii{8}, raii{8}},
  {raii{9}, raii{9}},
  {raii{10}, raii{10}},
  {raii{9}, raii{9}},
  {raii{8}, raii{8}},
  {raii{7}, raii{7}},
  {raii{6}, raii{6}},
  {raii{5}, raii{5}},
  {raii{4}, raii{4}},
  {raii{3}, raii{3}},
  {raii{2}, raii{2}},
  {raii{1}, raii{1}},
  {raii{0}, raii{0}},
};

// same key pattern as map_init_list: 22 entries, 11 distinct values
std::initializer_list<set_type::value_type> set_init_list{
  raii{0}, raii{1}, raii{2}, raii{3}, raii{4}, raii{5}, raii{6}, raii{6},
  raii{7}, raii{8}, raii{9}, raii{10}, raii{9}, raii{8}, raii{7}, raii{6},
  raii{5}, raii{4}, raii{3}, raii{2}, raii{1}, raii{0},
};

auto test_map_and_init_list = std::make_pair(test_map, map_init_list);
auto test_set_and_init_list = std::make_pair(test_set, set_init_list);

namespace {
  // X() must produce an empty container
  template <class X> void default_constructor(X*)
  {
    X x;
    BOOST_TEST(x.empty());
    BOOST_TEST_EQ(x.size(), 0u);
  }

  // X(n [, hf [, eq [, a]]]): each prefix of the full ctor signature must
  // store exactly the function objects it was given (defaults otherwise)
  template <class X> void bucket_count_with_hasher_key_equal_and_allocator(X*)
  {
    using allocator_type = typename X::allocator_type;

    raii::reset_counts();

    {
      X x(0);
      BOOST_TEST(x.empty());
      BOOST_TEST_EQ(x.size(), 0u);
      BOOST_TEST_EQ(x.hash_function(), hasher());
      BOOST_TEST_EQ(x.key_eq(), key_equal());
    }

    {
      X x(0, hasher(1));
      BOOST_TEST(x.empty());
      BOOST_TEST_EQ(x.size(), 0u);
      BOOST_TEST_EQ(x.hash_function(), hasher(1));
      BOOST_TEST_EQ(x.key_eq(), key_equal());
    }

    {
      X x(0, hasher(1), key_equal(2));
      BOOST_TEST(x.empty());
      BOOST_TEST_EQ(x.size(), 0u);
      BOOST_TEST_EQ(x.hash_function(), hasher(1));
      BOOST_TEST_EQ(x.key_eq(), key_equal(2));
    }

    {
      X x(0, hasher(1), key_equal(2), allocator_type{});
      BOOST_TEST(x.empty());
      BOOST_TEST_EQ(x.size(), 0u);
      BOOST_TEST_EQ(x.hash_function(), hasher(1));
      BOOST_TEST_EQ(x.key_eq(), key_equal(2));
      BOOST_TEST(x.get_allocator() == allocator_type{});
    }
  }

  // copy construction must route the allocator through
  // select_on_container_copy_construction, so y's allocator differs from x's
  template <class X> void soccc(X*)
  {
    raii::reset_counts();

    replace_allocator<X, soccc_allocator> x, y(x);

    BOOST_TEST_EQ(y.hash_function(), x.hash_function());
    BOOST_TEST_EQ(y.key_eq(), x.key_eq());
    BOOST_TEST(y.get_allocator() != x.get_allocator());
  }

  // X(first, last [, n [, hf [, eq [, a]]]]): size can be < values.size()
  // when the generator produced duplicate keys (only sequential guarantees
  // all-distinct input)
  template <class X, class GF>
  void from_iterator_range(X*, GF gen_factory, test::random_generator rg)
  {
    using allocator_type = typename X::allocator_type;

    auto gen = gen_factory.template get<X>();
    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
    auto reference_cont =
      reference_container<X>(values.begin(), values.end());

    raii::reset_counts();

    {
      X x(values.begin(), values.end());

      test_matches_reference(x, reference_cont);
      BOOST_TEST_GT(x.size(), 0u);
      BOOST_TEST_LE(x.size(), values.size());
      BOOST_TEST_EQ(x.hash_function(), hasher());
      BOOST_TEST_EQ(x.key_eq(), key_equal());
      BOOST_TEST(x.get_allocator() == allocator_type{});
      if (rg == sequential) {
        BOOST_TEST_EQ(x.size(), values.size());
      }
    }

    {
      X x(values.begin(), values.end(), 0);

      test_matches_reference(x, reference_cont);
      BOOST_TEST_GT(x.size(), 0u);
      BOOST_TEST_LE(x.size(), values.size());
      BOOST_TEST_EQ(x.hash_function(), hasher());
      BOOST_TEST_EQ(x.key_eq(), key_equal());
      BOOST_TEST(x.get_allocator() == allocator_type{});
      if (rg == sequential) {
        BOOST_TEST_EQ(x.size(), values.size());
      }
    }

    {
      X x(values.begin(), values.end(), 0, hasher(1));

      test_matches_reference(x, reference_cont);
      BOOST_TEST_GT(x.size(), 0u);
      BOOST_TEST_LE(x.size(), values.size());
      BOOST_TEST_EQ(x.hash_function(), hasher(1));
      BOOST_TEST_EQ(x.key_eq(), key_equal());
      BOOST_TEST(x.get_allocator() == allocator_type{});
      if (rg == sequential) {
        BOOST_TEST_EQ(x.size(), values.size());
      }
    }

    {
      X x(values.begin(), values.end(), 0, hasher(1), key_equal(2));

      test_matches_reference(x, reference_cont);
      BOOST_TEST_GT(x.size(), 0u);
      BOOST_TEST_LE(x.size(), values.size());
      BOOST_TEST_EQ(x.hash_function(), hasher(1));
      BOOST_TEST_EQ(x.key_eq(), key_equal(2));
      BOOST_TEST(x.get_allocator() == allocator_type{});
      if (rg == sequential) {
        BOOST_TEST_EQ(x.size(), values.size());
      }
    }

    {
      X x(values.begin(), values.end(), 0, hasher(1), key_equal(2),
        allocator_type{});

      test_matches_reference(x, reference_cont);
      BOOST_TEST_GT(x.size(), 0u);
      BOOST_TEST_LE(x.size(), values.size());
      BOOST_TEST_EQ(x.hash_function(), hasher(1));
      BOOST_TEST_EQ(x.key_eq(), key_equal(2));
      BOOST_TEST(x.get_allocator() == allocator_type{});
      if (rg == sequential) {
        BOOST_TEST_EQ(x.size(), values.size());
      }
    }

    check_raii_counts();
  }

  // copy constructor (plain and allocator-extended), also invoked from many
  // threads at once against the same source container
  template <class X, class GF>
  void copy_constructor(X*, GF gen_factory, test::random_generator rg)
  {
    using allocator_type = typename X::allocator_type;

    {
      X x(0, hasher(1), key_equal(2), allocator_type{});
      X y(x);

      BOOST_TEST_EQ(y.size(), x.size());
      BOOST_TEST_EQ(y.hash_function(), x.hash_function());
      BOOST_TEST_EQ(y.key_eq(), x.key_eq());
      BOOST_TEST(y.get_allocator() == x.get_allocator());
    }

    auto gen = gen_factory.template get<X>();
    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
    auto reference_cont =
      reference_container<X>(values.begin(), values.end());

    raii::reset_counts();

    {
      X x(values.begin(), values.end(), 0, hasher(1), key_equal(2),
        allocator_type{});

      // every worker thread copy-constructs its own y from the shared x
      thread_runner(
        values, [&x, &reference_cont](
                  boost::span<span_value_type<decltype(values)> > s) {
          (void)s;

          X y(x);

          test_matches_reference(x, reference_cont);
          test_matches_reference(y, reference_cont);
          BOOST_TEST_EQ(y.size(), x.size());
          BOOST_TEST_EQ(y.hash_function(), x.hash_function());
          BOOST_TEST_EQ(y.key_eq(), x.key_eq());
          BOOST_TEST(y.get_allocator() == x.get_allocator());
        });
    }

    check_raii_counts();

    raii::reset_counts();

    {
      allocator_type a;
      X x(values.begin(), values.end(), 0, hasher(1), key_equal(2), a);

      // allocator-extended copy: X y(x, a)
      thread_runner(
        values, [&x, &reference_cont, a](
                  boost::span<span_value_type<decltype(values)> > s) {
          (void)s;

          X y(x, a);

          test_matches_reference(x, reference_cont);
          test_matches_reference(y, reference_cont);
          BOOST_TEST_EQ(y.size(), x.size());
          BOOST_TEST_EQ(y.hash_function(), x.hash_function());
          BOOST_TEST_EQ(y.key_eq(), x.key_eq());
          BOOST_TEST(y.get_allocator() == x.get_allocator());
        });
    }

    check_raii_counts();
  }

  // copy-construct from a container that two other threads are concurrently
  // inserting into; the copy must be a coherent subset of the reference
  template <class X, class GF>
  void copy_constructor_with_insertion(
    X*, GF gen_factory, test::random_generator rg)
  {
    using allocator_type = typename X::allocator_type;

    auto gen = gen_factory.template get<X>();
    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
    auto reference_cont =
      reference_container<X>(values.begin(), values.end());

    raii::reset_counts();

    std::mutex m;
    std::condition_variable cv;
    bool ready = false;

    {
      X x(0, hasher(1), key_equal(2), allocator_type{});

      // inserter thread body: signal readiness, then insert all values
      auto f = [&x, &values, &m, &cv, &ready] {
        {
          std::lock_guard<std::mutex> guard(m);
          ready = true;
        }
        cv.notify_all();

        for (auto const& val : values) {
          x.insert(val);
        }
      };

      std::thread t1(f);
      std::thread t2(f);

      thread_runner(
        values, [&x, &reference_cont, &values, rg, &m, &cv, &ready](
                  boost::span<span_value_type<decltype(values)> > s) {
          (void)s;

          // wait until at least one inserter has started
          {
            std::unique_lock<std::mutex> lk(m);
            cv.wait(lk, [&] { return ready; });
          }

          X y(x);
          BOOST_TEST_LE(y.size(), values.size());
          BOOST_TEST_EQ(y.hash_function(), x.hash_function());
          BOOST_TEST_EQ(y.key_eq(), x.key_eq());
          BOOST_TEST(y.get_allocator() == x.get_allocator());

          // everything visible in x so far must exist in the reference
          x.visit_all([&reference_cont, rg](
                        typename X::value_type const& val) {
            BOOST_TEST(reference_cont.contains(get_key(val)));
            if (rg == sequential) {
              BOOST_TEST_EQ(val, *reference_cont.find(get_key(val)));
            }
          });
        });

      t1.join();
      t2.join();
    }

    check_raii_counts();
  }

  // move constructor: exactly one thread may win the moved-from contents;
  // the plain move must transfer buckets without moving elements
  template <class X, class GF>
  void move_constructor(X*, GF gen_factory, test::random_generator rg)
  {
    using value_type = typename X::value_type;
    using allocator_type = typename X::allocator_type;

    static constexpr auto value_type_cardinality =
      value_cardinality<value_type>::value;

    {
      X x(0, hasher(1), key_equal(2), allocator_type{});
      auto const old_size = x.size();
      X y(std::move(x));

      BOOST_TEST_EQ(y.size(), old_size);
      BOOST_TEST_EQ(y.hash_function(), hasher(1));
      BOOST_TEST_EQ(y.key_eq(), key_equal(2));

      // moved-from container resets to default-constructed state
      BOOST_TEST_EQ(x.size(), 0u);
      BOOST_TEST_EQ(x.hash_function(), hasher());
      BOOST_TEST_EQ(x.key_eq(), key_equal());

      BOOST_TEST(y.get_allocator() == x.get_allocator());
    }

    auto gen = gen_factory.template get<X>();
    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
    auto reference_cont =
      reference_container<X>(values.begin(), values.end());

    raii::reset_counts();

    {
      X x(values.begin(), values.end(), 0, hasher(1), key_equal(2),
        allocator_type{});

      std::atomic_uint num_transfers{0};
      auto const old_mc = +raii::move_constructor;

      thread_runner(
        values, [&x, &reference_cont, &num_transfers](
                  boost::span<span_value_type<decltype(values)> > s) {
          (void)s;

          auto const old_size = x.size();
          X y(std::move(x));
          if (!y.empty()) {
            // this thread won the race for the contents
            ++num_transfers;
            test_matches_reference(y, reference_cont);
            BOOST_TEST_EQ(y.size(), old_size);
            BOOST_TEST_EQ(y.hash_function(), hasher(1));
            BOOST_TEST_EQ(y.key_eq(), key_equal(2));
          } else {
            BOOST_TEST_EQ(y.size(), 0u);
            BOOST_TEST_EQ(y.hash_function(), hasher());
            BOOST_TEST_EQ(y.key_eq(), key_equal());
          }

          BOOST_TEST_EQ(x.size(), 0u);
          BOOST_TEST_EQ(x.hash_function(), hasher());
          BOOST_TEST_EQ(x.key_eq(), key_equal());

          BOOST_TEST(y.get_allocator() == x.get_allocator());
        });

      BOOST_TEST_EQ(num_transfers, 1u);
      // bucket transfer, not element-wise moves
      BOOST_TEST_EQ(raii::move_constructor, old_mc);
    }

    check_raii_counts();

    // allocator-aware move constructor, unequal allocators
    raii::reset_counts();

    {
      X x(values.begin(), values.end(), 0, hasher(1), key_equal(2),
        allocator_type{1});

      std::atomic_uint num_transfers{0};
      auto const old_mc = +raii::move_constructor;
      auto const old_size = x.size();

      thread_runner(
        values, [&x, &reference_cont, &num_transfers, old_size](
                  boost::span<span_value_type<decltype(values)> > s) {
          (void)s;

          auto a = allocator_type{2};
          BOOST_TEST(a != x.get_allocator());

          X y(std::move(x), a);
          if (!y.empty()) {
            ++num_transfers;
            test_matches_reference(y, reference_cont);
            BOOST_TEST_EQ(y.size(), old_size);
            BOOST_TEST_EQ(y.hash_function(), hasher(1));
            BOOST_TEST_EQ(y.key_eq(), key_equal(2));
          } else {
            BOOST_TEST_EQ(y.size(), 0u);
            BOOST_TEST_EQ(y.hash_function(), hasher());
            BOOST_TEST_EQ(y.key_eq(), key_equal());
          }

          BOOST_TEST_EQ(x.size(), 0u);
          BOOST_TEST_EQ(x.hash_function(), hasher());
          BOOST_TEST_EQ(x.key_eq(), key_equal());

          BOOST_TEST(y.get_allocator() != x.get_allocator());
          BOOST_TEST(y.get_allocator() == a);
        });

      BOOST_TEST_EQ(num_transfers, 1u);
      // unequal allocators force one element-wise move of the winning copy
      BOOST_TEST_EQ(
        raii::move_constructor, old_mc + (value_type_cardinality * old_size));
    }

    check_raii_counts();

    // allocator-aware move constructor, equal allocators
    raii::reset_counts();

    {
      X x(values.begin(), values.end(), 0, hasher(1), key_equal(2),
        allocator_type{1});

      std::atomic_uint num_transfers{0};
      auto const old_mc = +raii::move_constructor;
      auto const old_size = x.size();

      thread_runner(
        values, [&x, &reference_cont, &num_transfers, old_size](
                  boost::span<span_value_type<decltype(values)> > s) {
          (void)s;

          auto a = allocator_type{1};
          BOOST_TEST(a == x.get_allocator());

          X y(std::move(x), a);
          if (!y.empty()) {
            ++num_transfers;
            test_matches_reference(y, reference_cont);
            BOOST_TEST_EQ(y.size(), old_size);
            BOOST_TEST_EQ(y.hash_function(), hasher(1));
            BOOST_TEST_EQ(y.key_eq(), key_equal(2));
          } else {
            BOOST_TEST_EQ(y.size(), 0u);
            BOOST_TEST_EQ(y.hash_function(), hasher());
            BOOST_TEST_EQ(y.key_eq(), key_equal());
          }

          BOOST_TEST_EQ(x.size(), 0u);
          BOOST_TEST_EQ(x.hash_function(), hasher());
          BOOST_TEST_EQ(x.key_eq(), key_equal());

          BOOST_TEST(y.get_allocator() == x.get_allocator());
          BOOST_TEST(y.get_allocator() == a);
        });

      BOOST_TEST_EQ(num_transfers, 1u);
      // equal allocators: buckets transferred, no element moves
      BOOST_TEST_EQ(raii::move_constructor, old_mc);
    }

    check_raii_counts();
  }

  // move-construct while another thread is still inserting; at least one
  // mover must observe a non-empty container whose contents are coherent
  template <class X, class GF>
  void move_constructor_with_insertion(
    X*, GF gen_factory, test::random_generator rg)
  {
    using value_type = typename X::value_type;
    using allocator_type = typename X::allocator_type;

    auto gen = gen_factory.template get<X>();
    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
    auto reference_cont =
      reference_container<X>(values.begin(), values.end());

    raii::reset_counts();

    std::mutex m;
    std::condition_variable cv;
    bool ready = false;

    {
      X x(0, hasher(1), key_equal(2), allocator_type{});
      std::atomic_uint num_transfers{0};

      std::thread t1([&x, &values] {
        for (auto const& val : values) {
          x.insert(val);
        }
      });

      // t2 only signals readiness once x has become non-empty
      std::thread t2([&x, &m, &cv, &ready] {
        while (x.empty()) {
          std::this_thread::yield();
        }
        {
          std::lock_guard<std::mutex> guard(m);
          ready = true;
        }
        cv.notify_all();
      });

      thread_runner(
        values, [&x, &reference_cont, &num_transfers, rg, &m, &ready, &cv](
                  boost::span<span_value_type<decltype(values)> > s) {
          (void)s;

          {
            std::unique_lock<std::mutex> lk(m);
            cv.wait(lk, [&] { return ready; });
          }

          X y(std::move(x));
          if (!y.empty()) {
            ++num_transfers;
            y.cvisit_all([&reference_cont, rg](value_type const& val) {
              BOOST_TEST(reference_cont.contains(get_key(val)));
              if (rg == sequential) {
                BOOST_TEST_EQ(val, *reference_cont.find(get_key(val)));
              }
            });
          }
        });

      t1.join();
      t2.join();

      BOOST_TEST_GE(num_transfers, 1u);
    }

    check_raii_counts();
  }

  // X(first, last, a): iterator range plus allocator only
  template <class X, class GF>
  void iterator_range_with_allocator(
    X*, GF gen_factory, test::random_generator rg)
  {
    using allocator_type = typename X::allocator_type;

    auto gen = gen_factory.template get<X>();
    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
    auto reference_cont =
      reference_container<X>(values.begin(), values.end());

    raii::reset_counts();

    {
      allocator_type a;
      X x(values.begin(), values.end(), a);

      BOOST_TEST_GT(x.size(), 0u);
      BOOST_TEST_LE(x.size(), values.size());
      if (rg == sequential) {
        BOOST_TEST_EQ(x.size(), values.size());
      }
      BOOST_TEST_EQ(x.hash_function(), hasher());
      BOOST_TEST_EQ(x.key_eq(), key_equal());
      BOOST_TEST(x.get_allocator() == a);

      test_fuzzy_matches_reference(x, reference_cont, rg);
    }

    check_raii_counts();
  }

  // X(a): allocator-only constructor
  template <class X> void explicit_allocator(X*)
  {
    using allocator_type = typename X::allocator_type;

    raii::reset_counts();

    {
      allocator_type a;
      X x(a);

      BOOST_TEST_EQ(x.size(), 0u);
      BOOST_TEST_EQ(x.hash_function(), hasher());
      BOOST_TEST_EQ(x.key_eq(), key_equal());
      BOOST_TEST(x.get_allocator() == a);
    }
  }

  // X(init_list, ...): the 22-entry lists contain 11 distinct keys, so every
  // overload must produce size 11 and copy exactly half the list entries
  template <class X, class IL>
  void initializer_list_with_all_params(std::pair<X*, IL> p)
  {
    using value_type = typename X::value_type;
    static constexpr auto value_type_cardinality =
      value_cardinality<value_type>::value;
    using allocator_type = typename X::allocator_type;

    auto init_list = p.second;

    {
      raii::reset_counts();

      X x(init_list, 0, hasher(1), key_equal(2), allocator_type(3));

      BOOST_TEST_EQ(x.size(), 11u);
      BOOST_TEST_EQ(x.hash_function(), hasher(1));
      BOOST_TEST_EQ(x.key_eq(), key_equal(2));
      BOOST_TEST(x.get_allocator() == allocator_type(3));

      BOOST_TEST_EQ(raii::default_constructor, 0u);
      BOOST_TEST_EQ(
        raii::copy_constructor, value_type_cardinality * init_list.size() / 2u);
      BOOST_TEST_EQ(raii::move_constructor, 0u);
    }

    check_raii_counts();

    {
      raii::reset_counts();

      X x(init_list, allocator_type(3));

      BOOST_TEST_EQ(x.size(), 11u);
      BOOST_TEST_EQ(x.hash_function(), hasher());
      BOOST_TEST_EQ(x.key_eq(), key_equal());
      BOOST_TEST(x.get_allocator() == allocator_type(3));

      BOOST_TEST_EQ(raii::default_constructor, 0u);
      BOOST_TEST_EQ(
        raii::copy_constructor, value_type_cardinality * init_list.size() / 2u);
      BOOST_TEST_EQ(raii::move_constructor, 0u);
    }

    check_raii_counts();

    {
      raii::reset_counts();

      X x(init_list, 0, allocator_type(3));

      BOOST_TEST_EQ(x.size(), 11u);
      BOOST_TEST_EQ(x.hash_function(), hasher());
      BOOST_TEST_EQ(x.key_eq(), key_equal());
      BOOST_TEST(x.get_allocator() == allocator_type(3));

      BOOST_TEST_EQ(raii::default_constructor, 0u);
      BOOST_TEST_EQ(
        raii::copy_constructor, value_type_cardinality * init_list.size() / 2u);
      BOOST_TEST_EQ(raii::move_constructor, 0u);
    }

    check_raii_counts();

    {
      raii::reset_counts();

      X x(init_list, 0, hasher(1), allocator_type(3));

      BOOST_TEST_EQ(x.size(), 11u);
      BOOST_TEST_EQ(x.hash_function(), hasher(1));
      BOOST_TEST_EQ(x.key_eq(), key_equal());
      BOOST_TEST(x.get_allocator() == allocator_type(3));

      BOOST_TEST_EQ(raii::default_constructor, 0u);
      BOOST_TEST_EQ(
        raii::copy_constructor, value_type_cardinality * init_list.size() / 2u);
      BOOST_TEST_EQ(raii::move_constructor, 0u);
    }

    check_raii_counts();
  }

  // X(n, a): bucket count plus allocator
  template <class X> void bucket_count_and_allocator(X*)
  {
    using allocator_type = typename X::allocator_type;

    raii::reset_counts();

    {
      X x(0, allocator_type(3));
      BOOST_TEST_EQ(x.size(), 0u);
      BOOST_TEST_EQ(x.hash_function(), hasher());
      BOOST_TEST_EQ(x.key_eq(), key_equal());
      BOOST_TEST(x.get_allocator() == allocator_type(3));
    }

    {
      X x(4096, allocator_type(3));
      BOOST_TEST_EQ(x.size(), 0u);
      BOOST_TEST_EQ(x.hash_function(), hasher());
      BOOST_TEST_EQ(x.key_eq(), key_equal());
      BOOST_TEST(x.get_allocator() == allocator_type(3));
    }
  }

  // X(n, hf, a): bucket count, hasher and allocator
  template <class X> void bucket_count_with_hasher_and_allocator(X*)
  {
    using allocator_type = typename X::allocator_type;

    raii::reset_counts();

    {
      X x(0, hasher(1), allocator_type(3));
      BOOST_TEST_EQ(x.size(), 0u);
      BOOST_TEST_EQ(x.hash_function(), hasher(1));
      BOOST_TEST_EQ(x.key_eq(), key_equal());
      BOOST_TEST(x.get_allocator() == allocator_type(3));
    }
  }

  // X(first, last, n, a)
  template <class X, class GF>
  void iterator_range_with_bucket_count_and_allocator(
    X*, GF gen_factory, test::random_generator rg)
  {
    using allocator_type = typename X::allocator_type;

    auto gen = gen_factory.template get<X>();
    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
    auto reference_cont =
      reference_container<X>(values.begin(), values.end());

    raii::reset_counts();

    {
      allocator_type a(3);
      X x(values.begin(), values.end(), 0, a);
      test_fuzzy_matches_reference(x, reference_cont, rg);

      BOOST_TEST_EQ(x.hash_function(), hasher());
      BOOST_TEST_EQ(x.key_eq(), key_equal());
      BOOST_TEST(x.get_allocator() == a);
    }

    check_raii_counts();
  }

  // X(first, last, n, hf, a)
  template <class X, class GF>
  void iterator_range_with_bucket_count_hasher_and_allocator(
    X*, GF gen_factory, test::random_generator rg)
  {
    using allocator_type = typename X::allocator_type;

    auto gen = gen_factory.template get<X>();
    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
    auto reference_cont =
      reference_container<X>(values.begin(), values.end());

    raii::reset_counts();

    {
      allocator_type a(3);
      hasher hf(1);
      X x(values.begin(), values.end(), 0, hf, a);
      test_fuzzy_matches_reference(x, reference_cont, rg);

      BOOST_TEST_EQ(x.hash_function(), hf);
      BOOST_TEST_EQ(x.key_eq(), key_equal());
      BOOST_TEST(x.get_allocator() == a);
    }

    check_raii_counts();
  }

  // conversion between the concurrent container and its non-concurrent flat
  // counterpart: both directions must transfer buckets (no element copies or
  // moves) and steal hasher/key_equal/allocator
  template <class X, class GF>
  void flat_constructor(X*, GF gen_factory, test::random_generator rg)
  {
    using value_type = typename X::value_type;
    static constexpr auto value_type_cardinality =
      value_cardinality<value_type>::value;
    using allocator_type = typename X::allocator_type;

    auto gen = gen_factory.template get<X>();
    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
    auto reference_cont =
      reference_container<X>(values.begin(), values.end());
    auto reference_flat = flat_container<X>(values.begin(), values.end());

    raii::reset_counts();

    {
      flat_container<X> flat(values.begin(), values.end(),
        reference_cont.size(), hasher(1), key_equal(2), allocator_type(3));

      auto const old_dc = +raii::default_constructor;
      auto const old_mc = +raii::move_constructor;
      auto const old_cc = +raii::copy_constructor;

      BOOST_TEST_EQ(old_dc, 0u);
      BOOST_TEST_EQ(old_mc, 0u);
      BOOST_TEST_EQ(old_cc, value_type_cardinality * flat.size());

      X x(std::move(flat));
      test_fuzzy_matches_reference(x, reference_cont, rg);

      // no per-element work: the counters must not have changed
      BOOST_TEST_EQ(+raii::default_constructor, old_dc);
      BOOST_TEST_EQ(+raii::move_constructor, old_mc);
      BOOST_TEST_EQ(+raii::copy_constructor, old_cc);

      BOOST_TEST_EQ(x.hash_function(), hasher(1));
      BOOST_TEST_EQ(x.key_eq(), key_equal(2));
      BOOST_TEST(x.get_allocator() == allocator_type(3));
      BOOST_TEST(flat.empty());
    }

    check_raii_counts();

    {
      flat_container<X> flat(0, hasher(1), key_equal(2), allocator_type(3));

      X x(std::move(flat));
      BOOST_TEST(x.empty());

      BOOST_TEST_EQ(x.hash_function(), hasher(1));
      BOOST_TEST_EQ(x.key_eq(), key_equal(2));
      BOOST_TEST(x.get_allocator() == allocator_type(3));
      BOOST_TEST(flat.empty());
    }

    check_raii_counts();

    {
      X x(values.begin(), values.end(), reference_cont.size(), hasher(1),
        key_equal(2), allocator_type(3));

      auto const old_dc = +raii::default_constructor;
      auto const old_mc = +raii::move_constructor;
      auto const old_cc = +raii::copy_constructor;

      BOOST_TEST_EQ(old_dc, 0u);
      BOOST_TEST_EQ(old_mc, 0u);
      BOOST_TEST_EQ(old_cc, 2u * value_type_cardinality * x.size());

      flat_container<X> flat(std::move(x));
      BOOST_TEST(flat == reference_flat);

      BOOST_TEST_EQ(+raii::default_constructor, old_dc);
      BOOST_TEST_EQ(+raii::move_constructor, old_mc);
      BOOST_TEST_EQ(+raii::copy_constructor, old_cc);

      BOOST_TEST_EQ(flat.hash_function(), hasher(1));
      BOOST_TEST_EQ(flat.key_eq(), key_equal(2));
      BOOST_TEST(flat.get_allocator() == allocator_type(3));
      BOOST_TEST(x.empty());
    }

    check_raii_counts();

    {
      X x(0, hasher(1), key_equal(2), allocator_type(3));

      flat_container<X> flat(std::move(x));
      BOOST_TEST(flat.empty());

      BOOST_TEST_EQ(flat.hash_function(), hasher(1));
      BOOST_TEST_EQ(flat.key_eq(), key_equal(2));
      BOOST_TEST(flat.get_allocator() == allocator_type(3));
      BOOST_TEST(x.empty());
    }

    check_raii_counts();
  }
} // namespace

// clang-format off
UNORDERED_TEST(
  default_constructor,
  ((test_map)(test_set)))

UNORDERED_TEST(
  bucket_count_with_hasher_key_equal_and_allocator,
  ((test_map)(test_set)))

UNORDERED_TEST(
  soccc,
  ((test_map)(test_set)))

UNORDERED_TEST(
  from_iterator_range,
  ((test_map)(test_set))
  ((value_type_generator_factory))
  ((default_generator)(sequential)(limited_range)))

UNORDERED_TEST(
  copy_constructor,
  ((test_map)(test_set))
  ((value_type_generator_factory))
  ((default_generator)(sequential)(limited_range)))

UNORDERED_TEST(
  copy_constructor_with_insertion,
  ((test_map)(test_set))
  ((value_type_generator_factory))
  ((default_generator)(sequential)(limited_range)))

UNORDERED_TEST(
  move_constructor,
  ((test_map)(test_set))
  ((value_type_generator_factory))
  ((default_generator)(sequential)(limited_range)))

UNORDERED_TEST(
  move_constructor_with_insertion,
  ((test_map)(test_set))
  ((value_type_generator_factory))
  ((default_generator)(sequential)(limited_range)))

UNORDERED_TEST(
  iterator_range_with_allocator,
  ((test_map)(test_set))
  ((value_type_generator_factory))
  ((default_generator)(sequential)(limited_range)))

UNORDERED_TEST(
  explicit_allocator,
  ((test_map)(test_set)))

UNORDERED_TEST(
  initializer_list_with_all_params,
  ((test_map_and_init_list)(test_set_and_init_list)))

UNORDERED_TEST(
  bucket_count_and_allocator,
  ((test_map)(test_set)))

UNORDERED_TEST(
  bucket_count_with_hasher_and_allocator,
  ((test_map)(test_set)))

UNORDERED_TEST(
  iterator_range_with_bucket_count_and_allocator,
  ((test_map)(test_set))
  ((value_type_generator_factory))
  ((default_generator)(sequential)(limited_range)))

UNORDERED_TEST(
  iterator_range_with_bucket_count_hasher_and_allocator,
  ((test_map)(test_set))
  ((value_type_generator_factory))
  ((default_generator)(sequential)(limited_range)))

UNORDERED_TEST(
  flat_constructor,
  ((test_map)(test_set))
  ((value_type_generator_factory))
  ((default_generator)(sequential)(limited_range)))
// clang-format on

RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/cfoa/insert_tests.cpp
// Copyright (C) 2023 Christian Mazakas
// Copyright (C) 2023 Joaquin M Lopez Munoz
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// Insertion tests for concurrent_flat_map/set: a family of inserter function
// objects, each driving a different insert overload from multiple threads
// (via thread_runner) and asserting exact RAII copy/move/assign counts.
// NOTE: this chunk ends mid-definition; the final functor continues past the
// visible source.

#include "helpers.hpp"

#include <boost/config.hpp>
#include <boost/unordered/concurrent_flat_map.hpp>
#include <boost/unordered/concurrent_flat_set.hpp>

#include <boost/core/ignore_unused.hpp>

#if defined(BOOST_MSVC)
#pragma warning(disable : 4127) // conditional expression is constant
#endif

// Proxy type implicitly convertible to both the set value type (raii) and the
// map value type (pair<raii const, raii>), used to exercise heterogeneous
// iterator-range insertion.
struct raii_convertible
{
  int x = 0, y = 0;

  template <typename T> raii_convertible(T const& t) : x{t.x_} {}

  template <typename T, typename Q>
  raii_convertible(std::pair<T, Q> const& p) : x{p.first.x_}, y{p.second.x_}
  {
  }

  operator raii() { return {x}; }
  operator std::pair<raii const, raii>() { return {x, y}; }
};

namespace {
  test::seed_t initialize_seed(78937);

  // insert(const&): each successful insert copy-constructs one element
  struct lvalue_inserter_type
  {
    template <class T, class X> void operator()(std::vector<T>& values, X& x)
    {
      static constexpr auto value_type_cardinality =
        value_cardinality<typename X::value_type>::value;

      std::atomic<std::uint64_t> num_inserts{0};
      thread_runner(values, [&x, &num_inserts](boost::span<T> s) {
        for (auto const& r : s) {
          bool b = x.insert(r);
          if (b) {
            ++num_inserts;
          }
        }
      });
      BOOST_TEST_EQ(num_inserts, x.size());
      BOOST_TEST_EQ(raii::copy_constructor, value_type_cardinality * x.size());

      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
    }
  } lvalue_inserter;

  // same as above but with reserve() up front, so no rehash-induced moves
  struct norehash_lvalue_inserter_type : public lvalue_inserter_type
  {
    template <class T, class X> void operator()(std::vector<T>& values, X& x)
    {
      static constexpr auto value_type_cardinality =
        value_cardinality<typename X::value_type>::value;

      x.reserve(values.size());
      lvalue_inserter_type::operator()(values, x);
      BOOST_TEST_EQ(raii::copy_constructor, value_type_cardinality * x.size());
      BOOST_TEST_EQ(raii::move_constructor, 0u);
    }
  } norehash_lvalue_inserter;

  // insert(&&): elements are moved in; for maps inserting a non-const-key
  // pair, the key itself still has to be copied
  struct rvalue_inserter_type
  {
    template <class T, class X> void operator()(std::vector<T>& values, X& x)
    {
      BOOST_TEST_EQ(raii::copy_constructor, 0u);

      std::atomic<std::uint64_t> num_inserts{0};
      thread_runner(values, [&x, &num_inserts](boost::span<T> s) {
        for (auto& r : s) {
          bool b = x.insert(std::move(r));
          if (b) {
            ++num_inserts;
          }
        }
      });
      BOOST_TEST_EQ(num_inserts, x.size());

      // map case (key_type != value_type): one key copy per insert
      if (std::is_same<T, typename X::value_type>::value &&
          !std::is_same<typename X::key_type,
            typename X::value_type>::value) {
        BOOST_TEST_EQ(raii::copy_constructor, x.size());
      } else {
        BOOST_TEST_EQ(raii::copy_constructor, 0u);
      }

      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
    }
  } rvalue_inserter;

  // rvalue insert with reserve(): exact move counts become predictable
  // because no rehashing can occur
  struct norehash_rvalue_inserter_type : public rvalue_inserter_type
  {
    template <class T, class X> void operator()(std::vector<T>& values, X& x)
    {
      static constexpr auto value_type_cardinality =
        value_cardinality<typename X::value_type>::value;

      x.reserve(values.size());

      BOOST_TEST_EQ(raii::copy_constructor, 0u);
      BOOST_TEST_EQ(raii::move_constructor, 0u);

      rvalue_inserter_type::operator()(values, x);

      if (std::is_same<T, typename X::value_type>::value) {
        if (std::is_same<typename X::key_type,
              typename X::value_type>::value) {
          // set: value moved, nothing copied
          BOOST_TEST_EQ(raii::copy_constructor, 0u);
          BOOST_TEST_EQ(raii::move_constructor, x.size());
        } else {
          // map with pair<const K, V>: key copied, mapped value moved
          BOOST_TEST_EQ(raii::copy_constructor, x.size());
          BOOST_TEST_EQ(raii::move_constructor, x.size());
        }
      } else {
        BOOST_TEST_EQ(raii::copy_constructor, 0u);
        BOOST_TEST_EQ(
          raii::move_constructor, value_type_cardinality * x.size());
      }
    }
  } norehash_rvalue_inserter;

  // insert(first, last) through raii_convertible proxies: elements are
  // default-constructed via conversion rather than copied
  struct iterator_range_inserter_type
  {
    template <class T, class X> void operator()(std::vector<T>& values, X& x)
    {
      static constexpr auto value_type_cardinality =
        value_cardinality<typename X::value_type>::value;
      std::vector<raii_convertible> values2;
      values2.reserve(values.size());
      for (auto const& v : values) {
        values2.push_back(raii_convertible(v));
      }

      thread_runner(values2, [&x](boost::span<raii_convertible> s) {
        x.insert(s.begin(), s.end());
      });

      BOOST_TEST_EQ(
        raii::default_constructor, value_type_cardinality * values2.size());
#if BOOST_WORKAROUND(BOOST_GCC_VERSION, >= 50300) && \
  BOOST_WORKAROUND(BOOST_GCC_VERSION, < 50500)
      // some versions of old gcc have trouble eliding copies here
      // https://godbolt.org/z/Ebo6TbvaG
#elif BOOST_WORKAROUND(BOOST_GCC_VERSION, >= 40900) && \
  BOOST_WORKAROUND(BOOST_GCC_VERSION, < 50000)
      // seemingly same problem, though the snippet above does not reveal it
#else
      BOOST_TEST_EQ(raii::copy_constructor, 0u);
#endif
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
    }
  } iterator_range_inserter;

  // insert_or_assign(const key&, const mapped&): duplicates copy-assign
  struct lvalue_insert_or_assign_copy_assign_type
  {
    template <class T, class X> void operator()(std::vector<T>& values, X& x)
    {
      thread_runner(values, [&x](boost::span<T> s) {
        for (auto& r : s) {
          x.insert_or_assign(r.first, r.second);
        }
      });

      BOOST_TEST_EQ(raii::default_constructor, 0u);
      BOOST_TEST_EQ(raii::copy_constructor, 2 * x.size());
      // don't check move construction count here because of rehashing
      BOOST_TEST_GT(raii::move_constructor, 0u);
      // every duplicate key in the input becomes one copy assignment
      BOOST_TEST_EQ(raii::copy_assignment, values.size() - x.size());
      BOOST_TEST_EQ(raii::move_assignment, 0u);
    }
  } lvalue_insert_or_assign_copy_assign;

  // insert_or_assign(const key&, mapped&&): duplicates move-assign
  struct lvalue_insert_or_assign_move_assign_type
  {
    template <class T, class X> void operator()(std::vector<T>& values, X& x)
    {
      thread_runner(values, [&x](boost::span<T> s) {
        for (auto& r : s) {
          x.insert_or_assign(r.first, std::move(r.second));
        }
      });

      BOOST_TEST_EQ(raii::default_constructor, 0u);
      BOOST_TEST_EQ(raii::copy_constructor, x.size());
      BOOST_TEST_GT(raii::move_constructor, x.size()); // rehashing
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, values.size() - x.size());
    }
  } lvalue_insert_or_assign_move_assign;

  // insert_or_assign(key&&, const mapped&) — definition continues past the
  // end of this chunk
  struct rvalue_insert_or_assign_copy_assign_type
  {
    template <class T, class X> void operator()(std::vector<T>& values, X& x)
    {
      thread_runner(values, [&x](boost::span<T> s) {
        for (auto& r : s) {
x.insert_or_assign(std::move(r.first), r.second); } }); BOOST_TEST_EQ(raii::default_constructor, 0u); BOOST_TEST_EQ(raii::copy_constructor, x.size()); BOOST_TEST_GT(raii::move_constructor, x.size()); // rehashing BOOST_TEST_EQ(raii::copy_assignment, values.size() - x.size()); BOOST_TEST_EQ(raii::move_assignment, 0u); } } rvalue_insert_or_assign_copy_assign; struct rvalue_insert_or_assign_move_assign_type { template <class T, class X> void operator()(std::vector<T>& values, X& x) { thread_runner(values, [&x](boost::span<T> s) { for (auto& r : s) { x.insert_or_assign(std::move(r.first), std::move(r.second)); } }); BOOST_TEST_EQ(raii::default_constructor, 0u); BOOST_TEST_EQ(raii::copy_constructor, 0u); BOOST_TEST_GE(raii::move_constructor, 2 * x.size()); BOOST_TEST_EQ(raii::copy_assignment, 0u); BOOST_TEST_EQ(raii::move_assignment, values.size() - x.size()); } } rvalue_insert_or_assign_move_assign; struct trans_insert_or_assign_copy_assign_type { template <class T, class X> void operator()(std::vector<T>& values, X& x) { using is_transparent = typename boost::make_void<typename X::hasher::is_transparent, typename X::key_equal::is_transparent>::type; boost::ignore_unused<is_transparent>(); BOOST_TEST_EQ(raii::default_constructor, 0u); thread_runner(values, [&x](boost::span<T> s) { for (auto& r : s) { x.insert_or_assign(r.first.x_, r.second); } }); BOOST_TEST_EQ(raii::default_constructor, x.size()); BOOST_TEST_EQ(raii::copy_constructor, x.size()); BOOST_TEST_GT(raii::move_constructor, x.size()); // rehashing BOOST_TEST_EQ(raii::copy_assignment, values.size() - x.size()); BOOST_TEST_EQ(raii::move_assignment, 0u); } } trans_insert_or_assign_copy_assign; struct trans_insert_or_assign_move_assign_type { template <class T, class X> void operator()(std::vector<T>& values, X& x) { using is_transparent = typename boost::make_void<typename X::hasher::is_transparent, typename X::key_equal::is_transparent>::type; boost::ignore_unused<is_transparent>(); thread_runner(values, 
[&x](boost::span<T> s) { for (auto& r : s) { x.insert_or_assign(r.first.x_, std::move(r.second)); } }); BOOST_TEST_EQ(raii::default_constructor, x.size()); BOOST_TEST_EQ(raii::copy_constructor, 0u); BOOST_TEST_GT(raii::move_constructor, 2 * x.size()); // rehashing BOOST_TEST_EQ(raii::copy_assignment, 0u); BOOST_TEST_EQ(raii::move_assignment, values.size() - x.size()); } } trans_insert_or_assign_move_assign; struct lvalue_insert_or_cvisit_type { template <class T, class X> void operator()(std::vector<T>& values, X& x) { static constexpr auto value_type_cardinality = value_cardinality<typename X::value_type>::value; std::atomic<std::uint64_t> num_inserts{0}; std::atomic<std::uint64_t> num_invokes{0}; thread_runner(values, [&x, &num_inserts, &num_invokes](boost::span<T> s) { for (auto& r : s) { bool b = x.insert_or_cvisit( r, [&num_invokes](typename X::value_type const& v) { (void)v; ++num_invokes; }); if (b) { ++num_inserts; } } }); BOOST_TEST_EQ(num_inserts, x.size()); BOOST_TEST_EQ(num_invokes, values.size() - x.size()); BOOST_TEST_EQ(raii::default_constructor, 0u); BOOST_TEST_EQ( raii::copy_constructor, value_type_cardinality * x.size()); // don't check move construction count here because of rehashing BOOST_TEST_GT(raii::move_constructor, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); } } lvalue_insert_or_cvisit; struct lvalue_insert_or_visit_type { template <class T, class X> void operator()(std::vector<T>& values, X& x) { static constexpr auto value_type_cardinality = value_cardinality<typename X::value_type>::value; // concurrent_flat_set visit is always const access using arg_type = typename std::conditional< std::is_same<typename X::key_type, typename X::value_type>::value, typename X::value_type const, typename X::value_type >::type; std::atomic<std::uint64_t> num_inserts{0}; std::atomic<std::uint64_t> num_invokes{0}; thread_runner(values, [&x, &num_inserts, &num_invokes](boost::span<T> s) { for (auto& r : s) { bool b = x.insert_or_visit(r, 
[&num_invokes](arg_type& v) { (void)v; ++num_invokes; }); if (b) { ++num_inserts; } } }); BOOST_TEST_EQ(num_inserts, x.size()); BOOST_TEST_EQ(num_invokes, values.size() - x.size()); BOOST_TEST_EQ(raii::default_constructor, 0u); BOOST_TEST_EQ(raii::copy_constructor, value_type_cardinality * x.size()); // don't check move construction count here because of rehashing BOOST_TEST_GT(raii::move_constructor, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); } } lvalue_insert_or_visit; struct rvalue_insert_or_cvisit_type { template <class T, class X> void operator()(std::vector<T>& values, X& x) { static constexpr auto value_type_cardinality = value_cardinality<typename X::value_type>::value; std::atomic<std::uint64_t> num_inserts{0}; std::atomic<std::uint64_t> num_invokes{0}; thread_runner(values, [&x, &num_inserts, &num_invokes](boost::span<T> s) { for (auto& r : s) { bool b = x.insert_or_cvisit( std::move(r), [&num_invokes](typename X::value_type const& v) { (void)v; ++num_invokes; }); if (b) { ++num_inserts; } } }); BOOST_TEST_EQ(num_inserts, x.size()); BOOST_TEST_EQ(num_invokes, values.size() - x.size()); BOOST_TEST_EQ(raii::default_constructor, 0u); if (std::is_same<T, typename X::value_type>::value) { if (std::is_same<typename X::key_type, typename X::value_type>::value) { BOOST_TEST_EQ(raii::copy_constructor, 0u); BOOST_TEST_GE(raii::move_constructor, x.size()); } else { BOOST_TEST_EQ(raii::copy_constructor, x.size()); BOOST_TEST_GE(raii::move_constructor, x.size()); } } else { BOOST_TEST_EQ(raii::copy_constructor, 0u); BOOST_TEST_GE( raii::move_constructor, value_type_cardinality * x.size()); } } } rvalue_insert_or_cvisit; struct rvalue_insert_or_visit_type { template <class T, class X> void operator()(std::vector<T>& values, X& x) { static constexpr auto value_type_cardinality = value_cardinality<typename X::value_type>::value; // concurrent_flat_set visit is always const access using arg_type = typename std::conditional< std::is_same<typename X::key_type, typename 
X::value_type>::value, typename X::value_type const, typename X::value_type >::type; std::atomic<std::uint64_t> num_inserts{0}; std::atomic<std::uint64_t> num_invokes{0}; thread_runner(values, [&x, &num_inserts, &num_invokes](boost::span<T> s) { for (auto& r : s) { bool b = x.insert_or_visit( std::move(r), [&num_invokes](arg_type& v) { (void)v; ++num_invokes; }); if (b) { ++num_inserts; } } }); BOOST_TEST_EQ(num_inserts, x.size()); BOOST_TEST_EQ(num_invokes, values.size() - x.size()); BOOST_TEST_EQ(raii::default_constructor, 0u); if (std::is_same<T, typename X::value_type>::value) { if (std::is_same<typename X::key_type, typename X::value_type>::value) { BOOST_TEST_EQ(raii::copy_constructor, 0u); BOOST_TEST_GE(raii::move_constructor, x.size()); } else { BOOST_TEST_EQ(raii::copy_constructor, x.size()); BOOST_TEST_GE(raii::move_constructor, x.size()); } } else { BOOST_TEST_EQ(raii::copy_constructor, 0u); BOOST_TEST_GE( raii::move_constructor, value_type_cardinality * x.size()); } } } rvalue_insert_or_visit; struct iterator_range_insert_or_cvisit_type { template <class T, class X> void operator()(std::vector<T>& values, X& x) { static constexpr auto value_type_cardinality = value_cardinality<typename X::value_type>::value; std::vector<raii_convertible> values2; values2.reserve(values.size()); for (auto const& v : values) { values2.push_back(raii_convertible(v)); } std::atomic<std::uint64_t> num_invokes{0}; thread_runner( values2, [&x, &num_invokes](boost::span<raii_convertible> s) { x.insert_or_cvisit(s.begin(), s.end(), [&num_invokes](typename X::value_type const& v) { (void)v; ++num_invokes; }); }); BOOST_TEST_EQ(num_invokes, values.size() - x.size()); BOOST_TEST_EQ( raii::default_constructor, value_type_cardinality * values2.size()); #if (BOOST_WORKAROUND(BOOST_GCC_VERSION, >= 50300) && \ BOOST_WORKAROUND(BOOST_GCC_VERSION, < 50500)) || \ (BOOST_WORKAROUND(BOOST_GCC_VERSION, >= 40900) && \ BOOST_WORKAROUND(BOOST_GCC_VERSION, < 50000)) // skip test #else 
BOOST_TEST_EQ(raii::copy_constructor, 0u); #endif BOOST_TEST_GT(raii::move_constructor, 0u); } } iterator_range_insert_or_cvisit; struct iterator_range_insert_or_visit_type { template <class T, class X> void operator()(std::vector<T>& values, X& x) { static constexpr auto value_type_cardinality = value_cardinality<typename X::value_type>::value; std::vector<raii_convertible> values2; values2.reserve(values.size()); for (auto const& v : values) { values2.push_back(raii_convertible(v)); } std::atomic<std::uint64_t> num_invokes{0}; thread_runner( values2, [&x, &num_invokes](boost::span<raii_convertible> s) { x.insert_or_visit(s.begin(), s.end(), [&num_invokes](typename X::value_type const& v) { (void)v; ++num_invokes; }); }); BOOST_TEST_EQ(num_invokes, values.size() - x.size()); BOOST_TEST_EQ( raii::default_constructor, value_type_cardinality * values2.size()); #if (BOOST_WORKAROUND(BOOST_GCC_VERSION, >= 50300) && \ BOOST_WORKAROUND(BOOST_GCC_VERSION, < 50500)) || \ (BOOST_WORKAROUND(BOOST_GCC_VERSION, >= 40900) && \ BOOST_WORKAROUND(BOOST_GCC_VERSION, < 50000)) // skip test #else BOOST_TEST_EQ(raii::copy_constructor, 0u); #endif BOOST_TEST_GT(raii::move_constructor, 0u); } } iterator_range_insert_or_visit; template <class X, class GF, class F> void insert(X*, GF gen_factory, F inserter, test::random_generator rg) { auto gen = gen_factory.template get<X>(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); auto reference_cont = reference_container<X>(values.begin(), values.end()); raii::reset_counts(); { X x; inserter(values, x); BOOST_TEST_EQ(x.size(), reference_cont.size()); using value_type = typename X::value_type; BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& v) { BOOST_TEST(reference_cont.contains(get_key(v))); if (rg == test::sequential) { BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); } })); } BOOST_TEST_GE(raii::default_constructor, 0u); BOOST_TEST_GE(raii::copy_constructor, 0u); BOOST_TEST_GE(raii::move_constructor, 0u); 
BOOST_TEST_GT(raii::destructor, 0u); BOOST_TEST_EQ(raii::default_constructor + raii::copy_constructor + raii::move_constructor, raii::destructor); } template <class X, class IL> void insert_initializer_list(std::pair<X*, IL> p) { using value_type = typename X::value_type; // concurrent_flat_set visit is always const access using arg_type = typename std::conditional< std::is_same<typename X::key_type, typename X::value_type>::value, typename X::value_type const, typename X::value_type >::type; auto init_list = p.second; std::vector<raii> dummy; auto reference_cont = reference_container<X>( init_list.begin(), init_list.end()); raii::reset_counts(); { { X x; thread_runner( dummy, [&x, &init_list](boost::span<raii>) { x.insert(init_list); }); BOOST_TEST_EQ(x.size(), reference_cont.size()); BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& v) { BOOST_TEST(reference_cont.contains(get_key(v))); BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); })); } BOOST_TEST_GE(raii::default_constructor, 0u); BOOST_TEST_GE(raii::copy_constructor, 0u); BOOST_TEST_GE(raii::move_constructor, 0u); BOOST_TEST_GT(raii::destructor, 0u); BOOST_TEST_EQ(raii::default_constructor + raii::copy_constructor + raii::move_constructor, raii::destructor); BOOST_TEST_EQ(raii::copy_assignment, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); } { { std::atomic<std::uint64_t> num_invokes{0}; X x; thread_runner(dummy, [&x, &init_list, &num_invokes](boost::span<raii>) { x.insert_or_visit(init_list, [&num_invokes](arg_type& v) { (void)v; ++num_invokes; }); x.insert_or_cvisit( init_list, [&num_invokes](typename X::value_type const& v) { (void)v; ++num_invokes; }); }); BOOST_TEST_EQ(num_invokes, (init_list.size() - x.size()) + (num_threads - 1) * init_list.size() + num_threads * init_list.size()); BOOST_TEST_EQ(x.size(), reference_cont.size()); BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& v) { BOOST_TEST(reference_cont.contains(get_key(v))); BOOST_TEST_EQ(v, 
*reference_cont.find(get_key(v))); })); } BOOST_TEST_GE(raii::default_constructor, 0u); BOOST_TEST_GE(raii::copy_constructor, 0u); BOOST_TEST_GE(raii::move_constructor, 0u); BOOST_TEST_GT(raii::destructor, 0u); BOOST_TEST_EQ(raii::default_constructor + raii::copy_constructor + raii::move_constructor, raii::destructor); BOOST_TEST_EQ(raii::copy_assignment, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); } } UNORDERED_AUTO_TEST (insert_sfinae_test) { // mostly a compile-time tests to ensure that there's no ambiguity when a // user does this using value_type = typename boost::unordered::concurrent_flat_map<raii, raii>::value_type; boost::unordered::concurrent_flat_map<raii, raii> x; x.insert({1, 2}); x.insert_or_visit({2, 3}, [](value_type&) {}); x.insert_or_cvisit({3, 4}, [](value_type const&) {}); } boost::unordered::concurrent_flat_map<raii, raii>* map; boost::unordered::concurrent_flat_map<raii, raii, transp_hash, transp_key_equal>* trans_map; boost::unordered::concurrent_flat_map<raii, raii, boost::hash<raii>, std::equal_to<raii>, fancy_allocator<std::pair<raii const, raii> > >* fancy_map; boost::unordered::concurrent_flat_set<raii>* set; boost::unordered::concurrent_flat_set<raii, boost::hash<raii>, std::equal_to<raii>, fancy_allocator<std::pair<raii const, raii> > >* fancy_set; std::initializer_list<std::pair<raii const, raii> > map_init_list{ {raii{0}, raii{0}}, {raii{1}, raii{1}}, {raii{2}, raii{2}}, {raii{3}, raii{3}}, {raii{4}, raii{4}}, {raii{5}, raii{5}}, {raii{6}, raii{6}}, {raii{6}, raii{6}}, {raii{7}, raii{7}}, {raii{8}, raii{8}}, {raii{9}, raii{9}}, {raii{10}, raii{10}}, {raii{9}, raii{9}}, {raii{8}, raii{8}}, {raii{7}, raii{7}}, {raii{6}, raii{6}}, {raii{5}, raii{5}}, {raii{4}, raii{4}}, {raii{3}, raii{3}}, {raii{2}, raii{2}}, {raii{1}, raii{1}}, {raii{0}, raii{0}}, }; std::initializer_list<raii> set_init_list{ raii{0}, raii{1}, raii{2}, raii{3}, raii{4}, raii{5}, raii{6}, raii{6}, raii{7}, raii{8}, raii{9}, raii{10}, raii{9}, raii{8}, raii{7}, 
raii{6}, raii{5}, raii{4}, raii{3}, raii{2}, raii{1}, raii{0}, }; auto map_and_init_list=std::make_pair(map,map_init_list); auto set_and_init_list=std::make_pair(set,set_init_list); } // namespace using test::default_generator; using test::limited_range; using test::sequential; // clang-format off UNORDERED_TEST( insert_initializer_list, ((map_and_init_list)(set_and_init_list))) UNORDERED_TEST( insert, ((map)(fancy_map)(set)(fancy_set)) ((value_type_generator_factory)(init_type_generator_factory)) ((lvalue_inserter)(rvalue_inserter)(iterator_range_inserter) (norehash_lvalue_inserter)(norehash_rvalue_inserter) (lvalue_insert_or_cvisit)(lvalue_insert_or_visit) (rvalue_insert_or_cvisit)(rvalue_insert_or_visit) (iterator_range_insert_or_cvisit)(iterator_range_insert_or_visit)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( insert, ((map)) ((init_type_generator_factory)) ((lvalue_insert_or_assign_copy_assign)(lvalue_insert_or_assign_move_assign) (rvalue_insert_or_assign_copy_assign)(rvalue_insert_or_assign_move_assign)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( insert, ((trans_map)) ((init_type_generator_factory)) ((trans_insert_or_assign_copy_assign)(trans_insert_or_assign_move_assign)) ((default_generator)(sequential)(limited_range))) // clang-format on RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/cfoa/exception_helpers.hpp
// Copyright (C) 2023 Christian Mazakas // Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_TEST_CFOA_EXCEPTION_HELPERS_HPP #define BOOST_UNORDERED_TEST_CFOA_EXCEPTION_HELPERS_HPP #include "../helpers/generators.hpp" #include "../helpers/test.hpp" #include "common_helpers.hpp" #include <boost/compat/latch.hpp> #include <boost/container_hash/hash.hpp> #include <boost/core/span.hpp> #include <boost/unordered/unordered_flat_map.hpp> #include <boost/unordered/unordered_flat_set.hpp> #include <algorithm> #include <atomic> #include <cmath> #include <condition_variable> #include <cstddef> #include <iostream> #include <mutex> #include <random> #include <thread> #include <type_traits> #include <vector> static std::size_t const num_threads = std::max(2u, std::thread::hardware_concurrency()); std::atomic_bool should_throw{false}; constexpr std::uint32_t throw_threshold = 2300; constexpr std::uint32_t alloc_throw_threshold = 10; void enable_exceptions() { should_throw = true; } void disable_exceptions() { should_throw = false; } struct exception_tag { }; struct stateful_hash { int x_ = -1; static std::atomic<std::uint32_t> c; void throw_helper() const { auto n = ++c; if (should_throw && ((n + 1) % throw_threshold == 0)) { throw exception_tag{}; } } stateful_hash() {} stateful_hash(stateful_hash const& rhs) : x_(rhs.x_) {} stateful_hash(stateful_hash&& rhs) noexcept { auto tmp = x_; x_ = rhs.x_; rhs.x_ = tmp; } stateful_hash(int const x) : x_{x} {} template <class T> std::size_t operator()(T const& t) const { throw_helper(); std::size_t h = static_cast<std::size_t>(x_); boost::hash_combine(h, t); return h; } bool operator==(stateful_hash const& rhs) const { return x_ == rhs.x_; } friend std::ostream& operator<<(std::ostream& os, stateful_hash const& rhs) { os << "{ x_: " << rhs.x_ << " }"; return os; } friend 
void swap(stateful_hash& lhs, stateful_hash& rhs) noexcept { if (&lhs != &rhs) { std::swap(lhs.x_, rhs.x_); } } }; std::atomic<std::uint32_t> stateful_hash::c{0}; struct stateful_key_equal { int x_ = -1; static std::atomic<std::uint32_t> c; void throw_helper() const { auto n = ++c; if (should_throw && ((n + 1) % throw_threshold == 0)) { throw exception_tag{}; } } stateful_key_equal() = default; stateful_key_equal(stateful_key_equal const&) = default; stateful_key_equal(stateful_key_equal&& rhs) noexcept { auto tmp = x_; x_ = rhs.x_; rhs.x_ = tmp; } stateful_key_equal(int const x) : x_{x} {} template <class T, class U> bool operator()(T const& t, U const& u) const { throw_helper(); return t == u; } bool operator==(stateful_key_equal const& rhs) const { return x_ == rhs.x_; } friend std::ostream& operator<<( std::ostream& os, stateful_key_equal const& rhs) { os << "{ x_: " << rhs.x_ << " }"; return os; } friend void swap(stateful_key_equal& lhs, stateful_key_equal& rhs) noexcept { if (&lhs != &rhs) { std::swap(lhs.x_, rhs.x_); } } }; std::atomic<std::uint32_t> stateful_key_equal::c{0}; static std::atomic<std::uint32_t> allocator_c = {}; template <class T> struct stateful_allocator { int x_ = -1; void throw_helper() const { auto n = ++allocator_c; if (should_throw && ((n + 1) % alloc_throw_threshold == 0)) { throw exception_tag{}; } } using value_type = T; stateful_allocator() = default; stateful_allocator(stateful_allocator const&) = default; stateful_allocator(stateful_allocator&&) = default; stateful_allocator(int const x) : x_{x} {} template <class U> stateful_allocator(stateful_allocator<U> const& rhs) : x_{rhs.x_} { } T* allocate(std::size_t n) { throw_helper(); return static_cast<T*>(::operator new(n * sizeof(T))); } void deallocate(T* p, std::size_t) { ::operator delete(p); } bool operator==(stateful_allocator const& rhs) const { return x_ == rhs.x_; } bool operator!=(stateful_allocator const& rhs) const { return x_ != rhs.x_; } }; struct raii { static 
std::atomic<std::uint32_t> default_constructor; static std::atomic<std::uint32_t> copy_constructor; static std::atomic<std::uint32_t> move_constructor; static std::atomic<std::uint32_t> destructor; static std::atomic<std::uint32_t> copy_assignment; static std::atomic<std::uint32_t> move_assignment; static std::atomic<std::uint32_t> c; void throw_helper() const { auto n = ++c; if (should_throw && ((n + 1) % throw_threshold == 0)) { throw exception_tag{}; } } int x_ = -1; raii() { throw_helper(); ++default_constructor; } raii(int const x) : x_{x} { throw_helper(); ++default_constructor; } raii(raii const& rhs) : x_{rhs.x_} { throw_helper(); ++copy_constructor; } raii(raii&& rhs) noexcept : x_{rhs.x_} { rhs.x_ = -1; ++move_constructor; } ~raii() { ++destructor; } raii& operator=(raii const& rhs) { throw_helper(); ++copy_assignment; if (this != &rhs) { x_ = rhs.x_; } return *this; } raii& operator=(raii&& rhs) noexcept { ++move_assignment; if (this != &rhs) { x_ = rhs.x_; rhs.x_ = -1; } return *this; } friend bool operator==(raii const& lhs, raii const& rhs) { return lhs.x_ == rhs.x_; } friend bool operator!=(raii const& lhs, raii const& rhs) { return !(lhs == rhs); } friend bool operator==(raii const& lhs, int const x) { return lhs.x_ == x; } friend bool operator!=(raii const& lhs, int const x) { return !(lhs.x_ == x); } friend bool operator==(int const x, raii const& rhs) { return rhs.x_ == x; } friend bool operator!=(int const x, raii const& rhs) { return !(rhs.x_ == x); } friend std::ostream& operator<<(std::ostream& os, raii const& rhs) { os << "{ x_: " << rhs.x_ << " }"; return os; } friend std::ostream& operator<<( std::ostream& os, std::pair<raii const, raii> const& rhs) { os << "pair<" << rhs.first << ", " << rhs.second << ">"; return os; } static void reset_counts() { default_constructor = 0; copy_constructor = 0; move_constructor = 0; destructor = 0; copy_assignment = 0; move_assignment = 0; c = 0; stateful_hash::c = 0; stateful_key_equal::c = 0; allocator_c 
= 0; } friend void swap(raii& lhs, raii& rhs) { std::swap(lhs.x_, rhs.x_); } }; std::atomic<std::uint32_t> raii::default_constructor{0}; std::atomic<std::uint32_t> raii::copy_constructor{0}; std::atomic<std::uint32_t> raii::move_constructor{0}; std::atomic<std::uint32_t> raii::destructor{0}; std::atomic<std::uint32_t> raii::copy_assignment{0}; std::atomic<std::uint32_t> raii::move_assignment{0}; std::atomic<std::uint32_t> raii::c{0}; std::size_t hash_value(raii const& r) noexcept { boost::hash<int> hasher; return hasher(r.x_); } template <typename K> struct exception_value_generator { using value_type = raii; value_type operator()(test::random_generator rg) { int* p = nullptr; int a = generate(p, rg); return value_type(a); } }; template <typename K, typename V> struct exception_value_generator<std::pair<K, V> > { static constexpr bool const_key = std::is_const<K>::value; static constexpr bool const_mapped = std::is_const<V>::value; using value_type = std::pair< typename std::conditional<const_key, raii const, raii>::type, typename std::conditional<const_mapped, raii const, raii>::type>; value_type operator()(test::random_generator rg) { int* p = nullptr; int a = generate(p, rg); int b = generate(p, rg); return std::make_pair(raii{a}, raii{b}); } }; struct exception_value_type_generator_factory_type { template <typename Container> exception_value_generator<typename Container::value_type> get() { return {}; } } exception_value_type_generator_factory; struct exception_init_type_generator_factory_type { template <typename Container> exception_value_generator<typename Container::init_type> get() { return {}; } } exception_init_type_generator_factory; struct exception_init_type_generator_type { std::pair<raii, raii> operator()(test::random_generator rg) { int* p = nullptr; int a = generate(p, rg); int b = generate(p, rg); return std::make_pair(raii{a}, raii{b}); } } exception_init_type_generator; template <class T> std::vector<boost::span<T> > split( boost::span<T> s, 
std::size_t const nt /* num threads*/) { std::vector<boost::span<T> > subslices; subslices.reserve(nt); auto a = s.size() / nt; auto b = a; if (s.size() % nt != 0) { ++b; } auto num_a = nt; auto num_b = std::size_t{0}; if (nt * b > s.size()) { num_a = nt * b - s.size(); num_b = nt - num_a; } auto sub_b = s.subspan(0, num_b * b); auto sub_a = s.subspan(num_b * b); for (std::size_t i = 0; i < num_b; ++i) { subslices.push_back(sub_b.subspan(i * b, b)); } for (std::size_t i = 0; i < num_a; ++i) { auto const is_last = i == (num_a - 1); subslices.push_back( sub_a.subspan(i * a, is_last ? boost::dynamic_extent : a)); } return subslices; } template <class T, class F> void thread_runner(std::vector<T>& values, F f) { boost::compat::latch latch(static_cast<std::ptrdiff_t>(num_threads)); std::vector<std::thread> threads; auto subslices = split<T>(values, num_threads); for (std::size_t i = 0; i < num_threads; ++i) { threads.emplace_back([&f, &subslices, i, &latch] { latch.arrive_and_wait(); auto s = subslices[i]; f(s); }); } for (auto& t : threads) { t.join(); } } template <class T> using span_value_type = typename T::value_type; void check_raii_counts() { BOOST_TEST_GT(raii::destructor, 0u); BOOST_TEST_EQ( raii::default_constructor + raii::copy_constructor + raii::move_constructor, raii::destructor); } template <class T> void shuffle_values(std::vector<T>& v) { std::random_device rd; std::mt19937 g(rd()); std::shuffle(v.begin(), v.end(), g); } template <class F> auto make_random_values(std::size_t count, F f) -> std::vector<decltype(f())> { using vector_type = std::vector<decltype(f())>; vector_type v; v.reserve(count); for (std::size_t i = 0; i < count; ++i) { v.emplace_back(f()); } return v; } #endif // BOOST_UNORDERED_TEST_CFOA_EXCEPTION_HELPERS_HPP
0
repos/unordered/test
repos/unordered/test/cfoa/rw_spinlock_test6.cpp
// Copyright 2023 Peter Dimov // Distributed under the Boost Software License, Version 1.0. // https://www.boost.org/LICENSE_1_0.txt #include <boost/unordered/detail/foa/rw_spinlock.hpp> #include <boost/compat/shared_lock.hpp> #include <boost/core/lightweight_test.hpp> #include <mutex> #include <thread> #include <cstdio> using boost::unordered::detail::foa::rw_spinlock; static int count = 0; static rw_spinlock sp; void f( int k, int n ) { std::printf( "Thread %d started.\n", k ); int i = 0; for( ;; ++i ) { { boost::compat::shared_lock<rw_spinlock> lock( sp ); if( count >= n ) break; } { std::lock_guard<rw_spinlock> lock( sp ); if( count >= n ) break; ++count; } } std::printf( "Thread %d finished (%i iterations).\n", k, i ); } int main() { int const N = 1000000; // total iterations int const M = 8; // threads std::thread th[ M ]; for( int i = 0; i < M; ++i ) { th[ i ] = std::thread( f, i, N ); } for( int i = 0; i < M; ++i ) { th[ i ].join(); } BOOST_TEST_EQ( count, N ); return boost::report_errors(); }
0
repos/unordered/test
repos/unordered/test/cfoa/swap_tests.cpp
// Copyright (C) 2023 Christian Mazakas
// Copyright (C) 2023 Joaquin M Lopez Munoz
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// Concurrent swap tests for boost::concurrent_flat_map/set: member and free
// swap, with and without allocator propagation, including swapping while
// other threads insert.

#include "helpers.hpp"

#include <boost/unordered/concurrent_flat_map.hpp>
#include <boost/unordered/concurrent_flat_set.hpp>

test::seed_t initialize_seed{996130204};

using test::default_generator;
using test::limited_range;
using test::sequential;

// Minimal stateful allocator whose propagate_on_container_swap is true, so
// swapping containers also swaps the allocators.
template <class T> struct pocs_allocator
{
  using propagate_on_container_swap = std::true_type;

  int x_ = -1;

  using value_type = T;

  pocs_allocator() = default;
  pocs_allocator(pocs_allocator const&) = default;
  pocs_allocator(pocs_allocator&&) = default;

  pocs_allocator(int const x) : x_{x} {}

  pocs_allocator& operator=(pocs_allocator const& rhs)
  {
    if (this != &rhs) {
      x_ = rhs.x_;
    }
    return *this;
  }

  template <class U> pocs_allocator(pocs_allocator<U> const& rhs) : x_{rhs.x_}
  {
  }

  T* allocate(std::size_t n)
  {
    return static_cast<T*>(::operator new(n * sizeof(T)));
  }

  void deallocate(T* p, std::size_t) { ::operator delete(p); }

  bool operator==(pocs_allocator const& rhs) const { return x_ == rhs.x_; }
  bool operator!=(pocs_allocator const& rhs) const { return x_ != rhs.x_; }

  friend void swap(pocs_allocator& lhs, pocs_allocator& rhs) noexcept
  {
    std::swap(lhs.x_, rhs.x_);
  }
};

using hasher = stateful_hash;
using key_equal = stateful_key_equal;

using map_type = boost::unordered::concurrent_flat_map<raii, raii, hasher,
  key_equal, stateful_allocator<std::pair<raii const, raii> > >;

using set_type = boost::unordered::concurrent_flat_set<raii, hasher,
  key_equal, stateful_allocator<raii> >;

// Detects whether X::swap(X&) is declared noexcept.
template <class T> struct is_nothrow_member_swappable
{
  static bool const value =
    noexcept(std::declval<T&>().swap(std::declval<T&>()));
};

// swap is noexcept for always-equal or propagating allocators, but not for
// the non-propagating stateful_allocator used by map_type/set_type.
BOOST_STATIC_ASSERT(is_nothrow_member_swappable<
  replace_allocator<map_type, std::allocator> >::value);

BOOST_STATIC_ASSERT(is_nothrow_member_swappable<
  replace_allocator<map_type, pocs_allocator> >::value);

BOOST_STATIC_ASSERT(!is_nothrow_member_swappable<map_type>::value);

BOOST_STATIC_ASSERT(is_nothrow_member_swappable<
  replace_allocator<set_type, std::allocator> >::value);

BOOST_STATIC_ASSERT(is_nothrow_member_swappable<
  replace_allocator<set_type, pocs_allocator> >::value);

BOOST_STATIC_ASSERT(!is_nothrow_member_swappable<set_type>::value);

namespace {
  // Swapper functors so the same tests can exercise both the member swap and
  // the free boost::unordered::swap overload.
  struct
  {
    template <class T> void operator()(T& x1, T& x2) const { x1.swap(x2); }
  } member_fn_swap;

  struct
  {
    template <class T> void operator()(T& x1, T& x2) const
    {
      using boost::unordered::swap;
      swap(x1, x2);
    }
  } free_fn_swap;

  // Concurrently swaps two pre-filled containers back and forth from every
  // thread and checks that the end state is one of the two valid outcomes
  // (contents/hasher/key_eq travel together, allocators only move if pocs).
  template <class X, class F, class GF>
  void swap_tests(X*, F swapper, GF gen_factory, test::random_generator rg)
  {
    using value_type = typename X::value_type;
    using allocator_type = typename X::allocator_type;

    bool const pocs = boost::allocator_propagate_on_container_swap<
      allocator_type>::type::value;

    auto gen = gen_factory.template get<X>();
    auto vals1 = make_random_values(1024 * 8, [&] { return gen(rg); });
    auto vals2 = make_random_values(1024 * 4, [&] { return gen(rg); });

    auto reference_cont1 = reference_container<X>(vals1.begin(), vals1.end());
    auto reference_cont2 = reference_container<X>(vals2.begin(), vals2.end());

    {
      raii::reset_counts();

      X x1(vals1.begin(), vals1.end(), vals1.size(), hasher(1), key_equal(2),
        allocator_type(3));
      // non-pocs swap requires equal allocators, so only diverge when pocs
      X x2(vals2.begin(), vals2.end(), vals2.size(), hasher(2), key_equal(1),
        pocs ? allocator_type(4) : allocator_type(3));

      if (pocs) {
        BOOST_TEST(x1.get_allocator() != x2.get_allocator());
      } else {
        BOOST_TEST(x1.get_allocator() == x2.get_allocator());
      }

      auto const old_cc = +raii::copy_constructor;
      auto const old_mc = +raii::move_constructor;

      // every thread swaps twice; an even total leaves contents in place,
      // but interleavings may end either way -- both are checked below
      thread_runner(vals1, [&x1, &x2, swapper](boost::span<value_type> s) {
        (void)s;
        swapper(x1, x2);
        swapper(x2, x1);
      });

      // swapping must move internals only, never copy/move elements
      BOOST_TEST_EQ(raii::copy_constructor, old_cc);
      BOOST_TEST_EQ(raii::move_constructor, old_mc);

      if (pocs) {
        if (x1.get_allocator() == allocator_type(3)) {
          BOOST_TEST(x2.get_allocator() == allocator_type(4));
        } else {
          BOOST_TEST(x1.get_allocator() == allocator_type(4));
          BOOST_TEST(x2.get_allocator() == allocator_type(3));
        }
      } else {
        BOOST_TEST(x1.get_allocator() == allocator_type(3));
        BOOST_TEST(x1.get_allocator() == x2.get_allocator());
      }

      // contents, hasher and key_eq must have travelled together
      if (x1.size() == reference_cont1.size()) {
        test_matches_reference(x1, reference_cont1);
        test_matches_reference(x2, reference_cont2);
        BOOST_TEST_EQ(x1.hash_function(), hasher(1));
        BOOST_TEST_EQ(x1.key_eq(), key_equal(2));
        BOOST_TEST_EQ(x2.hash_function(), hasher(2));
        BOOST_TEST_EQ(x2.key_eq(), key_equal(1));
      } else {
        test_matches_reference(x2, reference_cont1);
        test_matches_reference(x1, reference_cont2);
        BOOST_TEST_EQ(x1.hash_function(), hasher(2));
        BOOST_TEST_EQ(x1.key_eq(), key_equal(1));
        BOOST_TEST_EQ(x2.hash_function(), hasher(1));
        BOOST_TEST_EQ(x2.key_eq(), key_equal(2));
      }
    }

    check_raii_counts();
  }

  // Two threads insert into x1/x2 while a third thread swaps them whenever
  // signalled; verifies swapping is safe under concurrent modification.
  template <class X, class F, class GF>
  void insert_and_swap(
    X*, F swapper, GF gen_factory, test::random_generator rg)
  {
    using allocator_type = typename X::allocator_type;

    auto gen = gen_factory.template get<X>();
    auto vals1 = make_random_values(1024 * 8, [&] { return gen(rg); });
    auto vals2 = make_random_values(1024 * 4, [&] { return gen(rg); });

    {
      raii::reset_counts();

      X x1(vals1.size(), hasher(1), key_equal(2), allocator_type(3));
      X x2(vals2.size(), hasher(2), key_equal(1), allocator_type(3));

      std::thread t1, t2, t3;
      boost::compat::latch l(2);

      std::mutex m;
      std::condition_variable cv;
      std::atomic_bool done1{false}, done2{false};
      std::atomic<unsigned> num_swaps{0};
      bool ready = false;

      // inserter 1: periodically signals the swapper thread to run
      t1 = std::thread([&x1, &vals1, &l, &done1, &cv, &ready, &m] {
        l.arrive_and_wait();

        for (std::size_t idx = 0; idx < vals1.size(); ++idx) {
          auto const& val = vals1[idx];
          x1.insert(val);
          if (idx % (vals1.size() / 128) == 0) {
            {
              std::unique_lock<std::mutex> lk(m);
              ready = true;
            }
            cv.notify_all();
          }
          std::this_thread::yield();
        }

        done1 = true;
        // final wake-up so the swapper can observe done1
        {
          std::unique_lock<std::mutex> lk(m);
          ready = true;
        }
        cv.notify_all();
      });

      // inserter 2: just inserts, yielding now and then
      t2 = std::thread([&x2, &vals2, &l, &done2, &ready, &cv, &m] {
        l.arrive_and_wait();

        for (std::size_t idx = 0; idx < vals2.size(); ++idx) {
          auto const& val = vals2[idx];
          x2.insert(val);
          if (idx % 100 == 0) {
            std::this_thread::yield();
          }
        }

        done2 = true;
        {
          std::unique_lock<std::mutex> lk(m);
          ready = true;
        }
        cv.notify_all();
      });

      // swapper: waits for a signal, swaps, repeats until both inserters end
      t3 = std::thread(
        [&x1, &x2, &m, &cv, &done1, &done2, &num_swaps, swapper, &ready] {
          do {
            {
              std::unique_lock<std::mutex> lk(m);
              cv.wait(lk, [&ready] { return ready; });
              ready = false;
            }
            swapper(x1, x2);
            ++num_swaps;
            std::this_thread::yield();
          } while (!done1 || !done2);

          BOOST_TEST(done1);
          BOOST_TEST(done2);
        });

      t1.join();
      t2.join();
      t3.join();

      BOOST_TEST_GT(num_swaps, 0u);

      // hasher/key_eq pairs must still travel together after all swaps
      if (x1.hash_function() == hasher(1)) {
        BOOST_TEST_EQ(x1.key_eq(), key_equal(2));
        BOOST_TEST_EQ(x2.hash_function(), hasher(2));
        BOOST_TEST_EQ(x2.key_eq(), key_equal(1));
      } else {
        BOOST_TEST_EQ(x1.hash_function(), hasher(2));
        BOOST_TEST_EQ(x1.key_eq(), key_equal(1));
        BOOST_TEST_EQ(x2.hash_function(), hasher(1));
        BOOST_TEST_EQ(x2.key_eq(), key_equal(2));
      }
    }

    check_raii_counts();
  }

  // Null pointers used only to carry container types into UNORDERED_TEST.
  map_type* map;
  replace_allocator<map_type, pocs_allocator>* pocs_map;
  set_type* set;
  replace_allocator<set_type, pocs_allocator>* pocs_set;
} // namespace

// clang-format off
UNORDERED_TEST(
  swap_tests,
  ((map)(pocs_map)(set)(pocs_set))
  ((member_fn_swap)(free_fn_swap))
  ((value_type_generator_factory))
  ((default_generator)(sequential)(limited_range)))

UNORDERED_TEST(insert_and_swap,
  ((map)(set))
  ((member_fn_swap)(free_fn_swap))
  ((value_type_generator_factory))
  ((default_generator)(sequential)(limited_range)))
// clang-format on

RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/cfoa/assign_tests.cpp
// Copyright (C) 2023 Christian Mazakas
// Copyright (C) 2023 Joaquin M Lopez Munoz
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// Concurrent assignment tests for boost::concurrent_flat_map/set: copy, move,
// initializer-list and cross-container (flat <-> concurrent) assignment,
// with and without allocator propagation, verified via raii object counts.

#include "helpers.hpp"

#include <boost/unordered/concurrent_flat_map.hpp>
#include <boost/unordered/concurrent_flat_set.hpp>

// the tests deliberately self-assign / self-move; silence those warnings
#if defined(__clang__) && defined(__has_warning)

#if __has_warning("-Wself-assign-overloaded")
#pragma clang diagnostic ignored "-Wself-assign-overloaded"
#endif

#if __has_warning("-Wself-move")
#pragma clang diagnostic ignored "-Wself-move"
#endif

#endif /* defined(__clang__) && defined(__has_warning) */

#if defined(BOOST_GCC) && BOOST_GCC >= 130000
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wself-move"
#endif

test::seed_t initialize_seed{2762556623};

using test::default_generator;
using test::limited_range;
using test::sequential;

using hasher = stateful_hash;
using key_equal = stateful_key_equal;

using map_type = boost::unordered::concurrent_flat_map<raii, raii, hasher,
  key_equal, stateful_allocator<std::pair<raii const, raii> > >;

using set_type = boost::unordered::concurrent_flat_set<raii, hasher,
  key_equal, stateful_allocator<raii> >;

// "fancy" variants use an allocator with a non-raw pointer type
using fancy_map_type = boost::unordered::concurrent_flat_map<raii, raii,
  hasher, key_equal, stateful_allocator2<std::pair<raii const, raii> > >;

using fancy_set_type = boost::unordered::concurrent_flat_set<raii, hasher,
  key_equal, stateful_allocator2<raii> >;

// Null pointers used only to carry container types into UNORDERED_TEST.
map_type* test_map;
set_type* test_set;
fancy_map_type* fancy_test_map;
fancy_set_type* fancy_test_set;

// Init lists contain duplicate keys on purpose: the containers must dedupe.
std::initializer_list<map_type::value_type> map_init_list{
  {raii{0}, raii{0}},
  {raii{1}, raii{1}},
  {raii{2}, raii{2}},
  {raii{3}, raii{3}},
  {raii{4}, raii{4}},
  {raii{5}, raii{5}},
  {raii{6}, raii{6}},
  {raii{6}, raii{6}},
  {raii{7}, raii{7}},
  {raii{8}, raii{8}},
  {raii{9}, raii{9}},
  {raii{10}, raii{10}},
  {raii{9}, raii{9}},
  {raii{8}, raii{8}},
  {raii{7}, raii{7}},
  {raii{6}, raii{6}},
  {raii{5}, raii{5}},
  {raii{4}, raii{4}},
  {raii{3}, raii{3}},
  {raii{2}, raii{2}},
  {raii{1}, raii{1}},
  {raii{0}, raii{0}},
};

std::initializer_list<set_type::value_type> set_init_list{
  raii{0},
  raii{1},
  raii{2},
  raii{3},
  raii{4},
  raii{5},
  raii{6},
  raii{6},
  raii{7},
  raii{8},
  raii{9},
  raii{10},
  raii{9},
  raii{8},
  raii{7},
  raii{6},
  raii{5},
  raii{4},
  raii{3},
  raii{2},
  raii{1},
  raii{0},
};

auto test_map_and_init_list=std::make_pair(test_map,map_init_list);
auto test_set_and_init_list=std::make_pair(test_set,set_init_list);

// Fancy-pointer allocator with configurable copy/move propagation. It
// over-allocates by one element and stamps x_ into the extra slot so that
// deallocation through a mismatched allocator instance is caught.
template <class T,bool POCCA, bool POCMA>
struct poca_allocator: fancy_allocator<T>
{
  using super = fancy_allocator<T>;
  using pointer = typename super::pointer;
  using propagate_on_container_copy_assignment =
    std::integral_constant<bool, POCCA>;
  using propagate_on_container_move_assignment =
    std::integral_constant<bool, POCMA>;

  int x_ = -1;

  template <class U> struct rebind
  {
    typedef poca_allocator<U, POCCA, POCMA> other;
  };

  poca_allocator() = default;
  poca_allocator(poca_allocator const&) = default;
  poca_allocator(poca_allocator &&) = default;

  poca_allocator(int const x) : x_{x} {}

  poca_allocator& operator=(poca_allocator const& rhs)
  {
    if (this != &rhs) {
      super::operator=(rhs);
      x_ = rhs.x_;
    }
    return *this;
  }

  template <class U>
  poca_allocator(poca_allocator<U, POCCA, POCMA> const& rhs)
      : super{rhs}, x_{rhs.x_}
  {
  }

  pointer allocate(std::size_t n)
  {
    // reserve one extra element, tag it with x_, hand back the rest
    auto p = super::allocate(n + 1);
    reinterpret_cast<char&>(*p) = static_cast<char>(x_);
    return p + std::ptrdiff_t(1);
  }

  void deallocate(pointer p, std::size_t n)
  {
    // the tag must match this allocator's x_, i.e. memory is returned to
    // an equal allocator instance
    p = p + std::ptrdiff_t(-1);
    BOOST_TEST_EQ(reinterpret_cast<char&>(*p), static_cast<char>(x_));
    super::deallocate(p, n + 1);
  }

  bool operator==(poca_allocator const& rhs) const { return x_ == rhs.x_; }
  bool operator!=(poca_allocator const& rhs) const { return x_ != rhs.x_; }
};

// propagate on copy assignment only
template <class T> struct pocca_allocator: poca_allocator<T, true, false>
{
  pocca_allocator() = default;
  pocca_allocator(pocca_allocator const&) = default;
  pocca_allocator(pocca_allocator &&) = default;
  using poca_allocator<T, true, false>::poca_allocator;

  pocca_allocator& operator=(pocca_allocator const&) = default;
};

// propagate on move assignment only
template <class T> struct pocma_allocator: poca_allocator<T, false, true>
{
  pocma_allocator() = default;
  pocma_allocator(pocma_allocator const&) = default;
  pocma_allocator(pocma_allocator &&) = default;
  using poca_allocator<T, false, true>::poca_allocator;

  pocma_allocator& operator=(pocma_allocator const&) = default;
};

namespace {
  // Copy assignment under concurrency: each thread copy-assigns from/to the
  // shared container; raii counters pin down exactly how many element
  // copies/destructions the operation may perform.
  template <class X, class GF>
  void copy_assign(X*, GF gen_factory, test::random_generator rg)
  {
    using value_type = typename X::value_type;
    static constexpr auto value_type_cardinality =
      value_cardinality<value_type>::value;
    using allocator_type = typename X::allocator_type;

    auto gen = gen_factory.template get<X>();
    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
    auto reference_cont = reference_container<X>(values.begin(), values.end());

    // lhs empty, rhs empty
    {
      raii::reset_counts();

      X x(0, hasher(1), key_equal(2), allocator_type(3));

      thread_runner(values, [&x](boost::span<value_type> s) {
        (void)s;

        X y;

        BOOST_TEST(x.empty());
        BOOST_TEST(y.empty());

        y = x;

        BOOST_TEST_EQ(x.hash_function(), y.hash_function());
        BOOST_TEST_EQ(x.key_eq(), y.key_eq());
        // allocator does not propagate (non-pocca stateful_allocator)
        BOOST_TEST(x.get_allocator() != y.get_allocator());
      });

      BOOST_TEST_EQ(raii::destructor, 0u);
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
      BOOST_TEST_EQ(raii::copy_constructor, 0u);
    }

    // lhs non-empty, rhs empty
    {
      raii::reset_counts();

      X x(0, hasher(1), key_equal(2), allocator_type(3));

      auto const old_size = reference_cont.size();
      thread_runner(values, [&x, &values](boost::span<value_type> s) {
        (void)s;

        X y(values.size());
        for (auto const& v : values) {
          y.insert(v);
        }

        BOOST_TEST(x.empty());
        BOOST_TEST(!y.empty());

        y = x;

        BOOST_TEST_EQ(x.hash_function(), y.hash_function());
        BOOST_TEST_EQ(x.key_eq(), y.key_eq());
        BOOST_TEST(x.get_allocator() != y.get_allocator());
        BOOST_TEST(y.empty());
      });

      // each thread's y destroyed its previously inserted elements
      BOOST_TEST_EQ(
        raii::destructor, num_threads * (value_type_cardinality * old_size));
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
      BOOST_TEST_EQ(
        raii::copy_constructor,
        num_threads * value_type_cardinality * reference_cont.size());
    }
    check_raii_counts();

    // lhs empty, rhs non-empty
    {
      raii::reset_counts();

      X x(values.size(), hasher(1), key_equal(2), allocator_type(3));
      for (auto const& v : values) {
        x.insert(v);
      }

      auto const old_cc = +raii::copy_constructor;

      thread_runner(
        values, [&x, &reference_cont](boost::span<value_type> s) {
          (void)s;

          X y;

          BOOST_TEST(!x.empty());
          BOOST_TEST(y.empty());

          y = x;

          BOOST_TEST_EQ(x.hash_function(), y.hash_function());
          BOOST_TEST_EQ(x.key_eq(), y.key_eq());
          BOOST_TEST(x.get_allocator() != y.get_allocator());

          test_matches_reference(y, reference_cont);
        });

      BOOST_TEST_EQ(
        raii::destructor, num_threads * value_type_cardinality * x.size());
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
      BOOST_TEST_EQ(
        raii::copy_constructor,
        old_cc + (num_threads * value_type_cardinality * x.size()));
    }
    check_raii_counts();

    // lhs non-empty, rhs non-empty
    {
      raii::reset_counts();

      X x(values.size(), hasher(1), key_equal(2), allocator_type(3));
      for (auto const& v : values) {
        x.insert(v);
      }

      auto const old_size = x.size();
      auto const old_cc = +raii::copy_constructor;

      thread_runner(values, [&x, &values](boost::span<value_type> s) {
        (void)s;

        X y(values.size());
        for (auto const& v : values) {
          y.insert(v);
        }

        BOOST_TEST(!x.empty());
        BOOST_TEST(!y.empty());

        y = x;

        BOOST_TEST_EQ(x.hash_function(), y.hash_function());
        BOOST_TEST_EQ(x.key_eq(), y.key_eq());
        BOOST_TEST(x.get_allocator() != y.get_allocator());
      });

      // y's own elements plus the copies received from x are destroyed
      BOOST_TEST_EQ(
        raii::destructor, 2 * num_threads * value_type_cardinality * old_size);
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
      BOOST_TEST_EQ(
        raii::copy_constructor,
        old_cc + (2 * num_threads * value_type_cardinality * x.size()));
    }
    check_raii_counts();

    // self-assign
    {
      raii::reset_counts();

      X x(values.size(), hasher(1), key_equal(2), allocator_type(3));
      for (auto const& v : values) {
        x.insert(v);
      }

      auto const old_cc = +raii::copy_constructor;

      thread_runner(
        values, [&x, &reference_cont](boost::span<value_type> s) {
          (void)s;

          BOOST_TEST(!x.empty());

          x = x;

          // self-assignment must be a no-op
          BOOST_TEST_EQ(x.hash_function(), hasher(1));
          BOOST_TEST_EQ(x.key_eq(), key_equal(2));
          BOOST_TEST(x.get_allocator() == allocator_type(3));

          test_matches_reference(x, reference_cont);
        });

      BOOST_TEST_EQ(raii::destructor, 0u);
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
      BOOST_TEST_EQ(raii::copy_constructor, old_cc);
    }
    check_raii_counts();

    // propagation
    {
      using pocca_container_type = replace_allocator<X, pocca_allocator>;
      using pocca_allocator_type =
        typename pocca_container_type::allocator_type;

      raii::reset_counts();

      pocca_container_type x(
        values.size(), hasher(1), key_equal(2), pocca_allocator_type(3));
      for (auto const& v : values) {
        x.insert(v);
      }

      auto const old_size = x.size();
      auto const old_cc = +raii::copy_constructor;

      thread_runner(values, [&x, &values](boost::span<value_type> s) {
        (void)s;

        pocca_container_type y(values.size());
        for (auto const& v : values) {
          y.insert(v);
        }

        BOOST_TEST(!x.empty());
        BOOST_TEST(!y.empty());

        BOOST_TEST(x.get_allocator() != y.get_allocator());

        y = x;

        BOOST_TEST_EQ(x.hash_function(), y.hash_function());
        BOOST_TEST_EQ(x.key_eq(), y.key_eq());
        // pocca: the allocator must have propagated with the copy
        BOOST_TEST(x.get_allocator() == y.get_allocator());
      });

      BOOST_TEST_EQ(
        raii::destructor, 2 * num_threads * value_type_cardinality * old_size);
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
      BOOST_TEST_EQ(
        raii::copy_constructor,
        old_cc + (2 * num_threads * value_type_cardinality * x.size()));
    }
    check_raii_counts();
  }

  // Move assignment under concurrency: exactly one thread receives the moved
  // contents (num_transfers == 1); the rest see an empty source. Covers equal
  // and unequal allocators, pocma propagation, and self-move.
  template <class X, class GF>
  void move_assign(X*, GF gen_factory, test::random_generator rg)
  {
    using value_type = typename X::value_type;
    static constexpr auto value_type_cardinality =
      value_cardinality<value_type>::value;
    using allocator_type = typename X::allocator_type;
    using pocma_container_type = replace_allocator<X, pocma_allocator>;
    using pocma_allocator_type =
      typename pocma_container_type::allocator_type;

    auto gen = gen_factory.template get<X>();

    // noexcept only when the allocator is always-equal (std::allocator)
    BOOST_STATIC_ASSERT(
      std::is_nothrow_move_assignable<
        replace_allocator<X, std::allocator> >::value);

    BOOST_STATIC_ASSERT(
      !std::is_nothrow_move_assignable<
        replace_allocator<X, stateful_allocator> >::value);

    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
    auto reference_cont = reference_container<X>(values.begin(), values.end());

    // move assignment has more complex requirements than copying
    // equal allocators:
    // lhs empty, rhs non-empty
    // lhs non-empty, rhs empty
    // lhs non-empty, rhs non-empty
    //
    // unequal allocators:
    // lhs non-empty, rhs non-empty
    //
    // pocma
    // self move-assign

    // lhs empty, rhs empty
    {
      raii::reset_counts();

      X x(0, hasher(1), key_equal(2), allocator_type(3));

      std::atomic<unsigned> num_transfers{0};

      thread_runner(
        values, [&x, &num_transfers](boost::span<value_type> s) {
          (void)s;

          X y(0, hasher(2), key_equal(1), allocator_type(3));

          BOOST_TEST(x.empty());
          BOOST_TEST(y.empty());

          BOOST_TEST(x.get_allocator() == y.get_allocator());

          y = std::move(x);

          if (y.hash_function() == hasher(1)) {
            ++num_transfers;
            BOOST_TEST_EQ(y.key_eq(), key_equal(2));
          } else {
            BOOST_TEST_EQ(y.hash_function(), hasher(2));
            BOOST_TEST_EQ(y.key_eq(), key_equal(1));
          }

          // moved-from x holds y's old functors (stateful_hash/key_equal
          // move with swap semantics)
          BOOST_TEST_EQ(x.hash_function(), hasher(2));
          BOOST_TEST_EQ(x.key_eq(), key_equal(1));

          BOOST_TEST(x.get_allocator() == y.get_allocator());
        });

      BOOST_TEST_EQ(num_transfers, 1u);
      BOOST_TEST_EQ(raii::destructor, 0u);
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
      BOOST_TEST_EQ(raii::copy_constructor, 0u);
    }

    // lhs non-empty, rhs empty
    {
      raii::reset_counts();

      X x(0, hasher(1), key_equal(2), allocator_type(3));

      std::atomic<unsigned> num_transfers{0};

      thread_runner(
        values, [&x, &values, &num_transfers](boost::span<value_type> s) {
          (void)s;

          X y(values.size(), hasher(2), key_equal(1), allocator_type(3));
          for (auto const& v : values) {
            y.insert(v);
          }

          BOOST_TEST(x.empty());
          BOOST_TEST(!y.empty());

          BOOST_TEST(x.get_allocator() == y.get_allocator());

          y = std::move(x);

          if (y.hash_function() == hasher(1)) {
            ++num_transfers;
            BOOST_TEST_EQ(y.key_eq(), key_equal(2));
          } else {
            BOOST_TEST_EQ(y.hash_function(), hasher(2));
            BOOST_TEST_EQ(y.key_eq(), key_equal(1));
          }

          BOOST_TEST_EQ(x.hash_function(), hasher(2));
          BOOST_TEST_EQ(x.key_eq(), key_equal(1));

          BOOST_TEST(x.get_allocator() == y.get_allocator());
          BOOST_TEST(y.empty());
        });

      BOOST_TEST_EQ(num_transfers, 1u);
      BOOST_TEST_EQ(
        raii::destructor,
        num_threads * value_type_cardinality * reference_cont.size());
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
      BOOST_TEST_EQ(
        raii::copy_constructor,
        num_threads * value_type_cardinality * reference_cont.size());
    }
    check_raii_counts();

    // lhs empty, rhs non-empty
    {
      raii::reset_counts();

      X x(values.size(), hasher(1), key_equal(2), allocator_type(3));
      for (auto const& v : values) {
        x.insert(v);
      }

      auto const old_cc = +raii::copy_constructor;
      auto const old_mc = +raii::move_constructor;

      std::atomic<unsigned> num_transfers{0};

      thread_runner(values, [&x, &reference_cont,
                              &num_transfers](boost::span<value_type> s) {
        (void)s;

        X y(allocator_type(3));

        BOOST_TEST(y.empty());
        BOOST_TEST(x.get_allocator() == y.get_allocator());

        y = std::move(x);

        // only the winning thread receives the elements; the losers get an
        // already-emptied, default-functor x
        if (!y.empty()) {
          ++num_transfers;
          test_matches_reference(y, reference_cont);
          BOOST_TEST_EQ(y.hash_function(), hasher(1));
          BOOST_TEST_EQ(y.key_eq(), key_equal(2));
        } else {
          BOOST_TEST_EQ(y.hash_function(), hasher());
          BOOST_TEST_EQ(y.key_eq(), key_equal());
        }

        BOOST_TEST(x.empty());
        BOOST_TEST_EQ(x.hash_function(), hasher());
        BOOST_TEST_EQ(x.key_eq(), key_equal());

        BOOST_TEST(x.get_allocator() == y.get_allocator());
      });

      BOOST_TEST_EQ(num_transfers, 1u);
      BOOST_TEST_EQ(
        raii::destructor, value_type_cardinality * reference_cont.size());
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
      BOOST_TEST_EQ(raii::copy_constructor, old_cc);
      BOOST_TEST_EQ(raii::move_constructor, old_mc);
    }
    check_raii_counts();

    // lhs non-empty, rhs non-empty
    {
      raii::reset_counts();

      X x(values.size(), hasher(1), key_equal(2), allocator_type(3));
      for (auto const& v : values) {
        x.insert(v);
      }

      auto const old_size = x.size();
      auto const old_cc = +raii::copy_constructor;
      auto const old_mc = +raii::move_constructor;

      std::atomic<unsigned> num_transfers{0};

      thread_runner(values, [&x, &values, &num_transfers, &reference_cont](
                              boost::span<value_type> s) {
        (void)s;

        X y(values.size(), hasher(2), key_equal(1), allocator_type(3));
        for (auto const& v : values) {
          y.insert(v);
        }

        BOOST_TEST(!y.empty());
        BOOST_TEST(x.get_allocator() == y.get_allocator());

        y = std::move(x);

        if (y.hash_function() == hasher(1)) {
          ++num_transfers;
          test_matches_reference(y, reference_cont);
          BOOST_TEST_EQ(y.key_eq(), key_equal(2));
        } else {
          BOOST_TEST_EQ(y.hash_function(), hasher(2));
          BOOST_TEST_EQ(y.key_eq(), key_equal(1));
        }

        BOOST_TEST(x.empty());
        BOOST_TEST_EQ(x.hash_function(), hasher(2));
        BOOST_TEST_EQ(x.key_eq(), key_equal(1));

        BOOST_TEST(x.get_allocator() == y.get_allocator());
      });

      BOOST_TEST_EQ(num_transfers, 1u);
      BOOST_TEST_EQ(
        raii::destructor,
        value_type_cardinality * old_size +
          num_threads * value_type_cardinality * old_size);
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
      BOOST_TEST_EQ(raii::move_constructor, old_mc);
      BOOST_TEST_EQ(
        raii::copy_constructor,
        old_cc + (num_threads * value_type_cardinality *
                   reference_cont.size()));
    }
    check_raii_counts();

    // lhs non-empty, rhs non-empty, unequal allocators, no propagation
    {
      raii::reset_counts();

      X x(values.size(), hasher(1), key_equal(2), allocator_type(3));
      for (auto const& v : values) {
        x.insert(v);
      }

      auto const old_size = x.size();
      auto const old_cc = +raii::copy_constructor;
      auto const old_mc = +raii::move_constructor;

      std::atomic<unsigned> num_transfers{0};

      thread_runner(values, [&x, &values, &num_transfers, &reference_cont](
                              boost::span<value_type> s) {
        (void)s;

        X y(values.size(), hasher(2), key_equal(1), allocator_type(13));
        for (auto const& v : values) {
          y.insert(v);
        }

        BOOST_TEST(
          !boost::allocator_is_always_equal<allocator_type>::type::value);

        BOOST_TEST(!boost::allocator_propagate_on_container_move_assignment<
          allocator_type>::type::value);

        BOOST_TEST(!y.empty());
        BOOST_TEST(x.get_allocator() != y.get_allocator());

        // unequal allocators + no pocma: elements must be moved one by one
        y = std::move(x);

        if (y.hash_function() == hasher(1)) {
          ++num_transfers;
          test_matches_reference(y, reference_cont);
          BOOST_TEST_EQ(y.key_eq(), key_equal(2));
        } else {
          BOOST_TEST_EQ(y.hash_function(), hasher(2));
          BOOST_TEST_EQ(y.key_eq(), key_equal(1));
        }

        BOOST_TEST(x.empty());
        BOOST_TEST_EQ(x.hash_function(), hasher(2));
        BOOST_TEST_EQ(x.key_eq(), key_equal(1));

        BOOST_TEST(x.get_allocator() != y.get_allocator());
      });

      BOOST_TEST_EQ(num_transfers, 1u);
      BOOST_TEST_EQ(
        raii::destructor,
        2 * value_type_cardinality * old_size +
          num_threads * value_type_cardinality * old_size);
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
      BOOST_TEST_EQ(
        raii::move_constructor, old_mc + value_type_cardinality * old_size);
      BOOST_TEST_EQ(
        raii::copy_constructor,
        old_cc + (num_threads * value_type_cardinality *
                   reference_cont.size()));
    }
    check_raii_counts();

    // lhs non-empty, rhs non-empty, pocma
    {
      raii::reset_counts();

      pocma_container_type x(
        values.size(), hasher(1), key_equal(2), pocma_allocator_type(3));
      for (auto const& v : values) {
        x.insert(v);
      }

      auto const old_size = x.size();
      auto const old_cc = +raii::copy_constructor;
      auto const old_mc = +raii::move_constructor;

      std::atomic<unsigned> num_transfers{0};

      thread_runner(values, [&x, &values, &num_transfers, &reference_cont](
                              boost::span<value_type> s) {
        (void)s;

        pocma_container_type y(
          values.size(), hasher(2), key_equal(1), pocma_allocator_type(13));
        for (auto const& v : values) {
          y.insert(v);
        }

        BOOST_TEST(!y.empty());
        BOOST_TEST(x.get_allocator() != y.get_allocator());

        y = std::move(x);

        if (y.hash_function() == hasher(1)) {
          ++num_transfers;
          test_matches_reference(y, reference_cont);
          BOOST_TEST_EQ(y.key_eq(), key_equal(2));
        } else {
          BOOST_TEST_EQ(y.hash_function(), hasher(2));
          BOOST_TEST_EQ(y.key_eq(), key_equal(1));
        }

        BOOST_TEST(x.empty());
        BOOST_TEST_EQ(x.hash_function(), hasher(2));
        BOOST_TEST_EQ(x.key_eq(), key_equal(1));

        // pocma: the allocator travelled with the moved contents
        BOOST_TEST(x.get_allocator() == y.get_allocator());
      });

      BOOST_TEST_EQ(num_transfers, 1u);
      BOOST_TEST_EQ(
        raii::destructor,
        value_type_cardinality * old_size +
          num_threads * value_type_cardinality * old_size);
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
      BOOST_TEST_EQ(raii::move_constructor, old_mc);
      BOOST_TEST_EQ(
        raii::copy_constructor,
        old_cc + (num_threads * value_type_cardinality *
                   reference_cont.size()));
    }
    check_raii_counts();

    // self-assign
    {
      raii::reset_counts();

      X x(values.size(), hasher(1), key_equal(2), allocator_type(3));
      for (auto const& v : values) {
        x.insert(v);
      }

      auto const old_cc = +raii::copy_constructor;
      auto const old_mc = +raii::move_constructor;

      thread_runner(
        values, [&x, &reference_cont](boost::span<value_type> s) {
          (void)s;

          x = std::move(x);

          // self-move must leave the container untouched
          BOOST_TEST(!x.empty());
          BOOST_TEST_EQ(x.hash_function(), hasher(1));
          BOOST_TEST_EQ(x.key_eq(), key_equal(2));
          BOOST_TEST(x.get_allocator() == allocator_type(3));

          test_matches_reference(x, reference_cont);
        });

      BOOST_TEST_EQ(raii::destructor, 0u);
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
      BOOST_TEST_EQ(raii::move_constructor, old_mc);
      BOOST_TEST_EQ(raii::copy_constructor, old_cc);
    }
    check_raii_counts();
  }

  // All threads assign the same initializer_list into one shared container;
  // only one thread's copies survive (the rest are destroyed on overwrite).
  template <class X, class IL>
  void initializer_list_assign(std::pair<X*, IL> p)
  {
    using value_type = typename X::value_type;
    static constexpr auto value_type_cardinality =
      value_cardinality<value_type>::value;
    using allocator_type = typename X::allocator_type;

    auto init_list = p.second;

    auto reference_cont =
      reference_container<X>(init_list.begin(), init_list.end());

    auto v = std::vector<value_type>(init_list.begin(), init_list.end());

    {
      raii::reset_counts();

      X x(0, hasher(1), key_equal(2), allocator_type(3));

      thread_runner(v, [&x, &init_list](boost::span<value_type> s) {
        (void)s;
        x = init_list;
      });

      test_matches_reference(x, reference_cont);
      BOOST_TEST_EQ(x.hash_function(), hasher(1));
      BOOST_TEST_EQ(x.key_eq(), key_equal(2));
      BOOST_TEST(x.get_allocator() == allocator_type(3));

      BOOST_TEST_EQ(
        raii::copy_constructor,
        num_threads * value_type_cardinality * x.size());
      BOOST_TEST_EQ(
        raii::destructor,
        (num_threads - 1) * value_type_cardinality * x.size());
      BOOST_TEST_EQ(raii::move_constructor, 0u);
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
    }
    check_raii_counts();
  }

  // Two threads insert the same values (shuffled) into c1/c2 while a third
  // thread repeatedly copy-assigns them into each other; both containers
  // must converge to the reference contents.
  template <class X, class GF>
  void insert_and_assign(X*, GF gen_factory, test::random_generator rg)
  {
    using allocator_type = typename X::allocator_type;

    auto gen = gen_factory.template get<X>();

    std::thread t1, t2, t3;
    boost::compat::latch start_latch(2), end_latch(2);

    auto v1 = make_random_values(1024 * 16, [&] { return gen(rg); });
    auto v2 = v1;
    shuffle_values(v2);

    auto reference_cont = reference_container<X>(v1.begin(), v1.end());

    raii::reset_counts();
    {
      X c1(v1.size(), hasher(1), key_equal(2), allocator_type(3));
      X c2(v2.size(), hasher(1), key_equal(2), allocator_type(3));

      t1 = std::thread([&v1, &c1, &start_latch, &end_latch] {
        start_latch.arrive_and_wait();
        for (auto const& v : v1) {
          c1.insert(v);
        }
        end_latch.arrive_and_wait();
      });

      t2 = std::thread([&v2, &c2, &end_latch, &start_latch] {
        start_latch.arrive_and_wait();
        for (auto const& v : v2) {
          c2.insert(v);
        }
        end_latch.arrive_and_wait();
      });

      std::atomic<unsigned> num_assignments{0};
      t3 = std::thread([&c1, &c2, &end_latch, &num_assignments] {
        // wait until there is something to assign
        while (c1.empty() && c2.empty()) {
          std::this_thread::sleep_for(std::chrono::microseconds(10));
        }

        do {
          c1 = c2;
          std::this_thread::sleep_for(std::chrono::milliseconds(100));
          c2 = c1;
          std::this_thread::sleep_for(std::chrono::milliseconds(100));
          ++num_assignments;
        } while (!end_latch.try_wait());
      });

      t1.join();
      t2.join();
      t3.join();

      BOOST_TEST_GT(num_assignments, 0u);

      test_fuzzy_matches_reference(c1, reference_cont, rg);
      test_fuzzy_matches_reference(c2, reference_cont, rg);
    }
    check_raii_counts();
  }

  // Move-assignment between the concurrent container and its non-concurrent
  // flat counterpart, equal and unequal allocators.
  template <class X, class GF>
  void flat_move_assign(X*, GF gen_factory, test::random_generator rg)
  {
    using value_type = typename X::value_type;
    static constexpr auto value_type_cardinality =
      value_cardinality<value_type>::value;
    using allocator_type = typename X::allocator_type;

    auto gen = gen_factory.template get<X>();
    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
    auto reference_cont = reference_container<X>(values.begin(), values.end());

    /*
     * basically test that a temporary container is materialized and we
     * move-assign from that
     *
     * we don't need to be super rigorous here because we already have tests
     * for container assignment, we're just testing that a temporary is
     * materialized
     */

    // flat -> concurrent, equal allocators: steal the storage, no per-element
    // work beyond the initial fill
    {
      raii::reset_counts();

      flat_container<X> flat(values.begin(), values.end(), values.size(),
        hasher(1), key_equal(2), allocator_type(3));

      X x(0, hasher(2), key_equal(1), allocator_type(3));
      BOOST_TEST(flat.get_allocator() == x.get_allocator());

      x = std::move(flat);
      BOOST_TEST(flat.empty());
      BOOST_TEST_EQ(x.size(), reference_cont.size());
      test_fuzzy_matches_reference(x, reference_cont, rg);

      BOOST_TEST_EQ(x.hash_function(), hasher(1));
      BOOST_TEST_EQ(x.key_eq(), key_equal(2));

      BOOST_TEST_EQ(
        raii::copy_constructor,
        value_type_cardinality * reference_cont.size());
      BOOST_TEST_EQ(raii::destructor, 0u);

      BOOST_TEST_EQ(raii::move_constructor, 0u);
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
    }
    check_raii_counts();

    // concurrent -> flat, equal allocators
    {
      raii::reset_counts();

      X x(values.begin(), values.end(), values.size(), hasher(1),
        key_equal(2), allocator_type(3));

      flat_container<X> flat(0, hasher(2), key_equal(1), allocator_type(3));
      BOOST_TEST(flat.get_allocator() == x.get_allocator());

      flat = std::move(x);
      BOOST_TEST(x.empty());
      BOOST_TEST_EQ(flat.size(), reference_cont.size());

      BOOST_TEST_EQ(flat.hash_function(), hasher(1));
      BOOST_TEST_EQ(flat.key_eq(), key_equal(2));

      BOOST_TEST_EQ(
        raii::copy_constructor,
        value_type_cardinality * reference_cont.size());
      BOOST_TEST_EQ(raii::destructor, 0u);

      BOOST_TEST_EQ(raii::move_constructor, 0u);
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
    }
    check_raii_counts();

    // flat -> concurrent, unequal allocators: elements moved one by one
    {
      raii::reset_counts();

      flat_container<X> flat(values.begin(), values.end(), values.size(),
        hasher(1), key_equal(2), allocator_type(3));

      X x(0, hasher(2), key_equal(1), allocator_type(4));
      BOOST_TEST(flat.get_allocator() != x.get_allocator());

      x = std::move(flat);
      BOOST_TEST(flat.empty());
      BOOST_TEST_EQ(x.size(), reference_cont.size());
      test_fuzzy_matches_reference(x, reference_cont, rg);

      BOOST_TEST_EQ(x.hash_function(), hasher(1));
      BOOST_TEST_EQ(x.key_eq(), key_equal(2));

      BOOST_TEST_EQ(
        raii::copy_constructor,
        value_type_cardinality * reference_cont.size());
      BOOST_TEST_EQ(
        raii::destructor, value_type_cardinality * reference_cont.size());

      BOOST_TEST_EQ(
        raii::move_constructor,
        value_type_cardinality * reference_cont.size());
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
    }
    check_raii_counts();

    // concurrent -> flat, unequal allocators
    {
      raii::reset_counts();

      X x(values.begin(), values.end(), values.size(), hasher(1),
        key_equal(2), allocator_type(3));

      flat_container<X> flat(0, hasher(2), key_equal(1), allocator_type(4));
      BOOST_TEST(flat.get_allocator() != x.get_allocator());

      flat = std::move(x);
      BOOST_TEST(x.empty());
      BOOST_TEST_EQ(flat.size(), reference_cont.size());

      BOOST_TEST_EQ(flat.hash_function(), hasher(1));
      BOOST_TEST_EQ(flat.key_eq(), key_equal(2));

      BOOST_TEST_EQ(
        raii::copy_constructor,
        value_type_cardinality * reference_cont.size());
      BOOST_TEST_EQ(
        raii::destructor, value_type_cardinality * reference_cont.size());

      BOOST_TEST_EQ(
        raii::move_constructor,
        value_type_cardinality * reference_cont.size());
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
    }
    check_raii_counts();
  }
} // namespace

// clang-format off
UNORDERED_TEST(
  copy_assign,
  ((test_map)(test_set))
  ((value_type_generator_factory))
  ((default_generator)(sequential)(limited_range)))

UNORDERED_TEST(
  move_assign,
  ((test_map)(test_set))
  ((value_type_generator_factory))
  ((default_generator)(sequential)(limited_range)))

UNORDERED_TEST(
  initializer_list_assign,
  ((test_map_and_init_list)(test_set_and_init_list)))

UNORDERED_TEST(
  insert_and_assign,
  ((test_map)(test_set))
  ((init_type_generator_factory))
  ((default_generator)(sequential)(limited_range)))

UNORDERED_TEST(
  flat_move_assign,
  ((test_map)(test_set)(fancy_test_map)(fancy_test_set))
  ((init_type_generator_factory))
  ((default_generator)(sequential)(limited_range)))
// clang-format on

RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/cfoa/helpers.hpp
// Copyright (C) 2023 Christian Mazakas // Copyright (C) 2023 Joaquin M Lopez Munoz // Copyright (C) 2024 Braden Ganetsky // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_TEST_CFOA_HELPERS_HPP #define BOOST_UNORDERED_TEST_CFOA_HELPERS_HPP #include "../helpers/generators.hpp" #include "../helpers/test.hpp" #include "common_helpers.hpp" #include <boost/compat/latch.hpp> #include <boost/container_hash/hash.hpp> #include <boost/core/span.hpp> #include <boost/unordered/concurrent_flat_map_fwd.hpp> #include <boost/unordered/concurrent_flat_set_fwd.hpp> #include <boost/unordered/unordered_flat_map.hpp> #include <boost/unordered/unordered_flat_set.hpp> #include <algorithm> #include <atomic> #include <cmath> #include <condition_variable> #include <cstddef> #include <iostream> #include <mutex> #include <random> #include <thread> #include <type_traits> #include <vector> static std::size_t const num_threads = std::max(2u, std::thread::hardware_concurrency()); struct transp_hash { using is_transparent = void; template <class T> std::size_t operator()(T const& t) const noexcept { return boost::hash<T>()(t); } }; struct transp_key_equal { using is_transparent = void; template <class T, class U> bool operator()(T const& lhs, U const& rhs) const { return lhs == rhs; } }; struct stateful_hash { int x_ = -1; stateful_hash() = default; stateful_hash(stateful_hash const&) = default; stateful_hash(stateful_hash&& rhs) noexcept { auto tmp = x_; x_ = rhs.x_; rhs.x_ = tmp; } stateful_hash(int const x) : x_{x} {} template <class T> std::size_t operator()(T const& t) const noexcept { std::size_t h = static_cast<std::size_t>(x_); boost::hash_combine(h, t); return h; } bool operator==(stateful_hash const& rhs) const { return x_ == rhs.x_; } friend std::ostream& operator<<(std::ostream& os, stateful_hash const& rhs) { os << "{ x_: " << rhs.x_ << " }"; return os; } 
friend void swap(stateful_hash& lhs, stateful_hash& rhs) noexcept { if (&lhs != &rhs) { std::swap(lhs.x_, rhs.x_); } } }; struct stateful_key_equal { int x_ = -1; stateful_key_equal() = default; stateful_key_equal(stateful_key_equal const&) = default; stateful_key_equal(stateful_key_equal&& rhs) noexcept { auto tmp = x_; x_ = rhs.x_; rhs.x_ = tmp; } stateful_key_equal(int const x) : x_{x} {} template <class T, class U> bool operator()(T const& t, U const& u) const { return t == u; } bool operator==(stateful_key_equal const& rhs) const { return x_ == rhs.x_; } friend std::ostream& operator<<( std::ostream& os, stateful_key_equal const& rhs) { os << "{ x_: " << rhs.x_ << " }"; return os; } friend void swap(stateful_key_equal& lhs, stateful_key_equal& rhs) noexcept { if (&lhs != &rhs) { std::swap(lhs.x_, rhs.x_); } } }; template <class T> struct cfoa_ptr { private: template <class> friend struct stateful_allocator2; T* p_ = nullptr; cfoa_ptr(T* p) : p_(p) {} public: using element_type = T; cfoa_ptr() = default; cfoa_ptr(std::nullptr_t) : p_(nullptr){}; template <class U> using rebind = cfoa_ptr<U>; T* operator->() const noexcept { return p_; } static cfoa_ptr<T> pointer_to(element_type& r) { return {std::addressof(r)}; } }; template <class T> struct stateful_allocator { int x_ = -1; using value_type = T; stateful_allocator() = default; stateful_allocator(stateful_allocator const&) = default; stateful_allocator(stateful_allocator&&) = default; stateful_allocator(int const x) : x_{x} {} template <class U> stateful_allocator(stateful_allocator<U> const& rhs) : x_{rhs.x_} { } T* allocate(std::size_t n) { return static_cast<T*>(::operator new(n * sizeof(T))); } void deallocate(T* p, std::size_t) { ::operator delete(p); } bool operator==(stateful_allocator const& rhs) const { return x_ == rhs.x_; } bool operator!=(stateful_allocator const& rhs) const { return x_ != rhs.x_; } }; template <class T> struct stateful_allocator2 { int x_ = -1; using value_type = T; using pointer 
= cfoa_ptr<T>; stateful_allocator2() = default; stateful_allocator2(stateful_allocator2 const&) = default; stateful_allocator2(stateful_allocator2&&) = default; stateful_allocator2(int const x) : x_{x} {} template <class U> stateful_allocator2(stateful_allocator2<U> const& rhs) : x_{rhs.x_} { } pointer allocate(std::size_t n) { return {static_cast<T*>(::operator new(n * sizeof(T)))}; } void deallocate(pointer p, std::size_t) { ::operator delete(p.p_); } bool operator==(stateful_allocator2 const& rhs) const { return x_ == rhs.x_; } bool operator!=(stateful_allocator2 const& rhs) const { return x_ != rhs.x_; } }; template <class Tag> struct basic_raii { static std::atomic<std::uint32_t> default_constructor; static std::atomic<std::uint32_t> copy_constructor; static std::atomic<std::uint32_t> move_constructor; static std::atomic<std::uint32_t> destructor; static std::atomic<std::uint32_t> copy_assignment; static std::atomic<std::uint32_t> move_assignment; int x_ = -1; basic_raii() { ++default_constructor; } basic_raii(int const x) : x_{x} { ++default_constructor; } basic_raii(basic_raii const& rhs) : x_{rhs.x_} { ++copy_constructor; } basic_raii(basic_raii&& rhs) noexcept : x_{rhs.x_} { rhs.x_ = -1; ++move_constructor; } ~basic_raii() { ++destructor; } basic_raii& operator=(basic_raii const& rhs) { ++copy_assignment; if (this != &rhs) { x_ = rhs.x_; } return *this; } basic_raii& operator=(basic_raii&& rhs) noexcept { ++move_assignment; if (this != &rhs) { x_ = rhs.x_; rhs.x_ = -1; } return *this; } friend bool operator==(basic_raii const& lhs, basic_raii const& rhs) { return lhs.x_ == rhs.x_; } friend bool operator!=(basic_raii const& lhs, basic_raii const& rhs) { return !(lhs == rhs); } friend bool operator==(basic_raii const& lhs, int const x) { return lhs.x_ == x; } friend bool operator!=(basic_raii const& lhs, int const x) { return !(lhs.x_ == x); } friend bool operator==(int const x, basic_raii const& rhs) { return rhs.x_ == x; } friend bool operator!=(int const 
x, basic_raii const& rhs) { return !(rhs.x_ == x); } friend std::ostream& operator<<(std::ostream& os, basic_raii const& rhs) { os << "{ x_: " << rhs.x_ << " }"; return os; } friend std::ostream& operator<<( std::ostream& os, std::pair<basic_raii const, basic_raii> const& rhs) { os << "pair<" << rhs.first << ", " << rhs.second << ">"; return os; } static void reset_counts() { default_constructor = 0; copy_constructor = 0; move_constructor = 0; destructor = 0; copy_assignment = 0; move_assignment = 0; } friend void swap(basic_raii& lhs, basic_raii& rhs) { std::swap(lhs.x_, rhs.x_); } }; template <class Tag> std::atomic<std::uint32_t> basic_raii<Tag>::default_constructor(0); template <class Tag> std::atomic<std::uint32_t> basic_raii<Tag>::copy_constructor(0); template <class Tag> std::atomic<std::uint32_t> basic_raii<Tag>::move_constructor(0); template <class Tag> std::atomic<std::uint32_t> basic_raii<Tag>::destructor(0); template <class Tag> std::atomic<std::uint32_t> basic_raii<Tag>::copy_assignment(0); template <class Tag> std::atomic<std::uint32_t> basic_raii<Tag>::move_assignment(0); struct raii_tag_ { }; class raii : public basic_raii<raii_tag_> { using basic_raii::basic_raii; }; template <class Tag> std::size_t hash_value(basic_raii<Tag> const& r) noexcept { boost::hash<int> hasher; return hasher(r.x_); } std::size_t hash_value(raii const& r) noexcept { boost::hash<int> hasher; return hasher(r.x_); } namespace std { template <class Tag> struct hash<basic_raii<Tag>> { std::size_t operator()(basic_raii<Tag> const& r) const noexcept { return hash_value(r); } }; template <> struct hash<raii> { std::size_t operator()(raii const& r) const noexcept { return hash_value(r); } }; } // namespace std template <class F> auto make_random_values(std::size_t count, F f) -> std::vector<decltype(f())> { using vector_type = std::vector<decltype(f())>; vector_type v; v.reserve(count); for (std::size_t i = 0; i < count; ++i) { v.emplace_back(f()); } return v; } template <typename 
K> struct value_generator { using value_type = raii; value_type operator()(test::random_generator rg) { int* p = nullptr; int a = generate(p, rg); return value_type(a); } }; template <typename K, typename V> struct value_generator<std::pair<K, V> > { static constexpr bool const_key = std::is_const<K>::value; static constexpr bool const_mapped = std::is_const<V>::value; using value_type = std::pair< typename std::conditional<const_key, raii const, raii>::type, typename std::conditional<const_mapped, raii const, raii>::type>; value_type operator()(test::random_generator rg) { int* p = nullptr; int a = generate(p, rg); int b = generate(p, rg); return std::make_pair(raii{a}, raii{b}); } }; struct value_type_generator_factory_type { template <typename Container> value_generator<typename Container::value_type> get() { return {}; } } value_type_generator_factory; struct init_type_generator_factory_type { template <typename Container> value_generator<typename Container::init_type> get() { return {}; } } init_type_generator_factory; template <class T> std::vector<boost::span<T> > split( boost::span<T> s, std::size_t const nt /* num threads*/) { std::vector<boost::span<T> > subslices; subslices.reserve(nt); auto a = s.size() / nt; auto b = a; if (s.size() % nt != 0) { ++b; } auto num_a = nt; auto num_b = std::size_t{0}; if (nt * b > s.size()) { num_a = nt * b - s.size(); num_b = nt - num_a; } auto sub_b = s.subspan(0, num_b * b); auto sub_a = s.subspan(num_b * b); for (std::size_t i = 0; i < num_b; ++i) { subslices.push_back(sub_b.subspan(i * b, b)); } for (std::size_t i = 0; i < num_a; ++i) { auto const is_last = i == (num_a - 1); subslices.push_back( sub_a.subspan(i * a, is_last ? 
boost::dynamic_extent : a)); } return subslices; } template <class T, class F> void thread_runner(std::vector<T>& values, F f) { boost::compat::latch latch(static_cast<std::ptrdiff_t>(num_threads)); std::vector<std::thread> threads; auto subslices = split<T>(values, num_threads); for (std::size_t i = 0; i < num_threads; ++i) { threads.emplace_back([&f, &subslices, i, &latch] { latch.arrive_and_wait(); auto s = subslices[i]; f(s); }); } for (auto& t : threads) { t.join(); } } template <class T> using span_value_type = typename T::value_type; void check_raii_counts() { BOOST_TEST_GT(raii::destructor, 0u); BOOST_TEST_EQ( raii::default_constructor + raii::copy_constructor + raii::move_constructor, raii::destructor); } template <class T> void shuffle_values(std::vector<T>& v) { std::random_device rd; std::mt19937 g(rd()); std::shuffle(v.begin(), v.end(), g); } template <class T> class ptr; template <class T> class const_ptr; template <class T> class fancy_allocator; struct void_ptr { template <typename T> friend class ptr; private: void* ptr_; public: void_ptr() : ptr_(0) {} template <typename T> explicit void_ptr(ptr<T> const& x) : ptr_(x.ptr_) {} // I'm not using the safe bool idiom because the containers should be // able to cope with bool conversions. operator bool() const { return !!ptr_; } bool operator==(void_ptr const& x) const { return ptr_ == x.ptr_; } bool operator!=(void_ptr const& x) const { return ptr_ != x.ptr_; } }; class void_const_ptr { template <typename T> friend class const_ptr; private: void* ptr_; public: void_const_ptr() : ptr_(0) {} template <typename T> explicit void_const_ptr(const_ptr<T> const& x) : ptr_(x.ptr_) { } // I'm not using the safe bool idiom because the containers should be // able to cope with bool conversions. 
operator bool() const { return !!ptr_; } bool operator==(void_const_ptr const& x) const { return ptr_ == x.ptr_; } bool operator!=(void_const_ptr const& x) const { return ptr_ != x.ptr_; } }; template <class T> class ptr { friend class fancy_allocator<T>; friend class const_ptr<T>; friend struct void_ptr; T* ptr_; ptr(T* x) : ptr_(x) {} public: ptr() : ptr_(0) {} ptr(std::nullptr_t) : ptr_(nullptr) {} explicit ptr(void_ptr const& x) : ptr_((T*)x.ptr_) {} T& operator*() const { return *ptr_; } T* operator->() const { return ptr_; } ptr& operator++() { ++ptr_; return *this; } ptr operator++(int) { ptr tmp(*this); ++ptr_; return tmp; } ptr operator+(std::ptrdiff_t s) const { return ptr<T>(ptr_ + s); } friend ptr operator+(std::ptrdiff_t s, ptr p) { return ptr<T>(s + p.ptr_); } std::ptrdiff_t operator-(ptr p) const { return ptr_ - p.ptr_; } ptr operator-(std::ptrdiff_t s) const { return ptr(ptr_ - s); } T& operator[](std::ptrdiff_t s) const { return ptr_[s]; } bool operator!() const { return !ptr_; } static ptr pointer_to(T& p) { return ptr(std::addressof(p)); } // I'm not using the safe bool idiom because the containers should be // able to cope with bool conversions. 
operator bool() const { return !!ptr_; } bool operator==(ptr const& x) const { return ptr_ == x.ptr_; } bool operator!=(ptr const& x) const { return ptr_ != x.ptr_; } bool operator<(ptr const& x) const { return ptr_ < x.ptr_; } bool operator>(ptr const& x) const { return ptr_ > x.ptr_; } bool operator<=(ptr const& x) const { return ptr_ <= x.ptr_; } bool operator>=(ptr const& x) const { return ptr_ >= x.ptr_; } }; template <class T> class const_ptr { friend class fancy_allocator<T>; friend struct const_void_ptr; T const* ptr_; const_ptr(T const* ptr) : ptr_(ptr) {} public: const_ptr() : ptr_(0) {} const_ptr(ptr<T> const& x) : ptr_(x.ptr_) {} explicit const_ptr(void_const_ptr const& x) : ptr_((T const*)x.ptr_) {} T const& operator*() const { return *ptr_; } T const* operator->() const { return ptr_; } const_ptr& operator++() { ++ptr_; return *this; } const_ptr operator++(int) { const_ptr tmp(*this); ++ptr_; return tmp; } const_ptr operator+(std::ptrdiff_t s) const { return const_ptr(ptr_ + s); } friend const_ptr operator+(std::ptrdiff_t s, const_ptr p) { return ptr<T>(s + p.ptr_); } T const& operator[](int s) const { return ptr_[s]; } bool operator!() const { return !ptr_; } operator bool() const { return !!ptr_; } bool operator==(const_ptr const& x) const { return ptr_ == x.ptr_; } bool operator!=(const_ptr const& x) const { return ptr_ != x.ptr_; } bool operator<(const_ptr const& x) const { return ptr_ < x.ptr_; } bool operator>(const_ptr const& x) const { return ptr_ > x.ptr_; } bool operator<=(const_ptr const& x) const { return ptr_ <= x.ptr_; } bool operator>=(const_ptr const& x) const { return ptr_ >= x.ptr_; } }; template <class T> class fancy_allocator { public: typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; typedef void_ptr void_pointer; typedef void_const_ptr const_void_pointer; typedef ptr<T> pointer; typedef const_ptr<T> const_pointer; typedef T& reference; typedef T const& const_reference; typedef T value_type; template <class U> 
struct rebind { typedef fancy_allocator<U> other; }; fancy_allocator() {} template <class Y> fancy_allocator(fancy_allocator<Y> const&) {} fancy_allocator(fancy_allocator const&) {} ~fancy_allocator() {} pointer address(reference r) { return pointer(&r); } const_pointer address(const_reference r) { return const_pointer(&r); } pointer allocate(size_type n) { return pointer(static_cast<T*>(::operator new(n * sizeof(T)))); } template <class Y> pointer allocate(size_type n, const_ptr<Y>) { return pointer(static_cast<T*>(::operator new(n * sizeof(T)))); } void deallocate(pointer p, size_type) { ::operator delete((void*)p.ptr_); } template <class U, class... Args> void construct(U* p, Args&&... args) { new ((void*)p) U(std::forward<Args>(args)...); } template <class U> void destroy(U* p) { p->~U(); } size_type max_size() const { return 1000; } public: fancy_allocator& operator=(fancy_allocator const&) { return *this; } }; #endif // BOOST_UNORDERED_TEST_CFOA_HELPERS_HPP
0
repos/unordered/test
repos/unordered/test/cfoa/try_emplace_tests.cpp
// Copyright (C) 2023 Christian Mazakas
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// Concurrent try_emplace tests: each functor below hammers a
// concurrent_flat_map from num_threads threads and then asserts the exact
// number of raii copy/move/default constructions the operation performed.
// The counter expectations are order-sensitive; do not reorder statements.

#include "helpers.hpp"

#include <boost/unordered/concurrent_flat_map.hpp>

#include <boost/core/ignore_unused.hpp>

namespace {
  test::seed_t initialize_seed(511933564);

  // try_emplace(key lvalue, mapped-ctor arg): key copied once per unique
  // insert, mapped value constructed in place (counts as default ctor).
  struct lvalue_try_emplacer_type
  {
    template <class T, class X> void operator()(std::vector<T>& values, X& x)
    {
      std::atomic<std::uint64_t> num_inserts{0};
      thread_runner(values, [&x, &num_inserts](boost::span<T> s) {
        for (auto const& r : s) {
          bool b = x.try_emplace(r.first, r.second.x_);
          if (b) {
            ++num_inserts;
          }
        }
      });

      // exactly one successful insert per unique key
      BOOST_TEST_EQ(num_inserts, x.size());
      BOOST_TEST_EQ(raii::copy_constructor, x.size());
      BOOST_TEST_EQ(raii::default_constructor, x.size());
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
    }
  } lvalue_try_emplacer;

  // Same, but reserve() first so no rehash occurs => zero moves expected.
  struct norehash_lvalue_try_emplacer_type : public lvalue_try_emplacer_type
  {
    template <class T, class X> void operator()(std::vector<T>& values, X& x)
    {
      x.reserve(values.size());
      lvalue_try_emplacer_type::operator()(values, x);
      BOOST_TEST_EQ(raii::move_constructor, 0u);
    }
  } norehash_lvalue_try_emplacer;

  // try_emplace(key rvalue, mapped-ctor arg). If T is the (const-key)
  // value_type, the "move" of the const key degrades to a copy.
  struct rvalue_try_emplacer_type
  {
    template <class T, class X> void operator()(std::vector<T>& values, X& x)
    {
      BOOST_TEST_EQ(raii::copy_constructor, 0u);

      std::atomic<std::uint64_t> num_inserts{0};
      thread_runner(values, [&x, &num_inserts](boost::span<T> s) {
        for (auto& r : s) {
          bool b = x.try_emplace(std::move(r.first), r.second.x_);
          if (b) {
            ++num_inserts;
          }
        }
      });

      BOOST_TEST_EQ(num_inserts, x.size());

      if (std::is_same<T, typename X::value_type>::value) {
        // const key: std::move is a no-op, key is copied
        BOOST_TEST_EQ(raii::copy_constructor, x.size());
      } else {
        BOOST_TEST_EQ(raii::copy_constructor, 0u);
        // >= because rehashing may add extra moves
        BOOST_TEST_GE(raii::move_constructor, x.size());
      }

      BOOST_TEST_EQ(raii::default_constructor, x.size());
      BOOST_TEST_EQ(raii::copy_assignment, 0u);
      BOOST_TEST_EQ(raii::move_assignment, 0u);
    }
  } rvalue_try_emplacer;

  // Rvalue insert without rehashing: move counts become exact.
  struct norehash_rvalue_try_emplacer_type : public rvalue_try_emplacer_type
  {
    template <class T, class X> void operator()(std::vector<T>& values, X& x)
    {
      x.reserve(values.size());

      BOOST_TEST_EQ(raii::copy_constructor, 0u);
      BOOST_TEST_EQ(raii::move_constructor, 0u);

      rvalue_try_emplacer_type::operator()(values, x);

      if (std::is_same<T, typename X::value_type>::value) {
        BOOST_TEST_EQ(raii::copy_constructor, x.size())
        BOOST_TEST_EQ(raii::move_constructor, 0u);
      } else {
        BOOST_TEST_EQ(raii::copy_constructor, 0u);
        BOOST_TEST_EQ(raii::move_constructor, x.size());
      }
    }
  } norehash_rvalue_try_emplacer;
0
repos/unordered/test
repos/unordered/test/cfoa/rehash_tests.cpp
// Copyright (C) 2023 Christian Mazakas
// Copyright (C) 2023 Joaquin M Lopez Munoz
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// rehash()/reserve() tests for concurrent_flat_map/set, including a stress
// test that rehashes concurrently with inserts and erases.

#include "helpers.hpp"

#include <boost/unordered/concurrent_flat_map.hpp>
#include <boost/unordered/concurrent_flat_set.hpp>

using test::default_generator;
using test::limited_range;
using test::sequential;

using hasher = stateful_hash;
using key_equal = stateful_key_equal;

using map_type = boost::unordered::concurrent_flat_map<raii, raii, hasher,
  key_equal, stateful_allocator<std::pair<raii const, raii> > >;

using set_type = boost::unordered::concurrent_flat_set<raii, hasher, key_equal,
  stateful_allocator<raii> >;

// null pointers used only to select the container type in UNORDERED_TEST
map_type* test_map;
set_type* test_set;

namespace {
  test::seed_t initialize_seed{748775921};

  // rehash() on an empty container: bucket count grows to at least the
  // request, shrinks on smaller requests, and returns to 0 on rehash(0).
  template <typename X> void rehash_no_insert(X*)
  {
    using allocator_type = typename X::allocator_type;
    X x(0, hasher(1), key_equal(2), allocator_type(3));
    BOOST_TEST_EQ(x.bucket_count(), 0u);

    x.rehash(1024);
    BOOST_TEST_GE(x.bucket_count(), 1024u);

    x.rehash(512);
    BOOST_TEST_GE(x.bucket_count(), 512u);
    BOOST_TEST_LT(x.bucket_count(), 1024u);

    x.rehash(0);
    BOOST_TEST_EQ(x.bucket_count(), 0u);
  }

  // reserve() on an empty container: like rehash_no_insert but the request
  // is an element count, so expectations are scaled by max_load_factor.
  template <typename X> void reserve_no_insert(X*)
  {
    using allocator_type = typename X::allocator_type;
    using size_type = typename X::size_type;

    X x(0, hasher(1), key_equal(2), allocator_type(3));

    // minimum bucket count needed to hold c elements
    auto f = [&x](double c) {
      return static_cast<size_type>(std::ceil(c / x.max_load_factor()));
    };

    BOOST_TEST_EQ(x.bucket_count(), f(0.0));

    x.reserve(1024);
    BOOST_TEST_GE(x.bucket_count(), f(1024.0));

    x.reserve(512);
    BOOST_TEST_GE(x.bucket_count(), f(512.0));
    BOOST_TEST_LT(x.bucket_count(), f(1024.0));

    x.reserve(0);
    BOOST_TEST_EQ(x.bucket_count(), f(0.0));
  }

  // Stress test: t1 inserts, t2 erases (in random order), and t3 rehashes
  // to random bucket counts whenever signalled, all concurrently. The latch
  // holds 2 slots because only t1/t2 synchronize their start; t3 is driven
  // purely by the condition variable.
  template <class X, class GF>
  void insert_and_erase_with_rehash(
    X*, GF gen_factory, test::random_generator rg)
  {
    using allocator_type = typename X::allocator_type;

    auto gen = gen_factory.template get<X>();
    auto vals1 = make_random_values(1024 * 8, [&] { return gen(rg); });

    // erase in a shuffled order so erasures interleave unpredictably with
    // the sequential inserts
    auto erase_indices = std::vector<std::size_t>(vals1.size());
    for (std::size_t idx = 0; idx < erase_indices.size(); ++idx) {
      erase_indices[idx] = idx;
    }
    shuffle_values(erase_indices);

    auto reference_cont = reference_container<X>();
    reference_cont.insert(vals1.begin(), vals1.end());

    {
      raii::reset_counts();

      X x(0, hasher(1), key_equal(2), allocator_type(3));

      std::thread t1, t2, t3;
      boost::compat::latch l(2);

      std::mutex m;
      std::condition_variable cv;
      std::atomic_bool done1{false}, done2{false};
      std::atomic<unsigned long long> call_count{0};
      bool ready = false; // guarded by m; wakes t3

      auto const old_mc = +raii::move_constructor;
      BOOST_TEST_EQ(old_mc, 0u);

      // t1: inserts every value; periodically pokes t3 to rehash
      t1 = std::thread([&x, &vals1, &l, &done1, &cv, &ready, &m] {
        l.arrive_and_wait();

        for (std::size_t idx = 0; idx < vals1.size(); ++idx) {
          auto const& val = vals1[idx];
          x.insert(val);

          if (idx % (vals1.size() / 128) == 0) {
            {
              std::unique_lock<std::mutex> lk(m);
              ready = true;
            }
            cv.notify_all();
            std::this_thread::yield();
          }
        }

        done1 = true;
        // final wake-up so t3 can observe done1 and exit
        {
          std::unique_lock<std::mutex> lk(m);
          ready = true;
        }
        cv.notify_all();
      });

      // t2: erases all keys in shuffled order (misses are fine — the key
      // may not have been inserted yet or may already be gone)
      t2 = std::thread(
        [&x, &vals1, &erase_indices, &l, &done2, &cv, &m, &ready] {
          l.arrive_and_wait();

          for (std::size_t idx = 0; idx < erase_indices.size(); ++idx) {
            auto const& val = vals1[erase_indices[idx]];
            x.erase(get_key(val));
            if (idx % 100 == 0) {
              std::this_thread::yield();
            }
          }

          done2 = true;
          {
            std::unique_lock<std::mutex> lk(m);
            ready = true;
          }
          cv.notify_all();
        });

      // t3: each time it is signalled, rehashes to a random bucket count;
      // loops until both producers are done. rand() is only called from
      // this one thread.
      t3 = std::thread(
        [&x, &vals1, &m, &cv, &done1, &done2, &call_count, &ready] {
          do {
            {
              std::unique_lock<std::mutex> lk(m);
              cv.wait(lk, [&ready] { return ready; });
              ready = false;
            }

            auto const bc = static_cast<std::size_t>(rand()) % vals1.size();
            x.rehash(bc);
            call_count += 1;

            std::this_thread::yield();
          } while (!done1 || !done2);

          BOOST_TEST(done1);
          BOOST_TEST(done2);
        });

      t1.join();
      t2.join();
      t3.join();

      // the rehasher must have run at least once for the test to count
      BOOST_TEST_GE(call_count, 1u);

      test_fuzzy_matches_reference(x, reference_cont, rg);
    }

    check_raii_counts();
  }
} // namespace

// clang-format off
UNORDERED_TEST(
  rehash_no_insert,
  ((test_map)(test_set)))

UNORDERED_TEST(
  reserve_no_insert,
  ((test_map)(test_set)))

UNORDERED_TEST(
  insert_and_erase_with_rehash,
  ((test_map)(test_set))
  ((value_type_generator_factory))
  ((default_generator)(sequential)(limited_range)))
// clang-format on

RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/cfoa/exception_erase_tests.cpp
// Copyright (C) 2023 Christian Mazakas
// Copyright (C) 2023 Joaquin M Lopez Munoz
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// Exception-safety tests for the erase operations of
// boost::unordered::concurrent_flat_map / concurrent_flat_set. Erasure is
// performed from multiple threads while the harness injects exceptions
// (enable_exceptions()/disable_exceptions()), and the global raii counters
// are checked afterwards to verify that nothing was leaked or destroyed
// twice.

#include "exception_helpers.hpp"

#include <boost/unordered/concurrent_flat_map.hpp>
#include <boost/unordered/concurrent_flat_set.hpp>

#include <boost/core/ignore_unused.hpp>

namespace {
  test::seed_t initialize_seed(3202923);

  // Erases every value by key (x.erase(key)) from all threads while
  // exceptions are injected; verifies the final size and that erasure only
  // ran destructors (no constructions of any kind).
  struct lvalue_eraser_type
  {
    template <class T, class X> void operator()(std::vector<T>& values, X& x)
    {
      // number of raii subobjects per container value (key-only for sets,
      // key+mapped for maps)
      static constexpr auto value_type_cardinality =
        value_cardinality<typename X::value_type>::value;

      std::atomic<std::uint64_t> num_erased{0};

      auto const old_size = x.size();

      // snapshot the raii counters; unary `+` forces a by-value copy
      auto const old_dc = +raii::default_constructor;
      auto const old_cc = +raii::copy_constructor;
      auto const old_mc = +raii::move_constructor;
      auto const old_d = +raii::destructor;

      enable_exceptions();
      // every thread walks the full `values` vector, so each key is
      // attempted by all threads but can be erased by at most one
      thread_runner(values, [&values, &num_erased, &x](boost::span<T>) {
        for (auto const& v : values) {
          try {
            auto count = x.erase(get_key(v));
            BOOST_TEST_LE(count, 1u);
            BOOST_TEST_GE(count, 0u);
            num_erased += count;
          } catch (...) {
            // injected exception; the counter checks below hold either way
          }
        }
      });
      disable_exceptions();

      BOOST_TEST_EQ(x.size(), old_size - num_erased);

      // erase must not construct anything...
      BOOST_TEST_EQ(raii::default_constructor, old_dc);
      BOOST_TEST_EQ(raii::copy_constructor, old_cc);
      BOOST_TEST_EQ(raii::move_constructor, old_mc);
      // ...and must destroy exactly the erased elements
      BOOST_TEST_EQ(
        raii::destructor, old_d + value_type_cardinality * num_erased);
    }
  } lvalue_eraser;

  // Conditionally erases by key (x.erase_if(key, pred)) under exception
  // injection; the predicate only removes values whose payload exceeds half
  // of the maximum payload present in the container.
  struct lvalue_eraser_if_type
  {
    template <class T, class X> void operator()(std::vector<T>& values, X& x)
    {
      using value_type = typename X::value_type;
      static constexpr auto value_type_cardinality =
        value_cardinality<value_type>::value;

      // concurrent_flat_set visit is always const access
      using arg_type = typename std::conditional<
        std::is_same<typename X::key_type, typename X::value_type>::value,
        typename X::value_type const,
        typename X::value_type
      >::type;

      std::atomic<std::uint64_t> num_erased{0};

      auto const old_size = x.size();

      auto const old_dc = +raii::default_constructor;
      auto const old_cc = +raii::copy_constructor;
      auto const old_mc = +raii::move_constructor;
      auto const old_d = +raii::destructor;

      // find the maximum payload, then target everything above max / 2
      auto max = 0;
      x.visit_all([&max](value_type const& v) {
        if (get_value(v).x_ > max) {
          max = get_value(v).x_;
        }
      });

      auto threshold = max / 2;

      // count how many elements the predicate would match if nothing threw
      auto expected_erasures = 0u;
      x.visit_all([&expected_erasures, threshold](value_type const& v) {
        if (get_value(v).x_ > threshold) {
          ++expected_erasures;
        }
      });

      enable_exceptions();
      thread_runner(values, [&num_erased, &x, threshold](boost::span<T> s) {
        for (auto const& v : s) {
          try {
            auto count = x.erase_if(get_key(v), [threshold](arg_type& w) {
              return get_value(w).x_ > threshold;
            });
            num_erased += count;
            BOOST_TEST_LE(count, 1u);
            BOOST_TEST_GE(count, 0u);
          } catch (...) {
          }
        }
      });
      disable_exceptions();

      // some candidate erasures may have been aborted by a thrown exception
      BOOST_TEST_LE(num_erased, expected_erasures);
      BOOST_TEST_EQ(x.size(), old_size - num_erased);

      BOOST_TEST_EQ(raii::default_constructor, old_dc);
      BOOST_TEST_EQ(raii::copy_constructor, old_cc);
      BOOST_TEST_EQ(raii::move_constructor, old_mc);
      BOOST_TEST_EQ(
        raii::destructor, old_d + value_type_cardinality * num_erased);
    }
  } lvalue_eraser_if;

  // Repeatedly calls the member x.erase_if(pred) (whole-table sweep) from
  // all threads; the predicate itself throws periodically via a shared call
  // counter. Constructor counts must be untouched and destructor count must
  // match however many elements actually ended up erased.
  struct erase_if_type
  {
    template <class T, class X> void operator()(std::vector<T>& values, X& x)
    {
      using value_type = typename X::value_type;
      static constexpr auto value_type_cardinality =
        value_cardinality<value_type>::value;

      // concurrent_flat_set visit is always const access
      using arg_type = typename std::conditional<
        std::is_same<typename X::key_type, typename X::value_type>::value,
        typename X::value_type const,
        typename X::value_type
      >::type;

      auto const old_size = x.size();

      auto const old_dc = +raii::default_constructor;
      auto const old_cc = +raii::copy_constructor;
      auto const old_mc = +raii::move_constructor;
      auto const old_d = +raii::destructor;

      auto max = 0;
      x.visit_all([&max](value_type const& v) {
        if (get_value(v).x_ > max) {
          max = get_value(v).x_;
        }
      });

      auto threshold = max / 2;

      // NOTE(review): computed but never asserted against in this functor
      // (erasures are best-effort under injection); kept for parity with
      // lvalue_eraser_if_type
      auto expected_erasures = 0u;
      x.visit_all([&expected_erasures, threshold](value_type const& v) {
        if (get_value(v).x_ > threshold) {
          ++expected_erasures;
        }
      });

      enable_exceptions();
      thread_runner(values, [&x, threshold](boost::span<T> /* s */) {
        for (std::size_t i = 0; i < 256; ++i) {
          try {
            x.erase_if([threshold](arg_type& v) {
              // shared across all threads and calls: throw on every
              // throw_threshold-th invocation while injection is enabled
              static std::atomic<std::uint32_t> c{0};
              auto t = ++c;
              if (should_throw && (t % throw_threshold == 0)) {
                throw exception_tag{};
              }
              return get_value(v).x_ > threshold;
            });
          } catch (...) {
          }
        }
      });
      disable_exceptions();

      BOOST_TEST_EQ(raii::default_constructor, old_dc);
      BOOST_TEST_EQ(raii::copy_constructor, old_cc);
      BOOST_TEST_EQ(raii::move_constructor, old_mc);
      // destructors must account exactly for the elements removed
      BOOST_TEST_EQ(raii::destructor,
        old_d + value_type_cardinality * (old_size - x.size()));
    }
  } erase_if;

  // Same as erase_if_type, but exercising the free function
  // boost::unordered::erase_if(x, pred) instead of the member.
  struct free_fn_erase_if_type
  {
    template <class T, class X> void operator()(std::vector<T>& values, X& x)
    {
      using value_type = typename X::value_type;
      static constexpr auto value_type_cardinality =
        value_cardinality<value_type>::value;

      // concurrent_flat_set visit is always const access
      using arg_type = typename std::conditional<
        std::is_same<typename X::key_type, typename X::value_type>::value,
        typename X::value_type const,
        typename X::value_type
      >::type;

      auto const old_size = x.size();

      auto const old_dc = +raii::default_constructor;
      auto const old_cc = +raii::copy_constructor;
      auto const old_mc = +raii::move_constructor;
      auto const old_d = +raii::destructor;

      auto max = 0;
      x.visit_all([&max](value_type const& v) {
        if (get_value(v).x_ > max) {
          max = get_value(v).x_;
        }
      });

      auto threshold = max / 2;

      enable_exceptions();
      thread_runner(values, [&x, threshold](boost::span<T> /* s */) {
        for (std::size_t i = 0; i < 256; ++i) {
          try {
            boost::unordered::erase_if(x, [threshold](arg_type& v) {
              // shared throw counter, as in erase_if_type above
              static std::atomic<std::uint32_t> c{0};
              auto t = ++c;
              if (should_throw && (t % throw_threshold == 0)) {
                throw exception_tag{};
              }
              return get_value(v).x_ > threshold;
            });
          } catch (...) {
          }
        }
      });
      disable_exceptions();

      BOOST_TEST_EQ(raii::default_constructor, old_dc);
      BOOST_TEST_EQ(raii::copy_constructor, old_cc);
      BOOST_TEST_EQ(raii::move_constructor, old_mc);
      BOOST_TEST_EQ(raii::destructor,
        old_d + value_type_cardinality * (old_size - x.size()));
    }
  } free_fn_erase_if;

  // Test driver: fills a fresh container with random values (injection off
  // during setup), runs `eraser` against it, and cross-checks the surviving
  // contents against a single-threaded reference container before and after.
  template <class X, class GF, class F>
  void erase(X*, GF gen_factory, F eraser, test::random_generator rg)
  {
    auto gen = gen_factory.template get<X>();
    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
    auto reference_cont = reference_container<X>(values.begin(), values.end());
    raii::reset_counts();

    {
      X x(values.size());

      for (auto const& v : values) {
        x.insert(v);
      }
      BOOST_TEST_EQ(x.size(), reference_cont.size());
      // plain insertion must not have destroyed anything
      BOOST_TEST_EQ(raii::destructor, 0u);

      test_fuzzy_matches_reference(x, reference_cont, rg);

      eraser(values, x);

      test_fuzzy_matches_reference(x, reference_cont, rg);
    }
    // after x is destroyed, every raii object must be accounted for
    check_raii_counts();
  }

  // tag pointers used by UNORDERED_TEST to instantiate map and set variants
  boost::unordered::concurrent_flat_map<raii, raii, stateful_hash,
    stateful_key_equal, stateful_allocator<std::pair<raii const, raii> > >*
    map;
  boost::unordered::concurrent_flat_set<raii, stateful_hash,
    stateful_key_equal, stateful_allocator<raii> >* set;

} // namespace

using test::default_generator;
using test::limited_range;
using test::sequential;

// clang-format off
UNORDERED_TEST(
  erase,
  ((map)(set))
  ((exception_value_type_generator_factory)
   (exception_init_type_generator_factory))
  ((lvalue_eraser)(lvalue_eraser_if)(erase_if)(free_fn_erase_if))
  ((default_generator)(sequential)(limited_range)))
// clang-format on

RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/cfoa/exception_constructor_tests.cpp
// Copyright (C) 2023 Christian Mazakas
// Copyright (C) 2023 Joaquin M Lopez Munoz
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// Exception-safety tests for the constructors of
// boost::unordered::concurrent_flat_map / concurrent_flat_set. Containers
// are constructed while the harness injects exceptions
// (enable_exceptions()/disable_exceptions()); check_raii_counts() then
// verifies that every constructed object was destroyed.

#include "exception_helpers.hpp"

#include <boost/unordered/concurrent_flat_map.hpp>
#include <boost/unordered/concurrent_flat_set.hpp>

using hasher = stateful_hash;
using key_equal = stateful_key_equal;

using map_type = boost::unordered::concurrent_flat_map<raii, raii, hasher,
  key_equal, stateful_allocator<std::pair<raii const, raii> > >;

using set_type = boost::unordered::concurrent_flat_set<raii, hasher,
  key_equal, stateful_allocator<raii> >;

// tag pointers used by UNORDERED_TEST to instantiate map and set variants
map_type* test_map;
set_type* test_set;

// deliberately contains duplicate keys so initializer-list construction also
// exercises the duplicate-handling path
std::initializer_list<map_type::value_type> map_init_list{
  {raii{0}, raii{0}},
  {raii{1}, raii{1}},
  {raii{2}, raii{2}},
  {raii{3}, raii{3}},
  {raii{4}, raii{4}},
  {raii{5}, raii{5}},
  {raii{6}, raii{6}},
  {raii{6}, raii{6}},
  {raii{7}, raii{7}},
  {raii{8}, raii{8}},
  {raii{9}, raii{9}},
  {raii{10}, raii{10}},
  {raii{9}, raii{9}},
  {raii{8}, raii{8}},
  {raii{7}, raii{7}},
  {raii{6}, raii{6}},
  {raii{5}, raii{5}},
  {raii{4}, raii{4}},
  {raii{3}, raii{3}},
  {raii{2}, raii{2}},
  {raii{1}, raii{1}},
  {raii{0}, raii{0}},
};

// set counterpart of map_init_list, with the same duplicates
std::initializer_list<set_type::value_type> set_init_list{
  raii{0},
  raii{1},
  raii{2},
  raii{3},
  raii{4},
  raii{5},
  raii{6},
  raii{6},
  raii{7},
  raii{8},
  raii{9},
  raii{10},
  raii{9},
  raii{8},
  raii{7},
  raii{6},
  raii{5},
  raii{4},
  raii{3},
  raii{2},
  raii{1},
  raii{0},
};

// initializer_list_bucket_count takes a (container tag, init list) pair
auto test_map_and_init_list=std::make_pair(test_map,map_init_list);
auto test_set_and_init_list=std::make_pair(test_set,set_init_list);

namespace {
  test::seed_t initialize_seed(795610904);

  // Bucket-count constructor under allocation-failure injection: at least
  // one of the alloc_throw_threshold attempts must observe a throw.
  template <class X> void bucket_constructor(X*)
  {
    raii::reset_counts();

    bool was_thrown = false;

    enable_exceptions();
    for (std::size_t i = 0; i < alloc_throw_threshold; ++i) {
      try {
        X m(128);
      } catch (...) {
        was_thrown = true;
      }
    }
    disable_exceptions();

    BOOST_TEST(was_thrown);
  }

  // Iterator-range constructor overloads. Each scope constructs with
  // injection enabled, requires that the construction threw, and then
  // verifies the raii counters balance.
  template <class X, class GF>
  void iterator_range(X*, GF gen_factory, test::random_generator rg)
  {
    using allocator_type = typename X::allocator_type;

    auto gen = gen_factory.template get<X>();
    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });

    // (first, last, bucket_count, hasher, key_equal, allocator)
    {
      raii::reset_counts();

      bool was_thrown = false;

      enable_exceptions();
      try {
        X x(values.begin(), values.end(), 0, hasher(1), key_equal(2),
          allocator_type(3));
      } catch (...) {
        was_thrown = true;
      }
      disable_exceptions();

      BOOST_TEST(was_thrown);
      check_raii_counts();
    }

    // (first, last, allocator)
    {
      raii::reset_counts();

      bool was_thrown = false;

      enable_exceptions();
      try {
        X x(values.begin(), values.end(), allocator_type(3));
      } catch (...) {
        was_thrown = true;
      }
      disable_exceptions();

      BOOST_TEST(was_thrown);
      check_raii_counts();
    }

    // (first, last, bucket_count, allocator)
    {
      raii::reset_counts();

      bool was_thrown = false;

      enable_exceptions();
      try {
        X x(
          values.begin(), values.end(), values.size(), allocator_type(3));
      } catch (...) {
        was_thrown = true;
      }
      disable_exceptions();

      BOOST_TEST(was_thrown);
      check_raii_counts();
    }

    // (first, last, bucket_count, hasher, allocator)
    {
      raii::reset_counts();

      bool was_thrown = false;

      enable_exceptions();
      try {
        X x(values.begin(), values.end(), values.size(), hasher(1),
          allocator_type(3));
      } catch (...) {
        was_thrown = true;
      }
      disable_exceptions();

      BOOST_TEST(was_thrown);
      check_raii_counts();
    }
  }

  // Copy constructor (plain and allocator-extended): the source container is
  // built with injection off; only the copy itself runs with injection on.
  template <class X, class GF>
  void copy_constructor(X*, GF gen_factory, test::random_generator rg)
  {
    using allocator_type = typename X::allocator_type;

    auto gen = gen_factory.template get<X>();
    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });

    {
      raii::reset_counts();

      bool was_thrown = false;

      try {
        X x(values.begin(), values.end(), 0);
        enable_exceptions();
        X y(x);
      } catch (...) {
        was_thrown = true;
      }
      disable_exceptions();

      BOOST_TEST(was_thrown);
      check_raii_counts();
    }

    {
      raii::reset_counts();

      bool was_thrown = false;

      try {
        X x(values.begin(), values.end(), 0);
        enable_exceptions();
        X y(x, allocator_type(4));
      } catch (...) {
        was_thrown = true;
      }
      disable_exceptions();

      BOOST_TEST(was_thrown);
      check_raii_counts();
    }
  }

  // Allocator-extended move constructor under injection.
  // NOTE(review): presumably allocator_type(4) compares unequal to x's
  // allocator, forcing per-element operations that can throw — confirm
  // against stateful_allocator in exception_helpers.hpp.
  template <class X, class GF>
  void move_constructor(X*, GF gen_factory, test::random_generator rg)
  {
    using allocator_type = typename X::allocator_type;

    auto gen = gen_factory.template get<X>();
    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });

    {
      raii::reset_counts();

      bool was_thrown = false;

      try {
        X x(values.begin(), values.end(), 0);
        enable_exceptions();
        X y(std::move(x), allocator_type(4));
      } catch (...) {
        was_thrown = true;
      }
      disable_exceptions();

      BOOST_TEST(was_thrown);
      check_raii_counts();
    }
  }

  // Initializer-list constructor overloads. Unlike the tests above, which
  // require that every attempt throws, these loop a fixed number of times
  // and only require that at least one attempt threw.
  template <class X, class IL>
  void initializer_list_bucket_count(std::pair<X*, IL> p)
  {
    using allocator_type = typename X::allocator_type;

    auto init_list = p.second;

    // (init_list, bucket_count, hasher, key_equal, allocator)
    {
      raii::reset_counts();

      unsigned num_throws = 0;

      enable_exceptions();
      for (std::size_t i = 0; i < throw_threshold; ++i) {
        try {
          X x(init_list, 0, hasher(1), key_equal(2), allocator_type(3));
        } catch (...) {
          ++num_throws;
        }
      }
      disable_exceptions();

      BOOST_TEST_GT(num_throws, 0u);
      check_raii_counts();
    }

    // (init_list, allocator)
    {
      raii::reset_counts();

      unsigned num_throws = 0;

      enable_exceptions();
      for (std::size_t i = 0; i < alloc_throw_threshold * 2; ++i) {
        try {
          X x(init_list, allocator_type(3));
        } catch (...) {
          ++num_throws;
        }
      }
      disable_exceptions();

      BOOST_TEST_GT(num_throws, 0u);
      check_raii_counts();
    }

    // (init_list, bucket_count, allocator)
    {
      raii::reset_counts();

      unsigned num_throws = 0;

      enable_exceptions();
      for (std::size_t i = 0; i < alloc_throw_threshold * 2; ++i) {
        try {
          X x(init_list, init_list.size() * 2, allocator_type(3));
        } catch (...) {
          ++num_throws;
        }
      }
      disable_exceptions();

      BOOST_TEST_GT(num_throws, 0u);
      check_raii_counts();
    }

    // (init_list, bucket_count, hasher, allocator)
    {
      raii::reset_counts();

      unsigned num_throws = 0;

      enable_exceptions();
      for (std::size_t i = 0; i < throw_threshold; ++i) {
        try {
          X x(init_list, init_list.size() * 2, hasher(1), allocator_type(3));
        } catch (...) {
          ++num_throws;
        }
      }
      disable_exceptions();

      BOOST_TEST_GT(num_throws, 0u);
      check_raii_counts();
    }
  }
} // namespace

using test::default_generator;
using test::limited_range;
using test::sequential;

// clang-format off
UNORDERED_TEST(
  bucket_constructor,
  ((test_map)(test_set)))

UNORDERED_TEST(
  iterator_range,
  ((test_map)(test_set))
  ((exception_value_type_generator_factory))
  ((default_generator)(sequential)(limited_range)))

UNORDERED_TEST(
  copy_constructor,
  ((test_map)(test_set))
  ((exception_value_type_generator_factory))
  ((default_generator)(sequential)))

UNORDERED_TEST(
  move_constructor,
  ((test_map)(test_set))
  ((exception_value_type_generator_factory))
  ((default_generator)(sequential)))

UNORDERED_TEST(
  initializer_list_bucket_count,
  ((test_map_and_init_list)(test_set_and_init_list)))
// clang-format on

RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/cfoa/visit_tests.cpp
// Copyright (C) 2023 Christian Mazakas // Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include <boost/config.hpp> #include <boost/config/workaround.hpp> #if BOOST_WORKAROUND(BOOST_GCC_VERSION, < 40900) // warning triggered in transform_iterator.hpp transitive includes #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #pragma GCC diagnostic ignored "-Wsign-conversion" #endif #include "helpers.hpp" #include <boost/unordered/concurrent_flat_map.hpp> #include <boost/unordered/concurrent_flat_set.hpp> #include <boost/core/ignore_unused.hpp> #include <boost/iterator/transform_iterator.hpp> #if BOOST_WORKAROUND(BOOST_GCC_VERSION, < 40900) #pragma GCC diagnostic pop #endif #include <algorithm> #include <array> #include <functional> #include <vector> namespace { test::seed_t initialize_seed(335740237); auto non_present_keys = [] { std::array<raii,128> a; for(std::size_t i = 0; i < a.size(); ++i) { a[i].x_ = -((int)i + 1); } return a; }(); template<typename T> raii const & get_non_present_key(T const & x) { return non_present_keys[ (std::size_t)get_key(x).x_ % non_present_keys.size()]; } struct lvalue_visitor_type { template <class T, class X, class M> void operator()(std::vector<T>& values, X& x, M const& reference_cont) { using value_type = typename X::value_type; // concurrent_flat_set visit is always const access using arg_type = typename std::conditional< std::is_same<typename X::key_type, typename X::value_type>::value, typename X::value_type const, typename X::value_type >::type; std::atomic<std::uint64_t> num_visits{0}; std::atomic<std::uint64_t> total_count{0}; auto mut_visitor = [&num_visits, &reference_cont](arg_type& v) { BOOST_TEST(reference_cont.contains(get_key(v))); BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; }; auto const_visitor = [&num_visits, 
&reference_cont](value_type const& v) { BOOST_TEST(reference_cont.contains(get_key(v))); BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; }; { thread_runner( values, [&x, &mut_visitor, &total_count](boost::span<T> s) { for (auto const& val : s) { auto r = get_key(val).x_; BOOST_TEST(r >= 0); auto count = x.visit(get_key(val), mut_visitor); BOOST_TEST_EQ(count, 1u); total_count += count; count = x.visit(get_non_present_key(val), mut_visitor); BOOST_TEST_EQ(count, 0u); } }); BOOST_TEST_EQ(num_visits, values.size()); BOOST_TEST_EQ(total_count, values.size()); num_visits = 0; total_count = 0; } { thread_runner( values, [&x, &const_visitor, &total_count](boost::span<T> s) { for (auto const& val : s) { auto r = get_key(val).x_; BOOST_TEST(r >= 0); auto const& y = x; auto count = y.visit(get_key(val), const_visitor); BOOST_TEST_EQ(count, 1u); total_count += count; count = y.visit(get_non_present_key(val), const_visitor); BOOST_TEST_EQ(count, 0u); } }); BOOST_TEST_EQ(num_visits, values.size()); BOOST_TEST_EQ(total_count, values.size()); num_visits = 0; total_count = 0; } { thread_runner( values, [&x, &const_visitor, &total_count](boost::span<T> s) { for (auto const& val : s) { auto r = get_key(val).x_; BOOST_TEST(r >= 0); auto count = x.cvisit(get_key(val), const_visitor); BOOST_TEST_EQ(count, 1u); total_count += count; count = x.cvisit(get_non_present_key(val), const_visitor); BOOST_TEST_EQ(count, 0u); } }); BOOST_TEST_EQ(num_visits, values.size()); BOOST_TEST_EQ(total_count, values.size()); num_visits = 0; total_count = 0; } { thread_runner(values, [&x, &total_count](boost::span<T> s) { for (auto const& val : s) { auto r = get_key(val).x_; BOOST_TEST(r >= 0); auto count = x.count(get_key(val)); BOOST_TEST_EQ(count, 1u); total_count += count; count = x.count(get_non_present_key(val)); BOOST_TEST_EQ(count, 0u); } }); BOOST_TEST_EQ(total_count, values.size()); num_visits = 0; total_count = 0; } { thread_runner(values, [&x](boost::span<T> s) { for (auto 
const& val : s) { auto r = get_key(val).x_; BOOST_TEST(r >= 0); auto contains = x.contains(get_key(val)); BOOST_TEST(contains); contains = x.contains(get_non_present_key(val)); BOOST_TEST(!contains); } }); num_visits = 0; total_count = 0; } } } lvalue_visitor; struct transp_visitor_type { template <class T, class X, class M> void operator()(std::vector<T>& values, X& x, M const& reference_cont) { using value_type = typename X::value_type; // concurrent_flat_set visit is always const access using arg_type = typename std::conditional< std::is_same<typename X::key_type, typename X::value_type>::value, typename X::value_type const, typename X::value_type >::type; std::atomic<std::uint64_t> num_visits{0}; std::atomic<std::uint64_t> total_count{0}; auto mut_visitor = [&num_visits, &reference_cont](arg_type& v) { BOOST_TEST(reference_cont.contains(get_key(v))); BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; }; auto const_visitor = [&num_visits, &reference_cont](value_type const& v) { BOOST_TEST(reference_cont.contains(get_key(v))); BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; }; { thread_runner( values, [&x, &mut_visitor, &total_count](boost::span<T> s) { for (auto const& val : s) { auto r = get_key(val).x_; BOOST_TEST(r >= 0); auto count = x.visit(get_key(val).x_, mut_visitor); BOOST_TEST_EQ(count, 1u); total_count += count; count = x.visit(get_non_present_key(val).x_, mut_visitor); BOOST_TEST_EQ(count, 0u); } }); BOOST_TEST_EQ(num_visits, values.size()); BOOST_TEST_EQ(total_count, values.size()); num_visits = 0; total_count = 0; } { thread_runner( values, [&x, &const_visitor, &total_count](boost::span<T> s) { for (auto const& val : s) { auto r = get_key(val).x_; BOOST_TEST(r >= 0); auto const& y = x; auto count = y.visit(get_key(val).x_, const_visitor); BOOST_TEST_EQ(count, 1u); total_count += count; count = y.visit(get_non_present_key(val).x_, const_visitor); BOOST_TEST_EQ(count, 0u); } }); BOOST_TEST_EQ(num_visits, 
values.size()); BOOST_TEST_EQ(total_count, values.size()); num_visits = 0; total_count = 0; } { thread_runner( values, [&x, &const_visitor, &total_count](boost::span<T> s) { for (auto const& val : s) { auto r = get_key(val).x_; BOOST_TEST(r >= 0); auto count = x.cvisit(get_key(val).x_, const_visitor); BOOST_TEST_EQ(count, 1u); total_count += count; count = x.cvisit(get_non_present_key(val).x_, const_visitor); BOOST_TEST_EQ(count, 0u); } }); BOOST_TEST_EQ(num_visits, values.size()); BOOST_TEST_EQ(total_count, values.size()); num_visits = 0; total_count = 0; } { thread_runner(values, [&x, &total_count](boost::span<T> s) { for (auto const& val : s) { auto r = get_key(val).x_; BOOST_TEST(r >= 0); auto count = x.count(get_key(val).x_); BOOST_TEST_EQ(count, 1u); total_count += count; count = x.count(get_non_present_key(val).x_); BOOST_TEST_EQ(count, 0u); } }); BOOST_TEST_EQ(total_count, values.size()); num_visits = 0; total_count = 0; } { thread_runner(values, [&x](boost::span<T> s) { for (auto const& val : s) { auto r = get_key(val).x_; BOOST_TEST(r >= 0); auto contains = x.contains(get_key(val).x_); BOOST_TEST(contains); contains = x.contains(get_non_present_key(val).x_); BOOST_TEST(!contains); } }); num_visits = 0; total_count = 0; } } } transp_visitor; struct visit_all_type { template <class T, class X, class M> void operator()(std::vector<T>& values, X& x, M const& reference_cont) { using value_type = typename X::value_type; // concurrent_flat_set visit is always const access using arg_type = typename std::conditional< std::is_same<typename X::key_type, typename X::value_type>::value, typename X::value_type const, typename X::value_type >::type; std::atomic<std::uint64_t> total_count{0}; auto mut_visitor = [&reference_cont](std::atomic<uint64_t>& num_visits) { return [&reference_cont, &num_visits](arg_type& v) { BOOST_TEST(reference_cont.contains(get_key(v))); BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; }; }; auto const_visitor = 
[&reference_cont](std::atomic<uint64_t>& num_visits) { return [&reference_cont, &num_visits](value_type const& v) { BOOST_TEST(reference_cont.contains(get_key(v))); BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; }; }; { thread_runner(values, [&x, &total_count, &mut_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; total_count += x.visit_all(mut_visitor(num_visits)); BOOST_TEST_EQ(x.size(), num_visits); }); BOOST_TEST_EQ(total_count, num_threads * x.size()); total_count = 0; } { thread_runner( values, [&x, &total_count, &const_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; auto const& y = x; total_count += y.visit_all(const_visitor(num_visits)); BOOST_TEST_EQ(x.size(), num_visits); }); BOOST_TEST_EQ(total_count, num_threads * x.size()); total_count = 0; } { thread_runner( values, [&x, &total_count, &const_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; total_count += x.cvisit_all(const_visitor(num_visits)); BOOST_TEST_EQ(x.size(), num_visits); }); BOOST_TEST_EQ(total_count, num_threads * x.size()); total_count = 0; } } } visit_all; struct visit_while_type { template <class T, class X, class M> void operator()(std::vector<T>& values, X& x, M const& reference_cont) { using value_type = typename X::value_type; // concurrent_flat_set visit is always const access using arg_type = typename std::conditional< std::is_same<typename X::key_type, typename X::value_type>::value, typename X::value_type const, typename X::value_type >::type; auto mut_truthy_visitor = [&reference_cont]( std::atomic<uint64_t>& num_visits) { return [&reference_cont, &num_visits](arg_type& v) { BOOST_TEST(reference_cont.contains(get_key(v))); BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; return true; }; }; auto const_truthy_visitor = [&reference_cont]( std::atomic<uint64_t>& num_visits) { return [&reference_cont, &num_visits](value_type const& v) { BOOST_TEST(reference_cont.contains(get_key(v))); 
BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; return true; }; }; auto mut_falsey_visitor = [&reference_cont]( std::atomic<uint64_t>& num_visits) { return [&reference_cont, &num_visits](arg_type& v) { BOOST_TEST(reference_cont.contains(get_key(v))); ++num_visits; return (get_value(v).x_ % 100) == 0; }; }; auto const_falsey_visitor = [&reference_cont]( std::atomic<uint64_t>& num_visits) { return [&reference_cont, &num_visits](value_type const& v) { BOOST_TEST(reference_cont.contains(get_key(v))); ++num_visits; return (get_value(v).x_ % 100) == 0; }; }; { thread_runner(values, [&x, &mut_truthy_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; BOOST_TEST(x.visit_while(mut_truthy_visitor(num_visits))); BOOST_TEST_EQ(x.size(), num_visits); }); } { thread_runner(values, [&x, &const_truthy_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; auto const& y = x; BOOST_TEST(y.visit_while(const_truthy_visitor(num_visits))); BOOST_TEST_EQ(x.size(), num_visits); }); } { thread_runner(values, [&x, &const_truthy_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; BOOST_TEST(x.cvisit_while(const_truthy_visitor(num_visits))); BOOST_TEST_EQ(x.size(), num_visits); }); } { thread_runner(values, [&x, &mut_falsey_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; BOOST_TEST_NOT(x.visit_while(mut_falsey_visitor(num_visits))); BOOST_TEST_LT(num_visits, x.size()); BOOST_TEST_GT(num_visits, 0u); }); } { thread_runner(values, [&x, &const_falsey_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; auto const& y = x; BOOST_TEST_NOT(y.visit_while(const_falsey_visitor(num_visits))); BOOST_TEST_LT(num_visits, x.size()); BOOST_TEST_GT(num_visits, 0u); }); } { thread_runner(values, [&x, &const_falsey_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; BOOST_TEST_NOT(x.cvisit_while(const_falsey_visitor(num_visits))); BOOST_TEST_LT(num_visits, x.size()); BOOST_TEST_GT(num_visits, 
0u); }); } } } visit_while; struct exec_policy_visit_all_type { template <class T, class X, class M> void operator()(std::vector<T>& values, X& x, M const& reference_cont) { #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) using value_type = typename X::value_type; // concurrent_flat_set visit is always const access using arg_type = typename std::conditional< std::is_same<typename X::key_type, typename X::value_type>::value, typename X::value_type const, typename X::value_type >::type; auto mut_visitor = [&reference_cont](std::atomic<uint64_t>& num_visits) { return [&reference_cont, &num_visits](arg_type& v) { BOOST_TEST(reference_cont.contains(get_key(v))); BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; }; }; auto const_visitor = [&reference_cont](std::atomic<uint64_t>& num_visits) { return [&reference_cont, &num_visits](value_type const& v) { BOOST_TEST(reference_cont.contains(get_key(v))); BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; }; }; { thread_runner(values, [&x, &mut_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; x.visit_all(std::execution::par, mut_visitor(num_visits)); BOOST_TEST_EQ(x.size(), num_visits); }); } { thread_runner(values, [&x, &const_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; auto const& y = x; y.visit_all(std::execution::par, const_visitor(num_visits)); BOOST_TEST_EQ(x.size(), num_visits); }); } { thread_runner(values, [&x, &const_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; x.cvisit_all(std::execution::par, const_visitor(num_visits)); BOOST_TEST_EQ(x.size(), num_visits); }); } #else (void)values; (void)x; (void)reference_cont; #endif } } exec_policy_visit_all; struct exec_policy_visit_while_type { template <class T, class X, class M> void operator()(std::vector<T>& values, X& x, M const& reference_cont) { #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) using value_type = typename X::value_type; // concurrent_flat_set visit is 
always const access using arg_type = typename std::conditional< std::is_same<typename X::key_type, typename X::value_type>::value, typename X::value_type const, typename X::value_type >::type; auto mut_truthy_visitor = [&reference_cont]( std::atomic<uint64_t>& num_visits) { return [&reference_cont, &num_visits](arg_type& v) { BOOST_TEST(reference_cont.contains(get_key(v))); BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; return true; }; }; auto const_truthy_visitor = [&reference_cont]( std::atomic<uint64_t>& num_visits) { return [&reference_cont, &num_visits](value_type const& v) { BOOST_TEST(reference_cont.contains(get_key(v))); BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; return true; }; }; auto mut_falsey_visitor = [&reference_cont]( std::atomic<uint64_t>& num_visits) { return [&reference_cont, &num_visits](arg_type& v) { BOOST_TEST(reference_cont.contains(get_key(v))); BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; return (get_value(v).x_ % 100) == 0; }; }; auto const_falsey_visitor = [&reference_cont]( std::atomic<uint64_t>& num_visits) { return [&reference_cont, &num_visits](value_type const& v) { BOOST_TEST(reference_cont.contains(get_key(v))); BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; return (get_value(v).x_ % 100) == 0; }; }; { thread_runner(values, [&x, &mut_truthy_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; BOOST_TEST( x.visit_while(std::execution::par, mut_truthy_visitor(num_visits))); BOOST_TEST_EQ(x.size(), num_visits); }); } { thread_runner(values, [&x, &const_truthy_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; auto const& y = x; BOOST_TEST(y.visit_while( std::execution::par, const_truthy_visitor(num_visits))); BOOST_TEST_EQ(x.size(), num_visits); }); } { thread_runner(values, [&x, &const_truthy_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; BOOST_TEST(x.cvisit_while( std::execution::par, 
const_truthy_visitor(num_visits))); BOOST_TEST_EQ(x.size(), num_visits); }); } { thread_runner(values, [&x, &mut_falsey_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; BOOST_TEST_NOT( x.visit_while(std::execution::par, mut_falsey_visitor(num_visits))); BOOST_TEST_LT(num_visits, x.size()); BOOST_TEST_GT(num_visits, 0u); }); } { thread_runner(values, [&x, &const_falsey_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; auto const& y = x; BOOST_TEST_NOT(y.visit_while( std::execution::par, const_falsey_visitor(num_visits))); BOOST_TEST_LT(num_visits, x.size()); BOOST_TEST_GT(num_visits, 0u); }); } { thread_runner(values, [&x, &const_falsey_visitor](boost::span<T>) { std::atomic<std::uint64_t> num_visits{0}; BOOST_TEST_NOT(x.cvisit_while( std::execution::par, const_falsey_visitor(num_visits))); BOOST_TEST_LT(num_visits, x.size()); BOOST_TEST_GT(num_visits, 0u); }); } #else (void)values; (void)x; (void)reference_cont; #endif } } exec_policy_visit_while; template <class X, class GF, class F> void visit(X*, GF gen_factory, F visitor, test::random_generator rg) { auto gen = gen_factory.template get<X>(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); auto reference_cont = reference_container<X>(values.begin(), values.end()); raii::reset_counts(); { X x; for (auto const& v : values) { x.insert(v); } BOOST_TEST_EQ(x.size(), reference_cont.size()); std::uint64_t old_default_constructor = raii::default_constructor; std::uint64_t old_copy_constructor = raii::copy_constructor; std::uint64_t old_move_constructor = raii::move_constructor; std::uint64_t old_copy_assignment = raii::copy_assignment; std::uint64_t old_move_assignment = raii::move_assignment; visitor(values, x, reference_cont); BOOST_TEST_EQ(old_default_constructor, raii::default_constructor); BOOST_TEST_EQ(old_copy_constructor, raii::copy_constructor); BOOST_TEST_EQ(old_move_constructor, raii::move_constructor); BOOST_TEST_EQ(old_copy_assignment, 
raii::copy_assignment); BOOST_TEST_EQ(old_move_assignment, raii::move_assignment); } BOOST_TEST_GE(raii::default_constructor, 0u); BOOST_TEST_GE(raii::copy_constructor, 0u); BOOST_TEST_GE(raii::move_constructor, 0u); BOOST_TEST_GT(raii::destructor, 0u); BOOST_TEST_EQ(raii::default_constructor + raii::copy_constructor + raii::move_constructor, raii::destructor); } template <class X, class GF> void empty_visit(X*, GF gen_factory, test::random_generator rg) { auto gen = gen_factory.template get<X>(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); using values_type = decltype(values); using span_value_type = typename values_type::value_type; raii::reset_counts(); { X x; std::uint64_t old_default_constructor = raii::default_constructor; std::uint64_t old_copy_constructor = raii::copy_constructor; std::uint64_t old_move_constructor = raii::move_constructor; std::uint64_t old_copy_assignment = raii::copy_assignment; std::uint64_t old_move_assignment = raii::move_assignment; { thread_runner(values, [&x](boost::span<span_value_type> s) { std::atomic<std::uint64_t> num_visits{0}; x.visit_all( [&num_visits](typename X::value_type const&) { ++num_visits; }); BOOST_TEST_EQ(num_visits, 0u); for (auto const& val : s) { auto count = x.visit(get_key(val), [&num_visits](typename X::value_type const&) { ++num_visits; }); BOOST_TEST_EQ(count, 0u); } }); } BOOST_TEST_EQ(old_default_constructor, raii::default_constructor); BOOST_TEST_EQ(old_copy_constructor, raii::copy_constructor); BOOST_TEST_EQ(old_move_constructor, raii::move_constructor); BOOST_TEST_EQ(old_copy_assignment, raii::copy_assignment); BOOST_TEST_EQ(old_move_assignment, raii::move_assignment); } BOOST_TEST_EQ(raii::default_constructor, 0u); BOOST_TEST_EQ(raii::copy_constructor, 0u); BOOST_TEST_EQ(raii::move_constructor, 0u); BOOST_TEST_EQ(raii::destructor, 0u); } template <class X, class GF> void insert_and_visit(X*, GF gen_factory, test::random_generator rg) { // here we attempt to ensure 
happens-before and synchronizes-with // the visitation thread essentially chases the insertion one // we double-check unreloated loads/stores to ensure that a store is visible // in the visitation thread BOOST_TEST(rg == test::sequential); auto gen = gen_factory.template get<X>(); auto const values = make_random_values(1024 * 16, [&] { return gen(rg); }); { raii::reset_counts(); X x; std::thread t1, t2; boost::compat::latch l(2); std::vector<std::string> strs(values.size()); t1 = std::thread([&l, &values, &x, &strs] { l.arrive_and_wait(); for (std::size_t idx = 0; idx < values.size(); ++idx) { strs[idx] = "rawr"; auto const& val = values[idx]; x.insert(val); } }); t2 = std::thread([&l, &values, &x, &strs] { l.arrive_and_wait(); for (std::size_t idx = 0; idx < values.size(); ++idx) { std::atomic_bool b{false}; while (!b) { x.cvisit(get_key(values[idx]), [&b, &strs, idx, &values](typename X::value_type const& v) { BOOST_TEST_EQ(get_value(v), get_value(values[idx])); BOOST_TEST_EQ(strs[idx], "rawr"); b = true; }); } } }); t1.join(); t2.join(); } check_raii_counts(); } struct regular_key_extractor { template<typename T> auto operator()(const T& x) const -> decltype(get_key(x)) { return get_key(x); } } regular_key_extract; struct transp_key_extractor { template<typename T> auto operator()(const T& x) const -> decltype((get_key(x).x_)) { return get_key(x).x_; } } transp_key_extract; template <class X, class KeyExtractor, class GF> void bulk_visit( X*, KeyExtractor key_extract, GF gen_factory, test::random_generator rg) { using key_type = typename X::key_type; using value_type = typename X::value_type; // concurrent_flat_set visit is always const access using arg_type = typename std::conditional< std::is_same<key_type, value_type>::value, value_type const, value_type >::type; auto gen = gen_factory.template get<X>(); auto values = make_random_values(16384 * 16, [&] { return gen(rg); }); using values_type = decltype(values); using span_value_type = typename 
values_type::value_type; raii::reset_counts(); { X x; for (auto const& v: values) { if (get_key(v).x_ % 3 != 0) x.insert(v); } X const& cx = x; std::uint64_t old_default_constructor = raii::default_constructor; std::uint64_t old_copy_constructor = raii::copy_constructor; std::uint64_t old_move_constructor = raii::move_constructor; std::uint64_t old_copy_assignment = raii::copy_assignment; std::uint64_t old_move_assignment = raii::move_assignment; std::atomic<std::size_t> num_visits{0}; thread_runner(values, [&x, &cx, &num_visits, key_extract] (boost::span<span_value_type> s) { auto it = boost::make_transform_iterator(s.begin(), key_extract); std::size_t n = s.size(), m = 0, q = 0; auto found = [&it, &m](value_type const& v) { return std::find( it, it + (std::ptrdiff_t)m, get_key(v)) != it + (std::ptrdiff_t)m; }; while (n) { if (m > n) m = n; switch (q % 3) { case 0: x.visit( it, it + (std::ptrdiff_t)m, [&num_visits, &found](arg_type& v) { if ( found(v) ) ++num_visits; }); break; case 1: cx.visit( it, it + (std::ptrdiff_t)m, [&num_visits, &found](value_type const& v) { if ( found(v) ) ++num_visits; }); break; case 2: cx.cvisit( it, it + (std::ptrdiff_t)m, [&num_visits, &found](value_type const& v) { if ( found(v) ) ++num_visits; }); break; default: break; } it += (std::ptrdiff_t)m; n -= m; ++m; if (m > 5*X::bulk_visit_size){ m = 0; ++ q; } } }); BOOST_TEST_EQ(num_visits, x.size()); BOOST_TEST_EQ(old_default_constructor, raii::default_constructor); BOOST_TEST_EQ(old_copy_constructor, raii::copy_constructor); BOOST_TEST_EQ(old_move_constructor, raii::move_constructor); BOOST_TEST_EQ(old_copy_assignment, raii::copy_assignment); BOOST_TEST_EQ(old_move_assignment, raii::move_assignment); } BOOST_TEST_GE(raii::default_constructor, 0u); BOOST_TEST_GE(raii::copy_constructor, 0u); BOOST_TEST_GE(raii::move_constructor, 0u); BOOST_TEST_GT(raii::destructor, 0u); BOOST_TEST_EQ(raii::default_constructor + raii::copy_constructor + raii::move_constructor, raii::destructor); } 
boost::unordered::concurrent_flat_map<raii, raii>* map; boost::unordered::concurrent_flat_map<raii, raii, transp_hash, transp_key_equal>* transp_map; boost::unordered::concurrent_flat_set<raii>* set; boost::unordered::concurrent_flat_set<raii, transp_hash, transp_key_equal>* transp_set; } // namespace using test::default_generator; using test::limited_range; using test::sequential; // clang-format off UNORDERED_TEST( visit, ((map)(set)) ((value_type_generator_factory)(init_type_generator_factory)) ((lvalue_visitor)(visit_all)(visit_while)(exec_policy_visit_all) (exec_policy_visit_while)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( visit, ((transp_map)(transp_set)) ((value_type_generator_factory)(init_type_generator_factory)) ((transp_visitor)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( empty_visit, ((map)(transp_map)(set)(transp_set)) ((value_type_generator_factory)(init_type_generator_factory)) ((default_generator)(sequential)(limited_range)) ) UNORDERED_TEST( insert_and_visit, ((map)(set)) ((value_type_generator_factory)) ((sequential)) ) UNORDERED_TEST( bulk_visit, ((map)(set)) ((regular_key_extract)) ((value_type_generator_factory)) ((sequential)) ) UNORDERED_TEST( bulk_visit, ((transp_map)(transp_set)) ((transp_key_extract)) ((value_type_generator_factory)) ((sequential)) ) // clang-format on RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/cfoa/reentrancy_check_test.cpp
// Copyright 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. // https://www.boost.org/LICENSE_1_0.txt #include <cstdlib> #define BOOST_ENABLE_ASSERT_HANDLER static bool reentrancy_detected = false; namespace boost { // Caveat lector: a proper handler shouldn't throw as it may be executed // within a noexcept function. void assertion_failed_msg( char const*, char const*, char const*, char const*, long) { reentrancy_detected = true; throw 0; } // LCOV_EXCL_START void assertion_failed(char const*, char const*, char const*, long) { std::abort(); } // LCOV_EXCL_STOP } // namespace boost #include "helpers.hpp" #include <boost/unordered/concurrent_flat_map.hpp> #include <boost/unordered/concurrent_flat_set.hpp> #include <boost/core/lightweight_test.hpp> using test::default_generator; using map_type = boost::unordered::concurrent_flat_map<raii, raii>; using set_type = boost::unordered::concurrent_flat_set<raii>; map_type* test_map; set_type* test_set; template<typename F> void detect_reentrancy(F f) { reentrancy_detected = false; try { f(); } catch(int) {} BOOST_TEST(reentrancy_detected); } namespace { template <class X, class GF> void reentrancy_tests(X*, GF gen_factory, test::random_generator rg) { using key_type = typename X::key_type; // concurrent_flat_set visit is always const access using arg_type = typename std::conditional< std::is_same<typename X::key_type, typename X::value_type>::value, typename X::value_type const, typename X::value_type >::type; auto gen = gen_factory.template get<X>(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); X x1, x2; x1.insert(values.begin(), values.end()); x2.insert(values.begin(), values.end()); detect_reentrancy([&] { x1.visit_all([&](arg_type&) { (void)x1.contains(key_type()); }); }); // LCOV_EXCL_LINE detect_reentrancy([&] { x1.visit_all([&](arg_type&) { x1.rehash(0); }); }); // LCOV_EXCL_LINE detect_reentrancy([&] { x1.visit_all([&](arg_type&) { 
x2.visit_all([&](arg_type&) { x1=x2; }); // LCOV_EXCL_START }); }); // LCOV_EXCL_STOP detect_reentrancy([&] { x1.visit_all([&](arg_type&) { x2.visit_all([&](arg_type&) { x2=x1; }); // LCOV_EXCL_START }); }); // LCOV_EXCL_STOP } } // namespace // clang-format off UNORDERED_TEST( reentrancy_tests, ((test_map)(test_set)) ((value_type_generator_factory)) ((default_generator))) // clang-format on RUN_TESTS()
0
repos/unordered/test
repos/unordered/test/cfoa/common_helpers.hpp
// Copyright (C) 2023 Christian Mazakas // Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_UNORDERED_TEST_CFOA_COMMON_HELPERS_HPP #define BOOST_UNORDERED_TEST_CFOA_COMMON_HELPERS_HPP #include <boost/unordered/concurrent_flat_map_fwd.hpp> #include <boost/unordered/concurrent_flat_set_fwd.hpp> #include <boost/unordered/unordered_flat_map.hpp> #include <boost/unordered/unordered_flat_set.hpp> #include <cstddef> #include <type_traits> #include <utility> template <typename K> struct value_cardinality { static constexpr std::size_t value=1; }; template <typename K, typename V> struct value_cardinality<std::pair<K, V> > { static constexpr std::size_t value=2; }; template <class Container> struct reference_container_impl; template <class Container> using reference_container = typename reference_container_impl<Container>::type; template <typename K, typename V, typename H, typename P, typename A> struct reference_container_impl<boost::concurrent_flat_map<K, V, H, P, A> > { using type = boost::unordered_flat_map<K, V>; }; template <typename K, typename H, typename P, typename A> struct reference_container_impl<boost::concurrent_flat_set<K, H, P, A> > { using type = boost::unordered_flat_set<K>; }; template <class Container> struct flat_container_impl; template <class Container> using flat_container = typename flat_container_impl<Container>::type; template <typename K, typename V, typename H, typename P, typename A> struct flat_container_impl<boost::concurrent_flat_map<K, V, H, P, A> > { using type = boost::unordered_flat_map<K, V, H, P, A>; }; template <typename K, typename H, typename P, typename A> struct flat_container_impl<boost::concurrent_flat_set<K, H, P, A> > { using type = boost::unordered_flat_set<K, H, P, A>; }; template <typename Container, template <typename> class Allocator> struct 
replace_allocator_impl; template <typename Container, template <typename> class Allocator> using replace_allocator = typename replace_allocator_impl<Container, Allocator>::type; template < typename K, typename V, typename H, typename P, typename A, template <typename> class Allocator > struct replace_allocator_impl< boost::concurrent_flat_map<K, V, H, P, A>, Allocator> { using value_type = typename boost::concurrent_flat_map<K, V, H, P, A>::value_type; using type = boost::concurrent_flat_map<K, V, H, P, Allocator<value_type> >; }; template < typename K, typename H, typename P, typename A, template <typename> class Allocator > struct replace_allocator_impl< boost::concurrent_flat_set<K, H, P, A>, Allocator> { using value_type = typename boost::concurrent_flat_set<K, H, P, A>::value_type; using type = boost::concurrent_flat_set<K, H, P, Allocator<value_type> >; }; template <typename K> K const& get_key(K const& x) { return x; } template <typename K,typename V> K const& get_key(const std::pair<K, V>& x) { return x.first; } template <typename K> K const& get_value(K const& x) { return x; } template <typename K,typename V> V const& get_value(const std::pair<K, V>& x) { return x.second; } template <typename K,typename V> V& get_value(std::pair<K, V>& x) { return x.second; } template <class X, class Y> void test_matches_reference(X const& x, Y const& reference_cont) { using value_type = typename X::value_type; BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& v) { BOOST_TEST(reference_cont.contains(get_key(v))); BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); })); } template <class X, class Y> void test_fuzzy_matches_reference( X const& x, Y const& reference_cont, test::random_generator rg) { using value_type = typename X::value_type; BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& v) { BOOST_TEST(reference_cont.contains(get_key(v))); if (rg == test::sequential) { BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); } })); } #endif // 
BOOST_UNORDERED_TEST_CFOA_COMMON_HELPERS_HPP
0
repos/unordered/test
repos/unordered/test/cfoa/rw_spinlock_test8.cpp
// Copyright 2023 Peter Dimov // Distributed under the Boost Software License, Version 1.0. // https://www.boost.org/LICENSE_1_0.txt #include <boost/unordered/detail/foa/rw_spinlock.hpp> #include <boost/compat/shared_lock.hpp> #include <boost/core/lightweight_test.hpp> #include <mutex> #include <thread> #include <cstdio> using boost::unordered::detail::foa::rw_spinlock; static int count = 0; static rw_spinlock sp; void f( int k, int m, int n ) { std::printf( "Thread %d of %d started.\n", k, m ); for( int i = 0; i < n; ++i ) { int oldc; for( ;; ) { { boost::compat::shared_lock<rw_spinlock> lock( sp ); oldc = count; } if( oldc % m == k ) break; } { std::lock_guard<rw_spinlock> lock( sp ); if( count == oldc ) ++count; } } std::printf( "Thread %d of %d finished.\n", k, m ); } int main() { int const N = 100; // total iterations int const M = 4; // threads std::thread th[ M ]; for( int i = 0; i < M; ++i ) { th[ i ] = std::thread( f, i, M, N ); } for( int i = 0; i < M; ++i ) { th[ i ].join(); } BOOST_TEST_EQ( count, N * M ); return boost::report_errors(); }