/* table.hpp — the run of digits that followed this caption was the
 * line-number gutter of the scraped listing, not source text; the numeric
 * "N." prefixes on the lines below are the same extraction artifact. */
  1. /* Fast open-addressing hash table.
  2. *
  3. * Copyright 2022-2024 Joaquin M Lopez Munoz.
  4. * Copyright 2023 Christian Mazakas.
  5. * Copyright 2024 Braden Ganetsky.
  6. * Distributed under the Boost Software License, Version 1.0.
  7. * (See accompanying file LICENSE_1_0.txt or copy at
  8. * http://www.boost.org/LICENSE_1_0.txt)
  9. *
  10. * See https://www.boost.org/libs/unordered for library home page.
  11. */
  12. #ifndef BOOST_UNORDERED_DETAIL_FOA_TABLE_HPP
  13. #define BOOST_UNORDERED_DETAIL_FOA_TABLE_HPP
  14. #include <boost/assert.hpp>
  15. #include <boost/config.hpp>
  16. #include <boost/config/workaround.hpp>
  17. #include <boost/core/serialization.hpp>
  18. #include <boost/unordered/detail/foa/core.hpp>
  19. #include <boost/unordered/detail/serialize_tracked_address.hpp>
  20. #include <boost/unordered/detail/type_traits.hpp>
  21. #include <cstddef>
  22. #include <iterator>
  23. #include <memory>
  24. #include <type_traits>
  25. #include <utility>
  26. namespace boost{
  27. namespace unordered{
  28. namespace detail{
  29. namespace foa{
  30. /* use plain integrals for group metadata storage */
  31. template<typename Integral>
  32. struct plain_integral
  33. {
  34. operator Integral()const{return n;}
  35. void operator=(Integral m){n=m;}
  36. #if BOOST_WORKAROUND(BOOST_GCC,>=50000 && BOOST_GCC<60000)
  37. void operator|=(Integral m){n=static_cast<Integral>(n|m);}
  38. void operator&=(Integral m){n=static_cast<Integral>(n&m);}
  39. #else
  40. void operator|=(Integral m){n|=m;}
  41. void operator&=(Integral m){n&=m;}
  42. #endif
  43. Integral n;
  44. };
  45. struct plain_size_control
  46. {
  47. std::size_t ml;
  48. std::size_t size;
  49. };
  50. template<typename,typename,typename,typename>
  51. class table;
  52. /* table_iterator keeps two pointers:
  53. *
  54. * - A pointer p to the element slot.
  55. * - A pointer pc to the n-th byte of the associated group metadata, where n
  56. * is the position of the element in the group.
  57. *
  58. * A simpler solution would have been to keep a pointer p to the element, a
  59. * pointer pg to the group, and the position n, but that would increase
  60. * sizeof(table_iterator) by 4/8 bytes. In order to make this compact
  61. * representation feasible, it is required that group objects are aligned
  62. * to their size, so that we can recover pg and n as
  63. *
  64. * - n = pc%sizeof(group)
  65. * - pg = pc-n
  66. *
  67. * (for explanatory purposes pg and pc are treated above as if they were memory
  68. * addresses rather than pointers).
  69. *
  70. * p = nullptr is conventionally used to mark end() iterators.
  71. */
  72. /* internal conversion from const_iterator to iterator */
  73. struct const_iterator_cast_tag{};
/* Forward iterator over table elements; see the explanatory comment above
 * for the compact (pc_,p_) representation and the p_==nullptr end()
 * convention. */
  74. template<typename TypePolicy,typename GroupPtr,bool Const>
  75. class table_iterator
  76. {
/* aliases derived from the (possibly fancy) group pointer type */
  77. using group_pointer_traits=boost::pointer_traits<GroupPtr>;
  78. using type_policy=TypePolicy;
  79. using table_element_type=typename type_policy::element_type;
  80. using group_type=typename group_pointer_traits::element_type;
  81. using table_element_pointer=
  82. typename group_pointer_traits::template rebind<table_element_type>;
  83. using char_pointer=
  84. typename group_pointer_traits::template rebind<unsigned char>;
  85. static constexpr auto N=group_type::N;
  86. static constexpr auto regular_layout=group_type::regular_layout;
/* standard forward-iterator member types */
  87. public:
  88. using difference_type=std::ptrdiff_t;
  89. using value_type=typename type_policy::value_type;
  90. using pointer=
  91. typename std::conditional<Const,value_type const*,value_type*>::type;
  92. using reference=
  93. typename std::conditional<Const,value_type const&,value_type&>::type;
  94. using iterator_category=std::forward_iterator_tag;
  95. using element_type=
  96. typename std::conditional<Const,value_type const,value_type>::type;
/* default-constructed iterators are end() (p_==nullptr) */
  97. table_iterator():pc_{nullptr},p_{nullptr}{};
/* conversion from the non-const iterator (enabled only for Const2==false
 * sources, so it never duplicates the copy constructor) */
  98. template<bool Const2,typename std::enable_if<!Const2>::type* =nullptr>
  99. table_iterator(const table_iterator<TypePolicy,GroupPtr,Const2>& x):
  100. pc_{x.pc_},p_{x.p_}{}
/* internal const_iterator -> iterator conversion, tag-guarded so it stays
 * out of the public conversion set */
  101. table_iterator(
  102. const_iterator_cast_tag, const table_iterator<TypePolicy,GroupPtr,true>& x):
  103. pc_{x.pc_},p_{x.p_}{}
  104. inline reference operator*()const noexcept
  105. {return type_policy::value_from(*p());}
  106. inline pointer operator->()const noexcept
  107. {return std::addressof(type_policy::value_from(*p()));}
  108. inline table_iterator& operator++()noexcept{increment();return *this;}
  109. inline table_iterator operator++(int)noexcept
  110. {auto x=*this;increment();return x;}
/* equality only inspects p_: the element pointer identifies the position,
 * and end() iterators all have p_==nullptr */
  111. friend inline bool operator==(
  112. const table_iterator& x,const table_iterator& y)
  113. {return x.p()==y.p();}
  114. friend inline bool operator!=(
  115. const table_iterator& x,const table_iterator& y)
  116. {return !(x==y);}
  117. private:
  118. template<typename,typename,bool> friend class table_iterator;
  119. template<typename> friend class table_erase_return_type;
  120. template<typename,typename,typename,typename> friend class table;
/* used by table: builds the iterator from a group pointer, the in-group
 * position n and the element pointer */
  121. table_iterator(group_type* pg,std::size_t n,const table_element_type* ptet):
  122. pc_{to_pointer<char_pointer>(
  123. reinterpret_cast<unsigned char*>(const_cast<group_type*>(pg))+n)},
  124. p_{to_pointer<table_element_pointer>(const_cast<table_element_type*>(ptet))}
  125. {}
/* raw-pointer views of the (possibly fancy) stored pointers */
  126. unsigned char* pc()const noexcept{return boost::to_address(pc_);}
  127. table_element_type* p()const noexcept{return boost::to_address(p_);}
/* dispatch on the group metadata layout; must not be called on end() */
  128. inline void increment()noexcept
  129. {
  130. BOOST_ASSERT(p()!=nullptr);
  131. increment(std::integral_constant<bool,regular_layout>{});
  132. }
/* regular layout: scan remaining slots of the current group byte by byte;
 * if none is occupied, fall through to a group-at-a-time scan via
 * match_occupied(). Reaching the sentinel slot turns *this into end(). */
  133. inline void increment(std::true_type /* regular layout */)noexcept
  134. {
  135. using diff_type=
  136. typename boost::pointer_traits<char_pointer>::difference_type;
  137. for(;;){
  138. ++p_;
/* pc_ sits at slot N-2's metadata byte -> slot N-1 starts the group's
 * non-slot tail; jump pc_ to the next group's first byte */
  139. if(reinterpret_cast<uintptr_t>(pc())%sizeof(group_type)==N-1){
  140. pc_+=static_cast<diff_type>(sizeof(group_type)-(N-1));
  141. break;
  142. }
  143. ++pc_;
  144. if(!group_type::is_occupied(pc()))continue;
  145. if(BOOST_UNLIKELY(group_type::is_sentinel(pc())))p_=nullptr;
  146. return;
  147. }
/* whole-group scan: skip fully empty groups, then land on the first
 * occupied slot (or become end() if that slot is the sentinel) */
  148. for(;;){
  149. int mask=reinterpret_cast<group_type*>(pc())->match_occupied();
  150. if(mask!=0){
  151. auto n=unchecked_countr_zero(mask);
  152. if(BOOST_UNLIKELY(reinterpret_cast<group_type*>(pc())->is_sentinel(n))){
  153. p_=nullptr;
  154. }
  155. else{
  156. pc_+=static_cast<diff_type>(n);
  157. p_+=static_cast<diff_type>(n);
  158. }
  159. return;
  160. }
  161. pc_+=static_cast<diff_type>(sizeof(group_type));
  162. p_+=static_cast<diff_type>(N);
  163. }
  164. }
/* interleaved layout: per-slot occupancy is only available through the
 * whole group's match_occupied() mask, so rebase pc_ to the group start
 * and clear the mask bits at or below the current position n0 */
  165. inline void increment(std::false_type /* interleaved */)noexcept
  166. {
  167. using diff_type=
  168. typename boost::pointer_traits<char_pointer>::difference_type;
  169. std::size_t n0=reinterpret_cast<uintptr_t>(pc())%sizeof(group_type);
  170. pc_-=static_cast<diff_type>(n0);
  171. int mask=(
  172. reinterpret_cast<group_type*>(pc())->match_occupied()>>(n0+1))<<(n0+1);
  173. if(!mask){
/* no occupied slot after n0 in this group: advance group by group */
  174. do{
  175. pc_+=sizeof(group_type);
  176. p_+=N;
  177. }
  178. while((mask=reinterpret_cast<group_type*>(pc())->match_occupied())==0);
  179. }
  180. auto n=unchecked_countr_zero(mask);
  181. if(BOOST_UNLIKELY(reinterpret_cast<group_type*>(pc())->is_sentinel(n))){
  182. p_=nullptr;
  183. }
  184. else{
/* p_ still points at the old position: undo n0 before adding n */
  185. pc_+=static_cast<diff_type>(n);
  186. p_-=static_cast<diff_type>(n0);
  187. p_+=static_cast<diff_type>(n);
  188. }
  189. }
/* serialization support: addresses are tracked only for non-end iterators */
  190. template<typename Archive>
  191. friend void serialization_track(Archive& ar,const table_iterator& x)
  192. {
  193. if(x.p()){
  194. track_address(ar,x.pc_);
  195. track_address(ar,x.p_);
  196. }
  197. }
  198. friend class boost::serialization::access;
/* end() iterators serialize with pc_ normalized to nullptr */
  199. template<typename Archive>
  200. void serialize(Archive& ar,unsigned int)
  201. {
  202. if(!p())pc_=nullptr;
  203. serialize_tracked_address(ar,pc_);
  204. serialize_tracked_address(ar,p_);
  205. }
  206. char_pointer pc_=nullptr;
  207. table_element_pointer p_=nullptr;
  208. };
  209. /* Returned by table::erase([const_]iterator) to avoid iterator increment
  210. * if discarded.
  211. */
/* Proxy returned by table::erase([const_]iterator): converting it to an
 * iterator performs the increment to the next element, so that cost is
 * paid only when the caller actually uses the result. */
  212. template<typename Iterator>
  213. class table_erase_return_type;
  214. template<typename TypePolicy,typename GroupPtr,bool Const>
  215. class table_erase_return_type<table_iterator<TypePolicy,GroupPtr,Const>>
  216. {
  217. using iterator=table_iterator<TypePolicy,GroupPtr,Const>;
  218. using const_iterator=table_iterator<TypePolicy,GroupPtr,true>;
  219. public:
  220. /* can't delete it because VS in pre-C++17 mode needs to see it for RVO */
  221. table_erase_return_type(const table_erase_return_type&);
/* the deferred increment happens here, on conversion */
  222. operator iterator()const noexcept
  223. {
  224. auto it=pos;
  225. it.increment(); /* valid even if *it was erased */
  226. return iterator(const_iterator_cast_tag{},it);
  227. }
/* const_iterator conversion, available only when Const==false (otherwise
 * it would duplicate the one above); dependent_value keeps the enable_if
 * condition dependent so the overload drops out cleanly */
  228. template<
  229. bool dependent_value=false,
  230. typename std::enable_if<!Const||dependent_value>::type* =nullptr
  231. >
  232. operator const_iterator()const noexcept{return this->operator iterator();}
  233. private:
  234. template<typename,typename,typename,typename> friend class table;
/* only table can create these, from the just-erased position */
  235. table_erase_return_type(const_iterator pos_):pos{pos_}{}
  236. table_erase_return_type& operator=(const table_erase_return_type&)=delete;
  237. const_iterator pos;
  238. };
  239. /* foa::table interface departs in a number of ways from that of C++ unordered
  240. * associative containers because it's not for end-user consumption
  241. * (boost::unordered_(flat|node)_(map|set) wrappers complete it as
  242. * appropriate).
  243. *
  244. * The table supports two main modes of operation: flat and node-based. In the
  245. * flat case, buckets directly store elements. For node-based, buckets store
  246. * pointers to individually heap-allocated elements.
  247. *
  248. * For both flat and node-based:
  249. *
  250. * - begin() is not O(1).
  251. * - No bucket API.
  252. * - Load factor is fixed and can't be set by the user.
  253. *
  254. * For flat only:
  255. *
  256. * - value_type must be moveable.
  257. * - Pointer stability is not kept under rehashing.
  258. * - No extract API.
  259. *
  260. * try_emplace, erase and find support heterogeneous lookup by default,
  261. * that is, without checking for any ::is_transparent typedefs --the
  262. * checking is done by boost::unordered_(flat|node)_(map|set).
  263. */
  264. template<typename,typename,typename,typename>
  265. class concurrent_table; /* concurrent/non-concurrent interop */
/* core implementation for the non-concurrent table: group15 metadata
 * groups stored as plain (non-atomic) integrals, plain (unsynchronized)
 * size control */
  266. template <typename TypePolicy,typename Hash,typename Pred,typename Allocator>
  267. using table_core_impl=
  268. table_core<TypePolicy,group15<plain_integral>,table_arrays,
  269. plain_size_control,Hash,Pred,Allocator>;
  270. #include <boost/unordered/detail/foa/ignore_wshadow.hpp>
  271. #if defined(BOOST_MSVC)
  272. #pragma warning(push)
  273. #pragma warning(disable:4714) /* marked as __forceinline not inlined */
  274. #endif
/* Non-concurrent open-addressing table. Privately inherits the shared core
 * (table_core_impl) and layers the iterator-based interface described in
 * the comment block above. */
  275. template<typename TypePolicy,typename Hash,typename Pred,typename Allocator>
  276. class table:table_core_impl<TypePolicy,Hash,Pred,Allocator>
  277. {
  278. using super=table_core_impl<TypePolicy,Hash,Pred,Allocator>;
  279. using type_policy=typename super::type_policy;
  280. using group_type=typename super::group_type;
  281. using super::N;
  282. using prober=typename super::prober;
  283. using arrays_type=typename super::arrays_type;
  284. using size_ctrl_type=typename super::size_ctrl_type;
  285. using locator=typename super::locator;
  286. using compatible_concurrent_table=
  287. concurrent_table<TypePolicy,Hash,Pred,Allocator>;
/* group pointer rebound from the allocator's pointer type (fancy-pointer
 * aware) */
  288. using group_type_pointer=typename boost::pointer_traits<
  289. typename boost::allocator_pointer<Allocator>::type
  290. >::template rebind<group_type>;
  291. friend compatible_concurrent_table;
  292. public:
  293. using key_type=typename super::key_type;
  294. using init_type=typename super::init_type;
  295. using value_type=typename super::value_type;
  296. using element_type=typename super::element_type;
  297. private:
/* sets have key_type==value_type; only maps get a distinct mutable
 * iterator type */
  298. static constexpr bool has_mutable_iterator=
  299. !std::is_same<key_type,value_type>::value;
  300. public:
  301. using hasher=typename super::hasher;
  302. using key_equal=typename super::key_equal;
  303. using allocator_type=typename super::allocator_type;
  304. using pointer=typename super::pointer;
  305. using const_pointer=typename super::const_pointer;
  306. using reference=typename super::reference;
  307. using const_reference=typename super::const_reference;
  308. using size_type=typename super::size_type;
  309. using difference_type=typename super::difference_type;
  310. using const_iterator=table_iterator<type_policy,group_type_pointer,true>;
  311. using iterator=typename std::conditional<
  312. has_mutable_iterator,
  313. table_iterator<type_policy,group_type_pointer,false>,
  314. const_iterator>::type;
  315. using erase_return_type=table_erase_return_type<iterator>;
  316. #if defined(BOOST_UNORDERED_ENABLE_STATS)
  317. using stats=typename super::stats;
  318. #endif
/* constructors/assignment mostly forward to the core implementation */
  319. table(
  320. std::size_t n=default_bucket_count,const Hash& h_=Hash(),
  321. const Pred& pred_=Pred(),const Allocator& al_=Allocator()):
  322. super{n,h_,pred_,al_}
  323. {}
  324. table(const table& x)=default;
  325. table(table&& x)=default;
  326. table(const table& x,const Allocator& al_):super{x,al_}{}
  327. table(table&& x,const Allocator& al_):super{std::move(x),al_}{}
/* takeover constructor: adopts the contents of a compatible concurrent
 * table under its exclusive lock (see private constructors below) */
  328. table(compatible_concurrent_table&& x):
  329. table(std::move(x),x.exclusive_access()){}
  330. ~table()=default;
  331. table& operator=(const table& x)=default;
  332. table& operator=(table&& x)=default;
  333. using super::get_allocator;
/* not O(1): may need to advance past an unoccupied first slot (and the
 * increment itself scans for the first occupied slot) */
  334. iterator begin()noexcept
  335. {
  336. iterator it{this->arrays.groups(),0,this->arrays.elements()};
  337. if(this->arrays.elements()&&
  338. !(this->arrays.groups()[0].match_occupied()&0x1))++it;
  339. return it;
  340. }
  341. const_iterator begin()const noexcept
  342. {return const_cast<table*>(this)->begin();}
  343. iterator end()noexcept{return {};}
  344. const_iterator end()const noexcept{return const_cast<table*>(this)->end();}
  345. const_iterator cbegin()const noexcept{return begin();}
  346. const_iterator cend()const noexcept{return end();}
  347. using super::empty;
  348. using super::size;
  349. using super::max_size;
/* generic emplace: constructs a candidate element from args and inserts it
 * by move via emplace_impl */
  350. template<typename... Args>
  351. BOOST_FORCEINLINE std::pair<iterator,bool> emplace(Args&&... args)
  352. {
  353. alloc_cted_insert_type<type_policy,Allocator,Args...> x(
  354. this->al(),std::forward<Args>(args)...);
  355. return emplace_impl(type_policy::move(x.value()));
  356. }
  357. /* Optimization for value_type and init_type, to avoid constructing twice */
  358. template <typename T>
  359. BOOST_FORCEINLINE typename std::enable_if<
  360. detail::is_similar_to_any<T, value_type, init_type>::value,
  361. std::pair<iterator, bool> >::type
  362. emplace(T&& x)
  363. {
  364. return emplace_impl(std::forward<T>(x));
  365. }
  366. /* Optimizations for maps for (k,v) to avoid eagerly constructing value */
  367. template <typename K, typename V>
  368. BOOST_FORCEINLINE
  369. typename std::enable_if<is_emplace_kv_able<table, K>::value,
  370. std::pair<iterator, bool> >::type
  371. emplace(K&& k, V&& v)
  372. {
  373. alloc_cted_or_fwded_key_type<type_policy, Allocator, K&&> x(
  374. this->al(), std::forward<K>(k));
  375. return emplace_impl(
  376. try_emplace_args_t{}, x.move_or_fwd(), std::forward<V>(v));
  377. }
/* heterogeneous by default: Key need not be key_type */
  378. template<typename Key,typename... Args>
  379. BOOST_FORCEINLINE std::pair<iterator,bool> try_emplace(
  380. Key&& x,Args&&... args)
  381. {
  382. return emplace_impl(
  383. try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
  384. }
  385. BOOST_FORCEINLINE std::pair<iterator,bool>
  386. insert(const init_type& x){return emplace_impl(x);}
  387. BOOST_FORCEINLINE std::pair<iterator,bool>
  388. insert(init_type&& x){return emplace_impl(std::move(x));}
  389. /* template<typename=void> tilts call ambiguities in favor of init_type */
  390. template<typename=void>
  391. BOOST_FORCEINLINE std::pair<iterator,bool>
  392. insert(const value_type& x){return emplace_impl(x);}
  393. template<typename=void>
  394. BOOST_FORCEINLINE std::pair<iterator,bool>
  395. insert(value_type&& x){return emplace_impl(std::move(x));}
/* node-based containers only: insert a whole element (enabled when
 * element_type differs from value_type) */
  396. template<typename T=element_type>
  397. BOOST_FORCEINLINE
  398. typename std::enable_if<
  399. !std::is_same<T,value_type>::value,
  400. std::pair<iterator,bool>
  401. >::type
  402. insert(element_type&& x){return emplace_impl(std::move(x));}
/* erase returns a proxy so the increment to the following element is
 * skipped when the result is discarded; this overload exists only when
 * iterator and const_iterator are distinct types */
  403. template<
  404. bool dependent_value=false,
  405. typename std::enable_if<
  406. has_mutable_iterator||dependent_value>::type* =nullptr
  407. >
  408. erase_return_type erase(iterator pos)noexcept
  409. {return erase(const_iterator(pos));}
  410. BOOST_FORCEINLINE
  411. erase_return_type erase(const_iterator pos)noexcept
  412. {
  413. super::erase(pos.pc(),pos.p());
  414. return {pos};
  415. }
/* heterogeneous erase-by-key; SFINAE keeps the iterator overloads
 * preferred for convertible arguments */
  416. template<typename Key>
  417. BOOST_FORCEINLINE
  418. auto erase(Key&& x) -> typename std::enable_if<
  419. !std::is_convertible<Key,iterator>::value&&
  420. !std::is_convertible<Key,const_iterator>::value, std::size_t>::type
  421. {
  422. auto it=find(x);
  423. if(it!=end()){
  424. erase(it);
  425. return 1;
  426. }
  427. else return 0;
  428. }
  429. void swap(table& x)
  430. noexcept(noexcept(std::declval<super&>().swap(std::declval<super&>())))
  431. {
  432. super::swap(x);
  433. }
  434. using super::clear;
/* moves the element out; the erase_on_exit guard removes the slot after
 * the return value has been constructed */
  435. element_type extract(const_iterator pos)
  436. {
  437. BOOST_ASSERT(pos!=end());
  438. erase_on_exit e{*this,pos};
  439. (void)e;
  440. return std::move(*pos.p());
  441. }
  442. // TODO: should we accept different allocator too?
/* moves every element of x into *this; elements whose key already exists
 * here are rolled back and stay in x */
  443. template<typename Hash2,typename Pred2>
  444. void merge(table<TypePolicy,Hash2,Pred2,Allocator>& x)
  445. {
  446. x.for_all_elements([&,this](group_type* pg,unsigned int n,element_type* p){
  447. erase_on_exit e{x,{pg,n,p}};
  448. if(!emplace_impl(type_policy::move(*p)).second)e.rollback();
  449. });
  450. }
  451. template<typename Hash2,typename Pred2>
  452. void merge(table<TypePolicy,Hash2,Pred2,Allocator>&& x){merge(x);}
  453. using super::hash_function;
  454. using super::key_eq;
  455. template<typename Key>
  456. BOOST_FORCEINLINE iterator find(const Key& x)
  457. {
  458. return make_iterator(super::find(x));
  459. }
  460. template<typename Key>
  461. BOOST_FORCEINLINE const_iterator find(const Key& x)const
  462. {
  463. return const_cast<table*>(this)->find(x);
  464. }
  465. using super::capacity;
  466. using super::load_factor;
  467. using super::max_load_factor;
  468. using super::max_load;
  469. using super::rehash;
  470. using super::reserve;
  471. #if defined(BOOST_UNORDERED_ENABLE_STATS)
  472. using super::get_stats;
  473. using super::reset_stats;
  474. #endif
/* for sets (key_type==value_type) the predicate sees a const reference so
 * keys cannot be mutated in place */
  475. template<typename Predicate>
  476. friend std::size_t erase_if(table& x,Predicate& pr)
  477. {
  478. using value_reference=typename std::conditional<
  479. std::is_same<key_type,value_type>::value,
  480. const_reference,
  481. reference
  482. >::type;
  483. std::size_t s=x.size();
  484. x.for_all_elements(
  485. [&](group_type* pg,unsigned int n,element_type* p){
  486. if(pr(const_cast<value_reference>(type_policy::value_from(*p)))){
  487. x.super::erase(pg,n,p);
  488. }
  489. });
  490. return std::size_t(s-x.size());
  491. }
  492. friend bool operator==(const table& x,const table& y)
  493. {
  494. return static_cast<const super&>(x)==static_cast<const super&>(y);
  495. }
  496. friend bool operator!=(const table& x,const table& y){return !(x==y);}
  497. private:
/* conversion from concurrent_table: adopts x's arrays and size control,
 * then resets x to the freshly made empty arrays held in ah */
  498. template<typename ArraysType>
  499. table(compatible_concurrent_table&& x,arrays_holder<ArraysType,Allocator>&& ah):
  500. super{
  501. std::move(x.h()),std::move(x.pred()),std::move(x.al()),
  502. [&x]{return arrays_type{
  503. x.arrays.groups_size_index,x.arrays.groups_size_mask,
  504. to_pointer<group_type_pointer>(
  505. reinterpret_cast<group_type*>(x.arrays.groups())),
  506. x.arrays.elements_};},
  507. size_ctrl_type{x.size_ctrl.ml,x.size_ctrl.size}}
  508. {
  509. compatible_concurrent_table::arrays_type::delete_group_access(x.al(),x.arrays);
  510. x.arrays=ah.release();
  511. x.size_ctrl.ml=x.initial_max_load();
  512. x.size_ctrl.size=0;
  513. BOOST_UNORDERED_SWAP_STATS(this->cstats,x.cstats);
  514. }
/* the lock guard parameter only enforces that the takeover above runs
 * under x's exclusive access */
  515. template<typename ExclusiveLockGuard>
  516. table(compatible_concurrent_table&& x,ExclusiveLockGuard):
  517. table(std::move(x),x.make_empty_arrays())
  518. {}
/* RAII guard: erases `it` on scope exit unless rollback() was called */
  519. struct erase_on_exit
  520. {
  521. erase_on_exit(table& x_,const_iterator it_):x(x_),it(it_){}
  522. ~erase_on_exit(){if(!rollback_)x.erase(it);}
  523. void rollback(){rollback_=true;}
  524. table& x;
  525. const_iterator it;
  526. bool rollback_=false;
  527. };
  528. static inline iterator make_iterator(const locator& l)noexcept
  529. {
  530. return {l.pg,l.n,l.p};
  531. }
/* common insertion path: look the key up first; if absent, insert at the
 * probed position when below max load, otherwise insert with rehash */
  532. template<typename... Args>
  533. BOOST_FORCEINLINE std::pair<iterator,bool> emplace_impl(Args&&... args)
  534. {
  535. const auto &k=this->key_from(std::forward<Args>(args)...);
  536. auto hash=this->hash_for(k);
  537. auto pos0=this->position_for(hash);
  538. auto loc=super::find(k,pos0,hash);
  539. if(loc){
  540. return {make_iterator(loc),false};
  541. }
  542. if(BOOST_LIKELY(this->size_ctrl.size<this->size_ctrl.ml)){
  543. return {
  544. make_iterator(
  545. this->unchecked_emplace_at(pos0,hash,std::forward<Args>(args)...)),
  546. true
  547. };
  548. }
  549. else{
  550. return {
  551. make_iterator(
  552. this->unchecked_emplace_with_rehash(
  553. hash,std::forward<Args>(args)...)),
  554. true
  555. };
  556. }
  557. }
  558. };
  559. #if defined(BOOST_MSVC)
  560. #pragma warning(pop) /* C4714 */
  561. #endif
  562. #include <boost/unordered/detail/foa/restore_wshadow.hpp>
  563. } /* namespace foa */
  564. } /* namespace detail */
  565. } /* namespace unordered */
  566. } /* namespace boost */
  567. #endif