concurrent_table.hpp

  1. /* Fast open-addressing concurrent hash table.
  2. *
  3. * Copyright 2023-2024 Joaquin M Lopez Munoz.
  4. * Copyright 2024 Braden Ganetsky.
  5. * Distributed under the Boost Software License, Version 1.0.
  6. * (See accompanying file LICENSE_1_0.txt or copy at
  7. * http://www.boost.org/LICENSE_1_0.txt)
  8. *
  9. * See https://www.boost.org/libs/unordered for library home page.
  10. */
  11. #ifndef BOOST_UNORDERED_DETAIL_FOA_CONCURRENT_TABLE_HPP
  12. #define BOOST_UNORDERED_DETAIL_FOA_CONCURRENT_TABLE_HPP
  13. #include <atomic>
  14. #include <boost/assert.hpp>
  15. #include <boost/config.hpp>
  16. #include <boost/core/ignore_unused.hpp>
  17. #include <boost/core/no_exceptions_support.hpp>
  18. #include <boost/core/serialization.hpp>
  19. #include <boost/cstdint.hpp>
  20. #include <boost/mp11/tuple.hpp>
  21. #include <boost/throw_exception.hpp>
  22. #include <boost/unordered/detail/archive_constructed.hpp>
  23. #include <boost/unordered/detail/bad_archive_exception.hpp>
  24. #include <boost/unordered/detail/foa/core.hpp>
  25. #include <boost/unordered/detail/foa/reentrancy_check.hpp>
  26. #include <boost/unordered/detail/foa/rw_spinlock.hpp>
  27. #include <boost/unordered/detail/foa/tuple_rotate_right.hpp>
  28. #include <boost/unordered/detail/serialization_version.hpp>
  29. #include <boost/unordered/detail/static_assert.hpp>
  30. #include <boost/unordered/detail/type_traits.hpp>
  31. #include <cstddef>
  32. #include <functional>
  33. #include <initializer_list>
  34. #include <iterator>
  35. #include <memory>
  36. #include <new>
  37. #include <type_traits>
  38. #include <tuple>
  39. #include <utility>
  40. #if !defined(BOOST_UNORDERED_DISABLE_PARALLEL_ALGORITHMS)
  41. #if defined(BOOST_UNORDERED_ENABLE_PARALLEL_ALGORITHMS)|| \
  42. !defined(BOOST_NO_CXX17_HDR_EXECUTION)
  43. #define BOOST_UNORDERED_PARALLEL_ALGORITHMS
  44. #endif
  45. #endif
  46. #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  47. #include <algorithm>
  48. #include <execution>
  49. #endif
  50. namespace boost{
  51. namespace unordered{
  52. namespace detail{
  53. #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  54. template<typename ExecutionPolicy>
  55. using is_execution_policy=std::is_execution_policy<
  56. typename std::remove_cv<
  57. typename std::remove_reference<ExecutionPolicy>::type
  58. >::type
  59. >;
  60. #else
  61. template<typename ExecutionPolicy>
  62. using is_execution_policy=std::false_type;
  63. #endif
  64. namespace foa{
  65. static constexpr std::size_t cacheline_size=64;
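/* cache_aligned_array places each of its N elements at the start of a separate
 * cache line: element_offset rounds sizeof(T) up to a multiple of
 * cacheline_size (e.g. sizeof(T)==12 gives (12+63)/64*64==64), and buf reserves
 * cacheline_size-1 extra bytes so that data(0) can itself be rounded up to a
 * cacheline boundary. This avoids false sharing between the spinlocks that
 * multimutex stores in such an array below.
 */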
  66. template<typename T,std::size_t N>
  67. class cache_aligned_array
  68. {
  69. public:
  70. cache_aligned_array(){for(std::size_t n=0;n<N;)::new (data(n++)) T();}
  71. ~cache_aligned_array(){for(auto n=N;n>0;)data(--n)->~T();}
  72. cache_aligned_array(const cache_aligned_array&)=delete;
  73. cache_aligned_array& operator=(const cache_aligned_array&)=delete;
  74. T& operator[](std::size_t pos)noexcept{return *data(pos);}
  75. private:
  76. static constexpr std::size_t element_offset=
  77. (sizeof(T)+cacheline_size-1)/cacheline_size*cacheline_size;
  78. BOOST_UNORDERED_STATIC_ASSERT(alignof(T)<=cacheline_size);
  79. T* data(std::size_t pos)noexcept
  80. {
  81. return reinterpret_cast<T*>(
  82. (reinterpret_cast<uintptr_t>(&buf)+cacheline_size-1)/
  83. cacheline_size*cacheline_size
  84. +pos*element_offset);
  85. }
  86. unsigned char buf[element_offset*N+cacheline_size-1];
  87. };
  88. template<typename Mutex,std::size_t N>
  89. class multimutex
  90. {
  91. public:
  92. constexpr std::size_t size()const noexcept{return N;}
  93. Mutex& operator[](std::size_t pos)noexcept
  94. {
  95. BOOST_ASSERT(pos<N);
  96. return mutexes[pos];
  97. }
  98. void lock()noexcept{for(std::size_t n=0;n<N;)mutexes[n++].lock();}
  99. void unlock()noexcept{for(auto n=N;n>0;)mutexes[--n].unlock();}
  100. private:
  101. cache_aligned_array<Mutex,N> mutexes;
  102. };
  103. /* std::shared_lock is C++14 */
  104. template<typename Mutex>
  105. class shared_lock
  106. {
  107. public:
  108. shared_lock(Mutex& m_)noexcept:m(m_){m.lock_shared();}
  109. ~shared_lock()noexcept{if(owns)m.unlock_shared();}
  110. /* not used but VS in pre-C++17 mode needs to see it for RVO */
  111. shared_lock(const shared_lock&);
  112. void lock(){BOOST_ASSERT(!owns);m.lock_shared();owns=true;}
  113. void unlock(){BOOST_ASSERT(owns);m.unlock_shared();owns=false;}
  114. private:
  115. Mutex &m;
  116. bool owns=true;
  117. };
  118. /* VS in pre-C++17 mode can't implement RVO for std::lock_guard due to
  119. * its copy constructor being deleted.
  120. */
  121. template<typename Mutex>
  122. class lock_guard
  123. {
  124. public:
  125. lock_guard(Mutex& m_)noexcept:m(m_){m.lock();}
  126. ~lock_guard()noexcept{m.unlock();}
  127. /* not used but VS in pre-C++17 mode needs to see it for RVO */
  128. lock_guard(const lock_guard&);
  129. private:
  130. Mutex &m;
  131. };
  132. /* inspired by boost/multi_index/detail/scoped_bilock.hpp */
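/* Locks two mutexes in a consistent (address-based) order, so that two threads
 * locking the same pair in opposite argument order cannot deadlock; when both
 * arguments refer to the same mutex it is locked only once (pm2 stays null).
 */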
  133. template<typename Mutex>
  134. class scoped_bilock
  135. {
  136. public:
  137. scoped_bilock(Mutex& m1,Mutex& m2)noexcept
  138. {
  139. bool mutex_lt=std::less<Mutex*>{}(&m1,&m2);
  140. pm1=mutex_lt?&m1:&m2;
  141. pm1->lock();
  142. if(&m1==&m2){
  143. pm2=nullptr;
  144. }
  145. else{
  146. pm2=mutex_lt?&m2:&m1;
  147. pm2->lock();
  148. }
  149. }
  150. /* not used but VS in pre-C++17 mode needs to see it for RVO */
  151. scoped_bilock(const scoped_bilock&);
  152. ~scoped_bilock()noexcept
  153. {
  154. if(pm2)pm2->unlock();
  155. pm1->unlock();
  156. }
  157. private:
  158. Mutex *pm1,*pm2;
  159. };
  160. /* use atomics for group metadata storage */
  161. template<typename Integral>
  162. struct atomic_integral
  163. {
  164. operator Integral()const{return n.load(std::memory_order_relaxed);}
  165. void operator=(Integral m){n.store(m,std::memory_order_relaxed);}
  166. void operator|=(Integral m){n.fetch_or(m,std::memory_order_relaxed);}
  167. void operator&=(Integral m){n.fetch_and(m,std::memory_order_relaxed);}
  168. atomic_integral& operator=(atomic_integral const& rhs) {
  169. n.store(rhs.n.load(std::memory_order_relaxed),std::memory_order_relaxed);
  170. return *this;
  171. }
  172. std::atomic<Integral> n;
  173. };
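/* Note: atomic_integral uses memory_order_relaxed throughout; as described in
 * the design notes further below, a metadata match found during the lock-free
 * probe is re-validated under the corresponding group lock before the element
 * is accessed.
 */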
  174. /* Group-level concurrency protection. It provides a rw mutex plus an
  175. * atomic insertion counter for optimistic insertion (see
  176. * unprotected_norehash_emplace_or_visit).
  177. */
  178. struct group_access
  179. {
  180. using mutex_type=rw_spinlock;
  181. using shared_lock_guard=shared_lock<mutex_type>;
  182. using exclusive_lock_guard=lock_guard<mutex_type>;
  183. using insert_counter_type=std::atomic<boost::uint32_t>;
  184. shared_lock_guard shared_access(){return shared_lock_guard{m};}
  185. exclusive_lock_guard exclusive_access(){return exclusive_lock_guard{m};}
  186. insert_counter_type& insert_counter(){return cnt;}
  187. private:
  188. mutex_type m;
  189. insert_counter_type cnt{0};
  190. };
  191. template<std::size_t Size>
  192. group_access* dummy_group_accesses()
  193. {
  194. /* Default group_access array to provide to empty containers without
  195. * incurring dynamic allocation. Mutexes won't actually ever be used
  196. * (no successful reduced hash match) and insertion counters won't ever
  197. * be incremented (insertions won't succeed as capacity()==0).
  198. */
  199. static group_access accesses[Size];
  200. return accesses;
  201. }
  202. /* subclasses table_arrays to add an additional group_access array */
  203. template<typename Value,typename Group,typename SizePolicy,typename Allocator>
  204. struct concurrent_table_arrays:table_arrays<Value,Group,SizePolicy,Allocator>
  205. {
  206. using group_access_allocator_type=
  207. typename boost::allocator_rebind<Allocator,group_access>::type;
  208. using group_access_pointer=
  209. typename boost::allocator_pointer<group_access_allocator_type>::type;
  210. using super=table_arrays<Value,Group,SizePolicy,Allocator>;
  211. using allocator_type=typename super::allocator_type;
  212. concurrent_table_arrays(const super& arrays,group_access_pointer pga):
  213. super{arrays},group_accesses_{pga}{}
  214. group_access* group_accesses()const noexcept{
  215. return boost::to_address(group_accesses_);
  216. }
  217. static concurrent_table_arrays new_(allocator_type al,std::size_t n)
  218. {
  219. super x{super::new_(al,n)};
  220. BOOST_TRY{
  221. return new_group_access(group_access_allocator_type(al),x);
  222. }
  223. BOOST_CATCH(...){
  224. super::delete_(al,x);
  225. BOOST_RETHROW
  226. }
  227. BOOST_CATCH_END
  228. }
  229. static void set_group_access(
  230. group_access_allocator_type al,concurrent_table_arrays& arrays)
  231. {
  232. set_group_access(
  233. al,arrays,std::is_same<group_access*,group_access_pointer>{});
  234. }
  235. static void set_group_access(
  236. group_access_allocator_type al,
  237. concurrent_table_arrays& arrays,
  238. std::false_type /* fancy pointers */)
  239. {
  240. arrays.group_accesses_=
  241. boost::allocator_allocate(al,arrays.groups_size_mask+1);
  242. for(std::size_t i=0;i<arrays.groups_size_mask+1;++i){
  243. ::new (arrays.group_accesses()+i) group_access();
  244. }
  245. }
  246. static void set_group_access(
  247. group_access_allocator_type al,
  248. concurrent_table_arrays& arrays,
  249. std::true_type /* optimize when elements() is null */)
  250. {
  251. if(!arrays.elements()){
  252. arrays.group_accesses_=
  253. dummy_group_accesses<SizePolicy::min_size()>();
  254. } else {
  255. set_group_access(al,arrays,std::false_type{});
  256. }
  257. }
  258. static concurrent_table_arrays new_group_access(
  259. group_access_allocator_type al,const super& x)
  260. {
  261. concurrent_table_arrays arrays{x,nullptr};
  262. set_group_access(al,arrays);
  263. return arrays;
  264. }
  265. static void delete_(allocator_type al,concurrent_table_arrays& arrays)noexcept
  266. {
  267. delete_group_access(group_access_allocator_type(al),arrays);
  268. super::delete_(al,arrays);
  269. }
  270. static void delete_group_access(
  271. group_access_allocator_type al,concurrent_table_arrays& arrays)noexcept
  272. {
  273. if(arrays.elements()){
  274. boost::allocator_deallocate(
  275. al,arrays.group_accesses_,arrays.groups_size_mask+1);
  276. }
  277. }
  278. group_access_pointer group_accesses_;
  279. };
  280. struct atomic_size_control
  281. {
  282. static constexpr auto atomic_size_t_size=sizeof(std::atomic<std::size_t>);
  283. BOOST_UNORDERED_STATIC_ASSERT(atomic_size_t_size<cacheline_size);
  284. atomic_size_control(std::size_t ml_,std::size_t size_):
  285. pad0_{},ml{ml_},pad1_{},size{size_}{}
  286. atomic_size_control(const atomic_size_control& x):
  287. pad0_{},ml{x.ml.load()},pad1_{},size{x.size.load()}{}
  288. * padding to avoid false sharing internally and with surrounding data */
  289. unsigned char pad0_[cacheline_size-atomic_size_t_size];
  290. std::atomic<std::size_t> ml;
  291. unsigned char pad1_[cacheline_size-atomic_size_t_size];
  292. std::atomic<std::size_t> size;
  293. };
  294. /* std::swap can't be used on non-assignable atomics */
  295. inline void
  296. swap_atomic_size_t(std::atomic<std::size_t>& x,std::atomic<std::size_t>& y)
  297. {
  298. std::size_t tmp=x;
  299. x=static_cast<std::size_t>(y);
  300. y=tmp;
  301. }
  302. inline void swap(atomic_size_control& x,atomic_size_control& y)
  303. {
  304. swap_atomic_size_t(x.ml,y.ml);
  305. swap_atomic_size_t(x.size,y.size);
  306. }
  307. /* foa::concurrent_table serves as the foundation for end-user concurrent
  308. * hash containers.
  309. *
  310. * The exposed interface (completed by the wrapping containers) is not that
  311. * of a regular container (in fact, it does not model Container as understood
  312. * by the C++ standard):
  313. *
  314. * - Iterators are not provided as they are not suitable for concurrent
  315. * scenarios.
  316. * - As a consequence, composite operations with regular containers
  317. * (like, for instance, looking up an element and modifying it) must
  318. * be provided natively without any intervening iterator/accessor.
  319. * Visitation is a core concept in this design, either on its own (e.g.
  320. * visit(k) locates the element with key k *and* accesses it) or as part
  321. * of a native composite operation (e.g. try_emplace_or_visit). Visitation
  322. * is constant or mutating depending on whether the table function used is
  323. * const or not.
  324. * - The API provides member functions for all the meaningful composite
  325. * operations of the form "X (and|or) Y", where X, Y are one of the
  326. * primitives FIND, ACCESS, INSERT or ERASE.
  327. * - Parallel versions of [c]visit_all(f) and erase_if(f) are provided based
  328. * on C++17 stdlib parallel algorithms.
  329. *
  330. * Consult boost::concurrent_flat_(map|set) docs for the full API reference.
  331. * Heterogeneous lookup is supported by default, that is, without checking for
  332. * any ::is_transparent typedefs; this checking is done by the wrapping
  333. * containers.
  334. *
  335. * Thread-safe concurrency is implemented using a two-level lock system:
  336. *
  337. * - A first container-level lock is implemented with an array of
  338. * rw spinlocks acting as a single rw mutex with very little
  339. * cache-coherence traffic on read (each thread is assigned a different
  340. * spinlock in the array). Container-level write locking is only used for
  341. * rehashing and other container-wide operations (assignment, swap, etc.)
  342. * - Each group of slots has an associated rw spinlock. A thread holds
  343. * at most one group lock at any given time. Lookup is implemented in
  344. * a (groupwise) lock-free manner until a reduced hash match is found, in
  345. * which case the relevant group is locked and the slot is double-checked
  346. * for occupancy and compared with the key.
  347. * - Each group also has an associated so-called insertion counter used for
  348. * the following optimistic insertion algorithm:
  349. * - The value of the insertion counter for the initial group in the probe
  350. * sequence is locally recorded (let's call this value c0).
  351. * - Lookup is as described above. If lookup finds no equivalent element,
  352. * the search for an available slot for insertion successively locks/unlocks
  353. * each group in the probing sequence.
  354. * - When an available slot is located, it is preemptively occupied (its
  355. * reduced hash value is set) and the insertion counter is atomically
  356. * incremented: if no other thread has incremented the counter during the
  357. * whole operation (which is checked by comparing with c0), then we're
  358. * good to go and complete the insertion, otherwise we roll back and
  359. * start over.
  360. */
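/* A minimal usage sketch of the wrapped interface (boost::concurrent_flat_map;
 * see its documentation for the exact signatures):
 *
 *   #include <boost/unordered/concurrent_flat_map.hpp>
 *
 *   boost::concurrent_flat_map<std::string,int> m;
 *   m.emplace("a",1);                                        // INSERT
 *   m.visit("a",[](auto& kv){++kv.second;});                 // FIND and ACCESS
 *   m.emplace_or_visit("b",0,[](auto& kv){++kv.second;});    // INSERT or ACCESS
 *   m.erase_if([](auto& kv){return kv.second==0;});          // conditional ERASE
 */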
  361. template<typename,typename,typename,typename>
  362. class table; /* concurrent/non-concurrent interop */
  363. template <typename TypePolicy,typename Hash,typename Pred,typename Allocator>
  364. using concurrent_table_core_impl=table_core<
  365. TypePolicy,group15<atomic_integral>,concurrent_table_arrays,
  366. atomic_size_control,Hash,Pred,Allocator>;
  367. #include <boost/unordered/detail/foa/ignore_wshadow.hpp>
  368. #if defined(BOOST_MSVC)
  369. #pragma warning(push)
  370. #pragma warning(disable:4714) /* marked as __forceinline not inlined */
  371. #endif
  372. template<typename TypePolicy,typename Hash,typename Pred,typename Allocator>
  373. class concurrent_table:
  374. concurrent_table_core_impl<TypePolicy,Hash,Pred,Allocator>
  375. {
  376. using super=concurrent_table_core_impl<TypePolicy,Hash,Pred,Allocator>;
  377. using type_policy=typename super::type_policy;
  378. using group_type=typename super::group_type;
  379. using super::N;
  380. using prober=typename super::prober;
  381. using arrays_type=typename super::arrays_type;
  382. using size_ctrl_type=typename super::size_ctrl_type;
  383. using compatible_nonconcurrent_table=table<TypePolicy,Hash,Pred,Allocator>;
  384. friend compatible_nonconcurrent_table;
  385. public:
  386. using key_type=typename super::key_type;
  387. using init_type=typename super::init_type;
  388. using value_type=typename super::value_type;
  389. using element_type=typename super::element_type;
  390. using hasher=typename super::hasher;
  391. using key_equal=typename super::key_equal;
  392. using allocator_type=typename super::allocator_type;
  393. using size_type=typename super::size_type;
  394. static constexpr std::size_t bulk_visit_size=16;
  395. #if defined(BOOST_UNORDERED_ENABLE_STATS)
  396. using stats=typename super::stats;
  397. #endif
  398. private:
  399. template<typename Value,typename T>
  400. using enable_if_is_value_type=typename std::enable_if<
  401. !std::is_same<init_type,value_type>::value&&
  402. std::is_same<Value,value_type>::value,
  403. T
  404. >::type;
  405. public:
  406. concurrent_table(
  407. std::size_t n=default_bucket_count,const Hash& h_=Hash(),
  408. const Pred& pred_=Pred(),const Allocator& al_=Allocator()):
  409. super{n,h_,pred_,al_}
  410. {}
  411. concurrent_table(const concurrent_table& x):
  412. concurrent_table(x,x.exclusive_access()){}
  413. concurrent_table(concurrent_table&& x):
  414. concurrent_table(std::move(x),x.exclusive_access()){}
  415. concurrent_table(const concurrent_table& x,const Allocator& al_):
  416. concurrent_table(x,al_,x.exclusive_access()){}
  417. concurrent_table(concurrent_table&& x,const Allocator& al_):
  418. concurrent_table(std::move(x),al_,x.exclusive_access()){}
  419. template<typename ArraysType>
  420. concurrent_table(
  421. compatible_nonconcurrent_table&& x,
  422. arrays_holder<ArraysType,Allocator>&& ah):
  423. super{
  424. std::move(x.h()),std::move(x.pred()),std::move(x.al()),
  425. [&x]{return arrays_type::new_group_access(
  426. x.al(),typename arrays_type::super{
  427. x.arrays.groups_size_index,x.arrays.groups_size_mask,
  428. to_pointer<typename arrays_type::group_type_pointer>(
  429. reinterpret_cast<group_type*>(x.arrays.groups())),
  430. x.arrays.elements_});},
  431. size_ctrl_type{x.size_ctrl.ml,x.size_ctrl.size}}
  432. {
  433. x.arrays=ah.release();
  434. x.size_ctrl.ml=x.initial_max_load();
  435. x.size_ctrl.size=0;
  436. BOOST_UNORDERED_SWAP_STATS(this->cstats,x.cstats);
  437. }
  438. concurrent_table(compatible_nonconcurrent_table&& x):
  439. concurrent_table(std::move(x),x.make_empty_arrays())
  440. {}
  441. ~concurrent_table()=default;
  442. concurrent_table& operator=(const concurrent_table& x)
  443. {
  444. auto lck=exclusive_access(*this,x);
  445. super::operator=(x);
  446. return *this;
  447. }
  448. concurrent_table& operator=(concurrent_table&& x)noexcept(
  449. noexcept(std::declval<super&>() = std::declval<super&&>()))
  450. {
  451. auto lck=exclusive_access(*this,x);
  452. super::operator=(std::move(x));
  453. return *this;
  454. }
  455. concurrent_table& operator=(std::initializer_list<value_type> il) {
  456. auto lck=exclusive_access();
  457. super::clear();
  458. super::noshrink_reserve(il.size());
  459. for (auto const& v : il) {
  460. this->unprotected_emplace(v);
  461. }
  462. return *this;
  463. }
  464. allocator_type get_allocator()const noexcept
  465. {
  466. auto lck=shared_access();
  467. return super::get_allocator();
  468. }
  469. template<typename Key,typename F>
  470. BOOST_FORCEINLINE std::size_t visit(const Key& x,F&& f)
  471. {
  472. return visit_impl(group_exclusive{},x,std::forward<F>(f));
  473. }
  474. template<typename Key,typename F>
  475. BOOST_FORCEINLINE std::size_t visit(const Key& x,F&& f)const
  476. {
  477. return visit_impl(group_shared{},x,std::forward<F>(f));
  478. }
  479. template<typename Key,typename F>
  480. BOOST_FORCEINLINE std::size_t cvisit(const Key& x,F&& f)const
  481. {
  482. return visit(x,std::forward<F>(f));
  483. }
  484. template<typename FwdIterator,typename F>
  485. BOOST_FORCEINLINE
  486. std::size_t visit(FwdIterator first,FwdIterator last,F&& f)
  487. {
  488. return bulk_visit_impl(group_exclusive{},first,last,std::forward<F>(f));
  489. }
  490. template<typename FwdIterator,typename F>
  491. BOOST_FORCEINLINE
  492. std::size_t visit(FwdIterator first,FwdIterator last,F&& f)const
  493. {
  494. return bulk_visit_impl(group_shared{},first,last,std::forward<F>(f));
  495. }
  496. template<typename FwdIterator,typename F>
  497. BOOST_FORCEINLINE
  498. std::size_t cvisit(FwdIterator first,FwdIterator last,F&& f)const
  499. {
  500. return visit(first,last,std::forward<F>(f));
  501. }
  502. template<typename F> std::size_t visit_all(F&& f)
  503. {
  504. return visit_all_impl(group_exclusive{},std::forward<F>(f));
  505. }
  506. template<typename F> std::size_t visit_all(F&& f)const
  507. {
  508. return visit_all_impl(group_shared{},std::forward<F>(f));
  509. }
  510. template<typename F> std::size_t cvisit_all(F&& f)const
  511. {
  512. return visit_all(std::forward<F>(f));
  513. }
  514. #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  515. template<typename ExecutionPolicy,typename F>
  516. void visit_all(ExecutionPolicy&& policy,F&& f)
  517. {
  518. visit_all_impl(
  519. group_exclusive{},
  520. std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  521. }
  522. template<typename ExecutionPolicy,typename F>
  523. void visit_all(ExecutionPolicy&& policy,F&& f)const
  524. {
  525. visit_all_impl(
  526. group_shared{},
  527. std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  528. }
  529. template<typename ExecutionPolicy,typename F>
  530. void cvisit_all(ExecutionPolicy&& policy,F&& f)const
  531. {
  532. visit_all(std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  533. }
  534. #endif
  535. template<typename F> bool visit_while(F&& f)
  536. {
  537. return visit_while_impl(group_exclusive{},std::forward<F>(f));
  538. }
  539. template<typename F> bool visit_while(F&& f)const
  540. {
  541. return visit_while_impl(group_shared{},std::forward<F>(f));
  542. }
  543. template<typename F> bool cvisit_while(F&& f)const
  544. {
  545. return visit_while(std::forward<F>(f));
  546. }
  547. #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  548. template<typename ExecutionPolicy,typename F>
  549. bool visit_while(ExecutionPolicy&& policy,F&& f)
  550. {
  551. return visit_while_impl(
  552. group_exclusive{},
  553. std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  554. }
  555. template<typename ExecutionPolicy,typename F>
  556. bool visit_while(ExecutionPolicy&& policy,F&& f)const
  557. {
  558. return visit_while_impl(
  559. group_shared{},
  560. std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  561. }
  562. template<typename ExecutionPolicy,typename F>
  563. bool cvisit_while(ExecutionPolicy&& policy,F&& f)const
  564. {
  565. return visit_while(
  566. std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  567. }
  568. #endif
  569. bool empty()const noexcept{return size()==0;}
  570. std::size_t size()const noexcept
  571. {
  572. auto lck=shared_access();
  573. return unprotected_size();
  574. }
  575. using super::max_size;
  576. template<typename... Args>
  577. BOOST_FORCEINLINE bool emplace(Args&&... args)
  578. {
  579. return construct_and_emplace(std::forward<Args>(args)...);
  580. }
  581. /* Optimization for value_type and init_type, to avoid constructing twice */
  582. template<typename Value>
  583. BOOST_FORCEINLINE auto emplace(Value&& x)->typename std::enable_if<
  584. detail::is_similar_to_any<Value,value_type,init_type>::value,bool>::type
  585. {
  586. return emplace_impl(std::forward<Value>(x));
  587. }
  588. /* Optimization for maps taking (k,v) arguments, to avoid eagerly constructing the value */
  589. template <typename K, typename V>
  590. BOOST_FORCEINLINE auto emplace(K&& k, V&& v) ->
  591. typename std::enable_if<is_emplace_kv_able<concurrent_table, K>::value,
  592. bool>::type
  593. {
  594. alloc_cted_or_fwded_key_type<type_policy, Allocator, K&&> x(
  595. this->al(), std::forward<K>(k));
  596. return emplace_impl(
  597. try_emplace_args_t{}, x.move_or_fwd(), std::forward<V>(v));
  598. }
  599. BOOST_FORCEINLINE bool
  600. insert(const init_type& x){return emplace_impl(x);}
  601. BOOST_FORCEINLINE bool
  602. insert(init_type&& x){return emplace_impl(std::move(x));}
  603. /* template<typename=void> tilts call ambiguities in favor of init_type */
  604. template<typename=void>
  605. BOOST_FORCEINLINE bool
  606. insert(const value_type& x){return emplace_impl(x);}
  607. template<typename=void>
  608. BOOST_FORCEINLINE bool
  609. insert(value_type&& x){return emplace_impl(std::move(x));}
  610. template<typename Key,typename... Args>
  611. BOOST_FORCEINLINE bool try_emplace(Key&& x,Args&&... args)
  612. {
  613. return emplace_impl(
  614. try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
  615. }
  616. template<typename Key,typename... Args>
  617. BOOST_FORCEINLINE bool try_emplace_or_visit(Key&& x,Args&&... args)
  618. {
  619. return emplace_or_visit_flast(
  620. group_exclusive{},
  621. try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
  622. }
  623. template<typename Key,typename... Args>
  624. BOOST_FORCEINLINE bool try_emplace_or_cvisit(Key&& x,Args&&... args)
  625. {
  626. return emplace_or_visit_flast(
  627. group_shared{},
  628. try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
  629. }
  630. template<typename... Args>
  631. BOOST_FORCEINLINE bool emplace_or_visit(Args&&... args)
  632. {
  633. return construct_and_emplace_or_visit_flast(
  634. group_exclusive{},std::forward<Args>(args)...);
  635. }
  636. template<typename... Args>
  637. BOOST_FORCEINLINE bool emplace_or_cvisit(Args&&... args)
  638. {
  639. return construct_and_emplace_or_visit_flast(
  640. group_shared{},std::forward<Args>(args)...);
  641. }
  642. template<typename F>
  643. BOOST_FORCEINLINE bool insert_or_visit(const init_type& x,F&& f)
  644. {
  645. return emplace_or_visit_impl(group_exclusive{},std::forward<F>(f),x);
  646. }
  647. template<typename F>
  648. BOOST_FORCEINLINE bool insert_or_cvisit(const init_type& x,F&& f)
  649. {
  650. return emplace_or_visit_impl(group_shared{},std::forward<F>(f),x);
  651. }
  652. template<typename F>
  653. BOOST_FORCEINLINE bool insert_or_visit(init_type&& x,F&& f)
  654. {
  655. return emplace_or_visit_impl(
  656. group_exclusive{},std::forward<F>(f),std::move(x));
  657. }
  658. template<typename F>
  659. BOOST_FORCEINLINE bool insert_or_cvisit(init_type&& x,F&& f)
  660. {
  661. return emplace_or_visit_impl(
  662. group_shared{},std::forward<F>(f),std::move(x));
  663. }
  664. /* SFINAE tilts call ambiguities in favor of init_type */
  665. template<typename Value,typename F>
  666. BOOST_FORCEINLINE auto insert_or_visit(const Value& x,F&& f)
  667. ->enable_if_is_value_type<Value,bool>
  668. {
  669. return emplace_or_visit_impl(group_exclusive{},std::forward<F>(f),x);
  670. }
  671. template<typename Value,typename F>
  672. BOOST_FORCEINLINE auto insert_or_cvisit(const Value& x,F&& f)
  673. ->enable_if_is_value_type<Value,bool>
  674. {
  675. return emplace_or_visit_impl(group_shared{},std::forward<F>(f),x);
  676. }
  677. template<typename Value,typename F>
  678. BOOST_FORCEINLINE auto insert_or_visit(Value&& x,F&& f)
  679. ->enable_if_is_value_type<Value,bool>
  680. {
  681. return emplace_or_visit_impl(
  682. group_exclusive{},std::forward<F>(f),std::move(x));
  683. }
  684. template<typename Value,typename F>
  685. BOOST_FORCEINLINE auto insert_or_cvisit(Value&& x,F&& f)
  686. ->enable_if_is_value_type<Value,bool>
  687. {
  688. return emplace_or_visit_impl(
  689. group_shared{},std::forward<F>(f),std::move(x));
  690. }
  691. template<typename Key>
  692. BOOST_FORCEINLINE std::size_t erase(const Key& x)
  693. {
  694. return erase_if(x,[](const value_type&){return true;});
  695. }
  696. template<typename Key,typename F>
  697. BOOST_FORCEINLINE auto erase_if(const Key& x,F&& f)->typename std::enable_if<
  698. !is_execution_policy<Key>::value,std::size_t>::type
  699. {
  700. auto lck=shared_access();
  701. auto hash=this->hash_for(x);
  702. std::size_t res=0;
  703. unprotected_internal_visit(
  704. group_exclusive{},x,this->position_for(hash),hash,
  705. [&,this](group_type* pg,unsigned int n,element_type* p)
  706. {
  707. if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){
  708. super::erase(pg,n,p);
  709. res=1;
  710. }
  711. });
  712. return res;
  713. }
  714. template<typename F>
  715. std::size_t erase_if(F&& f)
  716. {
  717. auto lck=shared_access();
  718. std::size_t res=0;
  719. for_all_elements(
  720. group_exclusive{},
  721. [&,this](group_type* pg,unsigned int n,element_type* p){
  722. if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){
  723. super::erase(pg,n,p);
  724. ++res;
  725. }
  726. });
  727. return res;
  728. }
  729. #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  730. template<typename ExecutionPolicy,typename F>
  731. auto erase_if(ExecutionPolicy&& policy,F&& f)->typename std::enable_if<
  732. is_execution_policy<ExecutionPolicy>::value,void>::type
  733. {
  734. auto lck=shared_access();
  735. for_all_elements(
  736. group_exclusive{},std::forward<ExecutionPolicy>(policy),
  737. [&,this](group_type* pg,unsigned int n,element_type* p){
  738. if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){
  739. super::erase(pg,n,p);
  740. }
  741. });
  742. }
  743. #endif
  744. void swap(concurrent_table& x)
  745. noexcept(noexcept(std::declval<super&>().swap(std::declval<super&>())))
  746. {
  747. auto lck=exclusive_access(*this,x);
  748. super::swap(x);
  749. }
  750. void clear()noexcept
  751. {
  752. auto lck=exclusive_access();
  753. super::clear();
  754. }
  755. // TODO: should we accept different allocator too?
  756. template<typename Hash2,typename Pred2>
  757. size_type merge(concurrent_table<TypePolicy,Hash2,Pred2,Allocator>& x)
  758. {
  759. using merge_table_type=concurrent_table<TypePolicy,Hash2,Pred2,Allocator>;
  760. using super2=typename merge_table_type::super;
  761. // for clang
  762. boost::ignore_unused<super2>();
  763. auto lck=exclusive_access(*this,x);
  764. size_type s=super::size();
  765. x.super2::for_all_elements( /* super2::for_all_elements -> unprotected */
  766. [&,this](group_type* pg,unsigned int n,element_type* p){
  767. typename merge_table_type::erase_on_exit e{x,pg,n,p};
  768. if(!unprotected_emplace(type_policy::move(*p)))e.rollback();
  769. });
  770. return size_type{super::size()-s};
  771. }
  772. template<typename Hash2,typename Pred2>
  773. void merge(concurrent_table<TypePolicy,Hash2,Pred2,Allocator>&& x){merge(x);}
  774. hasher hash_function()const
  775. {
  776. auto lck=shared_access();
  777. return super::hash_function();
  778. }
  779. key_equal key_eq()const
  780. {
  781. auto lck=shared_access();
  782. return super::key_eq();
  783. }
  784. template<typename Key>
  785. BOOST_FORCEINLINE std::size_t count(Key&& x)const
  786. {
  787. return (std::size_t)contains(std::forward<Key>(x));
  788. }
  789. template<typename Key>
  790. BOOST_FORCEINLINE bool contains(Key&& x)const
  791. {
  792. return visit(std::forward<Key>(x),[](const value_type&){})!=0;
  793. }
  794. std::size_t capacity()const noexcept
  795. {
  796. auto lck=shared_access();
  797. return super::capacity();
  798. }
  799. float load_factor()const noexcept
  800. {
  801. auto lck=shared_access();
  802. if(super::capacity()==0)return 0;
  803. else return float(unprotected_size())/
  804. float(super::capacity());
  805. }
  806. using super::max_load_factor;
  807. std::size_t max_load()const noexcept
  808. {
  809. auto lck=shared_access();
  810. return super::max_load();
  811. }
  812. void rehash(std::size_t n)
  813. {
  814. auto lck=exclusive_access();
  815. super::rehash(n);
  816. }
  817. void reserve(std::size_t n)
  818. {
  819. auto lck=exclusive_access();
  820. super::reserve(n);
  821. }
  822. #if defined(BOOST_UNORDERED_ENABLE_STATS)
  823. /* already thread safe */
  824. using super::get_stats;
  825. using super::reset_stats;
  826. #endif
  827. template<typename Predicate>
  828. friend std::size_t erase_if(concurrent_table& x,Predicate&& pr)
  829. {
  830. return x.erase_if(std::forward<Predicate>(pr));
  831. }
  832. friend bool operator==(const concurrent_table& x,const concurrent_table& y)
  833. {
  834. auto lck=exclusive_access(x,y);
  835. return static_cast<const super&>(x)==static_cast<const super&>(y);
  836. }
  837. friend bool operator!=(const concurrent_table& x,const concurrent_table& y)
  838. {
  839. return !(x==y);
  840. }
  841. private:
  842. template<typename,typename,typename,typename> friend class concurrent_table;
  843. using mutex_type=rw_spinlock;
  844. using multimutex_type=multimutex<mutex_type,128>; // TODO: adapt 128 to the machine
  845. using shared_lock_guard=reentrancy_checked<shared_lock<mutex_type>>;
  846. using exclusive_lock_guard=reentrancy_checked<lock_guard<multimutex_type>>;
  847. using exclusive_bilock_guard=
  848. reentrancy_bichecked<scoped_bilock<multimutex_type>>;
  849. using group_shared_lock_guard=typename group_access::shared_lock_guard;
  850. using group_exclusive_lock_guard=typename group_access::exclusive_lock_guard;
  851. using group_insert_counter_type=typename group_access::insert_counter_type;
  852. concurrent_table(const concurrent_table& x,exclusive_lock_guard):
  853. super{x}{}
  854. concurrent_table(concurrent_table&& x,exclusive_lock_guard):
  855. super{std::move(x)}{}
  856. concurrent_table(
  857. const concurrent_table& x,const Allocator& al_,exclusive_lock_guard):
  858. super{x,al_}{}
  859. concurrent_table(
  860. concurrent_table&& x,const Allocator& al_,exclusive_lock_guard):
  861. super{std::move(x),al_}{}
  862. inline shared_lock_guard shared_access()const
  863. {
  864. thread_local auto id=(++thread_counter)%mutexes.size();
  865. return shared_lock_guard{this,mutexes[id]};
  866. }
  867. inline exclusive_lock_guard exclusive_access()const
  868. {
  869. return exclusive_lock_guard{this,mutexes};
  870. }
  871. static inline exclusive_bilock_guard exclusive_access(
  872. const concurrent_table& x,const concurrent_table& y)
  873. {
  874. return {&x,&y,x.mutexes,y.mutexes};
  875. }
  876. template<typename Hash2,typename Pred2>
  877. static inline exclusive_bilock_guard exclusive_access(
  878. const concurrent_table& x,
  879. const concurrent_table<TypePolicy,Hash2,Pred2,Allocator>& y)
  880. {
  881. return {&x,&y,x.mutexes,y.mutexes};
  882. }
  883. /* Tag-dispatched shared/exclusive group access */
  884. using group_shared=std::false_type;
  885. using group_exclusive=std::true_type;
  886. inline group_shared_lock_guard access(group_shared,std::size_t pos)const
  887. {
  888. return this->arrays.group_accesses()[pos].shared_access();
  889. }
  890. inline group_exclusive_lock_guard access(
  891. group_exclusive,std::size_t pos)const
  892. {
  893. return this->arrays.group_accesses()[pos].exclusive_access();
  894. }
  895. inline group_insert_counter_type& insert_counter(std::size_t pos)const
  896. {
  897. return this->arrays.group_accesses()[pos].insert_counter();
  898. }
  899. /* Const casts value_type& according to the level of group access for
  900. * safe passing to visitation functions. When type_policy is set-like,
  901. * access is always const regardless of group access.
  902. */
  903. static inline const value_type&
  904. cast_for(group_shared,value_type& x){return x;}
  905. static inline typename std::conditional<
  906. std::is_same<key_type,value_type>::value,
  907. const value_type&,
  908. value_type&
  909. >::type
  910. cast_for(group_exclusive,value_type& x){return x;}
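/* Used by merge(): erases the source element on scope exit unless rollback()
 * is called, which happens when the element could not be inserted into the
 * destination because an equivalent key already exists there.
 */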
  911. struct erase_on_exit
  912. {
  913. erase_on_exit(
  914. concurrent_table& x_,
  915. group_type* pg_,unsigned int pos_,element_type* p_):
  916. x(x_),pg(pg_),pos(pos_),p(p_){}
  917. ~erase_on_exit(){if(!rollback_)x.super::erase(pg,pos,p);}
  918. void rollback(){rollback_=true;}
  919. concurrent_table &x;
  920. group_type *pg;
  921. unsigned int pos;
  922. element_type *p;
  923. bool rollback_=false;
  924. };
  925. template<typename GroupAccessMode,typename Key,typename F>
  926. BOOST_FORCEINLINE std::size_t visit_impl(
  927. GroupAccessMode access_mode,const Key& x,F&& f)const
  928. {
  929. auto lck=shared_access();
  930. auto hash=this->hash_for(x);
  931. return unprotected_visit(
  932. access_mode,x,this->position_for(hash),hash,std::forward<F>(f));
  933. }
  934. template<typename GroupAccessMode,typename FwdIterator,typename F>
  935. BOOST_FORCEINLINE
  936. std::size_t bulk_visit_impl(
  937. GroupAccessMode access_mode,FwdIterator first,FwdIterator last,F&& f)const
  938. {
  939. auto lck=shared_access();
  940. std::size_t res=0;
  941. auto n=static_cast<std::size_t>(std::distance(first,last));
  942. while(n){
  943. auto m=n<2*bulk_visit_size?n:bulk_visit_size;
  944. res+=unprotected_bulk_visit(access_mode,first,m,std::forward<F>(f));
  945. n-=m;
  946. std::advance(
  947. first,
  948. static_cast<
  949. typename std::iterator_traits<FwdIterator>::difference_type>(m));
  950. }
  951. return res;
  952. }
  953. template<typename GroupAccessMode,typename F>
  954. std::size_t visit_all_impl(GroupAccessMode access_mode,F&& f)const
  955. {
  956. auto lck=shared_access();
  957. std::size_t res=0;
  958. for_all_elements(access_mode,[&](element_type* p){
  959. f(cast_for(access_mode,type_policy::value_from(*p)));
  960. ++res;
  961. });
  962. return res;
  963. }
  964. #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  965. template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
  966. void visit_all_impl(
  967. GroupAccessMode access_mode,ExecutionPolicy&& policy,F&& f)const
  968. {
  969. auto lck=shared_access();
  970. for_all_elements(
  971. access_mode,std::forward<ExecutionPolicy>(policy),
  972. [&](element_type* p){
  973. f(cast_for(access_mode,type_policy::value_from(*p)));
  974. });
  975. }
  976. #endif
  977. template<typename GroupAccessMode,typename F>
  978. bool visit_while_impl(GroupAccessMode access_mode,F&& f)const
  979. {
  980. auto lck=shared_access();
  981. return for_all_elements_while(access_mode,[&](element_type* p){
  982. return f(cast_for(access_mode,type_policy::value_from(*p)));
  983. });
  984. }
  985. #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  986. template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
  987. bool visit_while_impl(
  988. GroupAccessMode access_mode,ExecutionPolicy&& policy,F&& f)const
  989. {
  990. auto lck=shared_access();
  991. return for_all_elements_while(
  992. access_mode,std::forward<ExecutionPolicy>(policy),
  993. [&](element_type* p){
  994. return f(cast_for(access_mode,type_policy::value_from(*p)));
  995. });
  996. }
  997. #endif
  998. template<typename GroupAccessMode,typename Key,typename F>
  999. BOOST_FORCEINLINE std::size_t unprotected_visit(
  1000. GroupAccessMode access_mode,
  1001. const Key& x,std::size_t pos0,std::size_t hash,F&& f)const
  1002. {
  1003. return unprotected_internal_visit(
  1004. access_mode,x,pos0,hash,
  1005. [&](group_type*,unsigned int,element_type* p)
  1006. {f(cast_for(access_mode,type_policy::value_from(*p)));});
  1007. }
  1008. #if defined(BOOST_MSVC)
  1009. /* warning: forcing value to bool 'true' or 'false' in bool(pred()...) */
  1010. #pragma warning(push)
  1011. #pragma warning(disable:4800)
  1012. #endif
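/* Implements the groupwise lookup described in the header comment: groups are
 * probed without locking, and only when match(hash) reports candidate slots is
 * the group lock taken; each candidate is then re-checked for occupancy before
 * the key comparison.
 */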
  1013. template<typename GroupAccessMode,typename Key,typename F>
  1014. BOOST_FORCEINLINE std::size_t unprotected_internal_visit(
  1015. GroupAccessMode access_mode,
  1016. const Key& x,std::size_t pos0,std::size_t hash,F&& f)const
  1017. {
  1018. BOOST_UNORDERED_STATS_COUNTER(num_cmps);
  1019. prober pb(pos0);
  1020. do{
  1021. auto pos=pb.get();
  1022. auto pg=this->arrays.groups()+pos;
  1023. auto mask=pg->match(hash);
  1024. if(mask){
  1025. auto p=this->arrays.elements()+pos*N;
  1026. BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N);
  1027. auto lck=access(access_mode,pos);
  1028. do{
  1029. auto n=unchecked_countr_zero(mask);
  1030. if(BOOST_LIKELY(pg->is_occupied(n))){
  1031. BOOST_UNORDERED_INCREMENT_STATS_COUNTER(num_cmps);
  1032. if(BOOST_LIKELY(bool(this->pred()(x,this->key_from(p[n]))))){
  1033. f(pg,n,p+n);
  1034. BOOST_UNORDERED_ADD_STATS(
  1035. this->cstats.successful_lookup,(pb.length(),num_cmps));
  1036. return 1;
  1037. }
  1038. }
  1039. mask&=mask-1;
  1040. }while(mask);
  1041. }
  1042. if(BOOST_LIKELY(pg->is_not_overflowed(hash))){
  1043. BOOST_UNORDERED_ADD_STATS(
  1044. this->cstats.unsuccessful_lookup,(pb.length(),num_cmps));
  1045. return 0;
  1046. }
  1047. }
  1048. while(BOOST_LIKELY(pb.next(this->arrays.groups_size_mask)));
  1049. BOOST_UNORDERED_ADD_STATS(
  1050. this->cstats.unsuccessful_lookup,(pb.length(),num_cmps));
  1051. return 0;
  1052. }
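/* Bulk variant of the lookup above: hashes, initial positions and first-group
 * match masks for up to 2*bulk_visit_size-1 keys are computed in separate
 * passes, so that the group, group_access and element prefetches issued for
 * one key overlap with the work done on the others.
 */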
  1053. template<typename GroupAccessMode,typename FwdIterator,typename F>
  1054. BOOST_FORCEINLINE std::size_t unprotected_bulk_visit(
  1055. GroupAccessMode access_mode,FwdIterator first,std::size_t m,F&& f)const
  1056. {
  1057. BOOST_ASSERT(m<2*bulk_visit_size);
  1058. std::size_t res=0,
  1059. hashes[2*bulk_visit_size-1],
  1060. positions[2*bulk_visit_size-1];
  1061. int masks[2*bulk_visit_size-1];
  1062. auto it=first;
  1063. for(auto i=m;i--;++it){
  1064. auto hash=hashes[i]=this->hash_for(*it);
  1065. auto pos=positions[i]=this->position_for(hash);
  1066. BOOST_UNORDERED_PREFETCH(this->arrays.groups()+pos);
  1067. }
  1068. for(auto i=m;i--;){
  1069. auto hash=hashes[i];
  1070. auto pos=positions[i];
  1071. auto mask=masks[i]=(this->arrays.groups()+pos)->match(hash);
  1072. if(mask){
  1073. BOOST_UNORDERED_PREFETCH(this->arrays.group_accesses()+pos);
  1074. BOOST_UNORDERED_PREFETCH(
  1075. this->arrays.elements()+pos*N+unchecked_countr_zero(mask));
  1076. }
  1077. }
  1078. it=first;
  1079. for(auto i=m;i--;++it){
  1080. BOOST_UNORDERED_STATS_COUNTER(num_cmps);
  1081. auto pos=positions[i];
  1082. prober pb(pos);
  1083. auto pg=this->arrays.groups()+pos;
  1084. auto mask=masks[i];
  1085. element_type *p;
  1086. if(!mask)goto post_mask;
  1087. p=this->arrays.elements()+pos*N;
  1088. for(;;){
  1089. {
  1090. auto lck=access(access_mode,pos);
  1091. do{
  1092. auto n=unchecked_countr_zero(mask);
  1093. if(BOOST_LIKELY(pg->is_occupied(n))){
  1094. BOOST_UNORDERED_INCREMENT_STATS_COUNTER(num_cmps);
  1095. if(bool(this->pred()(*it,this->key_from(p[n])))){
  1096. f(cast_for(access_mode,type_policy::value_from(p[n])));
  1097. ++res;
  1098. BOOST_UNORDERED_ADD_STATS(
  1099. this->cstats.successful_lookup,(pb.length(),num_cmps));
  1100. goto next_key;
  1101. }
  1102. }
  1103. mask&=mask-1;
  1104. }while(mask);
  1105. }
  1106. post_mask:
  1107. do{
  1108. if(BOOST_LIKELY(pg->is_not_overflowed(hashes[i]))||
  1109. BOOST_UNLIKELY(!pb.next(this->arrays.groups_size_mask))){
  1110. BOOST_UNORDERED_ADD_STATS(
  1111. this->cstats.unsuccessful_lookup,(pb.length(),num_cmps));
  1112. goto next_key;
  1113. }
  1114. pos=pb.get();
  1115. pg=this->arrays.groups()+pos;
  1116. mask=pg->match(hashes[i]);
  1117. }while(!mask);
  1118. p=this->arrays.elements()+pos*N;
  1119. BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N);
  1120. }
  1121. next_key:;
  1122. }
  1123. return res;
  1124. }
  1125. #if defined(BOOST_MSVC)
  1126. #pragma warning(pop) /* C4800 */
  1127. #endif
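/* size_ctrl.size may transiently exceed size_ctrl.ml while a concurrent
 * insertion holds a reserve_size that is later rolled back, so the reported
 * size is clamped to ml.
 */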
  1128. std::size_t unprotected_size()const
  1129. {
  1130. std::size_t m=this->size_ctrl.ml;
  1131. std::size_t s=this->size_ctrl.size;
  1132. return s<=m?s:m;
  1133. }
  1134. template<typename... Args>
  1135. BOOST_FORCEINLINE bool construct_and_emplace(Args&&... args)
  1136. {
  1137. return construct_and_emplace_or_visit(
  1138. group_shared{},[](const value_type&){},std::forward<Args>(args)...);
  1139. }
  1140. struct call_construct_and_emplace_or_visit
  1141. {
  1142. template<typename... Args>
  1143. BOOST_FORCEINLINE bool operator()(
  1144. concurrent_table* this_,Args&&... args)const
  1145. {
  1146. return this_->construct_and_emplace_or_visit(
  1147. std::forward<Args>(args)...);
  1148. }
  1149. };
  1150. template<typename GroupAccessMode,typename... Args>
  1151. BOOST_FORCEINLINE bool construct_and_emplace_or_visit_flast(
  1152. GroupAccessMode access_mode,Args&&... args)
  1153. {
  1154. return mp11::tuple_apply(
  1155. call_construct_and_emplace_or_visit{},
  1156. std::tuple_cat(
  1157. std::make_tuple(this,access_mode),
  1158. tuple_rotate_right(std::forward_as_tuple(std::forward<Args>(args)...))
  1159. )
  1160. );
  1161. }
  1162. template<typename GroupAccessMode,typename F,typename... Args>
  1163. BOOST_FORCEINLINE bool construct_and_emplace_or_visit(
  1164. GroupAccessMode access_mode,F&& f,Args&&... args)
  1165. {
  1166. auto lck=shared_access();
  1167. alloc_cted_insert_type<type_policy,Allocator,Args...> x(
  1168. this->al(),std::forward<Args>(args)...);
  1169. int res=unprotected_norehash_emplace_or_visit(
  1170. access_mode,std::forward<F>(f),type_policy::move(x.value()));
  1171. if(BOOST_LIKELY(res>=0))return res!=0;
  1172. lck.unlock();
  1173. rehash_if_full();
  1174. return noinline_emplace_or_visit(
  1175. access_mode,std::forward<F>(f),type_policy::move(x.value()));
  1176. }
  1177. template<typename... Args>
  1178. BOOST_FORCEINLINE bool emplace_impl(Args&&... args)
  1179. {
  1180. return emplace_or_visit_impl(
  1181. group_shared{},[](const value_type&){},std::forward<Args>(args)...);
  1182. }
  1183. template<typename GroupAccessMode,typename F,typename... Args>
  1184. BOOST_NOINLINE bool noinline_emplace_or_visit(
  1185. GroupAccessMode access_mode,F&& f,Args&&... args)
  1186. {
  1187. return emplace_or_visit_impl(
  1188. access_mode,std::forward<F>(f),std::forward<Args>(args)...);
  1189. }
  1190. struct call_emplace_or_visit_impl
  1191. {
  1192. template<typename... Args>
  1193. BOOST_FORCEINLINE bool operator()(
  1194. concurrent_table* this_,Args&&... args)const
  1195. {
  1196. return this_->emplace_or_visit_impl(std::forward<Args>(args)...);
  1197. }
  1198. };
  1199. template<typename GroupAccessMode,typename... Args>
  1200. BOOST_FORCEINLINE bool emplace_or_visit_flast(
  1201. GroupAccessMode access_mode,Args&&... args)
  1202. {
  1203. return mp11::tuple_apply(
  1204. call_emplace_or_visit_impl{},
  1205. std::tuple_cat(
  1206. std::make_tuple(this,access_mode),
  1207. tuple_rotate_right(std::forward_as_tuple(std::forward<Args>(args)...))
  1208. )
  1209. );
  1210. }
  1211. template<typename GroupAccessMode,typename F,typename... Args>
  1212. BOOST_FORCEINLINE bool emplace_or_visit_impl(
  1213. GroupAccessMode access_mode,F&& f,Args&&... args)
  1214. {
  1215. for(;;){
  1216. {
  1217. auto lck=shared_access();
  1218. int res=unprotected_norehash_emplace_or_visit(
  1219. access_mode,std::forward<F>(f),std::forward<Args>(args)...);
  1220. if(BOOST_LIKELY(res>=0))return res!=0;
  1221. }
  1222. rehash_if_full();
  1223. }
  1224. }
  1225. template<typename... Args>
  1226. BOOST_FORCEINLINE bool unprotected_emplace(Args&&... args)
  1227. {
  1228. const auto &k=this->key_from(std::forward<Args>(args)...);
  1229. auto hash=this->hash_for(k);
  1230. auto pos0=this->position_for(hash);
  1231. if(this->find(k,pos0,hash))return false;
  1232. if(BOOST_LIKELY(this->size_ctrl.size<this->size_ctrl.ml)){
  1233. this->unchecked_emplace_at(pos0,hash,std::forward<Args>(args)...);
  1234. }
  1235. else{
  1236. this->unchecked_emplace_with_rehash(hash,std::forward<Args>(args)...);
  1237. }
  1238. return true;
  1239. }
  1240. struct reserve_size
  1241. {
  1242. reserve_size(concurrent_table& x_):x(x_)
  1243. {
  1244. size_=++x.size_ctrl.size;
  1245. }
  1246. ~reserve_size()
  1247. {
  1248. if(!commit_)--x.size_ctrl.size;
  1249. }
  1250. bool succeeded()const{return size_<=x.size_ctrl.ml;}
  1251. void commit(){commit_=true;}
  1252. concurrent_table &x;
  1253. std::size_t size_;
  1254. bool commit_=false;
  1255. };
  1256. struct reserve_slot
  1257. {
  1258. reserve_slot(group_type* pg_,std::size_t pos_,std::size_t hash):
  1259. pg{pg_},pos{pos_}
  1260. {
  1261. pg->set(pos,hash);
  1262. }
  1263. ~reserve_slot()
  1264. {
  1265. if(!commit_)pg->reset(pos);
  1266. }
  1267. void commit(){commit_=true;}
  1268. group_type *pg;
  1269. std::size_t pos;
  1270. bool commit_=false;
  1271. };
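/* Core of the optimistic insertion algorithm described in the header comment.
 * Returns 0 if an equivalent element was found and visited, 1 if a new element
 * was inserted, and -1 if the max load has been reached, in which case the
 * caller drops its shared container lock and calls rehash_if_full(). After a
 * slot has been reserved, the insert counter of the initial group is
 * incremented and compared against the value recorded at the start; if another
 * thread bumped it in the meantime, the operation starts over.
 */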
  1272. template<typename GroupAccessMode,typename F,typename... Args>
  1273. BOOST_FORCEINLINE int
  1274. unprotected_norehash_emplace_or_visit(
  1275. GroupAccessMode access_mode,F&& f,Args&&... args)
  1276. {
  1277. const auto &k=this->key_from(std::forward<Args>(args)...);
  1278. auto hash=this->hash_for(k);
  1279. auto pos0=this->position_for(hash);
  1280. for(;;){
  1281. startover:
  1282. boost::uint32_t counter=insert_counter(pos0);
  1283. if(unprotected_visit(
  1284. access_mode,k,pos0,hash,std::forward<F>(f)))return 0;
  1285. reserve_size rsize(*this);
  1286. if(BOOST_LIKELY(rsize.succeeded())){
  1287. for(prober pb(pos0);;pb.next(this->arrays.groups_size_mask)){
  1288. auto pos=pb.get();
  1289. auto pg=this->arrays.groups()+pos;
  1290. auto lck=access(group_exclusive{},pos);
  1291. auto mask=pg->match_available();
  1292. if(BOOST_LIKELY(mask!=0)){
  1293. auto n=unchecked_countr_zero(mask);
  1294. reserve_slot rslot{pg,n,hash};
  1295. if(BOOST_UNLIKELY(insert_counter(pos0)++!=counter)){
  1296. /* other thread inserted from pos0, need to start over */
  1297. goto startover;
  1298. }
  1299. auto p=this->arrays.elements()+pos*N+n;
  1300. this->construct_element(p,std::forward<Args>(args)...);
  1301. rslot.commit();
  1302. rsize.commit();
  1303. BOOST_UNORDERED_ADD_STATS(this->cstats.insertion,(pb.length()));
  1304. return 1;
  1305. }
  1306. pg->mark_overflow(hash);
  1307. }
  1308. }
  1309. else return -1;
  1310. }
  1311. }
  1312. void rehash_if_full()
  1313. {
  1314. auto lck=exclusive_access();
  1315. if(this->size_ctrl.size==this->size_ctrl.ml){
  1316. this->unchecked_rehash_for_growth();
  1317. }
  1318. }
  template<typename GroupAccessMode,typename F>
  auto for_all_elements(GroupAccessMode access_mode,F f)const
    ->decltype(f(nullptr),void())
  {
    for_all_elements(
      access_mode,[&](group_type*,unsigned int,element_type* p){f(p);});
  }
  template<typename GroupAccessMode,typename F>
  auto for_all_elements(GroupAccessMode access_mode,F f)const
    ->decltype(f(nullptr,0,nullptr),void())
  {
    for_all_elements_while(
      access_mode,[&](group_type* pg,unsigned int n,element_type* p)
        {f(pg,n,p);return true;});
  }
  template<typename GroupAccessMode,typename F>
  auto for_all_elements_while(GroupAccessMode access_mode,F f)const
    ->decltype(f(nullptr),bool())
  {
    return for_all_elements_while(
      access_mode,[&](group_type*,unsigned int,element_type* p){return f(p);});
  }
  template<typename GroupAccessMode,typename F>
  auto for_all_elements_while(GroupAccessMode access_mode,F f)const
    ->decltype(f(nullptr,0,nullptr),bool())
  {
    auto p=this->arrays.elements();
    if(p){
      for(auto pg=this->arrays.groups(),last=pg+this->arrays.groups_size_mask+1;
          pg!=last;++pg,p+=N){
        auto lck=access(access_mode,(std::size_t)(pg-this->arrays.groups()));
        auto mask=this->match_really_occupied(pg,last);
        while(mask){
          auto n=unchecked_countr_zero(mask);
          if(!f(pg,n,p+n))return false;
          mask&=mask-1;
        }
      }
    }
    return true;
  }
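  /* Parallel-algorithm overloads: when parallel algorithm support is
   * available, groups are processed with std::for_each/std::all_of under the
   * given execution policy, each group still being locked individually.
   */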
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
  auto for_all_elements(
    GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const
    ->decltype(f(nullptr),void())
  {
    for_all_elements(
      access_mode,std::forward<ExecutionPolicy>(policy),
      [&](group_type*,unsigned int,element_type* p){f(p);});
  }
  template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
  auto for_all_elements(
    GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const
    ->decltype(f(nullptr,0,nullptr),void())
  {
    if(!this->arrays.elements())return;
    auto first=this->arrays.groups(),
         last=first+this->arrays.groups_size_mask+1;
    std::for_each(std::forward<ExecutionPolicy>(policy),first,last,
      [&,this](group_type& g){
        auto pos=static_cast<std::size_t>(&g-first);
        auto p=this->arrays.elements()+pos*N;
        auto lck=access(access_mode,pos);
        auto mask=this->match_really_occupied(&g,last);
        while(mask){
          auto n=unchecked_countr_zero(mask);
          f(&g,n,p+n);
          mask&=mask-1;
        }
      }
    );
  }
  template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
  bool for_all_elements_while(
    GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const
  {
    if(!this->arrays.elements())return true;
    auto first=this->arrays.groups(),
         last=first+this->arrays.groups_size_mask+1;
    return std::all_of(std::forward<ExecutionPolicy>(policy),first,last,
      [&,this](group_type& g){
        auto pos=static_cast<std::size_t>(&g-first);
        auto p=this->arrays.elements()+pos*N;
        auto lck=access(access_mode,pos);
        auto mask=this->match_really_occupied(&g,last);
        while(mask){
          auto n=unchecked_countr_zero(mask);
          if(!f(p+n))return false;
          mask&=mask-1;
        }
        return true;
      }
    );
  }
#endif
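  /* Boost.Serialization support. save/load take exclusive access to the whole
   * table; sets serialize whole values while maps serialize keys and mapped
   * values separately, and loading rebuilds each element in place, informing
   * the archive of its final address via reset_object_address.
   */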
  friend class boost::serialization::access;
  template<typename Archive>
  void serialize(Archive& ar,unsigned int version)
  {
    core::split_member(ar,*this,version);
  }
  template<typename Archive>
  void save(Archive& ar,unsigned int version)const
  {
    save(
      ar,version,
      std::integral_constant<bool,std::is_same<key_type,value_type>::value>{});
  }
  template<typename Archive>
  void save(Archive& ar,unsigned int,std::true_type /* set */)const
  {
    auto lck=exclusive_access();
    const std::size_t s=super::size();
    const serialization_version<value_type> value_version;
    ar<<core::make_nvp("count",s);
    ar<<core::make_nvp("value_version",value_version);
    super::for_all_elements([&,this](element_type* p){
      auto& x=type_policy::value_from(*p);
      core::save_construct_data_adl(ar,std::addressof(x),value_version);
      ar<<serialization::make_nvp("item",x);
    });
  }
  template<typename Archive>
  void save(Archive& ar,unsigned int,std::false_type /* map */)const
  {
    using raw_key_type=typename std::remove_const<key_type>::type;
    using raw_mapped_type=typename std::remove_const<
      typename TypePolicy::mapped_type>::type;
    auto lck=exclusive_access();
    const std::size_t s=super::size();
    const serialization_version<raw_key_type> key_version;
    const serialization_version<raw_mapped_type> mapped_version;
    ar<<core::make_nvp("count",s);
    ar<<core::make_nvp("key_version",key_version);
    ar<<core::make_nvp("mapped_version",mapped_version);
    super::for_all_elements([&,this](element_type* p){
      /* To remain lib-independent from Boost.Serialization and not rely on
       * the user having included the serialization code for std::pair
       * (boost/serialization/utility.hpp), we serialize the key and the
       * mapped value separately.
       */
      auto& x=type_policy::value_from(*p);
      core::save_construct_data_adl(
        ar,std::addressof(x.first),key_version);
      ar<<serialization::make_nvp("key",x.first);
      core::save_construct_data_adl(
        ar,std::addressof(x.second),mapped_version);
      ar<<serialization::make_nvp("mapped",x.second);
    });
  }
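  /* Loading clears the table, reserves room for the announced element count
   * and re-inserts each archived element; a duplicate key in the archive is
   * reported as bad_archive_exception.
   */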
  template<typename Archive>
  void load(Archive& ar,unsigned int version)
  {
    load(
      ar,version,
      std::integral_constant<bool,std::is_same<key_type,value_type>::value>{});
  }
  template<typename Archive>
  void load(Archive& ar,unsigned int,std::true_type /* set */)
  {
    auto lck=exclusive_access();
    std::size_t s;
    serialization_version<value_type> value_version;
    ar>>core::make_nvp("count",s);
    ar>>core::make_nvp("value_version",value_version);
    super::clear();
    super::reserve(s);
    for(std::size_t n=0;n<s;++n){
      archive_constructed<value_type> value("item",ar,value_version);
      auto& x=value.get();
      auto hash=this->hash_for(x);
      auto pos0=this->position_for(hash);
      if(this->find(x,pos0,hash))throw_exception(bad_archive_exception());
      auto loc=this->unchecked_emplace_at(pos0,hash,std::move(x));
      ar.reset_object_address(std::addressof(*loc.p),std::addressof(x));
    }
  }
  template<typename Archive>
  void load(Archive& ar,unsigned int,std::false_type /* map */)
  {
    using raw_key_type=typename std::remove_const<key_type>::type;
    using raw_mapped_type=typename std::remove_const<
      typename TypePolicy::mapped_type>::type;
    auto lck=exclusive_access();
    std::size_t s;
    serialization_version<raw_key_type> key_version;
    serialization_version<raw_mapped_type> mapped_version;
    ar>>core::make_nvp("count",s);
    ar>>core::make_nvp("key_version",key_version);
    ar>>core::make_nvp("mapped_version",mapped_version);
    super::clear();
    super::reserve(s);
    for(std::size_t n=0;n<s;++n){
      archive_constructed<raw_key_type> key("key",ar,key_version);
      archive_constructed<raw_mapped_type> mapped("mapped",ar,mapped_version);
      auto& k=key.get();
      auto& m=mapped.get();
      auto hash=this->hash_for(k);
      auto pos0=this->position_for(hash);
      if(this->find(k,pos0,hash))throw_exception(bad_archive_exception());
      auto loc=this->unchecked_emplace_at(pos0,hash,std::move(k),std::move(m));
      ar.reset_object_address(std::addressof(loc.p->first),std::addressof(k));
      ar.reset_object_address(std::addressof(loc.p->second),std::addressof(m));
    }
  }
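  /* thread_counter presumably hands out per-thread ids used to pick a
   * submutex of the striped multimutex; mutexes backs the shared_access()/
   * exclusive_access() and per-group access() locks used above.
   */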
  static std::atomic<std::size_t> thread_counter;
  mutable multimutex_type mutexes;
};

template<typename T,typename H,typename P,typename A>
std::atomic<std::size_t> concurrent_table<T,H,P,A>::thread_counter={};

#if defined(BOOST_MSVC)
#pragma warning(pop) /* C4714 */
#endif

#include <boost/unordered/detail/foa/restore_wshadow.hpp>

} /* namespace foa */
} /* namespace detail */
} /* namespace unordered */
} /* namespace boost */

#endif