  1. /* Common base for Boost.Unordered open-addressing tables.
  2. *
  3. * Copyright 2022-2024 Joaquin M Lopez Munoz.
  4. * Copyright 2023 Christian Mazakas.
  5. * Copyright 2024 Braden Ganetsky.
  6. * Distributed under the Boost Software License, Version 1.0.
  7. * (See accompanying file LICENSE_1_0.txt or copy at
  8. * http://www.boost.org/LICENSE_1_0.txt)
  9. *
  10. * See https://www.boost.org/libs/unordered for library home page.
  11. */
  12. #ifndef BOOST_UNORDERED_DETAIL_FOA_CORE_HPP
  13. #define BOOST_UNORDERED_DETAIL_FOA_CORE_HPP
  14. #include <boost/assert.hpp>
  15. #include <boost/config.hpp>
  16. #include <boost/config/workaround.hpp>
  17. #include <boost/core/allocator_traits.hpp>
  18. #include <boost/core/bit.hpp>
  19. #include <boost/core/empty_value.hpp>
  20. #include <boost/core/no_exceptions_support.hpp>
  21. #include <boost/core/pointer_traits.hpp>
  22. #include <boost/cstdint.hpp>
  23. #include <boost/predef.h>
  24. #include <boost/unordered/detail/allocator_constructed.hpp>
  25. #include <boost/unordered/detail/narrow_cast.hpp>
  26. #include <boost/unordered/detail/mulx.hpp>
  27. #include <boost/unordered/detail/static_assert.hpp>
  28. #include <boost/unordered/detail/type_traits.hpp>
  29. #include <boost/unordered/hash_traits.hpp>
  30. #include <climits>
  31. #include <cmath>
  32. #include <cstddef>
  33. #include <cstring>
  34. #include <limits>
  35. #include <memory>
  36. #include <new>
  37. #include <tuple>
  38. #include <type_traits>
  39. #include <utility>
  40. #if defined(BOOST_UNORDERED_ENABLE_STATS)
  41. #include <boost/unordered/detail/foa/cumulative_stats.hpp>
  42. #endif
  43. #if !defined(BOOST_UNORDERED_DISABLE_SSE2)
  44. #if defined(BOOST_UNORDERED_ENABLE_SSE2)|| \
  45. defined(__SSE2__)|| \
  46. defined(_M_X64)||(defined(_M_IX86_FP)&&_M_IX86_FP>=2)
  47. #define BOOST_UNORDERED_SSE2
  48. #endif
  49. #endif
  50. #if !defined(BOOST_UNORDERED_DISABLE_NEON)
  51. #if defined(BOOST_UNORDERED_ENABLE_NEON)||\
  52. (defined(__ARM_NEON)&&!defined(__ARM_BIG_ENDIAN))
  53. #define BOOST_UNORDERED_LITTLE_ENDIAN_NEON
  54. #endif
  55. #endif
  56. #if defined(BOOST_UNORDERED_SSE2)
  57. #include <emmintrin.h>
  58. #elif defined(BOOST_UNORDERED_LITTLE_ENDIAN_NEON)
  59. #include <arm_neon.h>
  60. #endif
  61. #ifdef __has_builtin
  62. #define BOOST_UNORDERED_HAS_BUILTIN(x) __has_builtin(x)
  63. #else
  64. #define BOOST_UNORDERED_HAS_BUILTIN(x) 0
  65. #endif
  66. #if !defined(NDEBUG)
  67. #define BOOST_UNORDERED_ASSUME(cond) BOOST_ASSERT(cond)
  68. #elif BOOST_UNORDERED_HAS_BUILTIN(__builtin_assume)
  69. #define BOOST_UNORDERED_ASSUME(cond) __builtin_assume(cond)
  70. #elif defined(__GNUC__) || BOOST_UNORDERED_HAS_BUILTIN(__builtin_unreachable)
  71. #define BOOST_UNORDERED_ASSUME(cond) \
  72. do{ \
  73. if(!(cond))__builtin_unreachable(); \
  74. }while(0)
  75. #elif defined(_MSC_VER)
  76. #define BOOST_UNORDERED_ASSUME(cond) __assume(cond)
  77. #else
  78. #define BOOST_UNORDERED_ASSUME(cond) \
  79. do{ \
  80. static_cast<void>(false&&(cond)); \
  81. }while(0)
  82. #endif
  83. /* We use BOOST_UNORDERED_PREFETCH[_ELEMENTS] macros rather than proper
  84. * functions because of https://gcc.gnu.org/bugzilla/show_bug.cgi?id=109985
  85. */
  86. #if defined(BOOST_GCC)||defined(BOOST_CLANG)
  87. #define BOOST_UNORDERED_PREFETCH(p) __builtin_prefetch((const char*)(p))
  88. #elif defined(BOOST_UNORDERED_SSE2)
  89. #define BOOST_UNORDERED_PREFETCH(p) _mm_prefetch((const char*)(p),_MM_HINT_T0)
  90. #else
  91. #define BOOST_UNORDERED_PREFETCH(p) ((void)(p))
  92. #endif
  93. /* We have experimentally confirmed that ARM architectures get a higher
  94. * speedup when around the first half of the element slots in a group are
  95. * prefetched, whereas for Intel just the first cache line is best.
  96. * Please report back if you find better tunings for some particular
  97. * architectures.
  98. */
  99. #if BOOST_ARCH_ARM
  100. /* Cache line size can't be known at compile time, so we settle on
  101. * the very frequent value of 64B.
  102. */
  103. #define BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N) \
  104. do{ \
  105. auto BOOST_UNORDERED_P=(p); \
  106. constexpr int cache_line=64; \
  107. const char *p0=reinterpret_cast<const char*>(BOOST_UNORDERED_P), \
  108. *p1=p0+sizeof(*BOOST_UNORDERED_P)*(N)/2; \
  109. for(;p0<p1;p0+=cache_line)BOOST_UNORDERED_PREFETCH(p0); \
  110. }while(0)
  111. #else
  112. #define BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N) BOOST_UNORDERED_PREFETCH(p)
  113. #endif
  114. #ifdef __has_feature
  115. #define BOOST_UNORDERED_HAS_FEATURE(x) __has_feature(x)
  116. #else
  117. #define BOOST_UNORDERED_HAS_FEATURE(x) 0
  118. #endif
  119. #if BOOST_UNORDERED_HAS_FEATURE(thread_sanitizer)|| \
  120. defined(__SANITIZE_THREAD__)
  121. #define BOOST_UNORDERED_THREAD_SANITIZER
  122. #endif
  123. #define BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED(Hash, Pred) \
  124. static_assert(boost::unordered::detail::is_nothrow_swappable<Hash>::value, \
  125. "Template parameter Hash is required to be nothrow Swappable."); \
  126. static_assert(boost::unordered::detail::is_nothrow_swappable<Pred>::value, \
  127. "Template parameter Pred is required to be nothrow Swappable");
  128. namespace boost{
  129. namespace unordered{
  130. namespace detail{
  131. namespace foa{
  132. static constexpr std::size_t default_bucket_count=0;
  133. /* foa::table_core is the common base of foa::table and foa::concurrent_table,
  134. * which in their turn serve as the foundational core of
  135. * boost::unordered_(flat|node)_(map|set) and boost::concurrent_flat_(map|set),
  136. * respectively. Its main internal design aspects are:
  137. *
  138. * - Element slots are logically split into groups of size N=15. The number
  139. * of groups is always a power of two, so the number of allocated slots
140. * is of the form (N*2^n)-1 (final slot reserved for a sentinel mark).
  141. * - Positioning is done at the group level rather than the slot level, that
  142. * is, for any given element its hash value is used to locate a group and
  143. * insertion is performed on the first available element of that group;
  144. * if the group is full (overflow), further groups are tried using
  145. * quadratic probing.
  146. * - Each group has an associated 16B metadata word holding reduced hash
  147. * values and overflow information. Reduced hash values are used to
  148. * accelerate lookup within the group by using 128-bit SIMD or 64-bit word
  149. * operations.
  150. */
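/* Illustrative example (not part of the implementation): with n=3 the table
 * has 2^3=8 groups, i.e. 8*15=120 logical slot positions; the final position
 * is reserved for the sentinel mark, so (15*2^3)-1=119 element slots are
 * actually allocated. An element whose hash maps to group 5 goes into the
 * first free slot of group 5, and only if that group is full does quadratic
 * probing move on to further groups.
 */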
  151. /* group15 controls metadata information of a group of N=15 element slots.
  152. * The 16B metadata word is organized as follows (LSB depicted rightmost):
  153. *
  154. * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
155. * |ofw|h14|h13|h12|h11|h10|h09|h08|h07|h06|h05|h04|h03|h02|h01|h00|
  156. * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
  157. *
158. * hi is 0 if the i-th element slot is available, 1 to mark a sentinel and,
  159. * when the slot is occupied, a value in the range [2,255] obtained from the
  160. * element's original hash value.
  161. * ofw is the so-called overflow byte. If insertion of an element with hash
  162. * value h is tried on a full group, then the (h%8)-th bit of the overflow
  163. * byte is set to 1 and a further group is probed. Having an overflow byte
  164. * brings two advantages:
  165. *
  166. * - There's no need to reserve a special value of hi to mark tombstone
167. * slots; each reduced hash value then keeps log2(254)=7.99 bits of the
168. * original hash (alternative approaches reserve one full bit to mark
169. * whether the slot is available/deleted, so their reduced hash values are
170. * only 7 bits strong).
  171. * - When doing an unsuccessful lookup (i.e. the element is not present in
  172. * the table), probing stops at the first non-overflowed group. Having 8
  173. * bits for signalling overflow makes it very likely that we stop at the
  174. * current group (this happens when no element with the same (h%8) value
  175. * has overflowed in the group), saving us an additional group check even
  176. * under high-load/high-erase conditions. It is critical that hash
  177. * reduction is invariant under modulo 8 (see maybe_caused_overflow).
  178. *
  179. * When looking for an element with hash value h, match(h) returns a bitmask
  180. * signalling which slots have the same reduced hash value. If available,
  181. * match uses SSE2 or (little endian) Neon 128-bit SIMD operations. On non-SIMD
  182. * scenarios, the logical layout described above is physically mapped to two
  183. * 64-bit words with *bit interleaving*, i.e. the least significant 16 bits of
  184. * the first 64-bit word contain the least significant bits of each byte in the
  185. * "logical" 128-bit word, and so forth. With this layout, match can be
  186. * implemented with 4 ANDs, 3 shifts, 2 XORs, 1 OR and 1 NOT.
  187. *
  188. * IntegralWrapper<Integral> is used to implement group15's underlying
  189. * metadata: it behaves as a plain integral for foa::table or introduces
  190. * atomic ops for foa::concurrent_table. If IntegralWrapper<...> is trivially
  191. * constructible, so is group15, in which case it can be initialized via memset
192. etc. Where needed, group15::initialize resets the metadata to the
193. all-zeros (default) state.
  194. */
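/* Lookup sketch (simplified pseudocode, not the actual implementation; names
 * such as position, groups, elements_of, pred, found and not_found are
 * schematic placeholders):
 *
 *   prober pb(position(hash));
 *   do{
 *     group15& g=groups[pb.get()];
 *     int mask=g.match(hash);               // slots with matching reduced hash
 *     while(mask){
 *       auto n=unchecked_countr_zero(mask); // lowest candidate slot in group
 *       if(pred(key,elements_of(g)[n]))return found;
 *       mask&=mask-1;                       // discard candidate, try next one
 *     }
 *     if(g.is_not_overflowed(hash))return not_found; // probing may stop here
 *   }while(pb.next(groups_size_mask));      // else quadratic-probe next group
 */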
  195. #if defined(BOOST_UNORDERED_SSE2)
  196. template<template<typename> class IntegralWrapper>
  197. struct group15
  198. {
  199. static constexpr std::size_t N=15;
  200. static constexpr bool regular_layout=true;
  201. struct dummy_group_type
  202. {
  203. alignas(16) unsigned char storage[N+1]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0};
  204. };
  205. inline void initialize()
  206. {
  207. _mm_store_si128(
  208. reinterpret_cast<__m128i*>(m),_mm_setzero_si128());
  209. }
  210. inline void set(std::size_t pos,std::size_t hash)
  211. {
  212. BOOST_ASSERT(pos<N);
  213. at(pos)=reduced_hash(hash);
  214. }
  215. inline void set_sentinel()
  216. {
  217. at(N-1)=sentinel_;
  218. }
  219. inline bool is_sentinel(std::size_t pos)const
  220. {
  221. BOOST_ASSERT(pos<N);
  222. return at(pos)==sentinel_;
  223. }
  224. static inline bool is_sentinel(unsigned char* pc)noexcept
  225. {
  226. return *pc==sentinel_;
  227. }
  228. inline void reset(std::size_t pos)
  229. {
  230. BOOST_ASSERT(pos<N);
  231. at(pos)=available_;
  232. }
  233. static inline void reset(unsigned char* pc)
  234. {
  235. *reinterpret_cast<slot_type*>(pc)=available_;
  236. }
  237. inline int match(std::size_t hash)const
  238. {
  239. return _mm_movemask_epi8(
  240. _mm_cmpeq_epi8(load_metadata(),_mm_set1_epi32(match_word(hash))))&0x7FFF;
  241. }
  242. inline bool is_not_overflowed(std::size_t hash)const
  243. {
  244. static constexpr unsigned char shift[]={1,2,4,8,16,32,64,128};
  245. return !(overflow()&shift[hash%8]);
  246. }
  247. inline void mark_overflow(std::size_t hash)
  248. {
  249. overflow()|=static_cast<unsigned char>(1<<(hash%8));
  250. }
  251. static inline bool maybe_caused_overflow(unsigned char* pc)
  252. {
  253. std::size_t pos=reinterpret_cast<uintptr_t>(pc)%sizeof(group15);
  254. group15 *pg=reinterpret_cast<group15*>(pc-pos);
  255. return !pg->is_not_overflowed(*pc);
  256. }
  257. inline int match_available()const
  258. {
  259. return _mm_movemask_epi8(
  260. _mm_cmpeq_epi8(load_metadata(),_mm_setzero_si128()))&0x7FFF;
  261. }
  262. inline bool is_occupied(std::size_t pos)const
  263. {
  264. BOOST_ASSERT(pos<N);
  265. return at(pos)!=available_;
  266. }
  267. static inline bool is_occupied(unsigned char* pc)noexcept
  268. {
  269. return *reinterpret_cast<slot_type*>(pc)!=available_;
  270. }
  271. inline int match_occupied()const
  272. {
  273. return (~match_available())&0x7FFF;
  274. }
  275. private:
  276. using slot_type=IntegralWrapper<unsigned char>;
  277. BOOST_UNORDERED_STATIC_ASSERT(sizeof(slot_type)==1);
  278. static constexpr unsigned char available_=0,
  279. sentinel_=1;
  280. inline __m128i load_metadata()const
  281. {
  282. #if defined(BOOST_UNORDERED_THREAD_SANITIZER)
  283. /* ThreadSanitizer complains on 1-byte atomic writes combined with
  284. * 16-byte atomic reads.
  285. */
  286. return _mm_set_epi8(
  287. (char)m[15],(char)m[14],(char)m[13],(char)m[12],
  288. (char)m[11],(char)m[10],(char)m[ 9],(char)m[ 8],
  289. (char)m[ 7],(char)m[ 6],(char)m[ 5],(char)m[ 4],
  290. (char)m[ 3],(char)m[ 2],(char)m[ 1],(char)m[ 0]);
  291. #else
  292. return _mm_load_si128(reinterpret_cast<const __m128i*>(m));
  293. #endif
  294. }
  295. inline static int match_word(std::size_t hash)
  296. {
  297. static constexpr boost::uint32_t word[]=
  298. {
  299. 0x08080808u,0x09090909u,0x02020202u,0x03030303u,0x04040404u,0x05050505u,
  300. 0x06060606u,0x07070707u,0x08080808u,0x09090909u,0x0A0A0A0Au,0x0B0B0B0Bu,
  301. 0x0C0C0C0Cu,0x0D0D0D0Du,0x0E0E0E0Eu,0x0F0F0F0Fu,0x10101010u,0x11111111u,
  302. 0x12121212u,0x13131313u,0x14141414u,0x15151515u,0x16161616u,0x17171717u,
  303. 0x18181818u,0x19191919u,0x1A1A1A1Au,0x1B1B1B1Bu,0x1C1C1C1Cu,0x1D1D1D1Du,
  304. 0x1E1E1E1Eu,0x1F1F1F1Fu,0x20202020u,0x21212121u,0x22222222u,0x23232323u,
  305. 0x24242424u,0x25252525u,0x26262626u,0x27272727u,0x28282828u,0x29292929u,
  306. 0x2A2A2A2Au,0x2B2B2B2Bu,0x2C2C2C2Cu,0x2D2D2D2Du,0x2E2E2E2Eu,0x2F2F2F2Fu,
  307. 0x30303030u,0x31313131u,0x32323232u,0x33333333u,0x34343434u,0x35353535u,
  308. 0x36363636u,0x37373737u,0x38383838u,0x39393939u,0x3A3A3A3Au,0x3B3B3B3Bu,
  309. 0x3C3C3C3Cu,0x3D3D3D3Du,0x3E3E3E3Eu,0x3F3F3F3Fu,0x40404040u,0x41414141u,
  310. 0x42424242u,0x43434343u,0x44444444u,0x45454545u,0x46464646u,0x47474747u,
  311. 0x48484848u,0x49494949u,0x4A4A4A4Au,0x4B4B4B4Bu,0x4C4C4C4Cu,0x4D4D4D4Du,
  312. 0x4E4E4E4Eu,0x4F4F4F4Fu,0x50505050u,0x51515151u,0x52525252u,0x53535353u,
  313. 0x54545454u,0x55555555u,0x56565656u,0x57575757u,0x58585858u,0x59595959u,
  314. 0x5A5A5A5Au,0x5B5B5B5Bu,0x5C5C5C5Cu,0x5D5D5D5Du,0x5E5E5E5Eu,0x5F5F5F5Fu,
  315. 0x60606060u,0x61616161u,0x62626262u,0x63636363u,0x64646464u,0x65656565u,
  316. 0x66666666u,0x67676767u,0x68686868u,0x69696969u,0x6A6A6A6Au,0x6B6B6B6Bu,
  317. 0x6C6C6C6Cu,0x6D6D6D6Du,0x6E6E6E6Eu,0x6F6F6F6Fu,0x70707070u,0x71717171u,
  318. 0x72727272u,0x73737373u,0x74747474u,0x75757575u,0x76767676u,0x77777777u,
  319. 0x78787878u,0x79797979u,0x7A7A7A7Au,0x7B7B7B7Bu,0x7C7C7C7Cu,0x7D7D7D7Du,
  320. 0x7E7E7E7Eu,0x7F7F7F7Fu,0x80808080u,0x81818181u,0x82828282u,0x83838383u,
  321. 0x84848484u,0x85858585u,0x86868686u,0x87878787u,0x88888888u,0x89898989u,
  322. 0x8A8A8A8Au,0x8B8B8B8Bu,0x8C8C8C8Cu,0x8D8D8D8Du,0x8E8E8E8Eu,0x8F8F8F8Fu,
  323. 0x90909090u,0x91919191u,0x92929292u,0x93939393u,0x94949494u,0x95959595u,
  324. 0x96969696u,0x97979797u,0x98989898u,0x99999999u,0x9A9A9A9Au,0x9B9B9B9Bu,
  325. 0x9C9C9C9Cu,0x9D9D9D9Du,0x9E9E9E9Eu,0x9F9F9F9Fu,0xA0A0A0A0u,0xA1A1A1A1u,
  326. 0xA2A2A2A2u,0xA3A3A3A3u,0xA4A4A4A4u,0xA5A5A5A5u,0xA6A6A6A6u,0xA7A7A7A7u,
  327. 0xA8A8A8A8u,0xA9A9A9A9u,0xAAAAAAAAu,0xABABABABu,0xACACACACu,0xADADADADu,
  328. 0xAEAEAEAEu,0xAFAFAFAFu,0xB0B0B0B0u,0xB1B1B1B1u,0xB2B2B2B2u,0xB3B3B3B3u,
  329. 0xB4B4B4B4u,0xB5B5B5B5u,0xB6B6B6B6u,0xB7B7B7B7u,0xB8B8B8B8u,0xB9B9B9B9u,
  330. 0xBABABABAu,0xBBBBBBBBu,0xBCBCBCBCu,0xBDBDBDBDu,0xBEBEBEBEu,0xBFBFBFBFu,
  331. 0xC0C0C0C0u,0xC1C1C1C1u,0xC2C2C2C2u,0xC3C3C3C3u,0xC4C4C4C4u,0xC5C5C5C5u,
  332. 0xC6C6C6C6u,0xC7C7C7C7u,0xC8C8C8C8u,0xC9C9C9C9u,0xCACACACAu,0xCBCBCBCBu,
  333. 0xCCCCCCCCu,0xCDCDCDCDu,0xCECECECEu,0xCFCFCFCFu,0xD0D0D0D0u,0xD1D1D1D1u,
  334. 0xD2D2D2D2u,0xD3D3D3D3u,0xD4D4D4D4u,0xD5D5D5D5u,0xD6D6D6D6u,0xD7D7D7D7u,
  335. 0xD8D8D8D8u,0xD9D9D9D9u,0xDADADADAu,0xDBDBDBDBu,0xDCDCDCDCu,0xDDDDDDDDu,
  336. 0xDEDEDEDEu,0xDFDFDFDFu,0xE0E0E0E0u,0xE1E1E1E1u,0xE2E2E2E2u,0xE3E3E3E3u,
  337. 0xE4E4E4E4u,0xE5E5E5E5u,0xE6E6E6E6u,0xE7E7E7E7u,0xE8E8E8E8u,0xE9E9E9E9u,
  338. 0xEAEAEAEAu,0xEBEBEBEBu,0xECECECECu,0xEDEDEDEDu,0xEEEEEEEEu,0xEFEFEFEFu,
  339. 0xF0F0F0F0u,0xF1F1F1F1u,0xF2F2F2F2u,0xF3F3F3F3u,0xF4F4F4F4u,0xF5F5F5F5u,
  340. 0xF6F6F6F6u,0xF7F7F7F7u,0xF8F8F8F8u,0xF9F9F9F9u,0xFAFAFAFAu,0xFBFBFBFBu,
  341. 0xFCFCFCFCu,0xFDFDFDFDu,0xFEFEFEFEu,0xFFFFFFFFu,
  342. };
  343. return (int)word[narrow_cast<unsigned char>(hash)];
  344. }
  345. inline static unsigned char reduced_hash(std::size_t hash)
  346. {
  347. return narrow_cast<unsigned char>(match_word(hash));
  348. }
  349. inline slot_type& at(std::size_t pos)
  350. {
  351. return m[pos];
  352. }
  353. inline const slot_type& at(std::size_t pos)const
  354. {
  355. return m[pos];
  356. }
  357. inline slot_type& overflow()
  358. {
  359. return at(N);
  360. }
  361. inline const slot_type& overflow()const
  362. {
  363. return at(N);
  364. }
  365. alignas(16) slot_type m[16];
  366. };
  367. #elif defined(BOOST_UNORDERED_LITTLE_ENDIAN_NEON)
  368. template<template<typename> class IntegralWrapper>
  369. struct group15
  370. {
  371. static constexpr std::size_t N=15;
  372. static constexpr bool regular_layout=true;
  373. struct dummy_group_type
  374. {
  375. alignas(16) unsigned char storage[N+1]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0};
  376. };
  377. inline void initialize()
  378. {
  379. vst1q_u8(reinterpret_cast<uint8_t*>(m),vdupq_n_u8(0));
  380. }
  381. inline void set(std::size_t pos,std::size_t hash)
  382. {
  383. BOOST_ASSERT(pos<N);
  384. at(pos)=reduced_hash(hash);
  385. }
  386. inline void set_sentinel()
  387. {
  388. at(N-1)=sentinel_;
  389. }
  390. inline bool is_sentinel(std::size_t pos)const
  391. {
  392. BOOST_ASSERT(pos<N);
  393. return pos==N-1&&at(N-1)==sentinel_;
  394. }
  395. static inline bool is_sentinel(unsigned char* pc)noexcept
  396. {
  397. return *reinterpret_cast<slot_type*>(pc)==sentinel_;
  398. }
  399. inline void reset(std::size_t pos)
  400. {
  401. BOOST_ASSERT(pos<N);
  402. at(pos)=available_;
  403. }
  404. static inline void reset(unsigned char* pc)
  405. {
  406. *reinterpret_cast<slot_type*>(pc)=available_;
  407. }
  408. inline int match(std::size_t hash)const
  409. {
  410. return simde_mm_movemask_epi8(vceqq_u8(
  411. load_metadata(),vdupq_n_u8(reduced_hash(hash))))&0x7FFF;
  412. }
  413. inline bool is_not_overflowed(std::size_t hash)const
  414. {
  415. static constexpr unsigned char shift[]={1,2,4,8,16,32,64,128};
  416. return !(overflow()&shift[hash%8]);
  417. }
  418. inline void mark_overflow(std::size_t hash)
  419. {
  420. overflow()|=static_cast<unsigned char>(1<<(hash%8));
  421. }
  422. static inline bool maybe_caused_overflow(unsigned char* pc)
  423. {
  424. std::size_t pos=reinterpret_cast<uintptr_t>(pc)%sizeof(group15);
  425. group15 *pg=reinterpret_cast<group15*>(pc-pos);
  426. return !pg->is_not_overflowed(*pc);
  427. };
  428. inline int match_available()const
  429. {
  430. return simde_mm_movemask_epi8(vceqq_u8(
  431. load_metadata(),vdupq_n_u8(0)))&0x7FFF;
  432. }
  433. inline bool is_occupied(std::size_t pos)const
  434. {
  435. BOOST_ASSERT(pos<N);
  436. return at(pos)!=available_;
  437. }
  438. static inline bool is_occupied(unsigned char* pc)noexcept
  439. {
  440. return *reinterpret_cast<slot_type*>(pc)!=available_;
  441. }
  442. inline int match_occupied()const
  443. {
  444. return simde_mm_movemask_epi8(vcgtq_u8(
  445. load_metadata(),vdupq_n_u8(0)))&0x7FFF;
  446. }
  447. private:
  448. using slot_type=IntegralWrapper<unsigned char>;
  449. BOOST_UNORDERED_STATIC_ASSERT(sizeof(slot_type)==1);
  450. static constexpr unsigned char available_=0,
  451. sentinel_=1;
  452. inline uint8x16_t load_metadata()const
  453. {
  454. #if defined(BOOST_UNORDERED_THREAD_SANITIZER)
  455. /* ThreadSanitizer complains on 1-byte atomic writes combined with
  456. * 16-byte atomic reads.
  457. */
  458. alignas(16) uint8_t data[16]={
  459. m[ 0],m[ 1],m[ 2],m[ 3],m[ 4],m[ 5],m[ 6],m[ 7],
  460. m[ 8],m[ 9],m[10],m[11],m[12],m[13],m[14],m[15]};
  461. return vld1q_u8(data);
  462. #else
  463. return vld1q_u8(reinterpret_cast<const uint8_t*>(m));
  464. #endif
  465. }
  466. inline static unsigned char reduced_hash(std::size_t hash)
  467. {
  468. static constexpr unsigned char table[]={
  469. 8,9,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
  470. 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
  471. 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
  472. 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
  473. 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
  474. 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
  475. 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
  476. 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
  477. 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
  478. 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,
  479. 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,
  480. 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,
  481. 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,
  482. 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,
  483. 224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,
  484. 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,
  485. };
  486. return table[(unsigned char)hash];
  487. }
  488. /* Copied from
  489. * https://github.com/simd-everywhere/simde/blob/master/simde/x86/
  490. * sse2.h#L3763
  491. */
  492. static inline int simde_mm_movemask_epi8(uint8x16_t a)
  493. {
  494. static constexpr uint8_t md[16]={
  495. 1 << 0, 1 << 1, 1 << 2, 1 << 3,
  496. 1 << 4, 1 << 5, 1 << 6, 1 << 7,
  497. 1 << 0, 1 << 1, 1 << 2, 1 << 3,
  498. 1 << 4, 1 << 5, 1 << 6, 1 << 7,
  499. };
  500. uint8x16_t masked=vandq_u8(vld1q_u8(md),a);
  501. uint8x8x2_t tmp=vzip_u8(vget_low_u8(masked),vget_high_u8(masked));
  502. uint16x8_t x=vreinterpretq_u16_u8(vcombine_u8(tmp.val[0],tmp.val[1]));
  503. #if defined(__ARM_ARCH_ISA_A64)
  504. return vaddvq_u16(x);
  505. #else
  506. uint64x2_t t64=vpaddlq_u32(vpaddlq_u16(x));
  507. return int(vgetq_lane_u64(t64,0))+int(vgetq_lane_u64(t64,1));
  508. #endif
  509. }
  510. inline slot_type& at(std::size_t pos)
  511. {
  512. return m[pos];
  513. }
  514. inline const slot_type& at(std::size_t pos)const
  515. {
  516. return m[pos];
  517. }
  518. inline slot_type& overflow()
  519. {
  520. return at(N);
  521. }
  522. inline const slot_type& overflow()const
  523. {
  524. return at(N);
  525. }
  526. alignas(16) slot_type m[16];
  527. };
  528. #else /* non-SIMD */
  529. template<template<typename> class IntegralWrapper>
  530. struct group15
  531. {
  532. static constexpr std::size_t N=15;
  533. static constexpr bool regular_layout=false;
  534. struct dummy_group_type
  535. {
  536. alignas(16) boost::uint64_t m[2]=
  537. {0x0000000000004000ull,0x0000000000000000ull};
  538. };
  539. inline void initialize(){m[0]=0;m[1]=0;}
  540. inline void set(std::size_t pos,std::size_t hash)
  541. {
  542. BOOST_ASSERT(pos<N);
  543. set_impl(pos,reduced_hash(hash));
  544. }
  545. inline void set_sentinel()
  546. {
  547. set_impl(N-1,sentinel_);
  548. }
  549. inline bool is_sentinel(std::size_t pos)const
  550. {
  551. BOOST_ASSERT(pos<N);
  552. return
  553. pos==N-1&&
  554. (m[0] & boost::uint64_t(0x4000400040004000ull))==
  555. boost::uint64_t(0x4000ull)&&
  556. (m[1] & boost::uint64_t(0x4000400040004000ull))==0;
  557. }
  558. inline void reset(std::size_t pos)
  559. {
  560. BOOST_ASSERT(pos<N);
  561. set_impl(pos,available_);
  562. }
  563. static inline void reset(unsigned char* pc)
  564. {
  565. std::size_t pos=reinterpret_cast<uintptr_t>(pc)%sizeof(group15);
  566. pc-=pos;
  567. reinterpret_cast<group15*>(pc)->reset(pos);
  568. }
  569. inline int match(std::size_t hash)const
  570. {
  571. return match_impl(reduced_hash(hash));
  572. }
  573. inline bool is_not_overflowed(std::size_t hash)const
  574. {
  575. return !(reinterpret_cast<const boost::uint16_t*>(m)[hash%8] & 0x8000u);
  576. }
  577. inline void mark_overflow(std::size_t hash)
  578. {
  579. reinterpret_cast<boost::uint16_t*>(m)[hash%8]|=0x8000u;
  580. }
  581. static inline bool maybe_caused_overflow(unsigned char* pc)
  582. {
  583. std::size_t pos=reinterpret_cast<uintptr_t>(pc)%sizeof(group15);
  584. group15 *pg=reinterpret_cast<group15*>(pc-pos);
  585. boost::uint64_t x=((pg->m[0])>>pos)&0x000100010001ull;
  586. boost::uint32_t y=narrow_cast<boost::uint32_t>(x|(x>>15)|(x>>30));
  587. return !pg->is_not_overflowed(y);
  588. };
  589. inline int match_available()const
  590. {
  591. boost::uint64_t x=~(m[0]|m[1]);
  592. boost::uint32_t y=static_cast<boost::uint32_t>(x&(x>>32));
  593. y&=y>>16;
  594. return y&0x7FFF;
  595. }
  596. inline bool is_occupied(std::size_t pos)const
  597. {
  598. BOOST_ASSERT(pos<N);
  599. boost::uint64_t x=m[0]|m[1];
  600. return (x&(0x0001000100010001ull<<pos))!=0;
  601. }
  602. inline int match_occupied()const
  603. {
  604. boost::uint64_t x=m[0]|m[1];
  605. boost::uint32_t y=narrow_cast<boost::uint32_t>(x|(x>>32));
  606. y|=y>>16;
  607. return y&0x7FFF;
  608. }
  609. private:
  610. using word_type=IntegralWrapper<uint64_t>;
  611. BOOST_UNORDERED_STATIC_ASSERT(sizeof(word_type)==8);
  612. static constexpr unsigned char available_=0,
  613. sentinel_=1;
  614. inline static unsigned char reduced_hash(std::size_t hash)
  615. {
  616. static constexpr unsigned char table[]={
  617. 8,9,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
  618. 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
  619. 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
  620. 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
  621. 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
  622. 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
  623. 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
  624. 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
  625. 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
  626. 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,
  627. 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,
  628. 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,
  629. 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,
  630. 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,
  631. 224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,
  632. 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,
  633. };
  634. return table[narrow_cast<unsigned char>(hash)];
  635. }
  636. inline void set_impl(std::size_t pos,std::size_t n)
  637. {
  638. BOOST_ASSERT(n<256);
  639. set_impl(m[0],pos,n&0xFu);
  640. set_impl(m[1],pos,n>>4);
  641. }
  642. static inline void set_impl(word_type& x,std::size_t pos,std::size_t n)
  643. {
  644. static constexpr boost::uint64_t mask[]=
  645. {
  646. 0x0000000000000000ull,0x0000000000000001ull,0x0000000000010000ull,
  647. 0x0000000000010001ull,0x0000000100000000ull,0x0000000100000001ull,
  648. 0x0000000100010000ull,0x0000000100010001ull,0x0001000000000000ull,
  649. 0x0001000000000001ull,0x0001000000010000ull,0x0001000000010001ull,
  650. 0x0001000100000000ull,0x0001000100000001ull,0x0001000100010000ull,
  651. 0x0001000100010001ull,
  652. };
  653. static constexpr boost::uint64_t imask[]=
  654. {
  655. 0x0001000100010001ull,0x0001000100010000ull,0x0001000100000001ull,
  656. 0x0001000100000000ull,0x0001000000010001ull,0x0001000000010000ull,
  657. 0x0001000000000001ull,0x0001000000000000ull,0x0000000100010001ull,
  658. 0x0000000100010000ull,0x0000000100000001ull,0x0000000100000000ull,
  659. 0x0000000000010001ull,0x0000000000010000ull,0x0000000000000001ull,
  660. 0x0000000000000000ull,
  661. };
  662. BOOST_ASSERT(pos<16&&n<16);
  663. x|= mask[n]<<pos;
  664. x&=~(imask[n]<<pos);
  665. }
  666. inline int match_impl(std::size_t n)const
  667. {
  668. static constexpr boost::uint64_t mask[]=
  669. {
  670. 0x0000000000000000ull,0x000000000000ffffull,0x00000000ffff0000ull,
  671. 0x00000000ffffffffull,0x0000ffff00000000ull,0x0000ffff0000ffffull,
  672. 0x0000ffffffff0000ull,0x0000ffffffffffffull,0xffff000000000000ull,
  673. 0xffff00000000ffffull,0xffff0000ffff0000ull,0xffff0000ffffffffull,
  674. 0xffffffff00000000ull,0xffffffff0000ffffull,0xffffffffffff0000ull,
  675. 0xffffffffffffffffull,
  676. };
  677. BOOST_ASSERT(n<256);
  678. boost::uint64_t x=m[0]^mask[n&0xFu];
  679. x=~((m[1]^mask[n>>4])|x);
  680. boost::uint32_t y=static_cast<boost::uint32_t>(x&(x>>32));
  681. y&=y>>16;
  682. return y&0x7FFF;
  683. }
  684. alignas(16) word_type m[2];
  685. };
  686. #endif
  687. /* foa::table_core uses a size policy to obtain the permissible sizes of the
  688. * group array (and, by implication, the element array) and to do the
  689. * hash->group mapping.
  690. *
  691. * - size_index(n) returns an unspecified "index" number used in other policy
  692. * operations.
  693. * - size(size_index_) returns the number of groups for the given index. It
  694. * is guaranteed that size(size_index(n)) >= n.
  695. * - min_size() is the minimum number of groups permissible, i.e.
  696. * size(size_index(0)).
  697. * - position(hash,size_index_) maps hash to a position in the range
  698. * [0,size(size_index_)).
  699. *
  700. * The reason we're introducing the intermediate index value for calculating
  701. * sizes and positions is that it allows us to optimize the implementation of
  702. * position, which is in the hot path of lookup and insertion operations:
  703. * pow2_size_policy, the actual size policy used by foa::table, returns 2^n
  704. * (n>0) as permissible sizes and returns the n most significant bits
  705. * of the hash value as the position in the group array; using a size index
  706. * defined as i = (bits in std::size_t) - n, we have an unbeatable
  707. * implementation of position(hash) as hash>>i.
  708. * There's a twofold reason for choosing the high bits of hash for positioning:
  709. * - Multiplication-based mixing tends to yield better entropy in the high
  710. * part of its result.
  711. * - group15 reduced-hash values take the *low* bits of hash, and we want
  712. * these values and positioning to be as uncorrelated as possible.
  713. */
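/* Worked example (illustrative only, assuming 64-bit std::size_t): requesting
 * room for n=1000 groups gives bit_width(999)=10, so size_index(1000)=64-10=54,
 * size(54)=2^10=1024>=1000, and position(hash,54)=hash>>54, i.e. the 10 most
 * significant bits of the (mixed) hash value select the group.
 */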
  714. struct pow2_size_policy
  715. {
  716. static inline std::size_t size_index(std::size_t n)
  717. {
  718. // TODO: min size is 2, see if we can bring it down to 1 without loss
  719. // of performance
  720. return sizeof(std::size_t)*CHAR_BIT-
  721. (n<=2?1:((std::size_t)(boost::core::bit_width(n-1))));
  722. }
  723. static inline std::size_t size(std::size_t size_index_)
  724. {
  725. return std::size_t(1)<<(sizeof(std::size_t)*CHAR_BIT-size_index_);
  726. }
  727. static constexpr std::size_t min_size(){return 2;}
  728. static inline std::size_t position(std::size_t hash,std::size_t size_index_)
  729. {
  730. return hash>>size_index_;
  731. }
  732. };
  733. /* size index of a group array for a given *element* capacity */
  734. template<typename Group,typename SizePolicy>
  735. static inline std::size_t size_index_for(std::size_t n)
  736. {
  737. /* n/N+1 == ceil((n+1)/N) (extra +1 for the sentinel) */
  738. return SizePolicy::size_index(n/Group::N+1);
  739. }
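/* e.g. (illustrative): for Group::N=15 and n=30 requested elements,
 * 30/15+1 = 3 == ceil((30+1)/15) = 3 groups, the extra slot accounting for
 * the sentinel.
 */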
  740. /* Quadratic prober over a power-of-two range using triangular numbers.
  741. * mask in next(mask) must be the range size minus one (and since size is 2^n,
742. * mask has exactly its n least significant bits set to 1).
  743. */
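/* Probe sequence sketch (illustrative): starting at pos0 with mask=7 (range
 * size 8), get() successively yields pos0, pos0+1, pos0+3, pos0+6, pos0+10,
 * ... (all modulo 8); the offsets are the triangular numbers 0,1,3,6,10,...,
 * which visit every position of a power-of-two range exactly once before
 * next() returns false.
 */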
  744. struct pow2_quadratic_prober
  745. {
  746. pow2_quadratic_prober(std::size_t pos_):pos{pos_}{}
  747. inline std::size_t get()const{return pos;}
  748. inline std::size_t length()const{return step+1;}
  749. /* next returns false when the whole array has been traversed, which ends
  750. * probing (in practice, full-table probing will only happen with very small
  751. * arrays).
  752. */
  753. inline bool next(std::size_t mask)
  754. {
  755. step+=1;
  756. pos=(pos+step)&mask;
  757. return step<=mask;
  758. }
  759. private:
  760. std::size_t pos,step=0;
  761. };
  762. /* Mixing policies: no_mix is the identity function, and mulx_mix
  763. * uses the mulx function from <boost/unordered/detail/mulx.hpp>.
  764. *
  765. * foa::table_core mixes hash results with mulx_mix unless the hash is marked
  766. * as avalanching, i.e. of good quality
  767. * (see <boost/unordered/hash_traits.hpp>).
  768. */
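/* Usage sketch (illustrative): the table computes the working hash roughly as
 *
 *   std::size_t h=mix_policy::mix(hash_function,key);
 *
 * where mix_policy is no_mix for avalanching hashers and mulx_mix otherwise
 * (see the std::conditional selection in table_core further below).
 */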
  769. struct no_mix
  770. {
  771. template<typename Hash,typename T>
  772. static inline std::size_t mix(const Hash& h,const T& x)
  773. {
  774. return h(x);
  775. }
  776. };
  777. struct mulx_mix
  778. {
  779. template<typename Hash,typename T>
  780. static inline std::size_t mix(const Hash& h,const T& x)
  781. {
  782. return mulx(h(x));
  783. }
  784. };
  785. /* boost::core::countr_zero has a potentially costly check for
  786. * the case x==0.
  787. */
  788. inline unsigned int unchecked_countr_zero(int x)
  789. {
  790. #if defined(BOOST_MSVC)
  791. unsigned long r;
  792. _BitScanForward(&r,(unsigned long)x);
  793. return (unsigned int)r;
  794. #else
  795. BOOST_UNORDERED_ASSUME(x!=0);
  796. return (unsigned int)boost::core::countr_zero((unsigned int)x);
  797. #endif
  798. }
  799. /* table_arrays controls allocation, initialization and deallocation of
  800. * paired arrays of groups and element slots. Only one chunk of memory is
  801. * allocated to place both arrays: this is not done for efficiency reasons,
  802. * but in order to be able to properly align the group array without storing
  803. * additional offset information --the alignment required (16B) is usually
  804. * greater than alignof(std::max_align_t) and thus not guaranteed by
  805. * allocators.
  806. */
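/* Layout sketch of the single allocation performed by table_arrays
 * (illustrative):
 *
 *   [ groups_size*N-1 value_type slots ][ pad < 16B ][ groups_size group15s ]
 *
 * i.e. the element slots come first, followed by up to sizeof(group_type)-1
 * padding bytes so that the group array starts at a multiple of
 * sizeof(group_type) (16B); see buffer_size() below for the exact size
 * computation in value_type units.
 */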
  807. template<typename Group,std::size_t Size>
  808. Group* dummy_groups()
  809. {
  810. /* Dummy storage initialized as if in an empty container (actually, each
  811. * of its groups is initialized like a separate empty container).
  812. * We make table_arrays::groups point to this when capacity()==0, so that
  813. * we are not allocating any dynamic memory and yet lookup can be implemented
  814. * without checking for groups==nullptr. This space won't ever be used for
  815. * insertion as the container's capacity is precisely zero.
  816. */
  817. static constexpr typename Group::dummy_group_type
  818. storage[Size]={typename Group::dummy_group_type(),};
  819. return reinterpret_cast<Group*>(
  820. const_cast<typename Group::dummy_group_type*>(storage));
  821. }
  822. template<
  823. typename Ptr,typename Ptr2,
  824. typename std::enable_if<!std::is_same<Ptr,Ptr2>::value>::type* = nullptr
  825. >
  826. Ptr to_pointer(Ptr2 p)
  827. {
  828. if(!p){return nullptr;}
  829. return boost::pointer_traits<Ptr>::pointer_to(*p);
  830. }
  831. template<typename Ptr>
  832. Ptr to_pointer(Ptr p)
  833. {
  834. return p;
  835. }
  836. template<typename Arrays,typename Allocator>
  837. struct arrays_holder
  838. {
  839. arrays_holder(const Arrays& arrays,const Allocator& al):
  840. arrays_{arrays},al_{al}
  841. {}
  842. /* not defined but VS in pre-C++17 mode needs to see it for RVO */
  843. arrays_holder(arrays_holder const&);
  844. arrays_holder& operator=(arrays_holder const&)=delete;
  845. ~arrays_holder()
  846. {
  847. if(!released_){
  848. arrays_.delete_(typename Arrays::allocator_type(al_),arrays_);
  849. }
  850. }
  851. const Arrays& release()
  852. {
  853. released_=true;
  854. return arrays_;
  855. }
  856. private:
  857. Arrays arrays_;
  858. Allocator al_;
  859. bool released_=false;
  860. };
  861. template<typename Value,typename Group,typename SizePolicy,typename Allocator>
  862. struct table_arrays
  863. {
  864. using allocator_type=typename boost::allocator_rebind<Allocator,Value>::type;
  865. using value_type=Value;
  866. using group_type=Group;
  867. static constexpr auto N=group_type::N;
  868. using size_policy=SizePolicy;
  869. using value_type_pointer=
  870. typename boost::allocator_pointer<allocator_type>::type;
  871. using group_type_pointer=
  872. typename boost::pointer_traits<value_type_pointer>::template
  873. rebind<group_type>;
  874. using group_type_pointer_traits=boost::pointer_traits<group_type_pointer>;
  875. table_arrays(
  876. std::size_t gsi,std::size_t gsm,
  877. group_type_pointer pg,value_type_pointer pe):
  878. groups_size_index{gsi},groups_size_mask{gsm},groups_{pg},elements_{pe}{}
  879. value_type* elements()const noexcept{return boost::to_address(elements_);}
  880. group_type* groups()const noexcept{return boost::to_address(groups_);}
  881. static void set_arrays(table_arrays& arrays,allocator_type al,std::size_t n)
  882. {
  883. return set_arrays(
  884. arrays,al,n,std::is_same<group_type*,group_type_pointer>{});
  885. }
  886. static void set_arrays(
  887. table_arrays& arrays,allocator_type al,std::size_t,
  888. std::false_type /* always allocate */)
  889. {
  890. using storage_traits=boost::allocator_traits<allocator_type>;
  891. auto groups_size_index=arrays.groups_size_index;
  892. auto groups_size=size_policy::size(groups_size_index);
  893. auto sal=allocator_type(al);
  894. arrays.elements_=storage_traits::allocate(sal,buffer_size(groups_size));
  895. /* Align arrays.groups to sizeof(group_type). table_iterator critically
  896. * depends on such alignment for its increment operation.
  897. */
  898. auto p=reinterpret_cast<unsigned char*>(arrays.elements()+groups_size*N-1);
  899. p+=(uintptr_t(sizeof(group_type))-
  900. reinterpret_cast<uintptr_t>(p))%sizeof(group_type);
  901. arrays.groups_=
  902. group_type_pointer_traits::pointer_to(*reinterpret_cast<group_type*>(p));
  903. initialize_groups(
  904. arrays.groups(),groups_size,
  905. is_trivially_default_constructible<group_type>{});
  906. arrays.groups()[groups_size-1].set_sentinel();
  907. }
  908. static void set_arrays(
  909. table_arrays& arrays,allocator_type al,std::size_t n,
  910. std::true_type /* optimize for n==0*/)
  911. {
  912. if(!n){
  913. arrays.groups_=dummy_groups<group_type,size_policy::min_size()>();
  914. }
  915. else{
  916. set_arrays(arrays,al,n,std::false_type{});
  917. }
  918. }
  919. static table_arrays new_(allocator_type al,std::size_t n)
  920. {
  921. auto groups_size_index=size_index_for<group_type,size_policy>(n);
  922. auto groups_size=size_policy::size(groups_size_index);
  923. table_arrays arrays{groups_size_index,groups_size-1,nullptr,nullptr};
  924. set_arrays(arrays,al,n);
  925. return arrays;
  926. }
  927. static void delete_(allocator_type al,table_arrays& arrays)noexcept
  928. {
  929. using storage_traits=boost::allocator_traits<allocator_type>;
  930. auto sal=allocator_type(al);
  931. if(arrays.elements()){
  932. storage_traits::deallocate(
  933. sal,arrays.elements_,buffer_size(arrays.groups_size_mask+1));
  934. }
  935. }
  936. /* combined space for elements and groups measured in sizeof(value_type)s */
  937. static std::size_t buffer_size(std::size_t groups_size)
  938. {
  939. auto buffer_bytes=
  940. /* space for elements (we subtract 1 because of the sentinel) */
  941. sizeof(value_type)*(groups_size*N-1)+
  942. /* space for groups + padding for group alignment */
  943. sizeof(group_type)*(groups_size+1)-1;
  944. /* ceil(buffer_bytes/sizeof(value_type)) */
  945. return (buffer_bytes+sizeof(value_type)-1)/sizeof(value_type);
  946. }
  947. static void initialize_groups(
  948. group_type* pg,std::size_t size,std::true_type /* memset */)
  949. {
  950. /* memset faster/not slower than manual, assumes all zeros is group_type's
  951. * default layout.
  952. * reinterpret_cast: GCC may complain about group_type not being trivially
  953. * copy-assignable when we're relying on trivial copy constructibility.
  954. */
  955. std::memset(
  956. reinterpret_cast<unsigned char*>(pg),0,sizeof(group_type)*size);
  957. }
  958. static void initialize_groups(
  959. group_type* pg,std::size_t size,std::false_type /* manual */)
  960. {
  961. while(size--!=0)::new (pg++) group_type();
  962. }
  963. std::size_t groups_size_index;
  964. std::size_t groups_size_mask;
  965. group_type_pointer groups_;
  966. value_type_pointer elements_;
  967. };
  968. #if defined(BOOST_UNORDERED_ENABLE_STATS)
  969. /* stats support */
  970. struct table_core_cumulative_stats
  971. {
  972. concurrent_cumulative_stats<1> insertion;
  973. concurrent_cumulative_stats<2> successful_lookup,
  974. unsuccessful_lookup;
  975. };
  976. struct table_core_insertion_stats
  977. {
  978. std::size_t count;
  979. sequence_stats_summary probe_length;
  980. };
  981. struct table_core_lookup_stats
  982. {
  983. std::size_t count;
  984. sequence_stats_summary probe_length;
  985. sequence_stats_summary num_comparisons;
  986. };
  987. struct table_core_stats
  988. {
  989. table_core_insertion_stats insertion;
  990. table_core_lookup_stats successful_lookup,
  991. unsuccessful_lookup;
  992. };
  993. #define BOOST_UNORDERED_ADD_STATS(stats,args) stats.add args
  994. #define BOOST_UNORDERED_SWAP_STATS(stats1,stats2) std::swap(stats1,stats2)
  995. #define BOOST_UNORDERED_COPY_STATS(stats1,stats2) stats1=stats2
  996. #define BOOST_UNORDERED_RESET_STATS_OF(x) x.reset_stats()
  997. #define BOOST_UNORDERED_STATS_COUNTER(name) std::size_t name=0
  998. #define BOOST_UNORDERED_INCREMENT_STATS_COUNTER(name) ++name
  999. #else
  1000. #define BOOST_UNORDERED_ADD_STATS(stats,args) ((void)0)
  1001. #define BOOST_UNORDERED_SWAP_STATS(stats1,stats2) ((void)0)
  1002. #define BOOST_UNORDERED_COPY_STATS(stats1,stats2) ((void)0)
  1003. #define BOOST_UNORDERED_RESET_STATS_OF(x) ((void)0)
  1004. #define BOOST_UNORDERED_STATS_COUNTER(name) ((void)0)
  1005. #define BOOST_UNORDERED_INCREMENT_STATS_COUNTER(name) ((void)0)
  1006. #endif
  1007. struct if_constexpr_void_else{void operator()()const{}};
  1008. template<bool B,typename F,typename G=if_constexpr_void_else>
  1009. void if_constexpr(F f,G g={})
  1010. {
  1011. std::get<B?0:1>(std::forward_as_tuple(f,g))();
  1012. }
  1013. template<bool B,typename T,typename std::enable_if<B>::type* =nullptr>
  1014. void copy_assign_if(T& x,const T& y){x=y;}
  1015. template<bool B,typename T,typename std::enable_if<!B>::type* =nullptr>
  1016. void copy_assign_if(T&,const T&){}
  1017. template<bool B,typename T,typename std::enable_if<B>::type* =nullptr>
  1018. void move_assign_if(T& x,T& y){x=std::move(y);}
  1019. template<bool B,typename T,typename std::enable_if<!B>::type* =nullptr>
  1020. void move_assign_if(T&,T&){}
  1021. template<bool B,typename T,typename std::enable_if<B>::type* =nullptr>
  1022. void swap_if(T& x,T& y){using std::swap; swap(x,y);}
  1023. template<bool B,typename T,typename std::enable_if<!B>::type* =nullptr>
  1024. void swap_if(T&,T&){}
  1025. template<typename Allocator>
  1026. struct is_std_allocator:std::false_type{};
  1027. template<typename T>
  1028. struct is_std_allocator<std::allocator<T>>:std::true_type{};
  1029. /* std::allocator::construct marked as deprecated */
  1030. #if defined(_LIBCPP_SUPPRESS_DEPRECATED_PUSH)
  1031. _LIBCPP_SUPPRESS_DEPRECATED_PUSH
  1032. #elif defined(_STL_DISABLE_DEPRECATED_WARNING)
  1033. _STL_DISABLE_DEPRECATED_WARNING
  1034. #elif defined(_MSC_VER)
  1035. #pragma warning(push)
  1036. #pragma warning(disable:4996)
  1037. #endif
  1038. template<typename Allocator,typename Ptr,typename... Args>
  1039. struct alloc_has_construct
  1040. {
  1041. private:
  1042. template<typename Allocator2>
  1043. static decltype(
  1044. std::declval<Allocator2&>().construct(
  1045. std::declval<Ptr>(),std::declval<Args&&>()...),
  1046. std::true_type{}
  1047. ) check(int);
  1048. template<typename> static std::false_type check(...);
  1049. public:
  1050. static constexpr bool value=decltype(check<Allocator>(0))::value;
  1051. };
  1052. #if defined(_LIBCPP_SUPPRESS_DEPRECATED_POP)
  1053. _LIBCPP_SUPPRESS_DEPRECATED_POP
  1054. #elif defined(_STL_RESTORE_DEPRECATED_WARNING)
  1055. _STL_RESTORE_DEPRECATED_WARNING
  1056. #elif defined(_MSC_VER)
  1057. #pragma warning(pop)
  1058. #endif
  1059. /* We expose the hard-coded max load factor so that tests can use it without
  1060. * needing to pull it from an instantiated class template such as the table
  1061. * class.
  1062. */
  1063. static constexpr float mlf=0.875f;
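/* e.g. (illustrative): a table with 1024 groups has 1024*15-1=15359 usable
 * slots, so rehashing is triggered at roughly 0.875*15359 ~= 13439 elements.
 */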
  1064. template<typename Group,typename Element>
  1065. struct table_locator
  1066. {
  1067. table_locator()=default;
  1068. table_locator(Group* pg_,unsigned int n_,Element* p_):pg{pg_},n{n_},p{p_}{}
  1069. explicit operator bool()const noexcept{return p!=nullptr;}
  1070. Group *pg=nullptr;
  1071. unsigned int n=0;
  1072. Element *p=nullptr;
  1073. };
  1074. struct try_emplace_args_t{};
  1075. template<typename TypePolicy,typename Allocator,typename... Args>
  1076. class alloc_cted_insert_type
  1077. {
  1078. using emplace_type=typename std::conditional<
  1079. std::is_constructible<typename TypePolicy::init_type,Args...>::value,
  1080. typename TypePolicy::init_type,
  1081. typename TypePolicy::value_type
  1082. >::type;
  1083. using insert_type=typename std::conditional<
  1084. std::is_constructible<typename TypePolicy::value_type,emplace_type>::value,
  1085. emplace_type,typename TypePolicy::element_type
  1086. >::type;
  1087. using alloc_cted = allocator_constructed<Allocator, insert_type, TypePolicy>;
  1088. alloc_cted val;
  1089. public:
  1090. alloc_cted_insert_type(const Allocator& al_,Args&&... args):val{al_,std::forward<Args>(args)...}
  1091. {
  1092. }
  1093. insert_type& value(){return val.value();}
  1094. };
  1095. template<typename TypePolicy,typename Allocator,typename... Args>
  1096. alloc_cted_insert_type<TypePolicy,Allocator,Args...>
  1097. alloc_make_insert_type(const Allocator& al,Args&&... args)
  1098. {
  1099. return {al,std::forward<Args>(args)...};
  1100. }
  1101. template <typename TypePolicy, typename Allocator, typename KFwdRef,
  1102. typename = void>
  1103. class alloc_cted_or_fwded_key_type
  1104. {
  1105. using key_type = typename TypePolicy::key_type;
  1106. allocator_constructed<Allocator, key_type, TypePolicy> val;
  1107. public:
  1108. alloc_cted_or_fwded_key_type(const Allocator& al_, KFwdRef k)
  1109. : val(al_, std::forward<KFwdRef>(k))
  1110. {
  1111. }
  1112. key_type&& move_or_fwd() { return std::move(val.value()); }
  1113. };
  1114. template <typename TypePolicy, typename Allocator, typename KFwdRef>
  1115. class alloc_cted_or_fwded_key_type<TypePolicy, Allocator, KFwdRef,
  1116. typename std::enable_if<
  1117. is_similar<KFwdRef, typename TypePolicy::key_type>::value>::type>
  1118. {
  1119. // This specialization acts as a forwarding-reference wrapper
  1120. BOOST_UNORDERED_STATIC_ASSERT(std::is_reference<KFwdRef>::value);
  1121. KFwdRef ref;
  1122. public:
  1123. alloc_cted_or_fwded_key_type(const Allocator&, KFwdRef k)
  1124. : ref(std::forward<KFwdRef>(k))
  1125. {
  1126. }
  1127. KFwdRef move_or_fwd() { return std::forward<KFwdRef>(ref); }
  1128. };
  1129. template <typename Container>
  1130. using is_map =
  1131. std::integral_constant<bool, !std::is_same<typename Container::key_type,
  1132. typename Container::value_type>::value>;
  1133. template <typename Container, typename K>
  1134. using is_emplace_kv_able = std::integral_constant<bool,
  1135. is_map<Container>::value &&
  1136. (is_similar<K, typename Container::key_type>::value ||
  1137. is_complete_and_move_constructible<typename Container::key_type>::value)>;
  1138. /* table_core. The TypePolicy template parameter is used to generate
  1139. * instantiations suitable for either maps or sets, and introduces non-standard
  1140. * init_type and element_type:
  1141. *
  1142. * - TypePolicy::key_type and TypePolicy::value_type have the obvious
  1143. * meaning. TypePolicy::mapped_type is expected to be provided as well
  1144. * when key_type and value_type are not the same.
  1145. *
  1146. * - TypePolicy::init_type is the type implicitly converted to when
  1147. * writing x.insert({...}). For maps, this is std::pair<Key,T> rather
  1148. * than std::pair<const Key,T> so that, for instance, x.insert({"hello",0})
  1149. * produces a cheaply moveable std::string&& ("hello") rather than
  1150. * a copyable const std::string&&. foa::table::insert is extended to accept
  1151. * both init_type and value_type references.
  1152. *
  1153. * - TypePolicy::construct and TypePolicy::destroy are used for the
  1154. * construction and destruction of the internal types: value_type,
  1155. * init_type, element_type, and key_type.
  1156. *
  1157. * - TypePolicy::move is used to provide move semantics for the internal
  1158. * types used by the container during rehashing and emplace. These types
  1159. * are init_type, value_type and emplace_type. During insertion, a
  1160. * stack-local type will be created based on the constructibility of the
  1161. * value_type and the supplied arguments. TypePolicy::move is used here
  1162. * for transfer of ownership. Similarly, TypePolicy::move is also used
  1163. * during rehashing when elements are moved to the new table.
  1164. *
  1165. * - TypePolicy::extract returns a const reference to the key part of
  1166. * a value of type value_type, init_type, element_type or
  1167. * decltype(TypePolicy::move(...)).
  1168. *
  1169. * - TypePolicy::element_type is the type that table_arrays uses when
1170. * allocating buckets, which allows us to have flat and node containers.
  1171. * For flat containers, element_type is value_type. For node
  1172. * containers, it is a strong typedef to value_type*.
  1173. *
  1174. * - TypePolicy::value_from returns a mutable reference to value_type from
  1175. * a given element_type. This is used when elements of the table themselves
  1176. * need to be moved, such as during move construction/assignment when
  1177. * allocators are unequal and there is no propagation. For all other cases,
  1178. * the element_type itself is moved.
  1179. */
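/* Illustrative sketch (exposition only, not part of this header): under the
* requirements listed above, a minimal TypePolicy for a hypothetical flat set
* of int could look roughly as follows; the name is invented, and the real
* policies live in the flat/node container headers.
*
*   struct int_flat_set_policy_sketch
*   {
*     using key_type=int;
*     using init_type=int;
*     using value_type=int;
*     using element_type=int; // flat container: element_type==value_type
*
*     static const int& extract(const int& x){return x;}
*     static int&& move(int& x){return std::move(x);}
*     static int& value_from(int& x){return x;}
*
*     template<typename Allocator,typename... Args>
*     static void construct(Allocator& al,int* p,Args&&... args)
*     {
*       boost::allocator_construct(al,p,std::forward<Args>(args)...);
*     }
*     template<typename Allocator>
*     static void destroy(Allocator& al,int* p)noexcept
*     {
*       boost::allocator_destroy(al,p);
*     }
*   };
*/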
  1180. #include <boost/unordered/detail/foa/ignore_wshadow.hpp>
  1181. #if defined(BOOST_MSVC)
  1182. #pragma warning(push)
  1183. #pragma warning(disable:4714) /* marked as __forceinline not inlined */
  1184. #endif
  1185. #if BOOST_WORKAROUND(BOOST_MSVC,<=1900)
1186. /* VS2015 marks generic catch clauses around non-throwing code as
1187. * unreachable.
  1188. */
  1189. #pragma warning(push)
  1190. #pragma warning(disable:4702)
  1191. #endif
  1192. template<
  1193. typename TypePolicy,typename Group,template<typename...> class Arrays,
  1194. typename SizeControl,typename Hash,typename Pred,typename Allocator
  1195. >
  1196. class
  1197. #if defined(_MSC_VER)&&_MSC_FULL_VER>=190023918
  1198. __declspec(empty_bases) /* activate EBO with multiple inheritance */
  1199. #endif
  1200. table_core:empty_value<Hash,0>,empty_value<Pred,1>,empty_value<Allocator,2>
  1201. {
  1202. public:
  1203. using type_policy=TypePolicy;
  1204. using group_type=Group;
  1205. static constexpr auto N=group_type::N;
  1206. using size_policy=pow2_size_policy;
  1207. using prober=pow2_quadratic_prober;
  1208. using mix_policy=typename std::conditional<
  1209. hash_is_avalanching<Hash>::value,
  1210. no_mix,
  1211. mulx_mix
  1212. >::type;
  1213. using alloc_traits=boost::allocator_traits<Allocator>;
  1214. using element_type=typename type_policy::element_type;
  1215. using arrays_type=Arrays<element_type,group_type,size_policy,Allocator>;
  1216. using size_ctrl_type=SizeControl;
  1217. static constexpr auto uses_fancy_pointers=!std::is_same<
  1218. typename alloc_traits::pointer,
  1219. typename alloc_traits::value_type*
  1220. >::value;
  1221. using key_type=typename type_policy::key_type;
  1222. using init_type=typename type_policy::init_type;
  1223. using value_type=typename type_policy::value_type;
  1224. using hasher=Hash;
  1225. using key_equal=Pred;
  1226. using allocator_type=Allocator;
  1227. using pointer=value_type*;
  1228. using const_pointer=const value_type*;
  1229. using reference=value_type&;
  1230. using const_reference=const value_type&;
  1231. using size_type=std::size_t;
  1232. using difference_type=std::ptrdiff_t;
  1233. using locator=table_locator<group_type,element_type>;
  1234. using arrays_holder_type=arrays_holder<arrays_type,Allocator>;
  1235. #if defined(BOOST_UNORDERED_ENABLE_STATS)
  1236. using cumulative_stats=table_core_cumulative_stats;
  1237. using stats=table_core_stats;
  1238. #endif
  1239. table_core(
  1240. std::size_t n=default_bucket_count,const Hash& h_=Hash(),
  1241. const Pred& pred_=Pred(),const Allocator& al_=Allocator()):
  1242. hash_base{empty_init,h_},pred_base{empty_init,pred_},
  1243. allocator_base{empty_init,al_},arrays(new_arrays(n)),
  1244. size_ctrl{initial_max_load(),0}
  1245. {}
  1246. /* genericize on an ArraysFn so that we can do things like delay an
  1247. * allocation for the group_access data required by cfoa after the move
  1248. * constructors of Hash, Pred have been invoked
  1249. */
  1250. template<typename ArraysFn>
  1251. table_core(
  1252. Hash&& h_,Pred&& pred_,Allocator&& al_,
  1253. ArraysFn arrays_fn,const size_ctrl_type& size_ctrl_):
  1254. hash_base{empty_init,std::move(h_)},
  1255. pred_base{empty_init,std::move(pred_)},
  1256. allocator_base{empty_init,std::move(al_)},
  1257. arrays(arrays_fn()),size_ctrl(size_ctrl_)
  1258. {}
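/* For exposition (hypothetical use, not part of this header): any nullary
* callable returning arrays_type works as ArraysFn and is invoked only after
* Hash, Pred and Allocator have been move-constructed, e.g.
*
*   table_core t{std::move(h_),std::move(p_),std::move(a_),
*     [&]{return other.arrays;},other.size_ctrl};
*
* The move constructor below passes [&x]{return x.arrays;} in exactly this
* way.
*/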
  1259. table_core(const table_core& x):
  1260. table_core{x,alloc_traits::select_on_container_copy_construction(x.al())}{}
  1261. template<typename ArraysFn>
  1262. table_core(table_core&& x,arrays_holder_type&& ah,ArraysFn arrays_fn):
  1263. table_core(
  1264. std::move(x.h()),std::move(x.pred()),std::move(x.al()),
  1265. arrays_fn,x.size_ctrl)
  1266. {
  1267. x.arrays=ah.release();
  1268. x.size_ctrl.ml=x.initial_max_load();
  1269. x.size_ctrl.size=0;
  1270. BOOST_UNORDERED_SWAP_STATS(cstats,x.cstats);
  1271. }
  1272. table_core(table_core&& x)
  1273. noexcept(
  1274. std::is_nothrow_move_constructible<Hash>::value&&
  1275. std::is_nothrow_move_constructible<Pred>::value&&
  1276. std::is_nothrow_move_constructible<Allocator>::value&&
  1277. !uses_fancy_pointers):
  1278. table_core{
  1279. std::move(x),x.make_empty_arrays(),[&x]{return x.arrays;}}
  1280. {}
  1281. table_core(const table_core& x,const Allocator& al_):
  1282. table_core{std::size_t(std::ceil(float(x.size())/mlf)),x.h(),x.pred(),al_}
  1283. {
  1284. copy_elements_from(x);
  1285. }
  1286. table_core(table_core&& x,const Allocator& al_):
  1287. table_core{std::move(x.h()),std::move(x.pred()),al_}
  1288. {
  1289. if(al()==x.al()){
  1290. using std::swap;
  1291. swap(arrays,x.arrays);
  1292. swap(size_ctrl,x.size_ctrl);
  1293. BOOST_UNORDERED_SWAP_STATS(cstats,x.cstats);
  1294. }
  1295. else{
  1296. reserve(x.size());
  1297. clear_on_exit c{x};
  1298. (void)c; /* unused var warning */
  1299. BOOST_UNORDERED_RESET_STATS_OF(x);
  1300. /* This works because subsequent x.clear() does not depend on the
  1301. * elements' values.
  1302. */
  1303. x.for_all_elements([this](element_type* p){
  1304. unchecked_insert(type_policy::move(type_policy::value_from(*p)));
  1305. });
  1306. }
  1307. }
  1308. ~table_core()noexcept
  1309. {
  1310. for_all_elements([this](element_type* p){
  1311. destroy_element(p);
  1312. });
  1313. delete_arrays(arrays);
  1314. }
  1315. std::size_t initial_max_load()const
  1316. {
  1317. static constexpr std::size_t small_capacity=2*N-1;
  1318. auto capacity_=capacity();
  1319. if(capacity_<=small_capacity){
  1320. return capacity_; /* we allow 100% usage */
  1321. }
  1322. else{
  1323. return (std::size_t)(mlf*(float)(capacity_));
  1324. }
  1325. }
  1326. arrays_holder_type make_empty_arrays()const
  1327. {
  1328. return make_arrays(0);
  1329. }
  1330. table_core& operator=(const table_core& x)
  1331. {
  1332. BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED(Hash, Pred)
  1333. static constexpr auto pocca=
  1334. alloc_traits::propagate_on_container_copy_assignment::value;
  1335. if(this!=std::addressof(x)){
1336. /* We perform these (potentially throwing) copies first so that, if copy
1337. * construction winds up throwing, the container is still left intact.
1338. */
  1339. hasher tmp_h=x.h();
  1340. key_equal tmp_p=x.pred();
  1341. clear();
1342. /* Because we've asserted at compile time that Hash and Pred are nothrow
1343. * swappable, swapping the temporaries in cannot throw, so the container's
1344. * Hash and Pred are never left in a mutually inconsistent state.
1345. */
  1346. using std::swap;
  1347. swap(h(),tmp_h);
  1348. swap(pred(),tmp_p);
  1349. if_constexpr<pocca>([&,this]{
  1350. if(al()!=x.al()){
  1351. auto ah=x.make_arrays(std::size_t(std::ceil(float(x.size())/mlf)));
  1352. delete_arrays(arrays);
  1353. arrays=ah.release();
  1354. size_ctrl.ml=initial_max_load();
  1355. }
  1356. copy_assign_if<pocca>(al(),x.al());
  1357. });
  1358. /* noshrink: favor memory reuse over tightness */
  1359. noshrink_reserve(x.size());
  1360. copy_elements_from(x);
  1361. }
  1362. return *this;
  1363. }
  1364. #if defined(BOOST_MSVC)
  1365. #pragma warning(push)
  1366. #pragma warning(disable:4127) /* conditional expression is constant */
  1367. #endif
  1368. table_core& operator=(table_core&& x)
  1369. noexcept(
  1370. (alloc_traits::propagate_on_container_move_assignment::value||
  1371. alloc_traits::is_always_equal::value)&&!uses_fancy_pointers)
  1372. {
  1373. BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED(Hash, Pred)
  1374. static constexpr auto pocma=
  1375. alloc_traits::propagate_on_container_move_assignment::value;
  1376. if(this!=std::addressof(x)){
  1377. /* Given ambiguity in implementation strategies briefly discussed here:
  1378. * https://www.open-std.org/jtc1/sc22/wg21/docs/lwg-active.html#2227
  1379. *
  1380. * we opt into requiring nothrow swappability and eschew the move
  1381. * operations associated with Hash, Pred.
  1382. *
  1383. * To this end, we ensure that the user never has to consider the
1384. * moved-from state of their Hash, Pred objects.
  1385. */
  1386. using std::swap;
  1387. clear();
  1388. if(pocma||al()==x.al()){
  1389. auto ah=x.make_empty_arrays();
  1390. swap(h(),x.h());
  1391. swap(pred(),x.pred());
  1392. delete_arrays(arrays);
  1393. move_assign_if<pocma>(al(),x.al());
  1394. arrays=x.arrays;
  1395. size_ctrl.ml=std::size_t(x.size_ctrl.ml);
  1396. size_ctrl.size=std::size_t(x.size_ctrl.size);
  1397. BOOST_UNORDERED_COPY_STATS(cstats,x.cstats);
  1398. x.arrays=ah.release();
  1399. x.size_ctrl.ml=x.initial_max_load();
  1400. x.size_ctrl.size=0;
  1401. BOOST_UNORDERED_RESET_STATS_OF(x);
  1402. }
  1403. else{
  1404. swap(h(),x.h());
  1405. swap(pred(),x.pred());
  1406. /* noshrink: favor memory reuse over tightness */
  1407. noshrink_reserve(x.size());
  1408. clear_on_exit c{x};
  1409. (void)c; /* unused var warning */
  1410. BOOST_UNORDERED_RESET_STATS_OF(x);
  1411. /* This works because subsequent x.clear() does not depend on the
  1412. * elements' values.
  1413. */
  1414. x.for_all_elements([this](element_type* p){
  1415. unchecked_insert(type_policy::move(type_policy::value_from(*p)));
  1416. });
  1417. }
  1418. }
  1419. return *this;
  1420. }
  1421. #if defined(BOOST_MSVC)
  1422. #pragma warning(pop) /* C4127 */
  1423. #endif
  1424. allocator_type get_allocator()const noexcept{return al();}
  1425. bool empty()const noexcept{return size()==0;}
  1426. std::size_t size()const noexcept{return size_ctrl.size;}
  1427. std::size_t max_size()const noexcept{return SIZE_MAX;}
  1428. BOOST_FORCEINLINE
  1429. void erase(group_type* pg,unsigned int pos,element_type* p)noexcept
  1430. {
  1431. destroy_element(p);
  1432. recover_slot(pg,pos);
  1433. }
  1434. BOOST_FORCEINLINE
  1435. void erase(unsigned char* pc,element_type* p)noexcept
  1436. {
  1437. destroy_element(p);
  1438. recover_slot(pc);
  1439. }
  1440. template<typename Key>
  1441. BOOST_FORCEINLINE locator find(const Key& x)const
  1442. {
  1443. auto hash=hash_for(x);
  1444. return find(x,position_for(hash),hash);
  1445. }
  1446. #if defined(BOOST_MSVC)
  1447. /* warning: forcing value to bool 'true' or 'false' in bool(pred()...) */
  1448. #pragma warning(push)
  1449. #pragma warning(disable:4800)
  1450. #endif
  1451. template<typename Key>
  1452. BOOST_FORCEINLINE locator find(
  1453. const Key& x,std::size_t pos0,std::size_t hash)const
  1454. {
  1455. BOOST_UNORDERED_STATS_COUNTER(num_cmps);
  1456. prober pb(pos0);
  1457. do{
  1458. auto pos=pb.get();
  1459. auto pg=arrays.groups()+pos;
  1460. auto mask=pg->match(hash);
  1461. if(mask){
  1462. auto elements=arrays.elements();
  1463. BOOST_UNORDERED_ASSUME(elements!=nullptr);
  1464. auto p=elements+pos*N;
  1465. BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N);
  1466. do{
  1467. BOOST_UNORDERED_INCREMENT_STATS_COUNTER(num_cmps);
  1468. auto n=unchecked_countr_zero(mask);
  1469. if(BOOST_LIKELY(bool(pred()(x,key_from(p[n]))))){
  1470. BOOST_UNORDERED_ADD_STATS(
  1471. cstats.successful_lookup,(pb.length(),num_cmps));
  1472. return {pg,n,p+n};
  1473. }
  1474. mask&=mask-1;
  1475. }while(mask);
  1476. }
  1477. if(BOOST_LIKELY(pg->is_not_overflowed(hash))){
  1478. BOOST_UNORDERED_ADD_STATS(
  1479. cstats.unsuccessful_lookup,(pb.length(),num_cmps));
  1480. return {};
  1481. }
  1482. }
  1483. while(BOOST_LIKELY(pb.next(arrays.groups_size_mask)));
  1484. BOOST_UNORDERED_ADD_STATS(
  1485. cstats.unsuccessful_lookup,(pb.length(),num_cmps));
  1486. return {};
  1487. }
  1488. #if defined(BOOST_MSVC)
  1489. #pragma warning(pop) /* C4800 */
  1490. #endif
  1491. void swap(table_core& x)
  1492. noexcept(
  1493. alloc_traits::propagate_on_container_swap::value||
  1494. alloc_traits::is_always_equal::value)
  1495. {
  1496. BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED(Hash, Pred)
  1497. static constexpr auto pocs=
  1498. alloc_traits::propagate_on_container_swap::value;
  1499. using std::swap;
  1500. if_constexpr<pocs>([&,this]{
  1501. swap_if<pocs>(al(),x.al());
  1502. },
  1503. [&,this]{ /* else */
  1504. BOOST_ASSERT(al()==x.al());
  1505. (void)this; /* makes sure captured this is used */
  1506. });
  1507. swap(h(),x.h());
  1508. swap(pred(),x.pred());
  1509. swap(arrays,x.arrays);
  1510. swap(size_ctrl,x.size_ctrl);
  1511. }
  1512. void clear()noexcept
  1513. {
  1514. auto p=arrays.elements();
  1515. if(p){
  1516. for(auto pg=arrays.groups(),last=pg+arrays.groups_size_mask+1;
  1517. pg!=last;++pg,p+=N){
  1518. auto mask=match_really_occupied(pg,last);
  1519. while(mask){
  1520. destroy_element(p+unchecked_countr_zero(mask));
  1521. mask&=mask-1;
  1522. }
  1523. /* we wipe the entire metadata to reset the overflow byte as well */
  1524. pg->initialize();
  1525. }
  1526. arrays.groups()[arrays.groups_size_mask].set_sentinel();
  1527. size_ctrl.ml=initial_max_load();
  1528. size_ctrl.size=0;
  1529. }
  1530. }
  1531. hasher hash_function()const{return h();}
  1532. key_equal key_eq()const{return pred();}
  1533. std::size_t capacity()const noexcept
  1534. {
  1535. return arrays.elements()?(arrays.groups_size_mask+1)*N-1:0;
  1536. }
  1537. float load_factor()const noexcept
  1538. {
  1539. if(capacity()==0)return 0;
  1540. else return float(size())/float(capacity());
  1541. }
  1542. float max_load_factor()const noexcept{return mlf;}
  1543. std::size_t max_load()const noexcept{return size_ctrl.ml;}
  1544. void rehash(std::size_t n)
  1545. {
1546. auto m=std::size_t(std::ceil(float(size())/mlf));
  1547. if(m>n)n=m;
  1548. if(n)n=capacity_for(n); /* exact resulting capacity */
  1549. if(n!=capacity())unchecked_rehash(n);
  1550. }
  1551. void reserve(std::size_t n)
  1552. {
  1553. rehash(std::size_t(std::ceil(float(n)/mlf)));
  1554. }
  1555. #if defined(BOOST_UNORDERED_ENABLE_STATS)
  1556. stats get_stats()const
  1557. {
  1558. auto insertion=cstats.insertion.get_summary();
  1559. auto successful_lookup=cstats.successful_lookup.get_summary();
  1560. auto unsuccessful_lookup=cstats.unsuccessful_lookup.get_summary();
  1561. return{
  1562. {
  1563. insertion.count,
  1564. insertion.sequence_summary[0]
  1565. },
  1566. {
  1567. successful_lookup.count,
  1568. successful_lookup.sequence_summary[0],
  1569. successful_lookup.sequence_summary[1]
  1570. },
  1571. {
  1572. unsuccessful_lookup.count,
  1573. unsuccessful_lookup.sequence_summary[0],
  1574. unsuccessful_lookup.sequence_summary[1]
  1575. },
  1576. };
  1577. }
  1578. void reset_stats()noexcept
  1579. {
  1580. cstats.insertion.reset();
  1581. cstats.successful_lookup.reset();
  1582. cstats.unsuccessful_lookup.reset();
  1583. }
  1584. #endif
  1585. friend bool operator==(const table_core& x,const table_core& y)
  1586. {
  1587. return
  1588. x.size()==y.size()&&
  1589. x.for_all_elements_while([&](element_type* p){
  1590. auto loc=y.find(key_from(*p));
  1591. return loc&&
  1592. const_cast<const value_type&>(type_policy::value_from(*p))==
  1593. const_cast<const value_type&>(type_policy::value_from(*loc.p));
  1594. });
  1595. }
  1596. friend bool operator!=(const table_core& x,const table_core& y)
  1597. {
  1598. return !(x==y);
  1599. }
  1600. struct clear_on_exit
  1601. {
  1602. ~clear_on_exit(){x.clear();}
  1603. table_core& x;
  1604. };
  1605. Hash& h(){return hash_base::get();}
  1606. const Hash& h()const{return hash_base::get();}
  1607. Pred& pred(){return pred_base::get();}
  1608. const Pred& pred()const{return pred_base::get();}
  1609. Allocator& al(){return allocator_base::get();}
  1610. const Allocator& al()const{return allocator_base::get();}
  1611. template<typename... Args>
  1612. void construct_element(element_type* p,Args&&... args)
  1613. {
  1614. type_policy::construct(al(),p,std::forward<Args>(args)...);
  1615. }
  1616. template<typename... Args>
  1617. void construct_element(element_type* p,try_emplace_args_t,Args&&... args)
  1618. {
  1619. construct_element_from_try_emplace_args(
  1620. p,
  1621. std::integral_constant<bool,std::is_same<key_type,value_type>::value>{},
  1622. std::forward<Args>(args)...);
  1623. }
  1624. void destroy_element(element_type* p)noexcept
  1625. {
  1626. type_policy::destroy(al(),p);
  1627. }
  1628. struct destroy_element_on_exit
  1629. {
  1630. ~destroy_element_on_exit(){this_->destroy_element(p);}
  1631. table_core *this_;
  1632. element_type *p;
  1633. };
  1634. template<typename T>
  1635. static inline auto key_from(const T& x)
  1636. ->decltype(type_policy::extract(x))
  1637. {
  1638. return type_policy::extract(x);
  1639. }
  1640. template<typename Key,typename... Args>
  1641. static inline const Key& key_from(
  1642. try_emplace_args_t,const Key& x,const Args&...)
  1643. {
  1644. return x;
  1645. }
  1646. template<typename Key>
  1647. inline std::size_t hash_for(const Key& x)const
  1648. {
  1649. return mix_policy::mix(h(),x);
  1650. }
  1651. inline std::size_t position_for(std::size_t hash)const
  1652. {
  1653. return position_for(hash,arrays);
  1654. }
  1655. static inline std::size_t position_for(
  1656. std::size_t hash,const arrays_type& arrays_)
  1657. {
  1658. return size_policy::position(hash,arrays_.groups_size_index);
  1659. }
  1660. static inline int match_really_occupied(group_type* pg,group_type* last)
  1661. {
  1662. /* excluding the sentinel */
  1663. return pg->match_occupied()&~(int(pg==last-1)<<(N-1));
  1664. }
  1665. template<typename... Args>
  1666. locator unchecked_emplace_at(
  1667. std::size_t pos0,std::size_t hash,Args&&... args)
  1668. {
  1669. auto res=nosize_unchecked_emplace_at(
  1670. arrays,pos0,hash,std::forward<Args>(args)...);
  1671. ++size_ctrl.size;
  1672. return res;
  1673. }
  1674. BOOST_NOINLINE void unchecked_rehash_for_growth()
  1675. {
  1676. auto new_arrays_=new_arrays_for_growth();
  1677. unchecked_rehash(new_arrays_);
  1678. }
  1679. template<typename... Args>
  1680. BOOST_NOINLINE locator
  1681. unchecked_emplace_with_rehash(std::size_t hash,Args&&... args)
  1682. {
  1683. auto new_arrays_=new_arrays_for_growth();
  1684. locator it;
  1685. BOOST_TRY{
  1686. /* strong exception guarantee -> try insertion before rehash */
  1687. it=nosize_unchecked_emplace_at(
  1688. new_arrays_,position_for(hash,new_arrays_),
  1689. hash,std::forward<Args>(args)...);
  1690. }
  1691. BOOST_CATCH(...){
  1692. delete_arrays(new_arrays_);
  1693. BOOST_RETHROW
  1694. }
  1695. BOOST_CATCH_END
  1696. /* new_arrays_ lifetime taken care of by unchecked_rehash */
  1697. unchecked_rehash(new_arrays_);
  1698. ++size_ctrl.size;
  1699. return it;
  1700. }
  1701. void noshrink_reserve(std::size_t n)
  1702. {
  1703. /* used only on assignment after element clearance */
  1704. BOOST_ASSERT(empty());
  1705. if(n){
  1706. n=std::size_t(std::ceil(float(n)/mlf)); /* elements -> slots */
  1707. n=capacity_for(n); /* exact resulting capacity */
  1708. if(n>capacity()){
  1709. auto new_arrays_=new_arrays(n);
  1710. delete_arrays(arrays);
  1711. arrays=new_arrays_;
  1712. size_ctrl.ml=initial_max_load();
  1713. }
  1714. }
  1715. }
  1716. template<typename F>
  1717. void for_all_elements(F f)const
  1718. {
  1719. for_all_elements(arrays,f);
  1720. }
  1721. template<typename F>
  1722. static auto for_all_elements(const arrays_type& arrays_,F f)
  1723. ->decltype(f(nullptr),void())
  1724. {
  1725. for_all_elements_while(arrays_,[&](element_type* p){f(p);return true;});
  1726. }
  1727. template<typename F>
  1728. static auto for_all_elements(const arrays_type& arrays_,F f)
  1729. ->decltype(f(nullptr,0,nullptr),void())
  1730. {
  1731. for_all_elements_while(
  1732. arrays_,[&](group_type* pg,unsigned int n,element_type* p)
  1733. {f(pg,n,p);return true;});
  1734. }
  1735. template<typename F>
  1736. bool for_all_elements_while(F f)const
  1737. {
  1738. return for_all_elements_while(arrays,f);
  1739. }
  1740. template<typename F>
  1741. static auto for_all_elements_while(const arrays_type& arrays_,F f)
  1742. ->decltype(f(nullptr),bool())
  1743. {
  1744. return for_all_elements_while(
  1745. arrays_,[&](group_type*,unsigned int,element_type* p){return f(p);});
  1746. }
  1747. template<typename F>
  1748. static auto for_all_elements_while(const arrays_type& arrays_,F f)
  1749. ->decltype(f(nullptr,0,nullptr),bool())
  1750. {
  1751. auto p=arrays_.elements();
  1752. if(p){
  1753. for(auto pg=arrays_.groups(),last=pg+arrays_.groups_size_mask+1;
  1754. pg!=last;++pg,p+=N){
  1755. auto mask=match_really_occupied(pg,last);
  1756. while(mask){
  1757. auto n=unchecked_countr_zero(mask);
  1758. if(!f(pg,n,p+n))return false;
  1759. mask&=mask-1;
  1760. }
  1761. }
  1762. }
  1763. return true;
  1764. }
  1765. arrays_type arrays;
  1766. size_ctrl_type size_ctrl;
  1767. #if defined(BOOST_UNORDERED_ENABLE_STATS)
  1768. mutable cumulative_stats cstats;
  1769. #endif
  1770. private:
  1771. template<
  1772. typename,typename,template<typename...> class,
  1773. typename,typename,typename,typename
  1774. >
  1775. friend class table_core;
  1776. using hash_base=empty_value<Hash,0>;
  1777. using pred_base=empty_value<Pred,1>;
  1778. using allocator_base=empty_value<Allocator,2>;
  1779. /* used by allocator-extended move ctor */
  1780. table_core(Hash&& h_,Pred&& pred_,const Allocator& al_):
  1781. hash_base{empty_init,std::move(h_)},
  1782. pred_base{empty_init,std::move(pred_)},
  1783. allocator_base{empty_init,al_},arrays(new_arrays(0)),
  1784. size_ctrl{initial_max_load(),0}
  1785. {
  1786. }
  1787. arrays_type new_arrays(std::size_t n)const
  1788. {
  1789. return arrays_type::new_(typename arrays_type::allocator_type(al()),n);
  1790. }
  1791. arrays_type new_arrays_for_growth()const
  1792. {
  1793. /* Due to the anti-drift mechanism (see recover_slot), the new arrays may
  1794. * be of the same size as the old arrays; in the limit, erasing one
  1795. * element at full load and then inserting could bring us back to the same
  1796. * capacity after a costly rehash. To avoid this, we jump to the next
  1797. * capacity level when the number of erased elements is <= 10% of total
  1798. * elements at full load, which is implemented by requesting additional
  1799. * F*size elements, with F = P * 10% / (1 - P * 10%), where P is the
  1800. * probability of an element having caused overflow; P has been measured as
  1801. * ~0.162 under ideal conditions, yielding F ~ 0.0165 ~ 1/61.
  1802. */
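/* Worked out: P*10% = 0.162*0.10 = 0.0162, so F = 0.0162/(1-0.0162) ~ 0.0165,
* i.e. roughly 1/61, which is where the size()/61 term below comes from.
*/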
  1803. return new_arrays(std::size_t(
  1804. std::ceil(static_cast<float>(size()+size()/61+1)/mlf)));
  1805. }
  1806. void delete_arrays(arrays_type& arrays_)noexcept
  1807. {
  1808. arrays_type::delete_(typename arrays_type::allocator_type(al()),arrays_);
  1809. }
  1810. arrays_holder_type make_arrays(std::size_t n)const
  1811. {
  1812. return {new_arrays(n),al()};
  1813. }
  1814. template<typename Key,typename... Args>
  1815. void construct_element_from_try_emplace_args(
  1816. element_type* p,std::false_type,Key&& x,Args&&... args)
  1817. {
  1818. type_policy::construct(
  1819. this->al(),p,
  1820. std::piecewise_construct,
  1821. std::forward_as_tuple(std::forward<Key>(x)),
  1822. std::forward_as_tuple(std::forward<Args>(args)...));
  1823. }
  1824. /* This overload allows boost::unordered_flat_set to internally use
  1825. * try_emplace to implement heterogeneous insert (P2363).
  1826. */
  1827. template<typename Key>
  1828. void construct_element_from_try_emplace_args(
  1829. element_type* p,std::true_type,Key&& x)
  1830. {
  1831. type_policy::construct(this->al(),p,std::forward<Key>(x));
  1832. }
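/* Illustrative usage sketch (assumption; transparent_string_hash is a
* hypothetical functor with is_transparent, not defined here):
*
*   boost::unordered_flat_set<
*     std::string,transparent_string_hash,std::equal_to<>> s;
*   s.insert("hello");
*
* The const char* key is hashed and compared directly; only if insertion
* actually takes place is the std::string element constructed in place from
* the forwarded key via the overload above.
*/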
  1833. void copy_elements_from(const table_core& x)
  1834. {
  1835. BOOST_ASSERT(empty());
  1836. BOOST_ASSERT(this!=std::addressof(x));
  1837. if(arrays.groups_size_mask==x.arrays.groups_size_mask){
  1838. fast_copy_elements_from(x);
  1839. }
  1840. else{
  1841. x.for_all_elements([this](const element_type* p){
  1842. unchecked_insert(*p);
  1843. });
  1844. }
  1845. }
  1846. void fast_copy_elements_from(const table_core& x)
  1847. {
  1848. if(arrays.elements()&&x.arrays.elements()){
  1849. copy_elements_array_from(x);
  1850. copy_groups_array_from(x);
  1851. size_ctrl.ml=std::size_t(x.size_ctrl.ml);
  1852. size_ctrl.size=std::size_t(x.size_ctrl.size);
  1853. }
  1854. }
  1855. void copy_elements_array_from(const table_core& x)
  1856. {
  1857. copy_elements_array_from(
  1858. x,
  1859. std::integral_constant<
  1860. bool,
  1861. is_trivially_copy_constructible<element_type>::value&&(
  1862. is_std_allocator<Allocator>::value||
  1863. !alloc_has_construct<Allocator,value_type*,const value_type&>::value)
  1864. >{}
  1865. );
  1866. }
  1867. void copy_elements_array_from(
  1868. const table_core& x,std::true_type /* -> memcpy */)
  1869. {
  1870. /* reinterpret_cast: GCC may complain about value_type not being trivially
  1871. * copy-assignable when we're relying on trivial copy constructibility.
  1872. */
  1873. std::memcpy(
  1874. reinterpret_cast<unsigned char*>(arrays.elements()),
  1875. reinterpret_cast<unsigned char*>(x.arrays.elements()),
  1876. x.capacity()*sizeof(value_type));
  1877. }
  1878. void copy_elements_array_from(
  1879. const table_core& x,std::false_type /* -> manual */)
  1880. {
  1881. std::size_t num_constructed=0;
  1882. BOOST_TRY{
  1883. x.for_all_elements([&,this](const element_type* p){
  1884. construct_element(arrays.elements()+(p-x.arrays.elements()),*p);
  1885. ++num_constructed;
  1886. });
  1887. }
  1888. BOOST_CATCH(...){
  1889. if(num_constructed){
  1890. x.for_all_elements_while([&,this](const element_type* p){
  1891. destroy_element(arrays.elements()+(p-x.arrays.elements()));
  1892. return --num_constructed!=0;
  1893. });
  1894. }
  1895. BOOST_RETHROW
  1896. }
  1897. BOOST_CATCH_END
  1898. }
  1899. void copy_groups_array_from(const table_core& x) {
  1900. copy_groups_array_from(x,is_trivially_copy_assignable<group_type>{});
  1901. }
  1902. void copy_groups_array_from(
  1903. const table_core& x, std::true_type /* -> memcpy */)
  1904. {
  1905. std::memcpy(
  1906. arrays.groups(),x.arrays.groups(),
  1907. (arrays.groups_size_mask+1)*sizeof(group_type));
  1908. }
  1909. void copy_groups_array_from(
  1910. const table_core& x, std::false_type /* -> manual */)
  1911. {
  1912. auto pg=arrays.groups();
  1913. auto xpg=x.arrays.groups();
  1914. for(std::size_t i=0;i<arrays.groups_size_mask+1;++i){
  1915. pg[i]=xpg[i];
  1916. }
  1917. }
  1918. void recover_slot(unsigned char* pc)
  1919. {
  1920. /* If this slot potentially caused overflow, we decrease the maximum load
  1921. * so that average probe length won't increase unboundedly in repeated
  1922. * insert/erase cycles (drift).
  1923. */
  1924. size_ctrl.ml-=group_type::maybe_caused_overflow(pc);
  1925. group_type::reset(pc);
  1926. --size_ctrl.size;
  1927. }
  1928. void recover_slot(group_type* pg,std::size_t pos)
  1929. {
  1930. recover_slot(reinterpret_cast<unsigned char*>(pg)+pos);
  1931. }
  1932. static std::size_t capacity_for(std::size_t n)
  1933. {
  1934. return size_policy::size(size_index_for<group_type,size_policy>(n))*N-1;
  1935. }
  1936. BOOST_NOINLINE void unchecked_rehash(std::size_t n)
  1937. {
  1938. auto new_arrays_=new_arrays(n);
  1939. unchecked_rehash(new_arrays_);
  1940. }
  1941. BOOST_NOINLINE void unchecked_rehash(arrays_type& new_arrays_)
  1942. {
  1943. std::size_t num_destroyed=0;
  1944. BOOST_TRY{
  1945. for_all_elements([&,this](element_type* p){
  1946. nosize_transfer_element(p,new_arrays_,num_destroyed);
  1947. });
  1948. }
  1949. BOOST_CATCH(...){
  1950. if(num_destroyed){
  1951. for_all_elements_while(
  1952. [&,this](group_type* pg,unsigned int n,element_type*){
  1953. recover_slot(pg,n);
  1954. return --num_destroyed!=0;
  1955. }
  1956. );
  1957. }
  1958. for_all_elements(new_arrays_,[this](element_type* p){
  1959. destroy_element(p);
  1960. });
  1961. delete_arrays(new_arrays_);
  1962. BOOST_RETHROW
  1963. }
  1964. BOOST_CATCH_END
  1965. /* either all moved and destroyed or all copied */
  1966. BOOST_ASSERT(num_destroyed==size()||num_destroyed==0);
  1967. if(num_destroyed!=size()){
  1968. for_all_elements([this](element_type* p){
  1969. destroy_element(p);
  1970. });
  1971. }
  1972. delete_arrays(arrays);
  1973. arrays=new_arrays_;
  1974. size_ctrl.ml=initial_max_load();
  1975. }
  1976. template<typename Value>
  1977. void unchecked_insert(Value&& x)
  1978. {
  1979. auto hash=hash_for(key_from(x));
  1980. unchecked_emplace_at(position_for(hash),hash,std::forward<Value>(x));
  1981. }
  1982. void nosize_transfer_element(
  1983. element_type* p,const arrays_type& arrays_,std::size_t& num_destroyed)
  1984. {
  1985. nosize_transfer_element(
  1986. p,hash_for(key_from(*p)),arrays_,num_destroyed,
  1987. std::integral_constant< /* std::move_if_noexcept semantics */
  1988. bool,
  1989. std::is_nothrow_move_constructible<init_type>::value||
  1990. !std::is_same<element_type,value_type>::value||
  1991. !std::is_copy_constructible<element_type>::value>{});
  1992. }
  1993. void nosize_transfer_element(
  1994. element_type* p,std::size_t hash,const arrays_type& arrays_,
  1995. std::size_t& num_destroyed,std::true_type /* ->move */)
  1996. {
1997. /* Destroy p even if an exception is thrown in the middle of move
  1998. * construction, which could leave the source half-moved.
  1999. */
  2000. ++num_destroyed;
  2001. destroy_element_on_exit d{this,p};
  2002. (void)d; /* unused var warning */
  2003. nosize_unchecked_emplace_at(
  2004. arrays_,position_for(hash,arrays_),hash,type_policy::move(*p));
  2005. }
  2006. void nosize_transfer_element(
  2007. element_type* p,std::size_t hash,const arrays_type& arrays_,
  2008. std::size_t& /*num_destroyed*/,std::false_type /* ->copy */)
  2009. {
  2010. nosize_unchecked_emplace_at(
  2011. arrays_,position_for(hash,arrays_),hash,
  2012. const_cast<const element_type&>(*p));
  2013. }
  2014. template<typename... Args>
  2015. locator nosize_unchecked_emplace_at(
  2016. const arrays_type& arrays_,std::size_t pos0,std::size_t hash,
  2017. Args&&... args)
  2018. {
  2019. for(prober pb(pos0);;pb.next(arrays_.groups_size_mask)){
  2020. auto pos=pb.get();
  2021. auto pg=arrays_.groups()+pos;
  2022. auto mask=pg->match_available();
  2023. if(BOOST_LIKELY(mask!=0)){
  2024. auto n=unchecked_countr_zero(mask);
  2025. auto p=arrays_.elements()+pos*N+n;
  2026. construct_element(p,std::forward<Args>(args)...);
  2027. pg->set(n,hash);
  2028. BOOST_UNORDERED_ADD_STATS(cstats.insertion,(pb.length()));
  2029. return {pg,n,p};
  2030. }
  2031. else pg->mark_overflow(hash);
  2032. }
  2033. }
  2034. };
  2035. #if BOOST_WORKAROUND(BOOST_MSVC,<=1900)
  2036. #pragma warning(pop) /* C4702 */
  2037. #endif
  2038. #if defined(BOOST_MSVC)
  2039. #pragma warning(pop) /* C4714 */
  2040. #endif
  2041. #include <boost/unordered/detail/foa/restore_wshadow.hpp>
  2042. } /* namespace foa */
  2043. } /* namespace detail */
  2044. } /* namespace unordered */
  2045. } /* namespace boost */
  2046. #undef BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED
  2047. #undef BOOST_UNORDERED_HAS_FEATURE
  2048. #undef BOOST_UNORDERED_HAS_BUILTIN
  2049. #endif