  1. //////////////////////////////////////////////////////////////////////////////
  2. //
  3. // (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
  4. // Software License, Version 1.0. (See accompanying file
  5. // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
  6. //
  7. // See http://www.boost.org/libs/interprocess for documentation.
  8. //
  9. //////////////////////////////////////////////////////////////////////////////
  10. #ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
  11. #define BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
  12. #ifndef BOOST_CONFIG_HPP
  13. # include <boost/config.hpp>
  14. #endif
  15. #
  16. #if defined(BOOST_HAS_PRAGMA_ONCE)
  17. # pragma once
  18. #endif
  19. #include <boost/interprocess/detail/config_begin.hpp>
  20. #include <boost/interprocess/detail/workaround.hpp>
  21. // interprocess
  22. #include <boost/interprocess/containers/allocation_type.hpp>
  23. #include <boost/interprocess/exceptions.hpp>
  24. #include <boost/interprocess/interprocess_fwd.hpp>
  25. #include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
  26. #include <boost/interprocess/offset_ptr.hpp>
  27. #include <boost/interprocess/sync/scoped_lock.hpp>
  28. // interprocess/detail
  29. #include <boost/interprocess/detail/min_max.hpp>
  30. #include <boost/interprocess/detail/math_functions.hpp>
  31. #include <boost/interprocess/detail/type_traits.hpp>
  32. #include <boost/interprocess/detail/utilities.hpp>
  33. // container
  34. #include <boost/container/detail/multiallocation_chain.hpp>
  35. // container/detail
  36. #include <boost/container/detail/placement_new.hpp>
  37. // move/detail
  38. #include <boost/move/detail/type_traits.hpp> //make_unsigned, alignment_of
  39. #include <boost/move/detail/force_ptr.hpp> //force_ptr
  40. // intrusive
  41. #include <boost/intrusive/pointer_traits.hpp>
  42. #include <boost/intrusive/set.hpp>
  43. // other boost
  44. #include <boost/assert.hpp>
  45. #include <boost/static_assert.hpp>
  46. // std
  47. #include <climits>
  48. #include <cstring>
  49. //#define BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
  50. //Define the macro above to maintain ABI compatibility with the original version.
  51. //The ABI had to be updated to fix compatibility issues when
  52. //sharing shared memory between 32 and 64 bit processes.
  53. //!\file
  54. //!Describes a best-fit algorithm based on an intrusive red-black tree used to allocate
  55. //!objects in shared memory. This class is intended as a base class for single segment
  56. //!and multi-segment implementations.
  57. namespace boost {
  58. namespace interprocess {
  59. //!This class implements an algorithm that stores the free nodes in a red-black tree
  60. //!to have logarithmic search/insert times.
  61. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  62. class rbtree_best_fit
  63. {
  64. #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
  65. //Non-copyable
  66. rbtree_best_fit();
  67. rbtree_best_fit(const rbtree_best_fit &);
  68. rbtree_best_fit &operator=(const rbtree_best_fit &);
  69. private:
  70. struct block_ctrl;
  71. typedef typename boost::intrusive::
  72. pointer_traits<VoidPointer>::template
  73. rebind_pointer<block_ctrl>::type block_ctrl_ptr;
  74. typedef typename boost::intrusive::
  75. pointer_traits<VoidPointer>::template
  76. rebind_pointer<char>::type char_ptr;
  77. #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
  78. public:
  79. //!Shared mutex family used for the rest of the Interprocess framework
  80. typedef MutexFamily mutex_family;
  81. //!Pointer type to be used with the rest of the Interprocess framework
  82. typedef VoidPointer void_pointer;
  83. typedef ipcdetail::basic_multiallocation_chain<VoidPointer> multiallocation_chain;
  84. typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
  85. typedef typename boost::container::dtl::make_unsigned<difference_type>::type size_type;
  86. #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
  87. private:
  88. typedef typename bi::make_set_base_hook
  89. < bi::void_pointer<VoidPointer>
  90. , bi::optimize_size<true>
  91. , bi::link_mode<bi::normal_link> >::type TreeHook;
  92. struct SizeHolder
  93. {
  94. static const size_type size_mask = size_type(-1) >> 2;
  95. //!The previous block's size in Alignment units (the first block stores here the distance to the end block)
  96. size_type m_prev_size;
  97. //!This block's memory size (including block_ctrl header) in Alignment units
  98. size_type m_size : sizeof(size_type)*CHAR_BIT - 2;
  99. size_type m_prev_allocated : 1;
  100. size_type m_allocated : 1;
  101. };
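// Editorial note (illustrative, not part of the original header): SizeHolder packs the
// block size and two boundary-tag flags into a single size_type. For example, assuming
// a 64-bit size_type, the layout is:
//
//   m_size           : 62 bits  -> block size in Alignment units
//   m_prev_allocated :  1 bit   -> is the previous contiguous block allocated?
//   m_allocated      :  1 bit   -> is this block allocated?
//
// and size_mask == size_type(-1) >> 2 == 0x3FFFFFFFFFFFFFFF, so values stored in m_size
// are masked to the low 62 bits before assignment.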
  102. //!Block control structure
  103. struct block_ctrl
  104. : public SizeHolder, public TreeHook
  105. {
  106. block_ctrl()
  107. { this->m_size = 0; this->m_allocated = 0; this->m_prev_allocated = 0; }
  108. friend bool operator<(const block_ctrl &a, const block_ctrl &b)
  109. { return a.m_size < b.m_size; }
  110. friend bool operator==(const block_ctrl &a, const block_ctrl &b)
  111. { return a.m_size == b.m_size; }
  112. };
  113. struct size_block_ctrl_compare
  114. {
  115. bool operator()(size_type size, const block_ctrl &block) const
  116. { return size < block.m_size; }
  117. bool operator()(const block_ctrl &block, size_type size) const
  118. { return block.m_size < size; }
  119. };
  120. //!Shared mutex to protect memory allocate/deallocate
  121. typedef typename MutexFamily::mutex_type mutex_type;
  122. typedef typename bi::make_multiset
  123. <block_ctrl, bi::base_hook<TreeHook> >::type Imultiset;
  124. typedef typename Imultiset::iterator imultiset_iterator;
  125. typedef typename Imultiset::const_iterator imultiset_const_iterator;
  126. //!This struct includes needed data and derives from
  127. //!mutex_type to allow EBO when using null mutex_type
  128. struct header_t : public mutex_type
  129. {
  130. Imultiset m_imultiset;
  131. //!The extra size required by the segment
  132. size_type m_extra_hdr_bytes;
  133. //!Allocated bytes for internal checking
  134. size_type m_allocated;
  135. //!The size of the memory segment
  136. size_type m_size;
  137. } m_header;
  138. friend class ipcdetail::memory_algorithm_common<rbtree_best_fit>;
  139. typedef ipcdetail::memory_algorithm_common<rbtree_best_fit> algo_impl_t;
  140. public:
  141. #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
  142. //!Constructor. "size" is the total size of the managed memory segment,
  143. //!"extra_hdr_bytes" indicates the extra bytes beginning in the sizeof(rbtree_best_fit)
  144. //!offset that the allocator should not use at all.
  145. rbtree_best_fit (size_type size, size_type extra_hdr_bytes);
  146. //!Destructor.
  147. ~rbtree_best_fit();
  148. //!Obtains the minimum size needed by the algorithm
  149. static size_type get_min_size (size_type extra_hdr_bytes);
  150. //Functions for single segment management
  151. //!Allocates bytes, returns 0 if there is no more memory
  152. void* allocate (size_type nbytes);
  153. #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
  154. //Experimental. Don't use
  155. //!Multiple element allocation, same size
  156. void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
  157. {
  158. //-----------------------
  159. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  160. //-----------------------
  161. algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
  162. }
  163. //!Multiple element allocation, different size
  164. void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
  165. {
  166. //-----------------------
  167. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  168. //-----------------------
  169. algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
  170. }
  171. //!Multiple element allocation, different size
  172. void deallocate_many(multiallocation_chain &chain);
  173. #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
  174. //!Deallocates previously allocated bytes
  175. void deallocate (void *addr);
  176. //!Returns the size of the memory segment
  177. size_type get_size() const;
  178. //!Returns the number of free bytes of the segment
  179. size_type get_free_memory() const;
  180. //!Initializes to zero all the memory that's not in use.
  181. //!This function is normally used for security reasons.
  182. void zero_free_memory();
  183. //!Increases managed memory by
  184. //!extra_size bytes
  185. void grow(size_type extra_size);
  186. //!Decreases managed memory as much as possible
  187. void shrink_to_fit();
  188. //!Returns true if all allocated memory has been deallocated
  189. bool all_memory_deallocated();
  190. //!Makes an internal sanity check
  191. //!and returns true if success
  192. bool check_sanity();
  193. template<class T>
  194. T * allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
  195. size_type &prefer_in_recvd_out_size, T *&reuse);
  196. void * raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_object,
  197. size_type &prefer_in_recvd_out_size,
  198. void *&reuse_ptr, size_type sizeof_object = 1);
  199. //!Returns the size of the previously allocated buffer pointed to by ptr
  200. size_type size(const void *ptr) const;
  201. //!Allocates aligned bytes, returns 0 if there is no more memory.
  202. //!Alignment must be a power of 2
  203. void* allocate_aligned (size_type nbytes, size_type alignment);
  204. #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
  205. private:
  206. static size_type priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes);
  207. block_ctrl *priv_first_block();
  208. block_ctrl *priv_end_block();
  209. void* priv_allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
  210. size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object);
  211. //!Real allocation algorithm with min allocation option
  212. void * priv_allocate( boost::interprocess::allocation_type command
  213. , size_type limit_size, size_type &prefer_in_recvd_out_size
  214. , void *&reuse_ptr, size_type backwards_multiple = 1);
  215. //!Obtains the block control structure of the user buffer
  216. static block_ctrl *priv_get_block(const void *ptr);
  217. //!Obtains the pointer returned to the user from the block control
  218. static void *priv_get_user_buffer(const block_ctrl *block);
  219. //!Returns the number of total units that a user buffer
  220. //!of "userbytes" bytes really occupies (including header)
  221. static size_type priv_get_total_units(size_type userbytes);
  222. //!Real expand function implementation
  223. bool priv_expand(void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size);
  224. //!Real expand to both sides implementation
  225. void* priv_expand_both_sides(boost::interprocess::allocation_type command
  226. ,size_type min_size
  227. ,size_type &prefer_in_recvd_out_size
  228. ,void *reuse_ptr
  229. ,bool only_preferred_backwards
  230. ,size_type backwards_multiple);
  231. //!Returns true if the previous block is allocated
  232. bool priv_is_prev_allocated(block_ctrl *ptr);
  233. //!Get a pointer of the "end" block from the first block of the segment
  234. static block_ctrl * priv_end_block(block_ctrl *first_segment_block);
  235. //!Get a pointer of the "first" block from the end block of the segment
  236. static block_ctrl * priv_first_block(block_ctrl *end_segment_block);
  237. //!Get a pointer to the previous block (previous block must be free)
  238. static block_ctrl * priv_prev_block(block_ctrl *ptr);
  239. //!Get a pointer to the next block
  240. static block_ctrl * priv_next_block(block_ctrl *ptr);
  241. //!Returns true if this block is allocated (not free)
  242. bool priv_is_allocated_block(block_ctrl *ptr);
  243. //!Marks the block as allocated
  244. void priv_mark_as_allocated_block(block_ctrl *ptr);
  245. //!Marks the block as allocated
  246. void priv_mark_new_allocated_block(block_ctrl *ptr)
  247. { return priv_mark_as_allocated_block(ptr); }
  248. //!Marks the block as free
  249. void priv_mark_as_free_block(block_ctrl *ptr);
  250. //!Checks if block has enough memory and splits/unlinks the block
  251. //!returning the address to the users
  252. void* priv_check_and_allocate(size_type units
  253. ,block_ctrl* block
  254. ,size_type &received_size);
  255. //!Real deallocation algorithm
  256. void priv_deallocate(void *addr);
  257. //!Makes a new memory portion available for allocation
  258. void priv_add_segment(void *addr, size_type size);
  259. public:
  260. static const size_type Alignment = !MemAlignment
  261. ? size_type(::boost::container::dtl::alignment_of
  262. < ::boost::container::dtl::max_align_t>::value)
  263. : size_type(MemAlignment)
  264. ;
  265. private:
  266. //Due to embedded bits in size, Alignment must be at least 4
  267. BOOST_STATIC_ASSERT((Alignment >= 4));
  268. //Due to rbtree size optimizations, Alignment must have at least pointer alignment
  269. BOOST_STATIC_ASSERT((Alignment >= ::boost::container::dtl::alignment_of<void_pointer>::value));
  270. static const size_type AlignmentMask = (Alignment - 1);
  271. static const size_type BlockCtrlBytes = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
  272. static const size_type BlockCtrlUnits = BlockCtrlBytes/Alignment;
  273. static const size_type AllocatedCtrlBytes = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
  274. static const size_type AllocatedCtrlUnits = AllocatedCtrlBytes/Alignment;
  275. static const size_type EndCtrlBlockBytes = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
  276. static const size_type EndCtrlBlockUnits = EndCtrlBlockBytes/Alignment;
  277. static const size_type MinBlockUnits = BlockCtrlUnits;
  278. static const size_type UsableByPreviousChunk = sizeof(size_type);
  279. //Make sure the maximum alignment is power of two
  280. BOOST_STATIC_ASSERT((0 == (Alignment & (Alignment - size_type(1u)))));
  281. #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
  282. public:
  283. static const size_type PayloadPerAllocation = AllocatedCtrlBytes - UsableByPreviousChunk;
  284. };
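// Illustrative usage sketch (not part of the original header): rbtree_best_fit is the
// default memory algorithm of the managed memory front-ends, so it is usually consumed
// indirectly. A minimal example, assuming the standard shared memory front-end:
//
//   #include <boost/interprocess/managed_shared_memory.hpp>
//
//   int main()
//   {
//      using namespace boost::interprocess;
//      shared_memory_object::remove("MySegment");
//      //managed_shared_memory uses rbtree_best_fit<mutex_family> by default
//      managed_shared_memory segment(create_only, "MySegment", 65536);
//      void *p = segment.allocate(100);   //forwards to rbtree_best_fit::allocate
//      segment.deallocate(p);             //forwards to rbtree_best_fit::deallocate
//      shared_memory_object::remove("MySegment");
//      return 0;
//   }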
  285. #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
  286. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  287. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  288. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
  289. ::priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes)
  290. {
  291. size_type uint_this = (std::size_t)this_ptr;
  292. size_type main_hdr_end = uint_this + sizeof(rbtree_best_fit) + extra_hdr_bytes;
  293. size_type aligned_main_hdr_end = ipcdetail::get_rounded_size(main_hdr_end, Alignment);
  294. size_type block1_off = aligned_main_hdr_end - uint_this;
  295. algo_impl_t::assert_alignment(aligned_main_hdr_end);
  296. algo_impl_t::assert_alignment(uint_this + block1_off);
  297. return block1_off;
  298. }
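// Worked example (illustrative numbers only): if this_ptr == 0x1000, sizeof(rbtree_best_fit)
// + extra_hdr_bytes == 0x58 and Alignment == 16, then main_hdr_end == 0x1058, which rounds
// up to aligned_main_hdr_end == 0x1060, so the first block starts 0x60 bytes after "this".
// The real sizeof(rbtree_best_fit) depends on the mutex and pointer types, hence the
// hypothetical figures.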
  299. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  300. void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  301. priv_add_segment(void *addr, size_type segment_size)
  302. {
  303. //Check alignment
  304. algo_impl_t::check_alignment(addr);
  305. //Check size
  306. BOOST_ASSERT(segment_size >= (BlockCtrlBytes + EndCtrlBlockBytes));
  307. //Initialize the first big block and the "end" node
  308. block_ctrl *first_big_block = ::new(addr, boost_container_new_t()) block_ctrl;
  309. first_big_block->m_size = (segment_size/Alignment - EndCtrlBlockUnits) & block_ctrl::size_mask;
  310. BOOST_ASSERT(first_big_block->m_size >= BlockCtrlUnits);
  311. //The "end" node is just a node of size 0 with the "end" bit set
  312. SizeHolder *end_block =
  313. ::new(reinterpret_cast<char*>(addr) + first_big_block->m_size*Alignment, boost_container_new_t()) SizeHolder;
  314. //This will overwrite the prev part of the "end" node
  315. priv_mark_as_free_block (first_big_block);
  316. #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
  317. first_big_block->m_prev_size = end_block->m_size =
  318. size_type(reinterpret_cast<char*>(first_big_block) - reinterpret_cast<char*>(end_block))/Alignment & block_ctrl::size_mask;
  319. #else
  320. first_big_block->m_prev_size = end_block->m_size =
  321. size_type(reinterpret_cast<char*>(end_block) - reinterpret_cast<char*>(first_big_block))/Alignment & block_ctrl::size_mask;
  322. #endif
  323. end_block->m_allocated = 1;
  324. first_big_block->m_prev_allocated = 1;
  325. BOOST_ASSERT(priv_next_block(first_big_block) == end_block);
  326. BOOST_ASSERT(priv_prev_block((block_ctrl*)end_block) == first_big_block);
  327. BOOST_ASSERT(priv_first_block() == first_big_block);
  328. BOOST_ASSERT(priv_end_block() == end_block);
  329. //Some checks to validate the algorithm, since it makes some assumptions
  330. //to optimize the space wasted in bookkeeping:
  331. //Check that the size header is placed before the rbtree hook
  332. BOOST_ASSERT(static_cast<void*>(static_cast<SizeHolder*>(first_big_block))
  333. < static_cast<void*>(static_cast<TreeHook*>(first_big_block)));
  334. //Insert it in the intrusive containers
  335. m_header.m_imultiset.insert(*first_big_block);
  336. }
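// Resulting segment layout after priv_add_segment (schematic, sizes in Alignment units):
//
//   [ rbtree_best_fit header + extra_hdr_bytes (not managed)                        ]
//   [ first_big_block: size = segment_size/Alignment - EndCtrlBlockUnits, free      ]
//   [ end_block (SizeHolder only): m_size = distance to first block, m_allocated=1  ]
//
// The first block's m_prev_size and the end block's m_size both store the distance
// between the two borders, which is what priv_end_block(first_segment_block) and
// priv_first_block(end_segment_block) rely on to jump from one end of the segment
// to the other.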
  337. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  338. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  339. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
  340. ::priv_first_block()
  341. {
  342. const size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  343. return move_detail::force_ptr<block_ctrl*>(reinterpret_cast<char*>(this) + block1_off);
  344. }
  345. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  346. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  347. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
  348. ::priv_end_block()
  349. {
  350. const size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  351. const size_type original_first_block_size = (m_header.m_size - block1_off)/Alignment - EndCtrlBlockUnits;
  352. block_ctrl *end_block = move_detail::force_ptr<block_ctrl*>
  353. (reinterpret_cast<char*>(this) + block1_off + original_first_block_size*Alignment);
  354. return end_block;
  355. }
  356. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  357. inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  358. rbtree_best_fit(size_type segment_size, size_type extra_hdr_bytes)
  359. {
  360. //Initialize the header
  361. m_header.m_allocated = 0;
  362. m_header.m_size = segment_size;
  363. m_header.m_extra_hdr_bytes = extra_hdr_bytes;
  364. //Now calculate the offset of the first big block that will
  365. //cover the whole segment
  366. BOOST_ASSERT(get_min_size(extra_hdr_bytes) <= segment_size);
  367. size_type block1_off = priv_first_block_offset_from_this(this, extra_hdr_bytes);
  368. priv_add_segment(reinterpret_cast<char*>(this) + block1_off, segment_size - block1_off);
  369. }
  370. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  371. inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::~rbtree_best_fit()
  372. {
  373. //There is a memory leak!
  374. // BOOST_ASSERT(m_header.m_allocated == 0);
  375. // BOOST_ASSERT(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
  376. }
  377. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  378. void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::grow(size_type extra_size)
  379. {
  380. //Get the address of the first block
  381. block_ctrl *first_block = priv_first_block();
  382. block_ctrl *old_end_block = priv_end_block();
  383. size_type old_border_offset = (size_type)(reinterpret_cast<char*>(old_end_block) -
  384. reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;
  385. //Update managed buffer's size
  386. m_header.m_size += extra_size;
  387. //We need at least MinBlockUnits blocks to create a new block
  388. if((m_header.m_size - old_border_offset) < MinBlockUnits){
  389. return;
  390. }
  391. //Now create a new block between the old end and the new end
  392. size_type align_offset = (m_header.m_size - old_border_offset)/Alignment;
  393. block_ctrl *new_end_block = move_detail::force_ptr<block_ctrl*>
  394. (reinterpret_cast<char*>(old_end_block) + align_offset*Alignment);
  395. //the last and first block are special:
  396. //new_end_block->m_size & first_block->m_prev_size store the absolute distance
  397. //between them
  398. new_end_block->m_allocated = 1;
  399. #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
  400. new_end_block->m_size = size_type(reinterpret_cast<char*>(first_block) -
  401. reinterpret_cast<char*>(new_end_block))/Alignment & block_ctrl::size_mask;
  402. #else
  403. new_end_block->m_size = size_type(reinterpret_cast<char*>(new_end_block) -
  404. reinterpret_cast<char*>(first_block))/Alignment & block_ctrl::size_mask;
  405. #endif
  406. first_block->m_prev_size = new_end_block->m_size;
  407. first_block->m_prev_allocated = 1;
  408. BOOST_ASSERT(new_end_block == priv_end_block());
  409. //The old end block is the new block
  410. block_ctrl *new_block = old_end_block;
  411. new_block->m_size = size_type(reinterpret_cast<char*>(new_end_block) -
  412. reinterpret_cast<char*>(new_block))/Alignment & block_ctrl::size_mask;
  413. BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
  414. priv_mark_as_allocated_block(new_block);
  415. BOOST_ASSERT(priv_next_block(new_block) == new_end_block);
  416. m_header.m_allocated += (size_type)new_block->m_size*Alignment;
  417. //Now deallocate the newly created block
  418. this->priv_deallocate(priv_get_user_buffer(new_block));
  419. }
  420. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  421. void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::shrink_to_fit()
  422. {
  423. //Get the address of the first block
  424. block_ctrl *first_block = priv_first_block();
  425. algo_impl_t::assert_alignment(first_block);
  426. //block_ctrl *old_end_block = priv_end_block(first_block);
  427. block_ctrl *old_end_block = priv_end_block();
  428. algo_impl_t::assert_alignment(old_end_block);
  429. size_type old_end_block_size = old_end_block->m_size;
  430. void *unique_buffer = 0;
  431. block_ctrl *last_block;
  432. //Check if no memory is allocated between the first and last block
  433. if(priv_next_block(first_block) == old_end_block){
  434. //If so check if we can allocate memory
  435. size_type ignore_recvd = 0;
  436. void *ignore_reuse = 0;
  437. unique_buffer = priv_allocate(boost::interprocess::allocate_new, 0, ignore_recvd, ignore_reuse);
  438. //If not, return, we can't shrink
  439. if(!unique_buffer)
  440. return;
  441. //If we can, mark the position just after the new allocation as the new end
  442. algo_impl_t::assert_alignment(unique_buffer);
  443. block_ctrl *unique_block = priv_get_block(unique_buffer);
  444. BOOST_ASSERT(priv_is_allocated_block(unique_block));
  445. algo_impl_t::assert_alignment(unique_block);
  446. last_block = priv_next_block(unique_block);
  447. BOOST_ASSERT(!priv_is_allocated_block(last_block));
  448. algo_impl_t::assert_alignment(last_block);
  449. }
  450. else{
  451. //If memory is allocated, check if the last block is allocated
  452. if(priv_is_prev_allocated(old_end_block))
  453. return;
  454. //If not, mark last block after the free block
  455. last_block = priv_prev_block(old_end_block);
  456. }
  457. size_type last_block_size = last_block->m_size;
  458. //Erase the block from the free tree, since we are going to modify it
  459. m_header.m_imultiset.erase(Imultiset::s_iterator_to(*last_block));
  460. size_type shrunk_border_offset = (size_type)(reinterpret_cast<char*>(last_block) -
  461. reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;
  462. block_ctrl *new_end_block = last_block;
  463. algo_impl_t::assert_alignment(new_end_block);
  464. //Write new end block attributes
  465. #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
  466. new_end_block->m_size =
  467. size_type(reinterpret_cast<char*>(first_block) - reinterpret_cast<char*>(new_end_block))/Alignment & block_ctrl::size_mask;
  468. first_block->m_prev_size = new_end_block->m_size;
  469. #else
  470. new_end_block->m_size =
  471. size_type(reinterpret_cast<char*>(new_end_block) - reinterpret_cast<char*>(first_block))/Alignment & block_ctrl::size_mask;
  472. first_block->m_prev_size = new_end_block->m_size;
  473. #endif
  474. new_end_block->m_allocated = 1;
  475. (void)last_block_size;
  476. (void)old_end_block_size;
  477. BOOST_ASSERT(new_end_block->m_size == (old_end_block_size - last_block_size));
  478. //Update managed buffer's size
  479. m_header.m_size = shrunk_border_offset & block_ctrl::size_mask;
  480. BOOST_ASSERT(priv_end_block() == new_end_block);
  481. if(unique_buffer)
  482. priv_deallocate(unique_buffer);
  483. }
  484. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  485. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  486. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_size() const
  487. { return m_header.m_size; }
  488. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  489. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  490. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_free_memory() const
  491. {
  492. return m_header.m_size - m_header.m_allocated -
  493. priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  494. }
  495. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  496. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  497. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  498. get_min_size (size_type extra_hdr_bytes)
  499. {
  500. return (algo_impl_t::ceil_units(sizeof(rbtree_best_fit)) +
  501. algo_impl_t::ceil_units(extra_hdr_bytes) +
  502. MinBlockUnits + EndCtrlBlockUnits)*Alignment;
  503. }
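// Worked example (illustrative, assuming Alignment == 16, sizeof(rbtree_best_fit) rounding
// up to 5 units and extra_hdr_bytes == 0): with EndCtrlBlockUnits == 1 and MinBlockUnits ==
// BlockCtrlUnits, the minimum segment would be (5 + 0 + BlockCtrlUnits + 1) * 16 bytes.
// The exact figure depends on the mutex and pointer types, so callers should always query
// get_min_size() instead of hardcoding a value.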
  504. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  505. inline bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  506. all_memory_deallocated()
  507. {
  508. //-----------------------
  509. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  510. //-----------------------
  511. size_type block1_off =
  512. priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  513. return m_header.m_allocated == 0 &&
  514. m_header.m_imultiset.begin() != m_header.m_imultiset.end() &&
  515. (++m_header.m_imultiset.begin()) == m_header.m_imultiset.end()
  516. && m_header.m_imultiset.begin()->m_size ==
  517. (m_header.m_size - block1_off - EndCtrlBlockBytes)/Alignment;
  518. }
  519. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  520. bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  521. check_sanity()
  522. {
  523. //-----------------------
  524. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  525. //-----------------------
  526. imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());
  527. size_type free_memory = 0;
  528. //Iterate through all free blocks obtaining their size
  529. for(; ib != ie; ++ib){
  530. free_memory += (size_type)ib->m_size*Alignment;
  531. if(!algo_impl_t::check_alignment(&*ib))
  532. return false;
  533. }
  534. //Check allocated bytes are less than size
  535. if(m_header.m_allocated > m_header.m_size){
  536. return false;
  537. }
  538. size_type block1_off =
  539. priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
  540. //Check free bytes are less than size
  541. if(free_memory > (m_header.m_size - block1_off)){
  542. return false;
  543. }
  544. return true;
  545. }
  546. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  547. inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  548. allocate(size_type nbytes)
  549. {
  550. //-----------------------
  551. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  552. //-----------------------
  553. size_type ignore_recvd = nbytes;
  554. void *ignore_reuse = 0;
  555. return priv_allocate(boost::interprocess::allocate_new, nbytes, ignore_recvd, ignore_reuse);
  556. }
  557. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  558. inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  559. allocate_aligned(size_type nbytes, size_type alignment)
  560. {
  561. //-----------------------
  562. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  563. //-----------------------
  564. return algo_impl_t::allocate_aligned(this, nbytes, alignment);
  565. }
  566. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  567. template<class T>
  568. inline T* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  569. allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
  570. size_type &prefer_in_recvd_out_size, T *&reuse)
  571. {
  572. void* raw_reuse = reuse;
  573. void* const ret = priv_allocation_command(command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
  574. reuse = static_cast<T*>(raw_reuse);
  575. BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::dtl::alignment_of<T>::value));
  576. return static_cast<T*>(ret);
  577. }
  578. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  579. inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  580. raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_objects,
  581. size_type &prefer_in_recvd_out_objects, void *&reuse_ptr, size_type sizeof_object)
  582. {
  583. size_type const preferred_objects = prefer_in_recvd_out_objects;
  584. if(!sizeof_object)
  585. return reuse_ptr = 0, static_cast<void*>(0);
  586. if(command & boost::interprocess::try_shrink_in_place){
  587. if(!reuse_ptr) return static_cast<void*>(0);
  588. const bool success = algo_impl_t::try_shrink
  589. ( this, reuse_ptr, limit_objects*sizeof_object
  590. , prefer_in_recvd_out_objects = preferred_objects*sizeof_object);
  591. prefer_in_recvd_out_objects /= sizeof_object;
  592. return success ? reuse_ptr : 0;
  593. }
  594. else{
  595. return priv_allocation_command
  596. (command, limit_objects, prefer_in_recvd_out_objects, reuse_ptr, sizeof_object);
  597. }
  598. }
  599. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  600. inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  601. priv_allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
  602. size_type &prefer_in_recvd_out_size,
  603. void *&reuse_ptr, size_type sizeof_object)
  604. {
  605. void* ret;
  606. size_type const preferred_size = prefer_in_recvd_out_size;
  607. size_type const max_count = m_header.m_size/sizeof_object;
  608. if(limit_size > max_count || preferred_size > max_count){
  609. return reuse_ptr = 0, static_cast<void*>(0);
  610. }
  611. size_type l_size = limit_size*sizeof_object;
  612. size_type p_size = preferred_size*sizeof_object;
  613. size_type r_size;
  614. {
  615. //-----------------------
  616. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  617. //-----------------------
  618. ret = priv_allocate(command, l_size, r_size = p_size, reuse_ptr, sizeof_object);
  619. }
  620. prefer_in_recvd_out_size = r_size/sizeof_object;
  621. return ret;
  622. }
  623. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  624. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  625. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  626. size(const void *ptr) const
  627. {
  628. //We need no synchronization since this block's size is not going
  629. //to be modified by anyone else
  630. //Obtain the real size of the block
  631. return ((size_type)priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
  632. }
  633. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  634. inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::zero_free_memory()
  635. {
  636. //-----------------------
  637. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  638. //-----------------------
  639. imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());
  640. //Iterate through all free blocks zeroing the memory reserved for the user
  641. while(ib != ie){
  642. //Just clear the memory part reserved for the user
  643. volatile char *ptr = reinterpret_cast<char*>(&*ib) + BlockCtrlBytes;
  644. size_type s = (size_type)ib->m_size*Alignment - BlockCtrlBytes;
  645. while(s--){
  646. *ptr++ = 0;
  647. }
  648. //This surprisingly is optimized out by Visual C++ 7.1 in release mode!
  649. //std::memset( reinterpret_cast<char*>(&*ib) + BlockCtrlBytes
  650. // , 0
  651. // , ib->m_size*Alignment - BlockCtrlBytes);
  652. ++ib;
  653. }
  654. }
  655. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  656. void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  657. priv_expand_both_sides(boost::interprocess::allocation_type command
  658. ,size_type min_size
  659. ,size_type &prefer_in_recvd_out_size
  660. ,void *reuse_ptr
  661. ,bool only_preferred_backwards
  662. ,size_type backwards_multiple)
  663. {
  664. size_type const preferred_size = prefer_in_recvd_out_size;
  665. algo_impl_t::assert_alignment(reuse_ptr);
  666. if(command & boost::interprocess::expand_fwd){
  667. if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
  668. return reuse_ptr;
  669. }
  670. else{
  671. prefer_in_recvd_out_size = this->size(reuse_ptr);
  672. if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
  673. return reuse_ptr;
  674. }
  675. if(backwards_multiple){
  676. BOOST_ASSERT(0 == (min_size % backwards_multiple));
  677. BOOST_ASSERT(0 == (preferred_size % backwards_multiple));
  678. }
  679. if(command & boost::interprocess::expand_bwd){
  680. //Obtain the real size of the block
  681. block_ctrl *reuse = priv_get_block(reuse_ptr);
  682. //Sanity check
  683. algo_impl_t::assert_alignment(reuse);
  684. block_ctrl *prev_block;
  685. //If the previous block is not free, there is nothing to do
  686. if(priv_is_prev_allocated(reuse)){
  687. return 0;
  688. }
  689. prev_block = priv_prev_block(reuse);
  690. BOOST_ASSERT(!priv_is_allocated_block(prev_block));
  691. //Some sanity checks
  692. BOOST_ASSERT(prev_block->m_size == reuse->m_prev_size);
  693. algo_impl_t::assert_alignment(prev_block);
  694. size_type needs_backwards_aligned;
  695. size_type lcm;
  696. if(!algo_impl_t::calculate_lcm_and_needs_backwards_lcmed
  697. ( backwards_multiple
  698. , prefer_in_recvd_out_size
  699. , only_preferred_backwards ? preferred_size : min_size
  700. , lcm, needs_backwards_aligned)){
  701. return 0;
  702. }
  703. //Check if previous block has enough size
  704. if(size_type(prev_block->m_size*Alignment) >= needs_backwards_aligned){
  705. //Now take all next space. This will succeed
  706. if(command & boost::interprocess::expand_fwd){
  707. size_type received_size2;
  708. if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, received_size2 = prefer_in_recvd_out_size)){
  709. BOOST_ASSERT(0);
  710. }
  711. BOOST_ASSERT(prefer_in_recvd_out_size == received_size2);
  712. }
  713. //We need a minimum size to split the previous one
  714. if(prev_block->m_size >= (needs_backwards_aligned/Alignment + BlockCtrlUnits)){
  715. block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
  716. (reinterpret_cast<char*>(reuse) - needs_backwards_aligned);
  717. //Free old previous buffer
  718. new_block->m_size =
  719. (AllocatedCtrlUnits + (needs_backwards_aligned + (prefer_in_recvd_out_size - UsableByPreviousChunk))/Alignment) & block_ctrl::size_mask;
  720. BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
  721. priv_mark_as_allocated_block(new_block);
  722. prev_block->m_size = size_type(reinterpret_cast<char*>(new_block) -
  723. reinterpret_cast<char*>(prev_block))/Alignment & block_ctrl::size_mask;
  724. BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
  725. priv_mark_as_free_block(prev_block);
  726. //Update the old previous block in the free blocks tree
  727. //If the new size fulfills tree invariants do nothing,
  728. //otherwise erase() + insert()
  729. {
  730. imultiset_iterator prev_block_it(Imultiset::s_iterator_to(*prev_block));
  731. imultiset_iterator was_smaller_it(prev_block_it);
  732. if(prev_block_it != m_header.m_imultiset.begin() &&
  733. (--(was_smaller_it = prev_block_it))->m_size > prev_block->m_size){
  734. m_header.m_imultiset.erase(prev_block_it);
  735. m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *prev_block);
  736. }
  737. }
  738. prefer_in_recvd_out_size = needs_backwards_aligned + prefer_in_recvd_out_size;
  739. m_header.m_allocated += needs_backwards_aligned;
  740. //Check alignment
  741. algo_impl_t::assert_alignment(new_block);
  742. //If the backwards expansion has remaining bytes in the
  743. //first bytes, fill them with a pattern
  744. void *p = priv_get_user_buffer(new_block);
  745. void *user_ptr = reinterpret_cast<char*>(p);
  746. BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
  747. algo_impl_t::assert_alignment(user_ptr);
  748. return user_ptr;
  749. }
  750. //Check if there is no place to create a new block and
  751. //the whole new block is multiple of the backwards expansion multiple
  752. else if(prev_block->m_size >= needs_backwards_aligned/Alignment &&
  753. 0 == ((prev_block->m_size*Alignment) % lcm)) {
  754. //Erase old previous block, since we will change it
  755. m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));
  756. //Just merge the whole previous block
  757. //prev_block->m_size*Alignment is multiple of lcm (and backwards_multiple)
  758. prefer_in_recvd_out_size = prefer_in_recvd_out_size + (size_type)prev_block->m_size*Alignment;
  759. m_header.m_allocated += (size_type)prev_block->m_size*Alignment;
  760. //Now update sizes
  761. prev_block->m_size = size_type(prev_block->m_size + reuse->m_size) & block_ctrl::size_mask;
  762. BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
  763. priv_mark_as_allocated_block(prev_block);
  764. //If the backwards expansion has remaining bytes in the
  765. //first bytes, fill them with a pattern
  766. void *user_ptr = priv_get_user_buffer(prev_block);
  767. BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
  768. algo_impl_t::assert_alignment(user_ptr);
  769. return user_ptr;
  770. }
  771. else{
  772. //Alignment issues
  773. }
  774. }
  775. }
  776. return 0;
  777. }
  778. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  779. inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  780. deallocate_many(typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_chain &chain)
  781. {
  782. //-----------------------
  783. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  784. //-----------------------
  785. algo_impl_t::deallocate_many(this, chain);
  786. }
  787. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  788. void * rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  789. priv_allocate(boost::interprocess::allocation_type command
  790. ,size_type limit_size
  791. ,size_type &prefer_in_recvd_out_size
  792. ,void *&reuse_ptr
  793. ,size_type backwards_multiple)
  794. {
  795. size_type const preferred_size = prefer_in_recvd_out_size;
  796. if(command & boost::interprocess::shrink_in_place){
  797. if(!reuse_ptr) return static_cast<void*>(0);
  798. bool success =
  799. algo_impl_t::shrink(this, reuse_ptr, limit_size, prefer_in_recvd_out_size = preferred_size);
  800. return success ? reuse_ptr : 0;
  801. }
  802. prefer_in_recvd_out_size = 0;
  803. if(limit_size > preferred_size)
  804. return reuse_ptr = 0, static_cast<void*>(0);
  805. //Number of units to request (including block_ctrl header)
  806. size_type preferred_units = priv_get_total_units(preferred_size);
  807. //Minimum number of units to request (including block_ctrl header)
  808. size_type limit_units = priv_get_total_units(limit_size);
  809. //Expand in place
  810. prefer_in_recvd_out_size = preferred_size;
  811. if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
  812. void *ret = priv_expand_both_sides
  813. (command, limit_size, prefer_in_recvd_out_size, reuse_ptr, true, backwards_multiple);
  814. if(ret)
  815. return ret;
  816. }
  817. if(command & boost::interprocess::allocate_new){
  818. size_block_ctrl_compare comp;
  819. imultiset_iterator it(m_header.m_imultiset.lower_bound(preferred_units, comp));
  820. if(it != m_header.m_imultiset.end()){
  821. return reuse_ptr = 0, this->priv_check_and_allocate
  822. (preferred_units, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
  823. }
  824. if(it != m_header.m_imultiset.begin()&&
  825. (--it)->m_size >= limit_units){
  826. return reuse_ptr = 0, this->priv_check_and_allocate
  827. (it->m_size, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
  828. }
  829. }
  830. //Now try to expand both sides with min size
  831. if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
  832. return priv_expand_both_sides
  833. (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false, backwards_multiple);
  834. }
  835. return reuse_ptr = 0, static_cast<void*>(0);
  836. }
  837. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  838. inline
  839. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  840. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_get_block(const void *ptr)
  841. {
  842. return const_cast<block_ctrl*>
  843. (move_detail::force_ptr<const block_ctrl*>
  844. (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
  845. }
  846. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  847. inline
  848. void *rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  849. priv_get_user_buffer(const typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
  850. { return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes); }
  851. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  852. inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
  853. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  854. priv_get_total_units(size_type userbytes)
  855. {
  856. if(userbytes < UsableByPreviousChunk)
  857. userbytes = UsableByPreviousChunk;
  858. size_type units = ipcdetail::get_rounded_size(userbytes - UsableByPreviousChunk, Alignment)/Alignment + AllocatedCtrlUnits;
  859. if(units < BlockCtrlUnits) units = BlockCtrlUnits;
  860. return units;
  861. }
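// Worked example (illustrative, assuming a 64-bit size_type, Alignment == 16 and therefore
// AllocatedCtrlUnits == 1 and UsableByPreviousChunk == 8): a request of 100 user bytes
// yields get_rounded_size(100 - 8, 16)/16 + 1 == 96/16 + 1 == 7 units, i.e. 112 bytes of
// segment space. The inverse mapping is size(): for that block,
// (7 - 1)*16 + 8 == 104 usable bytes, which indeed covers the 100 requested.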
  862. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  863. bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
  864. priv_expand (void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size)
  865. {
  866. size_type const preferred_size = prefer_in_recvd_out_size;
  867. //Obtain the real size of the block
  868. block_ctrl *block = priv_get_block(ptr);
  869. size_type old_block_units = block->m_size;
  870. //The block must be marked as allocated
  871. BOOST_ASSERT(priv_is_allocated_block(block));
  872. //Put this to a safe value
  873. prefer_in_recvd_out_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
  874. if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
  875. return true;
  876. //Now translate it to Alignment units
  877. const size_type min_user_units = algo_impl_t::ceil_units(min_size - UsableByPreviousChunk);
  878. const size_type preferred_user_units = algo_impl_t::ceil_units(preferred_size - UsableByPreviousChunk);
  879. //Some parameter checks
  880. BOOST_ASSERT(min_user_units <= preferred_user_units);
  881. block_ctrl *next_block;
  882. if(priv_is_allocated_block(next_block = priv_next_block(block))){
  883. return prefer_in_recvd_out_size >= min_size;
  884. }
  885. algo_impl_t::assert_alignment(next_block);
  886. //Is "block" + "next_block" big enough?
  887. const size_type merged_units = old_block_units + (size_type)next_block->m_size;
  888. //Now get the expansion size
  889. const size_type merged_user_units = merged_units - AllocatedCtrlUnits;
  890. if(merged_user_units < min_user_units){
  891. prefer_in_recvd_out_size = merged_units*Alignment - UsableByPreviousChunk;
  892. return false;
  893. }
  894. //Now get the maximum size the user can allocate
  895. size_type intended_user_units = (merged_user_units < preferred_user_units) ?
  896. merged_user_units : preferred_user_units;
  897. //These are total units of the merged block (supposing the next block can be split)
  898. const size_type intended_units = AllocatedCtrlUnits + intended_user_units;
  899. //Check if we can split the next one in two parts
  900. if((merged_units - intended_units) >= BlockCtrlUnits){
  901. //This block is bigger than needed, split it in
  902. //two blocks, the first one will be merged and
  903. //the second's size will be the remaining space
  904. BOOST_ASSERT(next_block->m_size == priv_next_block(next_block)->m_prev_size);
  905. const size_type rem_units = merged_units - intended_units;
  906. //Check if we need to update the old next block in the free blocks tree
  907. //If the new size fulfills tree invariants, we just need to replace the node
  908. //(the block start has been displaced), otherwise erase() + insert().
  909. //
  910. //This fixup must be done in two parts, because the new next block might
  911. //overwrite the tree hook of the old next block. So we first erase the
  912. //old if needed and we'll insert the new one after creating the new next
  913. imultiset_iterator old_next_block_it(Imultiset::s_iterator_to(*next_block));
  914. m_header.m_imultiset.erase(old_next_block_it);
  915. //This is the remaining block
  916. block_ctrl *rem_block =
  917. ::new(reinterpret_cast<char*>(block) + intended_units*Alignment, boost_container_new_t()) block_ctrl;
  918. rem_block->m_size = rem_units & block_ctrl::size_mask;
  919. algo_impl_t::assert_alignment(rem_block);
  920. BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
  921. priv_mark_as_free_block(rem_block);
  922. m_header.m_imultiset.insert(*rem_block);
  923. //Write the new length
  924. block->m_size = (intended_user_units + AllocatedCtrlUnits) & block_ctrl::size_mask;
  925. BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
  926. m_header.m_allocated += (intended_units - old_block_units)*Alignment;
  927. }
  928. //There is no free space to create a new node: just merge both blocks
  929. else{
  930. //Now we have to update the data in the tree
  931. m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));
  932. //Write the new length
  933. block->m_size = merged_units & block_ctrl::size_mask;
  934. BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
  935. m_header.m_allocated += (merged_units - old_block_units)*Alignment;
  936. }
  937. priv_mark_as_allocated_block(block);
  938. prefer_in_recvd_out_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
  939. return true;
  940. }
  941. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  942. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  943. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_prev_block
  944. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
  945. {
  946. BOOST_ASSERT(!ptr->m_prev_allocated);
  947. return move_detail::force_ptr<block_ctrl*>
  948. (reinterpret_cast<char*>(ptr) - ptr->m_prev_size*Alignment);
  949. }
  950. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  951. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  952. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_end_block
  953. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *first_segment_block)
  954. {
  955. //The first block's logic is different from the rest of the blocks: it stores in
  956. //m_prev_size the absolute distance to the end block
  957. BOOST_ASSERT(first_segment_block->m_prev_allocated);
  958. block_ctrl *end_block = move_detail::force_ptr<block_ctrl*>
  959. (reinterpret_cast<char*>(first_segment_block) + first_segment_block->m_prev_size*Alignment);
  960. (void)end_block;
  961. BOOST_ASSERT(end_block->m_allocated == 1);
  962. BOOST_ASSERT(end_block->m_size == first_segment_block->m_prev_size);
  963. BOOST_ASSERT(end_block > first_segment_block);
  964. return end_block;
  965. }
  966. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  967. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  968. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_first_block
  969. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *end_segment_block)
  970. {
  971. //The end block's logic is different from the rest of the blocks: it stores in
  972. //m_size the absolute distance to the first block
  973. BOOST_ASSERT(end_segment_block->m_allocated);
  974. block_ctrl *first_block = move_detail::force_ptr<block_ctrl*>
  975. (reinterpret_cast<char*>(end_segment_block) - end_segment_block->m_size*Alignment);
  976. (void)first_block;
  977. BOOST_ASSERT(first_block->m_prev_allocated == 1);
  978. BOOST_ASSERT(first_block->m_prev_size == end_segment_block->m_size);
  979. BOOST_ASSERT(end_segment_block > first_block);
  980. return first_block;
  981. }
  982. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  983. typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
  984. rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_next_block
  985. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
  986. {
  987. return move_detail::force_ptr<block_ctrl*>
  988. (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
  989. }
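// Navigation summary (editorial note): blocks form an implicit doubly linked list using
// boundary tags. The next block is always reachable as ptr + m_size*Alignment; the previous
// block is only reachable while it is free, because priv_mark_as_free_block() copies the
// freed block's size into the following block's m_prev_size. For example:
//
//   block_ctrl *next = priv_next_block(blk);          //always valid
//   if(!priv_is_prev_allocated(blk)){
//      block_ctrl *prev = priv_prev_block(blk);       //valid only while prev is free
//   }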
  990. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  991. bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_allocated_block
  992. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
  993. {
  994. bool allocated = block->m_allocated != 0;
  995. #ifndef NDEBUG
  996. if(block != priv_end_block()){
  997. block_ctrl *next_block = move_detail::force_ptr<block_ctrl*>
  998. (reinterpret_cast<char*>(block) + block->m_size*Alignment);
  999. bool next_block_prev_allocated = next_block->m_prev_allocated != 0;
  1000. (void)next_block_prev_allocated;
  1001. BOOST_ASSERT(allocated == next_block_prev_allocated);
  1002. }
  1003. #endif
  1004. return allocated;
  1005. }
  1006. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  1007. bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_prev_allocated
  1008. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
  1009. {
  1010. if(block->m_prev_allocated){
  1011. return true;
  1012. }
  1013. else{
  1014. #ifndef NDEBUG
  1015. if(block != priv_first_block()){
  1016. block_ctrl *prev = priv_prev_block(block);
  1017. (void)prev;
  1018. BOOST_ASSERT(!prev->m_allocated);
  1019. BOOST_ASSERT(prev->m_size == block->m_prev_size);
  1020. }
  1021. #endif
  1022. return false;
  1023. }
  1024. }
  1025. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  1026. void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_allocated_block
  1027. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
  1028. {
  1029. block->m_allocated = 1;
  1030. move_detail::force_ptr<block_ctrl*>
  1031. (reinterpret_cast<char*>(block)+ block->m_size*Alignment)->m_prev_allocated = 1;
  1032. }
  1033. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  1034. void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_free_block
  1035. (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
  1036. {
  1037. block->m_allocated = 0;
  1038. block_ctrl *next_block = priv_next_block(block);
  1039. next_block->m_prev_allocated = 0;
  1040. next_block->m_prev_size = block->m_size;
  1041. }
  1042. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
  1043. void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_check_and_allocate
  1044. (size_type nunits
  1045. ,typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl* block
  1046. ,size_type &received_size)
  1047. {
  1048. size_type upper_nunits = nunits + BlockCtrlUnits;
  1049. imultiset_iterator it_old = Imultiset::s_iterator_to(*block);
  1050. algo_impl_t::assert_alignment(block);
  1051. if (block->m_size >= upper_nunits){
  1052. //This block is bigger than needed, split it in
  1053. //two blocks, the first's size will be "units" and
  1054. //the second's size "block->m_size-units"
  1055. size_type block_old_size = block->m_size;
  1056. block->m_size = nunits & block_ctrl::size_mask;
  1057. BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
  1058. //This is the remaining block
  1059. block_ctrl *rem_block =
  1060. ::new(reinterpret_cast<char*>(block) + Alignment*nunits, boost_container_new_t()) block_ctrl;
  1061. algo_impl_t::assert_alignment(rem_block);
  1062. rem_block->m_size = (block_old_size - nunits) & block_ctrl::size_mask;
  1063. BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
  1064. priv_mark_as_free_block(rem_block);
  1065. //Now we have to update the data in the tree.
  1066. //Use the position of the erased one as a hint
  1067. m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *rem_block);
  1068. }
  1069. else if (block->m_size >= nunits){
  1070. m_header.m_imultiset.erase(it_old);
  1071. }
  1072. else{
  1073. BOOST_ASSERT(0);
  1074. return 0;
  1075. }
  1076. //Only the SizeHolder part of block_ctrl is needed while the block is allocated,
  1077. //so the rest of the control memory is returned to the user to overwrite
  1078. m_header.m_allocated += (size_type)block->m_size*Alignment;
  1079. received_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
  1080. //Mark the block as allocated
  1081. priv_mark_as_allocated_block(block);
  1082. //Clear the memory occupied by the tree hook, since this won't be
  1083. //cleared with zero_free_memory
  1084. TreeHook *t = static_cast<TreeHook*>(block);
  1085. //Zero only the tree hook area, which starts at this offset inside block_ctrl
  1086. std::size_t tree_hook_offset_in_block = std::size_t((char*)t - (char*)block);
  1087. //volatile char *ptr =
  1088. char *ptr = reinterpret_cast<char*>(block)+tree_hook_offset_in_block;
  1089. const std::size_t s = BlockCtrlBytes - tree_hook_offset_in_block;
  1090. std::memset(ptr, 0, s);
  1091. this->priv_next_block(block)->m_prev_size = 0;
  1092. return priv_get_user_buffer(block);
  1093. }
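// Worked example for the split above (illustrative unit counts): suppose the best-fit search
// selected a 50-unit free block for a 7-unit request and BlockCtrlUnits == 3. Since
// 50 >= 7 + 3, the block is split: the first 7 units are returned to the caller and a new
// 43-unit free block is built right after it and reinserted into the multiset, using the
// erase position as an insertion hint to keep the tree update cheap. If instead the free
// block had, say, 8 units, no split is possible (8 < 7 + 3) and the whole 8-unit block is
// handed out, with received_size reporting the extra space.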
  1094. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  1095. void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::deallocate(void* addr)
  1096. {
  1097. if(!addr) return;
  1098. //-----------------------
  1099. boost::interprocess::scoped_lock<mutex_type> guard(m_header);
  1100. //-----------------------
  1101. return this->priv_deallocate(addr);
  1102. }
  1103. template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
  1104. void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_deallocate(void* addr)
  1105. {
  1106. if(!addr) return;
  1107. block_ctrl *block = priv_get_block(addr);
  1108. //The block must be marked as allocated
  1109. BOOST_ASSERT(priv_is_allocated_block(block));
  1110. //Check if alignment and block size are right
  1111. algo_impl_t::assert_alignment(addr);
  1112. size_type block_old_size = Alignment*(size_type)block->m_size;
  1113. BOOST_ASSERT(m_header.m_allocated >= block_old_size);
  1114. //Update used memory count
  1115. m_header.m_allocated -= block_old_size;
  1116. //The block to insert in the tree
  1117. block_ctrl *block_to_insert = block;
  1118. //Get the next block
  1119. block_ctrl *const next_block = priv_next_block(block);
  1120. const bool merge_with_prev = !priv_is_prev_allocated(block);
  1121. const bool merge_with_next = !priv_is_allocated_block(next_block);
  1122. //Merge logic. First just update block sizes, then fix free blocks tree
  1123. if(merge_with_prev || merge_with_next){
  1124. //Merge if the previous is free
  1125. if(merge_with_prev){
  1126. //Get the previous block
  1127. block_to_insert = priv_prev_block(block);
  1128. block_to_insert->m_size = size_type(block_to_insert->m_size + block->m_size) & block_ctrl::size_mask;
  1129. BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
  1130. m_header.m_imultiset.erase(Imultiset::s_iterator_to(*block_to_insert));
  1131. }
  1132. //Merge if the next is free
  1133. if(merge_with_next){
  1134. block_to_insert->m_size = size_type(block_to_insert->m_size + next_block->m_size) & block_ctrl::size_mask;
  1135. BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
  1136. const imultiset_iterator next_it = Imultiset::s_iterator_to(*next_block);
  1137. m_header.m_imultiset.erase(next_it);
  1138. }
  1139. }
  1140. priv_mark_as_free_block(block_to_insert);
  1141. m_header.m_imultiset.insert(*block_to_insert);
  1142. }
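// Coalescing summary (editorial note): deallocation merges the freed block with its free
// neighbours before reinserting a single node into the free tree. For example, freeing a
// 10-unit block whose previous neighbour is a free 5-unit block and whose next neighbour is
// a free 20-unit block erases both neighbours from the multiset, grows the previous block to
// 35 units, marks it free (which also refreshes the boundary tag in the block after it) and
// inserts that single 35-unit node back into the tree.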
  1143. #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
  1144. } //namespace interprocess {
  1145. } //namespace boost {
  1146. #include <boost/interprocess/detail/config_end.hpp>
  1147. #endif //#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP