//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP
#define BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP

#ifndef BOOST_CONFIG_HPP
#  include <boost/config.hpp>
#endif
#
#if defined(BOOST_HAS_PRAGMA_ONCE)
#  pragma once
#endif

#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>

#include <boost/intrusive/pointer_traits.hpp>

#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/containers/allocation_type.hpp>
#include <boost/container/detail/multiallocation_chain.hpp>
#include <boost/interprocess/offset_ptr.hpp>
#include <boost/interprocess/sync/interprocess_mutex.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/interprocess/detail/utilities.hpp>
#include <boost/interprocess/detail/min_max.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>
#include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
#include <boost/move/detail/type_traits.hpp> //make_unsigned, alignment_of
#include <boost/move/detail/force_ptr.hpp>
#include <boost/intrusive/detail/minimal_pair_header.hpp>
#include <cstring>
#include <boost/assert.hpp>

//!\file
//!Describes the sequential fit algorithm used to allocate objects in shared memory.
//!This class is intended as a base class for single segment and multi-segment
//!implementations.

namespace boost {
namespace interprocess {
namespace ipcdetail {

//!This class implements the simple sequential fit algorithm with a simply
//!linked list of free buffers.
//!This class is intended as a base class for single segment and multi-segment
//!implementations.
template<class MutexFamily, class VoidPointer>
class simple_seq_fit_impl
{
   //Non-copyable
   simple_seq_fit_impl();
   simple_seq_fit_impl(const simple_seq_fit_impl &);
   simple_seq_fit_impl &operator=(const simple_seq_fit_impl &);

   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<char>::type                   char_ptr;

   public:

   //!Shared interprocess_mutex family used for the rest of the Interprocess framework
   typedef MutexFamily        mutex_family;
   //!Pointer type to be used with the rest of the Interprocess framework
   typedef VoidPointer        void_pointer;
   typedef boost::container::dtl::
      basic_multiallocation_chain<VoidPointer>        multiallocation_chain;

   typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
   typedef typename boost::container::dtl::make_unsigned<difference_type>::type size_type;

   private:
   class block_ctrl;
   friend class block_ctrl;

   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<block_ctrl>::type             block_ctrl_ptr;

   //!Block control structure
   class block_ctrl
   {
      public:
      static const size_type size_mask = size_type(-1);
      //!Offset pointer to the next block.
      block_ctrl_ptr m_next;
      //!This block's memory size (including the block_ctrl
      //!header) in Alignment units
      size_type m_size;

      size_type get_user_bytes() const
      {  return this->m_size*Alignment - BlockCtrlBytes;  }

      size_type get_total_bytes() const
      {  return this->m_size*Alignment;  }
   };
   //!Shared interprocess_mutex to protect memory allocate/deallocate
   typedef typename MutexFamily::mutex_type interprocess_mutex;

   //!This struct includes needed data and derives from
   //!interprocess_mutex to allow EBO when using null interprocess_mutex
   struct header_t : public interprocess_mutex
   {
      //!Root of the circular singly linked list of free blocks
      block_ctrl        m_root;
      //!Allocated bytes for internal checking
      size_type         m_allocated;
      //!The size of the memory segment
      size_type         m_size;
      //!The extra size required by the segment
      size_type         m_extra_hdr_bytes;
   }  m_header;

   friend class ipcdetail::memory_algorithm_common<simple_seq_fit_impl>;
   typedef ipcdetail::memory_algorithm_common<simple_seq_fit_impl> algo_impl_t;
   public:

   //!Constructor. "size" is the total size of the managed memory segment.
   //!"extra_hdr_bytes" indicates the number of extra bytes, beginning at
   //!offset sizeof(simple_seq_fit_impl), that the allocator should not use at all.
   simple_seq_fit_impl(size_type size, size_type extra_hdr_bytes);

   //!Destructor
   ~simple_seq_fit_impl();

   //!Obtains the minimum size needed by the algorithm
   static size_type get_min_size(size_type extra_hdr_bytes);

   //Functions for single segment management

   //!Allocates bytes, returns 0 if there is no more memory
   void* allocate(size_type nbytes);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   //!Multiple element allocation, same size
   void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
   }

   //!Multiple element allocation, different sizes
   void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
   }

   //!Multiple element deallocation
   void deallocate_many(multiallocation_chain &chain);

   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
   //!Deallocates previously allocated bytes
   void deallocate(void *addr);

   //!Returns the size of the memory segment
   size_type get_size() const;

   //!Returns the number of free bytes of the memory segment
   size_type get_free_memory() const;

   //!Increases managed memory by extra_size bytes
   void grow(size_type extra_size);

   //!Decreases managed memory as much as possible
   void shrink_to_fit();

   //!Returns true if all allocated memory has been deallocated
   bool all_memory_deallocated();

   //!Makes an internal sanity check and returns true on success
   bool check_sanity();

   //!Initializes to zero all the memory that's not in use.
   //!This function is normally used for security reasons.
   void zero_free_memory();

   template<class T>
   T *allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
                         size_type &prefer_in_recvd_out_size, T *&reuse);

   void *raw_allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
                                size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object = 1);

   //!Returns the size of the buffer previously allocated and pointed to by ptr
   size_type size(const void *ptr) const;

   //!Allocates aligned bytes, returns 0 if there is no more memory.
   //!Alignment must be a power of 2
   void* allocate_aligned(size_type nbytes, size_type alignment);
   private:

   //!Obtains the pointer returned to the user from the block control
   static void *priv_get_user_buffer(const block_ctrl *block);

   //!Obtains the block control structure of the user buffer
   static block_ctrl *priv_get_block(const void *ptr);

   //!Real allocation algorithm with min allocation option
   void *priv_allocate(boost::interprocess::allocation_type command
                      ,size_type min_size
                      ,size_type &prefer_in_recvd_out_size, void *&reuse_ptr);

   void *priv_allocation_command(boost::interprocess::allocation_type command
                                ,size_type min_size
                                ,size_type &prefer_in_recvd_out_size
                                ,void *&reuse_ptr
                                ,size_type sizeof_object);

   //!Returns the number of total units that a user buffer
   //!of "userbytes" bytes really occupies (including the header)
   static size_type priv_get_total_units(size_type userbytes);

   static size_type priv_first_block_offset(const void *this_ptr, size_type extra_hdr_bytes);
   size_type priv_block_end_offset() const;

   //!Returns the next block if it's free.
   //!Returns 0 if the next block is not free.
   block_ctrl *priv_next_block_if_free(block_ctrl *ptr);

   //!Returns true if this block is allocated (not free)
   bool priv_is_allocated_block(block_ctrl *ptr);

   //!Returns the previous block and its predecessor if the previous block is free.
   //!Returns a pair of null pointers if the previous block is not free.
   std::pair<block_ctrl*, block_ctrl*> priv_prev_block_if_free(block_ctrl *ptr);

   //!Real expand function implementation
   bool priv_expand(void *ptr, size_type min_size, size_type &prefer_in_recvd_out_size);

   //!Real expand to both sides implementation
   void* priv_expand_both_sides(boost::interprocess::allocation_type command
                               ,size_type min_size, size_type &prefer_in_recvd_out_size
                               ,void *reuse_ptr
                               ,bool only_preferred_backwards);

   //!Real private aligned allocation function
   //void* priv_allocate_aligned(size_type nbytes, size_type alignment);

   //!Checks if the block has enough memory and splits/unlinks the block,
   //!returning the address to the user
   void *priv_check_and_allocate(size_type units
                                ,block_ctrl* prev
                                ,block_ctrl* block
                                ,size_type &received_size);

   //!Real deallocation algorithm
   void priv_deallocate(void *addr);

   //!Makes a new memory portion available for allocation
   void priv_add_segment(void *addr, size_type size);

   void priv_mark_new_allocated_block(block_ctrl *block);

   public:
   static const size_type Alignment = ::boost::container::dtl::alignment_of
      < ::boost::container::dtl::max_align_t>::value;

   private:
   static const size_type BlockCtrlBytes = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
   static const size_type BlockCtrlUnits = BlockCtrlBytes/Alignment;
   static const size_type MinBlockUnits  = BlockCtrlUnits;
   static const size_type MinBlockSize   = MinBlockUnits*Alignment;
   static const size_type AllocatedCtrlBytes = BlockCtrlBytes;
   static const size_type AllocatedCtrlUnits = BlockCtrlUnits;
   static const size_type UsableByPreviousChunk = 0;

   public:
   static const size_type PayloadPerAllocation = BlockCtrlBytes;
};
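
//Illustrative usage sketch (not part of the original header): this class is
//normally reached through the public simple_seq_fit front-end, for example
//as the memory algorithm of a managed shared memory segment. The segment
//name and sizes below are arbitrary assumptions for the example.
//
//   #include <boost/interprocess/managed_shared_memory.hpp>
//   #include <boost/interprocess/mem_algo/simple_seq_fit.hpp>
//
//   namespace bip = boost::interprocess;
//   typedef bip::basic_managed_shared_memory
//      < char
//      , bip::simple_seq_fit<bip::mutex_family>
//      , bip::iset_index
//      > managed_shm_t;
//
//   managed_shm_t segment(bip::create_only, "MySegment", 65536);
//   void *p = segment.allocate(100);  //serviced by simple_seq_fit_impl
//   segment.deallocate(p);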

template<class MutexFamily, class VoidPointer>
inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
simple_seq_fit_impl<MutexFamily, VoidPointer>
   ::priv_first_block_offset(const void *this_ptr, size_type extra_hdr_bytes)
{
   //First align "this" pointer
   size_type uint_this         = (std::size_t)this_ptr;
   size_type uint_aligned_this = uint_this/Alignment*Alignment;
   size_type this_disalignment = (uint_this - uint_aligned_this);
   size_type block1_off =
      ipcdetail::get_rounded_size(sizeof(simple_seq_fit_impl) + extra_hdr_bytes + this_disalignment, Alignment)
      - this_disalignment;
   algo_impl_t::assert_alignment(this_disalignment + block1_off);
   return block1_off;
}
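
//Worked example (illustrative; the addresses and sizes are assumptions,
//with Alignment == 16 and sizeof(simple_seq_fit_impl) == 48):
//   this_ptr = 1000  ->  uint_aligned_this = 992, this_disalignment = 8
//   extra_hdr_bytes = 0
//   block1_off = round_up(48 + 0 + 8, 16) - 8 = 64 - 8 = 56
//so the first block starts at absolute address 1000 + 56 == 1056,
//which is a multiple of Alignment, as the assertion checks.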

template<class MutexFamily, class VoidPointer>
inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
simple_seq_fit_impl<MutexFamily, VoidPointer>
   ::priv_block_end_offset() const
{
   //First align "this" pointer
   size_type uint_this         = (std::size_t)this;
   size_type uint_aligned_this = uint_this/Alignment*Alignment;
   size_type this_disalignment = (uint_this - uint_aligned_this);
   size_type old_end =
      ipcdetail::get_truncated_size(m_header.m_size + this_disalignment, Alignment)
      - this_disalignment;
   algo_impl_t::assert_alignment(old_end + this_disalignment);
   return old_end;
}

template<class MutexFamily, class VoidPointer>
inline simple_seq_fit_impl<MutexFamily, VoidPointer>::
   simple_seq_fit_impl(size_type segment_size, size_type extra_hdr_bytes)
{
   //Initialize sizes and counters
   m_header.m_allocated = 0;
   m_header.m_size      = segment_size;
   m_header.m_extra_hdr_bytes = extra_hdr_bytes;

   //Initialize pointers
   size_type block1_off = priv_first_block_offset(this, extra_hdr_bytes);

   m_header.m_root.m_next = move_detail::force_ptr<block_ctrl*>
      ((reinterpret_cast<char*>(this) + block1_off));
   algo_impl_t::assert_alignment(ipcdetail::to_raw_pointer(m_header.m_root.m_next));
   m_header.m_root.m_next->m_size = (segment_size - block1_off)/Alignment;
   m_header.m_root.m_next->m_next = &m_header.m_root;
}
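
//Segment layout right after construction (illustrative):
//
//   this                                        this + segment_size
//   |                                           |
//   [simple_seq_fit_impl][extra_hdr][ single free block ..............]
//                                   ^
//                                   this + block1_off (aligned)
//
//m_root.m_next points to that single free block and the block's m_next
//points back to m_root, closing the circular singly linked free list.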

template<class MutexFamily, class VoidPointer>
inline simple_seq_fit_impl<MutexFamily, VoidPointer>::~simple_seq_fit_impl()
{
   //There is a memory leak!
//   BOOST_ASSERT(m_header.m_allocated == 0);
//   BOOST_ASSERT(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
}

template<class MutexFamily, class VoidPointer>
inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::grow(size_type extra_size)
{
   //Old highest address block's end offset
   size_type old_end = this->priv_block_end_offset();

   //Update managed buffer's size
   m_header.m_size += extra_size;

   //We need at least MinBlockSize bytes to create a new block
   if((m_header.m_size - old_end) < MinBlockSize){
      return;
   }

   //We'll create a new free block with the expansion bytes
   block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(this) + old_end);

   algo_impl_t::assert_alignment(new_block);
   new_block->m_next = 0;
   new_block->m_size = (m_header.m_size - old_end)/Alignment;
   m_header.m_allocated += new_block->m_size*Alignment;
   this->priv_deallocate(priv_get_user_buffer(new_block));
}
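
//Worked example (illustrative, assuming Alignment == 16 and
//MinBlockSize == 16): if the managed size was 1024 bytes, old_end == 1024
//and grow(100) is called, the new free block gets (1124 - 1024)/16 == 6
//units (96 bytes; the remaining 4 bytes are unusable). The block is first
//accounted as allocated and then released through priv_deallocate() so the
//normal list insertion and merging logic links it in.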

template<class MutexFamily, class VoidPointer>
void simple_seq_fit_impl<MutexFamily, VoidPointer>::shrink_to_fit()
{
   //Get the root and the first memory block
   block_ctrl *prev  = &m_header.m_root;
   block_ctrl *last  = &m_header.m_root;
   block_ctrl *block = ipcdetail::to_raw_pointer(last->m_next);
   block_ctrl *root  = &m_header.m_root;

   //No free block?
   if(block == root) return;

   //Iterate through the free block list
   while(block != root){
      prev  = last;
      last  = block;
      block = ipcdetail::to_raw_pointer(block->m_next);
   }

   char *last_free_end_address = reinterpret_cast<char*>(last) + last->m_size*Alignment;
   if(last_free_end_address != (reinterpret_cast<char*>(this) + priv_block_end_offset())){
      //There is an allocated block at the end of the segment,
      //so no shrinking is possible
      return;
   }

   //Check if we have only one big free block
   void *unique_block = 0;
   if(!m_header.m_allocated){
      BOOST_ASSERT(prev == root);
      size_type ignore_recvd = 0;
      void *ignore_reuse = 0;
      unique_block = priv_allocate(boost::interprocess::allocate_new, 0, ignore_recvd, ignore_reuse);
      if(!unique_block)
         return;
      last = ipcdetail::to_raw_pointer(m_header.m_root.m_next);
      BOOST_ASSERT(last_free_end_address == (reinterpret_cast<char*>(last) + last->m_size*Alignment));
   }

   size_type last_units = last->m_size;

   size_type received_size;
   void *addr = priv_check_and_allocate(last_units, prev, last, received_size);
   (void)addr;
   BOOST_ASSERT(addr);
   BOOST_ASSERT(received_size == last_units*Alignment - AllocatedCtrlBytes);

   //Shrink it
   m_header.m_size /= Alignment;
   m_header.m_size -= last->m_size;
   m_header.m_size *= Alignment;
   m_header.m_allocated -= last->m_size*Alignment;

   if(unique_block)
      priv_deallocate(unique_block);
}

template<class MutexFamily, class VoidPointer>
inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_mark_new_allocated_block(block_ctrl *new_block)
{
   new_block->m_next = 0;
}

template<class MutexFamily, class VoidPointer>
inline
typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
   simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_get_block(const void *ptr)
{
   return const_cast<block_ctrl*>(move_detail::force_ptr<const block_ctrl*>
      (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
}

template<class MutexFamily, class VoidPointer>
inline
void *simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_get_user_buffer(const typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *block)
{
   return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes);
}
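
//The two helpers above are mutual inverses (illustrative):
//
//   block_ctrl *b = priv_get_block(user_ptr);
//   void       *u = priv_get_user_buffer(b);   //u == user_ptr
//
//because the user buffer always starts exactly AllocatedCtrlBytes after
//its block_ctrl header.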

template<class MutexFamily, class VoidPointer>
inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_add_segment(void *addr, size_type segment_size)
{
   algo_impl_t::assert_alignment(addr);
   //Check size
   BOOST_ASSERT(!(segment_size < MinBlockSize));
   if(segment_size < MinBlockSize)
      return;
   //Construct a big block using the new segment
   block_ctrl *new_block = static_cast<block_ctrl *>(addr);
   new_block->m_size     = segment_size/Alignment;
   new_block->m_next     = 0;
   //Simulate that this block was previously allocated
   m_header.m_allocated += new_block->m_size*Alignment;
   //Return the block so it's inserted in the free block list
   this->priv_deallocate(priv_get_user_buffer(new_block));
}

template<class MutexFamily, class VoidPointer>
inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
simple_seq_fit_impl<MutexFamily, VoidPointer>::get_size() const
{  return m_header.m_size;  }

template<class MutexFamily, class VoidPointer>
inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
simple_seq_fit_impl<MutexFamily, VoidPointer>::get_free_memory() const
{
   return m_header.m_size - m_header.m_allocated -
      algo_impl_t::multiple_of_units(sizeof(*this) + m_header.m_extra_hdr_bytes);
}

template<class MutexFamily, class VoidPointer>
inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
simple_seq_fit_impl<MutexFamily, VoidPointer>::
   get_min_size (size_type extra_hdr_bytes)
{
   return ipcdetail::get_rounded_size((size_type)sizeof(simple_seq_fit_impl),Alignment) +
          ipcdetail::get_rounded_size(extra_hdr_bytes,Alignment)
        + MinBlockSize;
}
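
//Worked example (illustrative; the concrete sizes are assumptions, with
//Alignment == 16, sizeof(simple_seq_fit_impl) == 48 and a block_ctrl that
//fits in 16 bytes, so MinBlockSize == 16):
//   get_min_size(4) == round_up(48, 16) + round_up(4, 16) + 16
//                   == 48 + 16 + 16 == 80 bytes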

template<class MutexFamily, class VoidPointer>
inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
   all_memory_deallocated()
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   return m_header.m_allocated == 0 &&
      ipcdetail::to_raw_pointer(m_header.m_root.m_next->m_next) == &m_header.m_root;
}

template<class MutexFamily, class VoidPointer>
inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::zero_free_memory()
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   block_ctrl *block = ipcdetail::to_raw_pointer(m_header.m_root.m_next);

   //Iterate through all free portions
   do{
      //Just clear the memory part reserved for the user
      std::memset( priv_get_user_buffer(block)
                 , 0
                 , block->get_user_bytes());
      block = ipcdetail::to_raw_pointer(block->m_next);
   }
   while(block != &m_header.m_root);
}

template<class MutexFamily, class VoidPointer>
inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
   check_sanity()
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   block_ctrl *block = ipcdetail::to_raw_pointer(m_header.m_root.m_next);

   size_type free_memory = 0;

   //Iterate through all blocks obtaining their size
   while(block != &m_header.m_root){
      algo_impl_t::assert_alignment(block);
      if(!algo_impl_t::check_alignment(block))
         return false;
      //A free block's next pointer must always be valid
      block_ctrl *next = ipcdetail::to_raw_pointer(block->m_next);
      if(!next){
         return false;
      }
      free_memory += block->m_size*Alignment;
      block = next;
   }

   //Check that allocated bytes are not more than the segment size
   if(m_header.m_allocated > m_header.m_size){
      return false;
   }

   //Check that free bytes are not more than the segment size
   if(free_memory > m_header.m_size){
      return false;
   }
   return true;
}

template<class MutexFamily, class VoidPointer>
inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
   allocate(size_type nbytes)
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   size_type ignore_recvd = nbytes;
   void *ignore_reuse = 0;
   return priv_allocate(boost::interprocess::allocate_new, nbytes, ignore_recvd, ignore_reuse);
}

template<class MutexFamily, class VoidPointer>
inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
   allocate_aligned(size_type nbytes, size_type alignment)
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   return algo_impl_t::allocate_aligned(this, nbytes, alignment);
}

template<class MutexFamily, class VoidPointer>
template<class T>
inline T* simple_seq_fit_impl<MutexFamily, VoidPointer>::
   allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
                      size_type &prefer_in_recvd_out_size, T *&reuse_ptr)
{
   void *raw_reuse = reuse_ptr;
   void * const ret = priv_allocation_command
      (command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
   BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::dtl::alignment_of<T>::value));
   reuse_ptr = static_cast<T*>(raw_reuse);
   return static_cast<T*>(ret);
}

template<class MutexFamily, class VoidPointer>
inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
   raw_allocation_command(boost::interprocess::allocation_type command, size_type limit_objects,
                          size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object)
{
   size_type const preferred_objects = prefer_in_recvd_out_size;
   if(!sizeof_object){
      return reuse_ptr = 0, static_cast<void*>(0);
   }
   if(command & boost::interprocess::try_shrink_in_place){
      if(!reuse_ptr) return static_cast<void*>(0);
      prefer_in_recvd_out_size = preferred_objects*sizeof_object;
      bool success = algo_impl_t::try_shrink
         ( this, reuse_ptr, limit_objects*sizeof_object, prefer_in_recvd_out_size);
      prefer_in_recvd_out_size /= sizeof_object;
      return success ? reuse_ptr : 0;
   }
   else{
      return priv_allocation_command
         (command, limit_objects, prefer_in_recvd_out_size, reuse_ptr, sizeof_object);
   }
}

template<class MutexFamily, class VoidPointer>
inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
                           size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   command &= ~boost::interprocess::expand_bwd;
   if(!command){
      return reuse_ptr = 0, static_cast<void*>(0);
   }

   size_type max_count = m_header.m_size/sizeof_object;
   if(limit_size > max_count || preferred_size > max_count){
      return reuse_ptr = 0, static_cast<void*>(0);
   }
   size_type l_size = limit_size*sizeof_object;
   size_type r_size = preferred_size*sizeof_object;
   void *ret = 0;
   {
      //-----------------------
      boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
      //-----------------------
      ret = priv_allocate(command, l_size, r_size, reuse_ptr);
   }
   prefer_in_recvd_out_size = r_size/sizeof_object;
   return ret;
}

template<class MutexFamily, class VoidPointer>
inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
simple_seq_fit_impl<MutexFamily, VoidPointer>::size(const void *ptr) const
{
   //We need no synchronization since this block is not going
   //to be modified
   //Obtain the real size of the block
   const block_ctrl *block = static_cast<const block_ctrl*>(priv_get_block(ptr));
   return block->get_user_bytes();
}

template<class MutexFamily, class VoidPointer>
void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_expand_both_sides(boost::interprocess::allocation_type command
                         ,size_type min_size
                         ,size_type &prefer_in_recvd_out_size
                         ,void *reuse_ptr
                         ,bool only_preferred_backwards)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   typedef std::pair<block_ctrl *, block_ctrl *> prev_block_t;
   block_ctrl *reuse = priv_get_block(reuse_ptr);
   prefer_in_recvd_out_size = 0;

   if(this->size(reuse_ptr) > min_size){
      prefer_in_recvd_out_size = this->size(reuse_ptr);
      return reuse_ptr;
   }

   if(command & boost::interprocess::expand_fwd){
      if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
         return reuse_ptr;
   }
   else{
      prefer_in_recvd_out_size = this->size(reuse_ptr);
   }

   if(command & boost::interprocess::expand_bwd){
      size_type extra_forward = !prefer_in_recvd_out_size ? 0 : prefer_in_recvd_out_size + BlockCtrlBytes;
      prev_block_t prev_pair = priv_prev_block_if_free(reuse);
      block_ctrl *prev = prev_pair.second;
      if(!prev){
         return 0;
      }

      size_type needs_backwards =
         ipcdetail::get_rounded_size(preferred_size - extra_forward, Alignment);

      if(!only_preferred_backwards){
         needs_backwards =
            max_value(ipcdetail::get_rounded_size(min_size - extra_forward, Alignment)
                     ,min_value(prev->get_user_bytes(), needs_backwards));
      }

      //Check if the previous block has enough size
      if((prev->get_user_bytes()) >= needs_backwards){
         //Now take all the next space. This will succeed
         if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, prefer_in_recvd_out_size)){
            BOOST_ASSERT(0);
         }

         //We need a minimum size to split the previous one
         if((prev->get_user_bytes() - needs_backwards) > 2*BlockCtrlBytes){
            block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
               (reinterpret_cast<char*>(reuse) - needs_backwards - BlockCtrlBytes);

            new_block->m_next = 0;
            new_block->m_size =
               BlockCtrlUnits + (needs_backwards + extra_forward)/Alignment;
            prev->m_size =
               (prev->get_total_bytes() - needs_backwards)/Alignment - BlockCtrlUnits;
            prefer_in_recvd_out_size = needs_backwards + extra_forward;
            m_header.m_allocated += needs_backwards + BlockCtrlBytes;
            return priv_get_user_buffer(new_block);
         }
         else{
            //Just merge the whole previous block
            block_ctrl *prev_2_block = prev_pair.first;
            //Update received size and allocation
            prefer_in_recvd_out_size = extra_forward + prev->get_user_bytes();
            m_header.m_allocated += prev->get_total_bytes();
            //Now unlink it from the previous block
            prev_2_block->m_next = prev->m_next;
            prev->m_size = reuse->m_size + prev->m_size;
            prev->m_next = 0;
            return priv_get_user_buffer(prev);
         }
      }
   }
   return 0;
}

template<class MutexFamily, class VoidPointer>
inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::
   deallocate_many(typename simple_seq_fit_impl<MutexFamily, VoidPointer>::multiallocation_chain &chain)
{
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   while(!chain.empty()){
      this->priv_deallocate(to_raw_pointer(chain.pop_front()));
   }
}

template<class MutexFamily, class VoidPointer>
inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_get_total_units(size_type userbytes)
{
   size_type s = ipcdetail::get_rounded_size(userbytes, Alignment)/Alignment;
   if(!s)   ++s;
   return BlockCtrlUnits + s;
}
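
//Worked example (illustrative, assuming Alignment == 16 and
//BlockCtrlUnits == 1):
//   priv_get_total_units(0)  -> s = 0, bumped to 1 -> 2 units (32 bytes)
//   priv_get_total_units(1)  -> s = 1              -> 2 units (32 bytes)
//   priv_get_total_units(17) -> s = 2              -> 3 units (48 bytes)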

template<class MutexFamily, class VoidPointer>
void * simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_allocate(boost::interprocess::allocation_type command
                ,size_type limit_size
                ,size_type &prefer_in_recvd_out_size
                ,void *&reuse_ptr)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   if(command & boost::interprocess::shrink_in_place){
      if(!reuse_ptr) return static_cast<void*>(0);
      bool success = algo_impl_t::shrink(this, reuse_ptr, limit_size, prefer_in_recvd_out_size);
      return success ? reuse_ptr : 0;
   }
   prefer_in_recvd_out_size = 0;

   if(limit_size > preferred_size){
      return reuse_ptr = 0, static_cast<void*>(0);
   }

   //Number of units to request (including the block_ctrl header)
   size_type nunits = ipcdetail::get_rounded_size(preferred_size, Alignment)/Alignment + BlockCtrlUnits;

   //Get the root and the first memory block
   block_ctrl *prev               = &m_header.m_root;
   block_ctrl *block              = ipcdetail::to_raw_pointer(prev->m_next);
   block_ctrl *root               = &m_header.m_root;
   block_ctrl *biggest_block      = 0;
   block_ctrl *prev_biggest_block = 0;
   size_type biggest_size         = 0;

   //Expand in place
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      void *ret = priv_expand_both_sides(command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, true);
      if(ret){
         algo_impl_t::assert_alignment(ret);
         return ret;
      }
   }

   if(command & boost::interprocess::allocate_new){
      prefer_in_recvd_out_size = 0;
      while(block != root){
         //Update biggest block pointers
         if(block->m_size > biggest_size){
            prev_biggest_block = prev;
            biggest_size  = block->m_size;
            biggest_block = block;
         }
         algo_impl_t::assert_alignment(block);
         void *addr = this->priv_check_and_allocate(nunits, prev, block, prefer_in_recvd_out_size);
         if(addr){
            algo_impl_t::assert_alignment(addr);
            return reuse_ptr = 0, addr;
         }
         //Bad luck, let's check the next block
         prev  = block;
         block = ipcdetail::to_raw_pointer(block->m_next);
      }

      //Bad luck finding preferred_size: if we found a biggest_block,
      //try with that block
      if(biggest_block){
         size_type limit_units = ipcdetail::get_rounded_size(limit_size, Alignment)/Alignment + BlockCtrlUnits;
         if(biggest_block->m_size < limit_units){
            return reuse_ptr = 0, static_cast<void*>(0);
         }
         void *ret = this->priv_check_and_allocate
            (biggest_block->m_size, prev_biggest_block, biggest_block, prefer_in_recvd_out_size = biggest_block->m_size*Alignment - BlockCtrlUnits);
         BOOST_ASSERT(ret != 0);
         algo_impl_t::assert_alignment(ret);
         return reuse_ptr = 0, ret;
      }
   }
   //Now try to expand both sides with min size
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      void *ret = priv_expand_both_sides(command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false);
      algo_impl_t::assert_alignment(ret);
      return ret;
   }
   return reuse_ptr = 0, static_cast<void*>(0);
}
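
//Allocation strategy recap (illustrative, sizes in Alignment units and
//assuming BlockCtrlUnits == 1): with free blocks of 3, 10 and 4 units,
//a request that needs 8 units in total is satisfied by the 10-unit block
//inside the first-fit loop. If the preferred size needed 20 units but the
//limit size only 2, the loop would fail and the biggest-block fallback
//would return the whole 10-unit block, reporting the really received size
//through prefer_in_recvd_out_size.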

template<class MutexFamily, class VoidPointer> inline
bool simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_is_allocated_block
   (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *block)
{  return block->m_next == 0;  }

template<class MutexFamily, class VoidPointer>
inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
   simple_seq_fit_impl<MutexFamily, VoidPointer>::
      priv_next_block_if_free
         (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *ptr)
{
   //Take the address where the next block should go
   block_ctrl *next_block = move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);

   //Check if the adjacent block is in the managed segment
   char *this_char_ptr = reinterpret_cast<char*>(this);
   char *next_char_ptr = reinterpret_cast<char*>(next_block);
   size_type distance = (size_type)(next_char_ptr - this_char_ptr)/Alignment;

   if(distance >= (m_header.m_size/Alignment)){
      //"next_block" does not exist so we can't expand "block"
      return 0;
   }

   if(!next_block->m_next)
      return 0;

   return next_block;
}

template<class MutexFamily, class VoidPointer>
inline
std::pair<typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
         ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *>
   simple_seq_fit_impl<MutexFamily, VoidPointer>::
      priv_prev_block_if_free
         (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *ptr)
{
   typedef std::pair<block_ctrl *, block_ctrl *> prev_pair_t;
   //Take the address where the previous block should go
   block_ctrl *root         = &m_header.m_root;
   block_ctrl *prev_2_block = root;
   block_ctrl *prev_block   = ipcdetail::to_raw_pointer(root->m_next);

   while((reinterpret_cast<char*>(prev_block) + prev_block->m_size*Alignment)
            != reinterpret_cast<char*>(ptr)
         && prev_block != root){
      prev_2_block = prev_block;
      prev_block   = ipcdetail::to_raw_pointer(prev_block->m_next);
   }

   if(prev_block == root || !prev_block->m_next)
      return prev_pair_t(static_cast<block_ctrl*>(0), static_cast<block_ctrl*>(0));

   //Check if the previous block is in the managed segment
   char *this_char_ptr = reinterpret_cast<char*>(this);
   char *prev_char_ptr = reinterpret_cast<char*>(prev_block);
   size_type distance = (size_type)(prev_char_ptr - this_char_ptr)/Alignment;

   if(distance >= (m_header.m_size/Alignment)){
      //"previous_block" does not exist so we can't expand "block"
      return prev_pair_t(static_cast<block_ctrl*>(0), static_cast<block_ctrl*>(0));
   }
   return prev_pair_t(prev_2_block, prev_block);
}

template<class MutexFamily, class VoidPointer>
inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_expand(void *ptr, size_type min_size, size_type &received_size)
{
   size_type preferred_size = received_size;
   //Obtain the real size of the block
   block_ctrl *block = move_detail::force_ptr<block_ctrl*>(priv_get_block(ptr));
   size_type old_block_size = block->m_size;

   //All used blocks' next is marked with 0 so check it
   BOOST_ASSERT(block->m_next == 0);

   //Set this to a safe value
   received_size = old_block_size*Alignment - BlockCtrlBytes;

   //Now translate it to Alignment units
   min_size       = ipcdetail::get_rounded_size(min_size, Alignment)/Alignment;
   preferred_size = ipcdetail::get_rounded_size(preferred_size, Alignment)/Alignment;

   //Some parameter checks
   if(min_size > preferred_size)
      return false;

   size_type data_size = old_block_size - BlockCtrlUnits;

   if(data_size >= min_size)
      return true;

   block_ctrl *next_block = priv_next_block_if_free(block);
   if(!next_block){
      return false;
   }

   //Is "block" + "next_block" big enough?
   size_type merged_size = old_block_size + next_block->m_size;

   //Now we can expand this block further than before
   received_size = merged_size*Alignment - BlockCtrlBytes;

   if(merged_size < (min_size + BlockCtrlUnits)){
      return false;
   }

   //We can fully expand. Merge both blocks
   block->m_next = next_block->m_next;
   block->m_size = merged_size;

   //Find the previous free block of next_block
   block_ctrl *prev = &m_header.m_root;
   while(ipcdetail::to_raw_pointer(prev->m_next) != next_block){
      prev = ipcdetail::to_raw_pointer(prev->m_next);
   }

   //Now insert the merged block in the free list
   //This allows reusing the allocation logic in this function
   m_header.m_allocated -= old_block_size*Alignment;
   prev->m_next = block;

   //Now use check and allocate to do the allocation logic
   preferred_size += BlockCtrlUnits;
   size_type nunits = preferred_size < merged_size ? preferred_size : merged_size;

   //This must succeed since nunits is not greater than merged_size!
   if(!this->priv_check_and_allocate(nunits, prev, block, received_size)){
      //Something very ugly is happening here. This is a bug
      //or there is memory corruption
      BOOST_ASSERT(0);
      return false;
   }
   return true;
}

template<class MutexFamily, class VoidPointer> inline
void* simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_check_and_allocate
   (size_type nunits
   ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl* prev
   ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl* block
   ,size_type &received_size)
{
   size_type upper_nunits = nunits + BlockCtrlUnits;
   bool found = false;

   if (block->m_size > upper_nunits){
      //This block is bigger than needed, split it in
      //two blocks: the first's size will be "nunits",
      //the second's size will be "block->m_size - nunits"
      size_type total_size = block->m_size;
      block->m_size = nunits;

      block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
         (reinterpret_cast<char*>(block) + Alignment*nunits);
      new_block->m_size = total_size - nunits;
      new_block->m_next = block->m_next;
      prev->m_next = new_block;
      found = true;
   }
   else if (block->m_size >= nunits){
      //This block has the size we need, perhaps with
      //some extra unusable bytes
      prev->m_next = block->m_next;
      found = true;
   }

   if(found){
      //We need the block_ctrl info for deallocation, so return
      //only the memory the user can overwrite
      m_header.m_allocated += block->m_size*Alignment;
      received_size = block->get_user_bytes();
      //Mark the block as allocated
      block->m_next = 0;
      //Check alignment
      algo_impl_t::assert_alignment(block);
      return priv_get_user_buffer(block);
   }
   return 0;
}
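
//Worked example of the split (illustrative, assuming BlockCtrlUnits == 1):
//a request of nunits == 4 against a free block of m_size == 8 satisfies
//8 > 4 + 1, so the block is split into an allocated 4-unit block and a new
//4-unit free block that replaces it in the list. Against a free block of
//m_size == 5, 5 > 4 + 1 fails but 5 >= 4 holds, so the whole block is
//unlinked and returned with one unit of unusable slack.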

template<class MutexFamily, class VoidPointer>
void simple_seq_fit_impl<MutexFamily, VoidPointer>::deallocate(void* addr)
{
   if(!addr)   return;
   //-----------------------
   boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
   //-----------------------
   return this->priv_deallocate(addr);
}

template<class MutexFamily, class VoidPointer>
void simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_deallocate(void* addr)
{
   if(!addr)   return;

   //Let's get the free block list. The list is always sorted
   //by memory address to allow block merging; the root's next
   //pointer points to the first (lowest address) free block.
   block_ctrl * prev  = &m_header.m_root;
   block_ctrl * pos   = ipcdetail::to_raw_pointer(m_header.m_root.m_next);
   block_ctrl * block = move_detail::force_ptr<block_ctrl*>(priv_get_block(addr));

   //All used blocks' next is marked with 0 so check it
   BOOST_ASSERT(block->m_next == 0);

   //Check if alignment and block size are right
   algo_impl_t::assert_alignment(addr);

   size_type total_size = Alignment*block->m_size;
   BOOST_ASSERT(m_header.m_allocated >= total_size);

   //Update used memory count
   m_header.m_allocated -= total_size;

   //Let's find the previous and the next block of the block to deallocate.
   //This ordering comparison must be done with the original pointer
   //types since their mapping to raw pointers can be different
   //in each process
   while((ipcdetail::to_raw_pointer(pos) != &m_header.m_root) && (block > pos)){
      prev = pos;
      pos = ipcdetail::to_raw_pointer(pos->m_next);
   }

   //Try to combine with the upper block
   char *block_char_ptr = reinterpret_cast<char*>(ipcdetail::to_raw_pointer(block));

   if ((block_char_ptr + Alignment*block->m_size) ==
         reinterpret_cast<char*>(ipcdetail::to_raw_pointer(pos))){
      block->m_size += pos->m_size;
      block->m_next  = pos->m_next;
   }
   else{
      block->m_next = pos;
   }

   //Try to combine with the lower block
   if ((reinterpret_cast<char*>(ipcdetail::to_raw_pointer(prev))
            + Alignment*prev->m_size) ==
         block_char_ptr){
      prev->m_size += block->m_size;
      prev->m_next  = block->m_next;
   }
   else{
      prev->m_next = block;
   }
}
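
//Coalescing example (illustrative): if the address-ordered free list is
//root -> A -> C and an allocated block B sits between A and C in memory,
//deallocating B merges all three:
//
//   before:  [A free][B allocated][C free]
//   after :  [A+B+C free]
//
//B first absorbs C (upper merge), then A absorbs B (lower merge), leaving
//root -> A with A.m_size covering the three original blocks.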

}  //namespace ipcdetail {
}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //#ifndef BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP