//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP
#define BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP

#ifndef BOOST_CONFIG_HPP
#  include <boost/config.hpp>
#endif
#
#if defined(BOOST_HAS_PRAGMA_ONCE)
#  pragma once
#endif

#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>

// interprocess
#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/containers/allocation_type.hpp>
// interprocess/detail
#include <boost/interprocess/detail/math_functions.hpp>
#include <boost/interprocess/detail/min_max.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/detail/utilities.hpp>
// container/detail
#include <boost/container/detail/multiallocation_chain.hpp>
#include <boost/container/detail/placement_new.hpp>
// move
#include <boost/move/utility_core.hpp>
// move/detail
#include <boost/move/detail/force_ptr.hpp>
// other boost
#include <boost/static_assert.hpp>
#include <boost/assert.hpp>

//!\file
//!Implements common operations for memory algorithms.
namespace boost {
namespace interprocess {
namespace ipcdetail {
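
//!Movable (but non-copyable) chain of raw memory buffers built on top of
//!boost::container's intrusive multiallocation chain. It only adds a
//!pop_front() that returns the extracted buffer as a raw pointer.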
template<class VoidPointer>
class basic_multiallocation_chain
   : public boost::container::dtl::
      basic_multiallocation_chain<VoidPointer>
{
   BOOST_MOVABLE_BUT_NOT_COPYABLE(basic_multiallocation_chain)
   typedef boost::container::dtl::
      basic_multiallocation_chain<VoidPointer> base_t;

   public:
   basic_multiallocation_chain()
      : base_t()
   {}

   basic_multiallocation_chain(BOOST_RV_REF(basic_multiallocation_chain) other)
      : base_t(::boost::move(static_cast<base_t&>(other)))
   {}

   basic_multiallocation_chain& operator=(BOOST_RV_REF(basic_multiallocation_chain) other)
   {
      this->base_t::operator=(::boost::move(static_cast<base_t&>(other)));
      return *this;
   }

   void *pop_front()
   {
      return boost::interprocess::ipcdetail::to_raw_pointer(this->base_t::pop_front());
   }
};

//!This class implements several allocation functions shared by different algorithms
//!(aligned allocation, multiple allocation...).
template<class MemoryAlgorithm>
class memory_algorithm_common
{
   public:
   typedef typename MemoryAlgorithm::void_pointer           void_pointer;
   typedef typename MemoryAlgorithm::block_ctrl             block_ctrl;
   typedef typename MemoryAlgorithm::multiallocation_chain  multiallocation_chain;
   typedef memory_algorithm_common<MemoryAlgorithm>         this_type;
   typedef typename MemoryAlgorithm::size_type              size_type;

   static const size_type Alignment             = MemoryAlgorithm::Alignment;
   static const size_type MinBlockUnits         = MemoryAlgorithm::MinBlockUnits;
   static const size_type AllocatedCtrlBytes    = MemoryAlgorithm::AllocatedCtrlBytes;
   static const size_type AllocatedCtrlUnits    = MemoryAlgorithm::AllocatedCtrlUnits;
   static const size_type BlockCtrlBytes        = MemoryAlgorithm::BlockCtrlBytes;
   static const size_type BlockCtrlUnits        = MemoryAlgorithm::BlockCtrlUnits;
   static const size_type UsableByPreviousChunk = MemoryAlgorithm::UsableByPreviousChunk;
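
   //Sizes in this class are handled both in bytes and in "units", where one
   //unit spans Alignment bytes; the *Units constants above are the unit
   //counterparts of the corresponding *Bytes constants.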

   static void assert_alignment(const void *ptr)
   {  assert_alignment((std::size_t)ptr);  }

   static void assert_alignment(size_type uint_ptr)
   {
      (void)uint_ptr;
      BOOST_ASSERT(uint_ptr % Alignment == 0);
   }

   static bool check_alignment(const void *ptr)
   {  return (((std::size_t)ptr) % Alignment == 0);  }

   static size_type ceil_units(size_type size)
   {  return get_rounded_size(size, Alignment)/Alignment;  }

   static size_type floor_units(size_type size)
   {  return size/Alignment;  }

   static size_type multiple_of_units(size_type size)
   {  return get_rounded_size(size, Alignment);  }
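   //For example, if Alignment == 16 then ceil_units(20) == 2 (rounds up),
   //floor_units(20) == 1 (rounds down) and multiple_of_units(20) == 32.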

   static void allocate_many
      (MemoryAlgorithm *memory_algo, size_type elem_bytes, size_type n_elements, multiallocation_chain &chain)
   {
      return this_type::priv_allocate_many(memory_algo, &elem_bytes, n_elements, 0, chain);
   }

   static void deallocate_many(MemoryAlgorithm *memory_algo, multiallocation_chain &chain)
   {
      return this_type::priv_deallocate_many(memory_algo, chain);
   }
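
   //Helper for backwards expansion: the payload must grow backwards in
   //multiples of "backwards_multiple" while buffers stay Alignment-aligned,
   //so the backwards step must be a common multiple of both. Computes that
   //multiple ("lcm_out") and the rounded number of bytes to grow backwards
   //("needs_backwards_lcmed_out") so that "size_to_achieve" bytes become
   //available. Returns false if the request cannot be satisfied (e.g.
   //"size_to_achieve" is incompatible with a power-of-two "backwards_multiple").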
   static bool calculate_lcm_and_needs_backwards_lcmed
      (size_type backwards_multiple, size_type received_size, size_type size_to_achieve,
       size_type &lcm_out, size_type &needs_backwards_lcmed_out)
   {
      // Now calculate lcm_val
      size_type max = backwards_multiple;
      size_type min = Alignment;
      size_type needs_backwards;
      size_type needs_backwards_lcmed;
      size_type lcm_val;
      size_type current_forward;
      //Swap if necessary
      if(max < min){
         size_type tmp = min;
         min = max;
         max = tmp;
      }
      //Check if it's a power of two
      if((backwards_multiple & (backwards_multiple-1)) == 0){
         if(0 != (size_to_achieve & ((backwards_multiple-1)))){
            return false;
         }

         lcm_val = max;
         //If we want to use minbytes data to get a buffer between maxbytes
         //and minbytes when maxbytes can't be achieved, calculate the
         //biggest of all possibilities
         current_forward = get_truncated_size_po2(received_size, backwards_multiple);
         needs_backwards = size_to_achieve - current_forward;
         BOOST_ASSERT((needs_backwards % backwards_multiple) == 0);
         needs_backwards_lcmed = get_rounded_size_po2(needs_backwards, lcm_val);
         lcm_out = lcm_val;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      //Check if it's a multiple of the alignment
      else if((backwards_multiple & (Alignment - 1u)) == 0){
         lcm_val = backwards_multiple;
         current_forward = get_truncated_size(received_size, backwards_multiple);
         //No need to round needs_backwards because backwards_multiple == lcm_val
         needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
         BOOST_ASSERT((needs_backwards_lcmed & (Alignment - 1u)) == 0);
         lcm_out = lcm_val;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      //Check if it's a multiple of half the alignment
      else if((backwards_multiple & ((Alignment/2u) - 1u)) == 0){
         lcm_val = backwards_multiple*2u;
         current_forward = get_truncated_size(received_size, backwards_multiple);
         needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
         if(0 != (needs_backwards_lcmed & (Alignment-1)))
         //while(0 != (needs_backwards_lcmed & (Alignment-1)))
            needs_backwards_lcmed += backwards_multiple;
         BOOST_ASSERT((needs_backwards_lcmed % lcm_val) == 0);
         lcm_out = lcm_val;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      //Check if it's a multiple of a quarter of the alignment
      else if((backwards_multiple & ((Alignment/4u) - 1u)) == 0){
         size_type remainder;
         lcm_val = backwards_multiple*4u;
         current_forward = get_truncated_size(received_size, backwards_multiple);
         needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
         //while(0 != (needs_backwards_lcmed & (Alignment-1)))
         //   needs_backwards_lcmed += backwards_multiple;
         if(0 != (remainder = ((needs_backwards_lcmed & (Alignment-1))>>(Alignment/8u)))){
            if(backwards_multiple & Alignment/2u){
               needs_backwards_lcmed += (remainder)*backwards_multiple;
            }
            else{
               needs_backwards_lcmed += (4-remainder)*backwards_multiple;
            }
         }
         BOOST_ASSERT((needs_backwards_lcmed % lcm_val) == 0);
         lcm_out = lcm_val;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      else{
         lcm_val = lcm(max, min);
      }
      //If we want to use minbytes data to get a buffer between maxbytes
      //and minbytes when maxbytes can't be achieved, calculate the
      //biggest of all possibilities
      current_forward = get_truncated_size(received_size, backwards_multiple);
      needs_backwards = size_to_achieve - current_forward;
      BOOST_ASSERT((needs_backwards % backwards_multiple) == 0);
      needs_backwards_lcmed = get_rounded_size(needs_backwards, lcm_val);
      lcm_out = lcm_val;
      needs_backwards_lcmed_out = needs_backwards_lcmed;
      return true;
   }
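
   //Overload taking one size per element: element i requests
   //elem_sizes[i]*sizeof_element bytes. A "sizeof_element" of zero is the
   //internal convention for "n_elements buffers of the single size
   //*elem_sizes" (see priv_allocate_many).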
   static void allocate_many
      ( MemoryAlgorithm *memory_algo
      , const size_type *elem_sizes
      , size_type n_elements
      , size_type sizeof_element
      , multiallocation_chain &chain)
   {
      this_type::priv_allocate_many(memory_algo, elem_sizes, n_elements, sizeof_element, chain);
   }
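
   //Allocates "nbytes" bytes aligned to "alignment", which must be a power
   //of two. Alignments not greater than the natural Alignment are served by
   //a plain allocation; bigger ones over-allocate (at least nbytes +
   //alignment bytes) and split the resulting block around the first suitably
   //aligned address, returning the unused leading and trailing parts to the
   //memory algorithm.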
   static void* allocate_aligned
      (MemoryAlgorithm *memory_algo, size_type nbytes, size_type alignment)
   {
      //Ensure power of 2
      if ((alignment & (alignment - size_type(1u))) != 0){
         //Alignment is not a power of two
         BOOST_ASSERT((alignment & (alignment - size_type(1u))) == 0);
         return 0;
      }

      size_type real_size = nbytes;
      if(alignment <= Alignment){
         void *ignore_reuse = 0;
         return memory_algo->priv_allocate
            (boost::interprocess::allocate_new, nbytes, real_size, ignore_reuse);
      }

      if(nbytes > UsableByPreviousChunk)
         nbytes -= UsableByPreviousChunk;

      //We can find an aligned portion if we allocate a block of
      //nbytes + alignment bytes or more.
      size_type minimum_allocation = max_value
         (nbytes + alignment, size_type(MinBlockUnits*Alignment));
      //Since we will split that block, we must request a bit more memory
      //if the alignment is near the beginning of the buffer, because otherwise,
      //there is no space for a new block before the alignment.
      //
      //   ____ Aligned here
      //  |
      //  -----------------------------------------------------
      //  | MBU |
      //  -----------------------------------------------------
      size_type request =
         minimum_allocation + (2*MinBlockUnits*Alignment - AllocatedCtrlBytes
         //prevsize - UsableByPreviousChunk
         );

      //Now allocate the buffer
      real_size = request;
      void *ignore_reuse = 0;
      void *buffer = memory_algo->priv_allocate(boost::interprocess::allocate_new, request, real_size, ignore_reuse);
      if(!buffer){
         return 0;
      }
      else if ((((std::size_t)(buffer)) % alignment) == 0){
         //If we are lucky and the buffer is aligned, just split it and
         //return the high part
         block_ctrl *first = memory_algo->priv_get_block(buffer);
         size_type old_size = first->m_size;
         const size_type first_min_units =
            max_value(ceil_units(nbytes) + AllocatedCtrlUnits, size_type(MinBlockUnits));
         //We can create a new block in the end of the segment
         if(old_size >= (first_min_units + MinBlockUnits)){
            block_ctrl *second = move_detail::force_ptr<block_ctrl*>
               (reinterpret_cast<char*>(first) + Alignment*first_min_units);
            first->m_size  = first_min_units & block_ctrl::size_mask;
            second->m_size = (old_size - first->m_size) & block_ctrl::size_mask;
            BOOST_ASSERT(second->m_size >= MinBlockUnits);
            memory_algo->priv_mark_new_allocated_block(first);
            memory_algo->priv_mark_new_allocated_block(second);
            memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(second));
         }
         return buffer;
      }

      //Buffer not aligned, find the aligned part.
      //
      //                     ____ Aligned here
      //                    |
      //  -----------------------------------------------------
      //  | MBU +more | ACB |
      //  -----------------------------------------------------
      char *pos = reinterpret_cast<char*>
         (reinterpret_cast<std::size_t>(static_cast<char*>(buffer) +
            //This is the minimum size of (2)
            (MinBlockUnits*Alignment - AllocatedCtrlBytes) +
            //This is the next MBU for the aligned memory
            AllocatedCtrlBytes +
            //This is the alignment trick
            alignment - 1) & -alignment);

      //Now obtain the address of the blocks
      block_ctrl *first  = memory_algo->priv_get_block(buffer);
      block_ctrl *second = memory_algo->priv_get_block(pos);
      BOOST_ASSERT(pos <= (reinterpret_cast<char*>(first) + first->m_size*Alignment));
      BOOST_ASSERT(first->m_size >= 2*MinBlockUnits);
      BOOST_ASSERT((pos + MinBlockUnits*Alignment - AllocatedCtrlBytes + nbytes*Alignment/Alignment) <=
         (reinterpret_cast<char*>(first) + first->m_size*Alignment));

      //Set the new size of the first block
      size_type old_size = first->m_size;
      first->m_size = size_type(size_type(reinterpret_cast<char*>(second) - reinterpret_cast<char*>(first))/Alignment
                                 & block_ctrl::size_mask);
      memory_algo->priv_mark_new_allocated_block(first);

      //Now check if we can create a new buffer in the end
      //
      //              __"second" block
      //             |      __Aligned here
      //             |     |      __"third" block
      //  -----------|-----|-----|-----------------------------
      //  | MBU +more | ACB | (3) |          BCU               |
      //  -----------------------------------------------------
      //This size will be the minimum size to be able to create a
      //new block in the end.
      const size_type second_min_units = max_value(size_type(MinBlockUnits),
                        ceil_units(nbytes) + AllocatedCtrlUnits );

      //Check if we can create a new block (of size MinBlockUnits) in the end of the segment
      if((old_size - first->m_size) >= (second_min_units + MinBlockUnits)){
         //Now obtain the address of the end block
         block_ctrl *third = new (reinterpret_cast<char*>(second) + Alignment*second_min_units)block_ctrl;
         second->m_size = second_min_units & block_ctrl::size_mask;
         third->m_size  = (old_size - first->m_size - second->m_size) & block_ctrl::size_mask;
         BOOST_ASSERT(third->m_size >= MinBlockUnits);
         memory_algo->priv_mark_new_allocated_block(second);
         memory_algo->priv_mark_new_allocated_block(third);
         memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(third));
      }
      else{
         second->m_size = (old_size - first->m_size) & block_ctrl::size_mask;
         BOOST_ASSERT(second->m_size >= MinBlockUnits);
         memory_algo->priv_mark_new_allocated_block(second);
      }

      memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(first));
      return memory_algo->priv_get_user_buffer(second);
   }
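
   //Checks whether the block that backs "ptr" could be shrunk so that the
   //preferred size passed in "received_size" still fits (possibly rounded
   //up, but never beyond "max_size"). Updates "received_size" with the
   //resulting usable size, but never modifies the block itself.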
   static bool try_shrink
      (MemoryAlgorithm *memory_algo, void *ptr
      ,const size_type max_size, size_type &received_size)
   {
      size_type const preferred_size = received_size;
      (void)memory_algo;
      //Obtain the real block
      block_ctrl *block = memory_algo->priv_get_block(ptr);
      size_type old_block_units = (size_type)block->m_size;

      //The block must be marked as allocated
      BOOST_ASSERT(memory_algo->priv_is_allocated_block(block));

      //Check if alignment and block size are right
      assert_alignment(ptr);

      //Put this to a safe value
      received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;

      //Now translate it to Alignment units
      const size_type max_user_units       = floor_units(max_size - UsableByPreviousChunk);
      const size_type preferred_user_units = ceil_units(preferred_size - UsableByPreviousChunk);

      //Check that the rounded max and preferred sizes are consistent
      if(max_user_units < preferred_user_units)
         return false;

      //Check if the block is smaller than the requested minimum
      size_type old_user_units = old_block_units - AllocatedCtrlUnits;
      if(old_user_units < preferred_user_units)
         return false;

      //If the block is exactly the preferred size, there is nothing to do
      if(old_user_units == preferred_user_units)
         return true;

      size_type shrunk_user_units =
         ((BlockCtrlUnits - AllocatedCtrlUnits) >= preferred_user_units)
         ? (BlockCtrlUnits - AllocatedCtrlUnits)
         : preferred_user_units;

      //Some parameter checks
      if(max_user_units < shrunk_user_units)
         return false;

      //We must be able to create at least a new empty block
      if((old_user_units - shrunk_user_units) < BlockCtrlUnits ){
         return false;
      }

      //Update new size
      received_size = shrunk_user_units*Alignment + UsableByPreviousChunk;
      return true;
   }
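
   //Performs the shrink validated by try_shrink: trims the block that backs
   //"ptr" to the reduced size reported in "received_size" and hands the
   //freed tail back to the memory algorithm as a new deallocated block.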
   static bool shrink
      (MemoryAlgorithm *memory_algo, void *ptr
      ,const size_type max_size, size_type &received_size)
   {
      size_type const preferred_size = received_size;
      //Obtain the real block
      block_ctrl *block = memory_algo->priv_get_block(ptr);
      size_type old_block_units = (size_type)block->m_size;

      if(!try_shrink(memory_algo, ptr, max_size, received_size)){
         return false;
      }

      //Check if the old size was just the shrunk size (no splitting)
      if((old_block_units - AllocatedCtrlUnits) == ceil_units(preferred_size - UsableByPreviousChunk))
         return true;

      //Now we can just rewrite the size of the old buffer
      block->m_size = ((received_size-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits) & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);

      //We create the new block
      block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
         (reinterpret_cast<char*>(block) + block->m_size*Alignment);
      //Write control data to simulate that this new block was previously
      //allocated, then deallocate it
      new_block->m_size = (old_block_units - block->m_size) & block_ctrl::size_mask;
      BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
      memory_algo->priv_mark_new_allocated_block(block);
      memory_algo->priv_mark_new_allocated_block(new_block);
      memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(new_block));
      return true;
   }

   private:
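   //Core of both public allocate_many overloads: satisfies all element
   //requests with as few calls to priv_allocate as possible, carving each
   //received buffer into per-element blocks that are pushed into "chain".
   //If any request cannot be satisfied, everything already allocated is
   //rolled back via priv_deallocate_many.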
   static void priv_allocate_many
      ( MemoryAlgorithm *memory_algo
      , const size_type *elem_sizes
      , size_type n_elements
      , size_type sizeof_element
      , multiallocation_chain &chain)
   {
      //Note: sizeof_element == 0 indicates that we want to
      //allocate n_elements of the same size "*elem_sizes"

      //Calculate the total size of all requests
      size_type total_request_units = 0;
      size_type elem_units = 0;
      const size_type ptr_size_units = memory_algo->priv_get_total_units(sizeof(void_pointer));
      if(!sizeof_element){
         elem_units = memory_algo->priv_get_total_units(*elem_sizes);
         elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
         total_request_units = n_elements*elem_units;
      }
      else{
         for(size_type i = 0; i < n_elements; ++i){
            if(multiplication_overflows(elem_sizes[i], sizeof_element)){
               total_request_units = 0;
               break;
            }
            elem_units = memory_algo->priv_get_total_units(elem_sizes[i]*sizeof_element);
            elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
            if(sum_overflows(total_request_units, elem_units)){
               total_request_units = 0;
               break;
            }
            total_request_units += elem_units;
         }
      }

      if(total_request_units && !multiplication_overflows(total_request_units, Alignment)){
         size_type low_idx = 0;
         while(low_idx < n_elements){
            size_type total_bytes = total_request_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
            size_type min_allocation = (!sizeof_element)
               ?  elem_units
               :  memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element);
            min_allocation = min_allocation*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;

            size_type received_size = total_bytes;
            void *ignore_reuse = 0;
            void *ret = memory_algo->priv_allocate
               (boost::interprocess::allocate_new, min_allocation, received_size, ignore_reuse);
            if(!ret){
               break;
            }

            block_ctrl *block = memory_algo->priv_get_block(ret);
            size_type received_units = (size_type)block->m_size;
            char *block_address = reinterpret_cast<char*>(block);

            size_type total_used_units = 0;
            while(total_used_units < received_units){
               if(sizeof_element){
                  elem_units = memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element);
                  elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
               }
               if(total_used_units + elem_units > received_units)
                  break;
               total_request_units -= elem_units;
               //This is the position where the new block must be created
               block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>(block_address);
               assert_alignment(new_block);

               //The last block should take all the remaining space
               if((low_idx + 1) == n_elements ||
                  (total_used_units + elem_units +
                     ((!sizeof_element)
                        ? elem_units
                        : max_value(memory_algo->priv_get_total_units(elem_sizes[low_idx+1]*sizeof_element), ptr_size_units))
                   > received_units)){
                  //By default, the new block will use the rest of the buffer
                  new_block->m_size = (received_units - total_used_units) & block_ctrl::size_mask;
                  memory_algo->priv_mark_new_allocated_block(new_block);

                  //If the remaining units are bigger than needed and we can
                  //split the block, obtaining a new free memory block, do it.
                  if((received_units - total_used_units) >= (elem_units + MemoryAlgorithm::BlockCtrlUnits)){
                     size_type shrunk_request = elem_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
                     size_type shrunk_received = shrunk_request;
                     bool shrink_ok = shrink
                        (memory_algo
                        ,memory_algo->priv_get_user_buffer(new_block)
                        ,shrunk_request
                        ,shrunk_received);
                     (void)shrink_ok;
                     //Shrink must always succeed with passed parameters
                     BOOST_ASSERT(shrink_ok);
                     //Some sanity checks
                     BOOST_ASSERT(shrunk_request == shrunk_received);
                     BOOST_ASSERT(elem_units == ((shrunk_request-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits));
                     //"new_block->m_size" must have been reduced to elem_units by "shrink"
                     BOOST_ASSERT(new_block->m_size == elem_units);
                     //Now update the total received units with the reduction
                     received_units = elem_units + total_used_units;
                  }
               }
               else{
                  new_block->m_size = elem_units & block_ctrl::size_mask;
                  memory_algo->priv_mark_new_allocated_block(new_block);
               }

               block_address    += new_block->m_size*Alignment;
               total_used_units += (size_type)new_block->m_size;
               //Check we have enough room to overwrite the intrusive pointer
               BOOST_ASSERT((new_block->m_size*Alignment - AllocatedCtrlBytes) >= sizeof(void_pointer));
               void_pointer p = ::new(memory_algo->priv_get_user_buffer(new_block), boost_container_new_t())void_pointer(0);
               chain.push_back(p);
               ++low_idx;
            }
            //Sanity check
            BOOST_ASSERT(total_used_units == received_units);
         }

         if(low_idx != n_elements){
            priv_deallocate_many(memory_algo, chain);
         }
      }
   }

   static void priv_deallocate_many(MemoryAlgorithm *memory_algo, multiallocation_chain &chain)
   {
      while(!chain.empty()){
         memory_algo->priv_deallocate(to_raw_pointer(chain.pop_front()));
      }
   }
};
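
//A typical memory algorithm delegates its public entry points to
//memory_algorithm_common. A minimal sketch (illustrative only:
//"my_best_fit" is a hypothetical model of the MemoryAlgorithm concept,
//not a class of this library):
//
//   template<class MutexFamily, class VoidPointer>
//   void *my_best_fit<MutexFamily, VoidPointer>::allocate_aligned
//      (size_type nbytes, size_type alignment)
//   {
//      typedef my_best_fit<MutexFamily, VoidPointer> self_t;
//      return memory_algorithm_common<self_t>::allocate_aligned
//         (this, nbytes, alignment);
//   }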

}  //namespace ipcdetail {
}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //#ifndef BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP