12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359 |
- //////////////////////////////////////////////////////////////////////////////
- //
- // (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
- // Software License, Version 1.0. (See accompanying file
- // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
- //
- // See http://www.boost.org/libs/interprocess for documentation.
- //
- //////////////////////////////////////////////////////////////////////////////
- #ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
- #define BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
- #ifndef BOOST_CONFIG_HPP
- # include <boost/config.hpp>
- #endif
- #
- #if defined(BOOST_HAS_PRAGMA_ONCE)
- # pragma once
- #endif
- #include <boost/interprocess/detail/config_begin.hpp>
- #include <boost/interprocess/detail/workaround.hpp>
- // interprocess
- #include <boost/interprocess/containers/allocation_type.hpp>
- #include <boost/interprocess/exceptions.hpp>
- #include <boost/interprocess/interprocess_fwd.hpp>
- #include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
- #include <boost/interprocess/offset_ptr.hpp>
- #include <boost/interprocess/sync/scoped_lock.hpp>
- // interprocess/detail
- #include <boost/interprocess/detail/min_max.hpp>
- #include <boost/interprocess/detail/math_functions.hpp>
- #include <boost/interprocess/detail/type_traits.hpp>
- #include <boost/interprocess/detail/utilities.hpp>
- // container
- #include <boost/container/detail/multiallocation_chain.hpp>
- // container/detail
- #include <boost/container/detail/placement_new.hpp>
- // move/detail
- #include <boost/move/detail/type_traits.hpp> //make_unsigned, alignment_of
- #include <boost/move/detail/force_ptr.hpp> //force_ptr
- // intrusive
- #include <boost/intrusive/pointer_traits.hpp>
- #include <boost/intrusive/set.hpp>
- // other boost
- #include <boost/assert.hpp>
- #include <boost/static_assert.hpp>
- // std
- #include <climits>
- #include <cstring>
- //#define BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
- //to maintain ABI compatible with the original version
- //ABI had to be updated to fix compatibility issues when
- //sharing shared memory between 32 and 64 bit processes.
- //!\file
- //!Describes a best-fit algorithm based in an intrusive red-black tree used to allocate
- //!objects in shared memory. This class is intended as a base class for single segment
- //!and multi-segment implementations.
- namespace boost {
- namespace interprocess {
- //!This class implements an algorithm that stores the free nodes in a red-black tree
- //!to have logarithmic search/insert times.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
class rbtree_best_fit
{
   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   //Non-copyable and non-default-constructible: the algorithm is constructed
   //in-place on top of the managed segment via the (size, extra_hdr_bytes) ctor
   rbtree_best_fit();
   rbtree_best_fit(const rbtree_best_fit &);
   rbtree_best_fit &operator=(const rbtree_best_fit &);

   private:
   struct block_ctrl;
   //Pointer to a block control structure, expressed in the user-supplied
   //pointer family (e.g. offset_ptr when placed in shared memory)
   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<block_ctrl>::type     block_ctrl_ptr;

   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<char>::type           char_ptr;

   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   public:
   //!Shared mutex family used for the rest of the Interprocess framework
   typedef MutexFamily  mutex_family;
   //!Pointer type to be used with the rest of the Interprocess framework
   typedef VoidPointer  void_pointer;
   typedef ipcdetail::basic_multiallocation_chain<VoidPointer> multiallocation_chain;

   typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
   typedef typename boost::container::dtl::make_unsigned<difference_type>::type size_type;

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   private:
   //Intrusive rbtree hook embedded in every free block
   typedef typename bi::make_set_base_hook
      < bi::void_pointer<VoidPointer>
      , bi::optimize_size<true>
      , bi::link_mode<bi::normal_link> >::type TreeHook;

   struct SizeHolder
   {
      //Mask keeping the size bits; the two top bits are the allocation flags below
      static const size_type size_mask = size_type(-1) >> 2;
      //!Size of the previous block, in Alignment units
      size_type m_prev_size;
      //!This block's memory size (including block_ctrl
      //!header) in Alignment units
      size_type m_size           : sizeof(size_type)*CHAR_BIT - 2;
      //!1 if the previous block is allocated
      size_type m_prev_allocated : 1;
      //!1 if this block is allocated
      size_type m_allocated      : 1;
   };

   //!Block control structure
   struct block_ctrl
      : public SizeHolder, public TreeHook
   {
      block_ctrl()
      {  this->m_size = 0; this->m_allocated = 0, this->m_prev_allocated = 0;  }

      //Free blocks are ordered in the tree by size (best-fit search)
      friend bool operator<(const block_ctrl &a, const block_ctrl &b)
      {  return a.m_size < b.m_size;  }
      friend bool operator==(const block_ctrl &a, const block_ctrl &b)
      {  return a.m_size == b.m_size;  }
   };

   //Comparator to search the multiset by raw size without building a block_ctrl
   struct size_block_ctrl_compare
   {
      bool operator()(size_type size, const block_ctrl &block) const
      {  return size < block.m_size;  }

      bool operator()(const block_ctrl &block, size_type size) const
      {  return block.m_size < size;  }
   };

   //!Shared mutex to protect memory allocate/deallocate
   typedef typename MutexFamily::mutex_type mutex_type;
   typedef typename bi::make_multiset
      <block_ctrl, bi::base_hook<TreeHook> >::type Imultiset;

   typedef typename Imultiset::iterator       imultiset_iterator;
   typedef typename Imultiset::const_iterator imultiset_const_iterator;

   //!This struct includes needed data and derives from
   //!mutex_type to allow EBO when using null mutex_type
   struct header_t : public mutex_type
   {
      //Free blocks, ordered by size
      Imultiset m_imultiset;

      //!The extra size required by the segment
      size_type m_extra_hdr_bytes;
      //!Allocated bytes for internal checking
      size_type m_allocated;
      //!The size of the memory segment
      size_type m_size;
   }  m_header;

   friend class ipcdetail::memory_algorithm_common<rbtree_best_fit>;

   typedef ipcdetail::memory_algorithm_common<rbtree_best_fit> algo_impl_t;

   public:
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   //!Constructor. "size" is the total size of the managed memory segment,
   //!"extra_hdr_bytes" indicates the extra bytes beginning in the sizeof(rbtree_best_fit)
   //!offset that the allocator should not use at all.
   rbtree_best_fit(size_type size, size_type extra_hdr_bytes);

   //!Destructor.
   ~rbtree_best_fit();

   //!Obtains the minimum size needed by the algorithm
   static size_type get_min_size(size_type extra_hdr_bytes);

   //Functions for single segment management

   //!Allocates bytes, returns 0 if there is not more memory
   void* allocate(size_type nbytes);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   //Experimental. Don't use
   //!Multiple element allocation, same size
   void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
   }

   //!Multiple element allocation, different size
   void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
   }

   //!Multiple element deallocation
   void deallocate_many(multiallocation_chain &chain);

   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   //!Deallocates previously allocated bytes
   void deallocate(void *addr);

   //!Returns the size of the memory segment
   size_type get_size() const;

   //!Returns the number of free bytes of the segment
   size_type get_free_memory() const;

   //!Initializes to zero all the memory that's not in use.
   //!This function is normally used for security reasons.
   void zero_free_memory();

   //!Increases managed memory in
   //!extra_size bytes more
   void grow(size_type extra_size);

   //!Decreases managed memory as much as possible
   void shrink_to_fit();

   //!Returns true if all allocated memory has been deallocated
   bool all_memory_deallocated();

   //!Makes an internal sanity check
   //!and returns true if success
   bool check_sanity();

   template<class T>
   T * allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
                          size_type &prefer_in_recvd_out_size, T *&reuse);

   void * raw_allocation_command(boost::interprocess::allocation_type command, size_type limit_object,
                          size_type &prefer_in_recvd_out_size,
                          void *&reuse_ptr, size_type sizeof_object = 1);

   //!Returns the size of the buffer previously allocated pointed by ptr
   size_type size(const void *ptr) const;

   //!Allocates aligned bytes, returns 0 if there is not more memory.
   //!Alignment must be power of 2
   void* allocate_aligned(size_type nbytes, size_type alignment);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   private:
   //!Offset in bytes from "this" to the first block of the segment
   static size_type priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes);

   block_ctrl *priv_first_block();

   block_ctrl *priv_end_block();

   void* priv_allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
                        size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object);

   //!Real allocation algorithm with min allocation option
   void * priv_allocate( boost::interprocess::allocation_type command
                       , size_type limit_size, size_type &prefer_in_recvd_out_size
                       , void *&reuse_ptr, size_type backwards_multiple = 1);

   //!Obtains the block control structure of the user buffer
   static block_ctrl *priv_get_block(const void *ptr);

   //!Obtains the pointer returned to the user from the block control
   static void *priv_get_user_buffer(const block_ctrl *block);

   //!Returns the number of total units that a user buffer
   //!of "userbytes" bytes really occupies (including header)
   static size_type priv_get_total_units(size_type userbytes);

   //!Real expand function implementation
   bool priv_expand(void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size);

   //!Real expand to both sides implementation
   void* priv_expand_both_sides(boost::interprocess::allocation_type command
                               ,size_type min_size
                               ,size_type &prefer_in_recvd_out_size
                               ,void *reuse_ptr
                               ,bool only_preferred_backwards
                               ,size_type backwards_multiple);

   //!Returns true if the previous block is allocated
   bool priv_is_prev_allocated(block_ctrl *ptr);

   //!Get a pointer of the "end" block from the first block of the segment
   static block_ctrl * priv_end_block(block_ctrl *first_segment_block);

   //!Get a pointer of the "first" block from the end block of the segment
   static block_ctrl * priv_first_block(block_ctrl *end_segment_block);

   //!Get pointer of the previous block (previous block must be free)
   static block_ctrl * priv_prev_block(block_ctrl *ptr);

   //!Get pointer of the next block
   static block_ctrl * priv_next_block(block_ctrl *ptr);

   //!Check if this block is free (not allocated)
   bool priv_is_allocated_block(block_ctrl *ptr);

   //!Marks the block as allocated
   void priv_mark_as_allocated_block(block_ctrl *ptr);

   //!Marks a freshly created/split block as allocated
   void priv_mark_new_allocated_block(block_ctrl *ptr)
   {  return priv_mark_as_allocated_block(ptr); }

   //!Marks the block as free
   void priv_mark_as_free_block(block_ctrl *ptr);

   //!Checks if block has enough memory and splits/unlinks the block
   //!returning the address to the users
   void* priv_check_and_allocate(size_type units
                                ,block_ctrl* block
                                ,size_type &received_size);
   //!Real deallocation algorithm
   void priv_deallocate(void *addr);

   //!Makes a new memory portion available for allocation
   void priv_add_segment(void *addr, size_type size);

   public:

   //Alignment of every allocation: MemAlignment when non-zero, otherwise the
   //platform's maximum fundamental alignment
   static const size_type Alignment = !MemAlignment
      ? size_type(::boost::container::dtl::alignment_of
                  < ::boost::container::dtl::max_align_t>::value)
      : size_type(MemAlignment)
      ;

   private:
   //Due to embedded bits in size, Alignment must be at least 4
   BOOST_STATIC_ASSERT((Alignment >= 4));
   //Due to rbtree size optimizations, Alignment must have at least pointer alignment
   BOOST_STATIC_ASSERT((Alignment >= ::boost::container::dtl::alignment_of<void_pointer>::value));
   static const size_type AlignmentMask = (Alignment - 1);
   static const size_type BlockCtrlBytes = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
   static const size_type BlockCtrlUnits = BlockCtrlBytes/Alignment;
   static const size_type AllocatedCtrlBytes = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const size_type AllocatedCtrlUnits = AllocatedCtrlBytes/Alignment;
   static const size_type EndCtrlBlockBytes = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const size_type EndCtrlBlockUnits = EndCtrlBlockBytes/Alignment;
   static const size_type MinBlockUnits = BlockCtrlUnits;
   static const size_type UsableByPreviousChunk = sizeof(size_type);

   //Make sure the maximum alignment is power of two
   BOOST_STATIC_ASSERT((0 == (Alignment & (Alignment - size_type(1u)))));
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   public:
   //Bookkeeping bytes consumed per allocation, as seen by the user
   static const size_type PayloadPerAllocation = AllocatedCtrlBytes - UsableByPreviousChunk;
};
- #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
- rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
- ::priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes)
- {
- size_type uint_this = (std::size_t)this_ptr;
- size_type main_hdr_end = uint_this + sizeof(rbtree_best_fit) + extra_hdr_bytes;
- size_type aligned_main_hdr_end = ipcdetail::get_rounded_size(main_hdr_end, Alignment);
- size_type block1_off = aligned_main_hdr_end - uint_this;
- algo_impl_t::assert_alignment(aligned_main_hdr_end);
- algo_impl_t::assert_alignment(uint_this + block1_off);
- return block1_off;
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
- priv_add_segment(void *addr, size_type segment_size)
- {
- //Check alignment
- algo_impl_t::check_alignment(addr);
- //Check size
- BOOST_ASSERT(segment_size >= (BlockCtrlBytes + EndCtrlBlockBytes));
- //Initialize the first big block and the "end" node
- block_ctrl *first_big_block = ::new(addr, boost_container_new_t()) block_ctrl;
- first_big_block->m_size = (segment_size/Alignment - EndCtrlBlockUnits) & block_ctrl::size_mask;
- BOOST_ASSERT(first_big_block->m_size >= BlockCtrlUnits);
- //The "end" node is just a node of size 0 with the "end" bit set
- SizeHolder *end_block =
- ::new(reinterpret_cast<char*>(addr) + first_big_block->m_size*Alignment, boost_container_new_t()) SizeHolder;
- //This will overwrite the prev part of the "end" node
- priv_mark_as_free_block (first_big_block);
- #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
- first_big_block->m_prev_size = end_block->m_size =
- size_type(reinterpret_cast<char*>(first_big_block) - reinterpret_cast<char*>(end_block))/Alignmen) & block_ctrl::size_mask;
- #else
- first_big_block->m_prev_size = end_block->m_size =
- size_type(reinterpret_cast<char*>(end_block) - reinterpret_cast<char*>(first_big_block))/Alignment & block_ctrl::size_mask;
- #endif
- end_block->m_allocated = 1;
- first_big_block->m_prev_allocated = 1;
- BOOST_ASSERT(priv_next_block(first_big_block) == end_block);
- BOOST_ASSERT(priv_prev_block((block_ctrl*)end_block) == first_big_block);
- BOOST_ASSERT(priv_first_block() == first_big_block);
- BOOST_ASSERT(priv_end_block() == end_block);
- //Some check to validate the algorithm, since it makes some assumptions
- //to optimize the space wasted in bookkeeping:
- //Check that the sizes of the header are placed before the rbtree
- BOOST_ASSERT(static_cast<void*>(static_cast<SizeHolder*>(first_big_block))
- < static_cast<void*>(static_cast<TreeHook*>(first_big_block)));
- //Insert it in the intrusive containers
- m_header.m_imultiset.insert(*first_big_block);
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
- rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
- ::priv_first_block()
- {
- const size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
- return move_detail::force_ptr<block_ctrl*>(reinterpret_cast<char*>(this) + block1_off);
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
- rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
- ::priv_end_block()
- {
- const size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
- const size_type original_first_block_size = (m_header.m_size - block1_off)/Alignment - EndCtrlBlockUnits;
- block_ctrl *end_block = move_detail::force_ptr<block_ctrl*>
- (reinterpret_cast<char*>(this) + block1_off + original_first_block_size*Alignment);
- return end_block;
- }
//!Constructor: initializes the header and hands the whole usable area
//!(segment_size bytes minus the header/extra-header prefix) to
//!priv_add_segment as one big free block.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   rbtree_best_fit(size_type segment_size, size_type extra_hdr_bytes)
{
   //Initialize the header
   m_header.m_allocated       = 0;
   m_header.m_size            = segment_size;
   m_header.m_extra_hdr_bytes = extra_hdr_bytes;

   //Now calculate the offset of the first big block that will
   //cover the whole segment
   BOOST_ASSERT(get_min_size(extra_hdr_bytes) <= segment_size);
   size_type block1_off = priv_first_block_offset_from_this(this, extra_hdr_bytes);
   priv_add_segment(reinterpret_cast<char*>(this) + block1_off, segment_size - block1_off);
}
//!Destructor: intentionally empty. All state lives inside the managed
//!segment itself, so nothing is released here. The disabled asserts below
//!would flag user allocations never returned before destruction.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::~rbtree_best_fit()
{
   //There is a memory leak!
//   BOOST_ASSERT(m_header.m_allocated == 0);
//   BOOST_ASSERT(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
}
//!Increases managed memory by extra_size bytes: a new "end" marker is written
//!at the new border, the old end marker becomes an allocated block spanning
//!the grown area, and that block is immediately deallocated so it joins the
//!free tree (coalescing with a trailing free block if any).
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::grow(size_type extra_size)
{
   //Get the address of the first block
   block_ctrl *first_block = priv_first_block();
   block_ctrl *old_end_block = priv_end_block();
   size_type old_border_offset = (size_type)(reinterpret_cast<char*>(old_end_block) -
                                    reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   //Update managed buffer's size
   m_header.m_size += extra_size;

   //We need at least MinBlockUnits blocks to create a new block
   if((m_header.m_size - old_border_offset) < MinBlockUnits){
      return;
   }

   //Now create a new block between the old end and the new end
   size_type align_offset = (m_header.m_size - old_border_offset)/Alignment;
   block_ctrl *new_end_block = move_detail::force_ptr<block_ctrl*>
      (reinterpret_cast<char*>(old_end_block) + align_offset*Alignment);

   //the last and first block are special:
   //new_end_block->m_size & first_block->m_prev_size store the absolute value
   //between them
   new_end_block->m_allocated = 1;
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   new_end_block->m_size = size_type(reinterpret_cast<char*>(first_block) -
                              reinterpret_cast<char*>(new_end_block))/Alignment & block_ctrl::size_mask;
   #else
   new_end_block->m_size = size_type(reinterpret_cast<char*>(new_end_block) -
                              reinterpret_cast<char*>(first_block))/Alignment & block_ctrl::size_mask;
   #endif
   first_block->m_prev_size = new_end_block->m_size;
   first_block->m_prev_allocated = 1;
   BOOST_ASSERT(new_end_block == priv_end_block());

   //The old end block is the new block
   block_ctrl *new_block = old_end_block;
   new_block->m_size = size_type(reinterpret_cast<char*>(new_end_block) -
                          reinterpret_cast<char*>(new_block))/Alignment & block_ctrl::size_mask;
   BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
   priv_mark_as_allocated_block(new_block);
   BOOST_ASSERT(priv_next_block(new_block) == new_end_block);

   //Account the grown area as allocated so the deallocation below balances it
   m_header.m_allocated += (size_type)new_block->m_size*Alignment;

   //Now deallocate the newly created block
   this->priv_deallocate(priv_get_user_buffer(new_block));
}
//!Decreases managed memory as much as possible: if the trailing block is
//!free, it is removed from the free tree and converted into the new "end"
//!marker, shrinking m_header.m_size accordingly. If the whole segment is a
//!single free block, a zero-size sentinel allocation is made first so the
//!segment keeps a valid first block after the shrink.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::shrink_to_fit()
{
   //Get the address of the first block
   block_ctrl *first_block = priv_first_block();
   algo_impl_t::assert_alignment(first_block);

   //block_ctrl *old_end_block = priv_end_block(first_block);
   block_ctrl *old_end_block = priv_end_block();
   algo_impl_t::assert_alignment(old_end_block);
   size_type old_end_block_size = old_end_block->m_size;

   void *unique_buffer = 0;
   block_ctrl *last_block;
   //Check if no memory is allocated between the first and last block
   if(priv_next_block(first_block) == old_end_block){
      //If so check if we can allocate memory
      size_type ignore_recvd = 0;
      void *ignore_reuse = 0;
      unique_buffer = priv_allocate(boost::interprocess::allocate_new, 0, ignore_recvd, ignore_reuse);
      //If not, return, we can't shrink
      if(!unique_buffer)
         return;
      //If we can, mark the position just after the new allocation as the new end
      algo_impl_t::assert_alignment(unique_buffer);
      block_ctrl *unique_block = priv_get_block(unique_buffer);
      BOOST_ASSERT(priv_is_allocated_block(unique_block));
      algo_impl_t::assert_alignment(unique_block);
      last_block = priv_next_block(unique_block);
      BOOST_ASSERT(!priv_is_allocated_block(last_block));
      algo_impl_t::assert_alignment(last_block);
   }
   else{
      //If memory is allocated, check if the last block is allocated
      if(priv_is_prev_allocated(old_end_block))
         return;
      //If not, mark last block after the free block
      last_block = priv_prev_block(old_end_block);
   }

   size_type last_block_size = last_block->m_size;

   //Erase block from the free tree, since we will erase it
   m_header.m_imultiset.erase(Imultiset::s_iterator_to(*last_block));

   size_type shrunk_border_offset = (size_type)(reinterpret_cast<char*>(last_block) -
                                       reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   //The freed trailing block becomes the new end marker
   block_ctrl *new_end_block = last_block;
   algo_impl_t::assert_alignment(new_end_block);

   //Write new end block attributes
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   new_end_block->m_size =
      size_type(reinterpret_cast<char*>(first_block) - reinterpret_cast<char*>(new_end_block))/Alignment & block_ctrl::size_mask;
   first_block->m_prev_size = new_end_block->m_size;
   #else
   new_end_block->m_size =
      size_type(reinterpret_cast<char*>(new_end_block) - reinterpret_cast<char*>(first_block))/Alignment & block_ctrl::size_mask;
   first_block->m_prev_size = new_end_block->m_size;
   #endif

   new_end_block->m_allocated = 1;
   (void)last_block_size;
   (void)old_end_block_size;
   BOOST_ASSERT(new_end_block->m_size == (old_end_block_size - last_block_size));

   //Update managed buffer's size
   m_header.m_size = shrunk_border_offset & block_ctrl::size_mask;
   BOOST_ASSERT(priv_end_block() == new_end_block);
   //Release the sentinel allocation made above, if any
   if(unique_buffer)
      priv_deallocate(unique_buffer);
}
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
- rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_size() const
- { return m_header.m_size; }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
- rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_free_memory() const
- {
- return m_header.m_size - m_header.m_allocated -
- priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
- rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
- get_min_size (size_type extra_hdr_bytes)
- {
- return (algo_impl_t::ceil_units(sizeof(rbtree_best_fit)) +
- algo_impl_t::ceil_units(extra_hdr_bytes) +
- MinBlockUnits + EndCtrlBlockUnits)*Alignment;
- }
//!Returns true if all allocated memory has been deallocated: the allocated
//!byte counter is zero and the free tree holds exactly one block spanning
//!the whole usable segment.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   all_memory_deallocated()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   size_type block1_off =
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);

   //The middle two conditions check that the multiset has exactly one
   //element (non-empty, and the second iterator position is end())
   return m_header.m_allocated == 0 &&
      m_header.m_imultiset.begin() != m_header.m_imultiset.end() &&
       (++m_header.m_imultiset.begin()) == m_header.m_imultiset.end()
     && m_header.m_imultiset.begin()->m_size ==
         (m_header.m_size - block1_off - EndCtrlBlockBytes)/Alignment;
}
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
- check_sanity()
- {
- //-----------------------
- boost::interprocess::scoped_lock<mutex_type> guard(m_header);
- //-----------------------
- imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());
- size_type free_memory = 0;
- //Iterate through all blocks obtaining their size
- for(; ib != ie; ++ib){
- free_memory += (size_type)ib->m_size*Alignment;
- if(!algo_impl_t::check_alignment(&*ib))
- return false;
- }
- //Check allocated bytes are less than size
- if(m_header.m_allocated > m_header.m_size){
- return false;
- }
- size_type block1_off =
- priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
- //Check free bytes are less than size
- if(free_memory > (m_header.m_size - block1_off)){
- return false;
- }
- return true;
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
- allocate(size_type nbytes)
- {
- //-----------------------
- boost::interprocess::scoped_lock<mutex_type> guard(m_header);
- //-----------------------
- size_type ignore_recvd = nbytes;
- void *ignore_reuse = 0;
- return priv_allocate(boost::interprocess::allocate_new, nbytes, ignore_recvd, ignore_reuse);
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
- allocate_aligned(size_type nbytes, size_type alignment)
- {
- //-----------------------
- boost::interprocess::scoped_lock<mutex_type> guard(m_header);
- //-----------------------
- return algo_impl_t::allocate_aligned(this, nbytes, alignment);
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- template<class T>
- inline T* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
- allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
- size_type &prefer_in_recvd_out_size, T *&reuse)
- {
- void* raw_reuse = reuse;
- void* const ret = priv_allocation_command(command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
- reuse = static_cast<T*>(raw_reuse);
- BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::dtl::alignment_of<T>::value));
- return static_cast<T*>(ret);
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
- raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_objects,
- size_type &prefer_in_recvd_out_objects, void *&reuse_ptr, size_type sizeof_object)
- {
- size_type const preferred_objects = prefer_in_recvd_out_objects;
- if(!sizeof_object)
- return reuse_ptr = 0, static_cast<void*>(0);
- if(command & boost::interprocess::try_shrink_in_place){
- if(!reuse_ptr) return static_cast<void*>(0);
- const bool success = algo_impl_t::try_shrink
- ( this, reuse_ptr, limit_objects*sizeof_object
- , prefer_in_recvd_out_objects = preferred_objects*sizeof_object);
- prefer_in_recvd_out_objects /= sizeof_object;
- return success ? reuse_ptr : 0;
- }
- else{
- return priv_allocation_command
- (command, limit_objects, prefer_in_recvd_out_objects, reuse_ptr, sizeof_object);
- }
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
- priv_allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
- size_type &prefer_in_recvd_out_size,
- void *&reuse_ptr, size_type sizeof_object)
- {
- void* ret;
- size_type const preferred_size = prefer_in_recvd_out_size;
- size_type const max_count = m_header.m_size/sizeof_object;
- if(limit_size > max_count || preferred_size > max_count){
- return reuse_ptr = 0, static_cast<void*>(0);
- }
- size_type l_size = limit_size*sizeof_object;
- size_type p_size = preferred_size*sizeof_object;
- size_type r_size;
- {
- //-----------------------
- boost::interprocess::scoped_lock<mutex_type> guard(m_header);
- //-----------------------
- ret = priv_allocate(command, l_size, r_size = p_size, reuse_ptr, sizeof_object);
- }
- prefer_in_recvd_out_size = r_size/sizeof_object;
- return ret;
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
- rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
- size(const void *ptr) const
- {
- //We need no synchronization since this block's size is not going
- //to be modified by anyone else
- //Obtain the real size of the block
- return ((size_type)priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::zero_free_memory()
- {
- //-----------------------
- boost::interprocess::scoped_lock<mutex_type> guard(m_header);
- //-----------------------
- imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());
- //Iterate through all blocks obtaining their size
- while(ib != ie){
- //Just clear user the memory part reserved for the user
- volatile char *ptr = reinterpret_cast<char*>(&*ib) + BlockCtrlBytes;
- size_type s = (size_type)ib->m_size*Alignment - BlockCtrlBytes;
- while(s--){
- *ptr++ = 0;
- }
- //This surprisingly is optimized out by Visual C++ 7.1 in release mode!
- //std::memset( reinterpret_cast<char*>(&*ib) + BlockCtrlBytes
- // , 0
- // , ib->m_size*Alignment - BlockCtrlBytes);
- ++ib;
- }
- }
//!Tries to grow the allocated buffer "reuse_ptr" in place, forwards into the
//!next free block and/or backwards into the previous free block, as permitted
//!by the bits in "command". Sizes are in user bytes; backwards growth is
//!constrained to multiples of "backwards_multiple" (the element size for
//!array-style callers; 0 disables that constraint). When
//!"only_preferred_backwards" is true, backwards expansion must reach the full
//!preferred size; otherwise min_size suffices. On success returns the (possibly
//!moved backwards) user pointer and writes the obtained size to
//!prefer_in_recvd_out_size; returns 0 on failure.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand_both_sides(boost::interprocess::allocation_type command
                         ,size_type min_size
                         ,size_type &prefer_in_recvd_out_size
                         ,void *reuse_ptr
                         ,bool only_preferred_backwards
                         ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   algo_impl_t::assert_alignment(reuse_ptr);
   if(command & boost::interprocess::expand_fwd){
      //Try pure forward expansion first; on success we are done
      if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
         return reuse_ptr;
   }
   else{
      //Forward expansion not requested: succeed immediately if the
      //block already satisfies the request
      prefer_in_recvd_out_size = this->size(reuse_ptr);
      if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
         return reuse_ptr;
   }
   if(backwards_multiple){
      BOOST_ASSERT(0 == (min_size % backwards_multiple));
      BOOST_ASSERT(0 == (preferred_size % backwards_multiple));
   }
   if(command & boost::interprocess::expand_bwd){
      //Obtain the real size of the block
      block_ctrl *reuse = priv_get_block(reuse_ptr);
      //Sanity check
      algo_impl_t::assert_alignment(reuse);
      block_ctrl *prev_block;
      //If the previous block is not free, there is nothing to do
      if(priv_is_prev_allocated(reuse)){
         return 0;
      }
      prev_block = priv_prev_block(reuse);
      BOOST_ASSERT(!priv_is_allocated_block(prev_block));
      //Some sanity checks
      BOOST_ASSERT(prev_block->m_size == reuse->m_prev_size);
      algo_impl_t::assert_alignment(prev_block);
      //Compute how many bytes must be taken from the previous block so the
      //new start stays a multiple of backwards_multiple (lcm of multiple
      //and Alignment); the helper fails on incompatible alignments
      size_type needs_backwards_aligned;
      size_type lcm;
      if(!algo_impl_t::calculate_lcm_and_needs_backwards_lcmed
         ( backwards_multiple
         , prefer_in_recvd_out_size
         , only_preferred_backwards ? preferred_size : min_size
         , lcm, needs_backwards_aligned)){
         return 0;
      }
      //Check if previous block has enough size
      if(size_type(prev_block->m_size*Alignment) >= needs_backwards_aligned){
         //Now take all next space. This will succeed
         if(command & boost::interprocess::expand_fwd){
            size_type received_size2;
            if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, received_size2 = prefer_in_recvd_out_size)){
               BOOST_ASSERT(0);
            }
            BOOST_ASSERT(prefer_in_recvd_out_size == received_size2);
         }
         //We need a minimum size to split the previous one
         if(prev_block->m_size >= (needs_backwards_aligned/Alignment + BlockCtrlUnits)){
            //Case 1: the previous block is big enough to be split; the tail
            //of it becomes the new start of our (now larger) allocated block
            block_ctrl *new_block = move_detail::force_ptr<block_ctrl*>
               (reinterpret_cast<char*>(reuse) - needs_backwards_aligned);
            //Free old previous buffer
            new_block->m_size =
               (AllocatedCtrlUnits + (needs_backwards_aligned + (prefer_in_recvd_out_size - UsableByPreviousChunk))/Alignment) & block_ctrl::size_mask;
            BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(new_block);
            //Shrink the previous block to the space left before new_block
            prev_block->m_size = size_type(reinterpret_cast<char*>(new_block) -
                                 reinterpret_cast<char*>(prev_block))/Alignment & block_ctrl::size_mask;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_free_block(prev_block);
            //Update the old previous block in the free blocks tree
            //If the new size fulfills tree invariants do nothing,
            //otherwise erase() + insert()
            {
               imultiset_iterator prev_block_it(Imultiset::s_iterator_to(*prev_block));
               imultiset_iterator was_smaller_it(prev_block_it);
               if(prev_block_it != m_header.m_imultiset.begin() &&
                  (--(was_smaller_it = prev_block_it))->m_size > prev_block->m_size){
                  m_header.m_imultiset.erase(prev_block_it);
                  m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *prev_block);
               }
            }
            prefer_in_recvd_out_size = needs_backwards_aligned + prefer_in_recvd_out_size;
            m_header.m_allocated += needs_backwards_aligned;
            //Check alignment
            algo_impl_t::assert_alignment(new_block);
            //If the backwards expansion has remaining bytes in the
            //first bytes, fill them with a pattern
            void *p = priv_get_user_buffer(new_block);
            void *user_ptr = reinterpret_cast<char*>(p);
            BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         //Check if there is no place to create a new block and
         //the whole new block is multiple of the backwards expansion multiple
         else if(prev_block->m_size >= needs_backwards_aligned/Alignment &&
                 0 == ((prev_block->m_size*Alignment) % lcm)) {
            //Case 2: the previous block is too small to split but its whole
            //extent keeps the required multiple, so merge it entirely
            //Erase old previous block, since we will change it
            m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));
            //Just merge the whole previous block
            //prev_block->m_size*Alignment is multiple of lcm (and backwards_multiple)
            prefer_in_recvd_out_size = prefer_in_recvd_out_size + (size_type)prev_block->m_size*Alignment;
            m_header.m_allocated += (size_type)prev_block->m_size*Alignment;
            //Now update sizes
            prev_block->m_size = size_type(prev_block->m_size + reuse->m_size) & block_ctrl::size_mask;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(prev_block);
            //If the backwards expansion has remaining bytes in the
            //first bytes, fill them with a pattern
            void *user_ptr = priv_get_user_buffer(prev_block);
            BOOST_ASSERT(size_type(static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         else{
            //Alignment issues
         }
      }
   }
   return 0;
}
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
- deallocate_many(typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_chain &chain)
- {
- //-----------------------
- boost::interprocess::scoped_lock<mutex_type> guard(m_header);
- //-----------------------
- algo_impl_t::deallocate_many(this, chain);
- }
//!Core allocation dispatcher (caller must already hold the segment lock for
//!commands that mutate state). Depending on "command" it shrinks an existing
//!buffer in place, tries to expand "reuse_ptr" forwards/backwards, or performs
//!a fresh best-fit allocation. "limit_size" is the minimum acceptable byte
//!count; prefer_in_recvd_out_size carries the preferred size in and the
//!obtained size out. reuse_ptr is zeroed whenever a fresh (non-reused) buffer
//!is returned or the request fails. Returns the user pointer or 0.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void * rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocate(boost::interprocess::allocation_type command
                ,size_type limit_size
                ,size_type &prefer_in_recvd_out_size
                ,void *&reuse_ptr
               ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   if(command & boost::interprocess::shrink_in_place){
      //Shrinking requires an existing buffer to shrink
      if(!reuse_ptr)  return static_cast<void*>(0);
      bool success =
         algo_impl_t::shrink(this, reuse_ptr, limit_size, prefer_in_recvd_out_size = preferred_size);
      return success ? reuse_ptr : 0;
   }
   prefer_in_recvd_out_size = 0;
   //A minimum larger than the preferred size is a malformed request
   if(limit_size > preferred_size)
      return reuse_ptr = 0, static_cast<void*>(0);
   //Number of units to request (including block_ctrl header)
   size_type preferred_units = priv_get_total_units(preferred_size);
   //Number of units to request (including block_ctrl header)
   size_type limit_units = priv_get_total_units(limit_size);
   //Expand in place
   prefer_in_recvd_out_size = preferred_size;
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      //First attempt: expand demanding the full preferred size backwards
      void *ret = priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size, reuse_ptr, true, backwards_multiple);
      if(ret)
         return ret;
   }
   if(command & boost::interprocess::allocate_new){
      //Best fit: pick the smallest free block of size >= preferred_units
      size_block_ctrl_compare comp;
      imultiset_iterator it(m_header.m_imultiset.lower_bound(preferred_units, comp));
      if(it != m_header.m_imultiset.end()){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (preferred_units, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }
      //Nothing holds the preferred size: fall back to the largest free
      //block (predecessor of end()) if it still meets the minimum
      if(it != m_header.m_imultiset.begin()&&
              (--it)->m_size >= limit_units){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (it->m_size, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }
   }
   //Now try to expand both sides with min size
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      return priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false, backwards_multiple);
   }
   return reuse_ptr = 0, static_cast<void*>(0);
}
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- inline
- typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
- rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_get_block(const void *ptr)
- {
- return const_cast<block_ctrl*>
- (move_detail::force_ptr<const block_ctrl*>
- (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- inline
- void *rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
- priv_get_user_buffer(const typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
- { return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes); }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
- rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
- priv_get_total_units(size_type userbytes)
- {
- if(userbytes < UsableByPreviousChunk)
- userbytes = UsableByPreviousChunk;
- size_type units = ipcdetail::get_rounded_size(userbytes - UsableByPreviousChunk, Alignment)/Alignment + AllocatedCtrlUnits;
- if(units < BlockCtrlUnits) units = BlockCtrlUnits;
- return units;
- }
//!Tries to expand the allocated buffer "ptr" forwards by merging it with the
//!next block when that block is free. min_size/preferred size are in user
//!bytes. prefer_in_recvd_out_size receives the size actually available after
//!the attempt (even on failure). Returns true when at least min_size user
//!bytes are available afterwards.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand (void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   //Obtain the real size of the block
   block_ctrl *block = priv_get_block(ptr);
   size_type old_block_units = block->m_size;
   //The block must be marked as allocated and the sizes must be equal
   BOOST_ASSERT(priv_is_allocated_block(block));
   //Put this to a safe value
   prefer_in_recvd_out_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
      return true;
   //Now translate it to Alignment units
   const size_type min_user_units = algo_impl_t::ceil_units(min_size - UsableByPreviousChunk);
   const size_type preferred_user_units = algo_impl_t::ceil_units(preferred_size - UsableByPreviousChunk);
   //Some parameter checks
   BOOST_ASSERT(min_user_units <= preferred_user_units);
   block_ctrl *next_block;
   //If the next block is allocated we cannot grow forwards: report
   //whether the current size already satisfies the minimum
   if(priv_is_allocated_block(next_block = priv_next_block(block))){
      return prefer_in_recvd_out_size >= min_size;
   }
   algo_impl_t::assert_alignment(next_block);
   //Is "block" + "next_block" big enough?
   const size_type merged_units = old_block_units + (size_type)next_block->m_size;
   //Now get the expansion size
   const size_type merged_user_units = merged_units - AllocatedCtrlUnits;
   if(merged_user_units < min_user_units){
      //Even the merged block is too small: report its size and fail
      prefer_in_recvd_out_size = merged_units*Alignment - UsableByPreviousChunk;
      return false;
   }
   //Now get the maximum size the user can allocate
   size_type intended_user_units = (merged_user_units < preferred_user_units) ?
      merged_user_units : preferred_user_units;
   //These are total units of the merged block (supposing the next block can be split)
   const size_type intended_units = AllocatedCtrlUnits + intended_user_units;
   //Check if we can split the next one in two parts
   if((merged_units - intended_units) >=  BlockCtrlUnits){
      //This block is bigger than needed, split it in
      //two blocks, the first one will be merged and
      //the second's size will be the remaining space
      BOOST_ASSERT(next_block->m_size == priv_next_block(next_block)->m_prev_size);
      const size_type rem_units = merged_units - intended_units;
      //Check if we we need to update the old next block in the free blocks tree
      //If the new size fulfills tree invariants, we just need to replace the node
      //(the block start has been displaced), otherwise erase() + insert().
      //
      //This fixup must be done in two parts, because the new next block might
      //overwrite the tree hook of the old next block. So we first erase the
      //old if needed and we'll insert the new one after creating the new next
      imultiset_iterator old_next_block_it(Imultiset::s_iterator_to(*next_block));
      m_header.m_imultiset.erase(old_next_block_it);
      //This is the remaining block
      block_ctrl *rem_block =
         ::new(reinterpret_cast<char*>(block) + intended_units*Alignment, boost_container_new_t()) block_ctrl;
      rem_block->m_size = rem_units & block_ctrl::size_mask;
      algo_impl_t::assert_alignment(rem_block);
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);
      m_header.m_imultiset.insert(*rem_block);
      //Write the new length
      block->m_size = (intended_user_units + AllocatedCtrlUnits) & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (intended_units - old_block_units)*Alignment;
   }
   //There is no free space to create a new node: just merge both blocks
   else{
      //Now we have to update the data in the tree
      m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));
      //Write the new length
      block->m_size = merged_units & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (merged_units - old_block_units)*Alignment;
   }
   //Re-mirror the allocated flag (the block grew, so the successor changed)
   priv_mark_as_allocated_block(block);
   prefer_in_recvd_out_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   return true;
}
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
- typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
- rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_prev_block
- (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
- {
- BOOST_ASSERT(!ptr->m_prev_allocated);
- return move_detail::force_ptr<block_ctrl*>
- (reinterpret_cast<char*>(ptr) - ptr->m_prev_size*Alignment);
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
- typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
- rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_end_block
- (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *first_segment_block)
- {
- //The first block's logic is different from the rest of blocks: stores in m_prev_size the absolute
- //distance with the end block
- BOOST_ASSERT(first_segment_block->m_prev_allocated);
- block_ctrl *end_block = move_detail::force_ptr<block_ctrl*>
- (reinterpret_cast<char*>(first_segment_block) + first_segment_block->m_prev_size*Alignment);
- (void)end_block;
- BOOST_ASSERT(end_block->m_allocated == 1);
- BOOST_ASSERT(end_block->m_size == first_segment_block->m_prev_size);
- BOOST_ASSERT(end_block > first_segment_block);
- return end_block;
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
- typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
- rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_first_block
- (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *end_segment_block)
- {
- //The first block's logic is different from the rest of blocks: stores in m_prev_size the absolute
- //distance with the end block
- BOOST_ASSERT(end_segment_block->m_allocated);
- block_ctrl *first_block = move_detail::force_ptr<block_ctrl*>
- (reinterpret_cast<char*>(end_segment_block) - end_segment_block->m_size*Alignment);
- (void)first_block;
- BOOST_ASSERT(first_block->m_prev_allocated == 1);
- BOOST_ASSERT(first_block->m_prev_size == end_segment_block->m_size);
- BOOST_ASSERT(end_segment_block > first_block);
- return first_block;
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
- typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
- rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_next_block
- (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
- {
- return move_detail::force_ptr<block_ctrl*>
- (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
- bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_allocated_block
- (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
- {
- bool allocated = block->m_allocated != 0;
- #ifndef NDEBUG
- if(block != priv_end_block()){
- block_ctrl *next_block = move_detail::force_ptr<block_ctrl*>
- (reinterpret_cast<char*>(block) + block->m_size*Alignment);
- bool next_block_prev_allocated = next_block->m_prev_allocated != 0;
- (void)next_block_prev_allocated;
- BOOST_ASSERT(allocated == next_block_prev_allocated);
- }
- #endif
- return allocated;
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
- bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_prev_allocated
- (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
- {
- if(block->m_prev_allocated){
- return true;
- }
- else{
- #ifndef NDEBUG
- if(block != priv_first_block()){
- block_ctrl *prev = priv_prev_block(block);
- (void)prev;
- BOOST_ASSERT(!prev->m_allocated);
- BOOST_ASSERT(prev->m_size == block->m_prev_size);
- }
- #endif
- return false;
- }
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
- void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_allocated_block
- (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
- {
- block->m_allocated = 1;
- move_detail::force_ptr<block_ctrl*>
- (reinterpret_cast<char*>(block)+ block->m_size*Alignment)->m_prev_allocated = 1;
- }
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
- void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_free_block
- (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
- {
- block->m_allocated = 0;
- block_ctrl *next_block = priv_next_block(block);
- next_block->m_prev_allocated = 0;
- next_block->m_prev_size = block->m_size;
- }
//!Carves "nunits" total units (control data included) out of the free block
//!"block", which must currently live in the free-blocks multiset. The block is
//!split when the leftover can hold a full free block, otherwise consumed
//!whole. Writes the usable user byte count to received_size and returns the
//!user buffer pointer (0 only on an impossible undersized block, asserted).
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_check_and_allocate
   (size_type nunits
   ,typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl* block
   ,size_type &received_size)
{
   //Minimum size the block needs so the remainder can stand alone as a free block
   size_type upper_nunits = nunits + BlockCtrlUnits;
   imultiset_iterator it_old = Imultiset::s_iterator_to(*block);
   algo_impl_t::assert_alignment(block);
   if (block->m_size >= upper_nunits){
      //This block is bigger than needed, split it in
      //two blocks, the first's size will be "units" and
      //the second's size "block->m_size-units"
      size_type block_old_size = block->m_size;
      block->m_size = nunits & block_ctrl::size_mask;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      //This is the remaining block
      block_ctrl *rem_block =
         ::new(reinterpret_cast<char*>(block) + Alignment*nunits, boost_container_new_t()) block_ctrl;
      algo_impl_t::assert_alignment(rem_block);
      rem_block->m_size = (block_old_size - nunits) & block_ctrl::size_mask;
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);
      //Now we have to update the data in the tree.
      //Use the position of the erased one as a hint
      m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *rem_block);
   }
   else if (block->m_size >= nunits){
      //Remainder too small to split off: hand out the whole block
      m_header.m_imultiset.erase(it_old);
   }
   else{
      //A free block smaller than the request should never reach here
      BOOST_ASSERT(0);
      return 0;
   }
   //We need block_ctrl for deallocation stuff, so
   //return memory user can overwrite
   m_header.m_allocated += (size_type)block->m_size*Alignment;
   received_size =  ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   //Mark the block as allocated
   priv_mark_as_allocated_block(block);
   //Clear the memory occupied by the tree hook, since this won't be
   //cleared with zero_free_memory
   TreeHook *t = static_cast<TreeHook*>(block);
   //Just clear the memory part reserved for the user
   std::size_t tree_hook_offset_in_block = std::size_t((char*)t - (char*)block);
   //volatile char *ptr =
   char *ptr = reinterpret_cast<char*>(block)+tree_hook_offset_in_block;
   const std::size_t s = BlockCtrlBytes - tree_hook_offset_in_block;
   std::memset(ptr, 0, s);
   //Reset the (now meaningless) mirror size in the successor
   this->priv_next_block(block)->m_prev_size = 0;
   return priv_get_user_buffer(block);
}
- template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
- void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::deallocate(void* addr)
- {
- if(!addr) return;
- //-----------------------
- boost::interprocess::scoped_lock<mutex_type> guard(m_header);
- //-----------------------
- return this->priv_deallocate(addr);
- }
//!Returns the buffer at "addr" to the free pool (caller must hold the segment
//!lock). Coalesces with the previous and/or next block when those are free,
//!updates the allocated-bytes accounting, and (re)inserts the resulting block
//!into the size-ordered free-blocks multiset.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_deallocate(void* addr)
{
   if(!addr)   return;
   block_ctrl *block = priv_get_block(addr);
   //The blocks must be marked as allocated and the sizes must be equal
   BOOST_ASSERT(priv_is_allocated_block(block));
   //Check if alignment and block size are right
   algo_impl_t::assert_alignment(addr);
   size_type block_old_size = Alignment*(size_type)block->m_size;
   BOOST_ASSERT(m_header.m_allocated >= block_old_size);
   //Update used memory count
   m_header.m_allocated -= block_old_size;
   //The block to insert in the tree
   block_ctrl *block_to_insert = block;
   //Get the next block
   block_ctrl *const next_block  = priv_next_block(block);
   const bool merge_with_prev    = !priv_is_prev_allocated(block);
   const bool merge_with_next    = !priv_is_allocated_block(next_block);
   //Merge logic. First just update block sizes, then fix free blocks tree
   if(merge_with_prev || merge_with_next){
      //Merge if the previous is free
      if(merge_with_prev){
         //Get the previous block
         //(the merged block grows from the previous block's start)
         block_to_insert = priv_prev_block(block);
         block_to_insert->m_size = size_type(block_to_insert->m_size + block->m_size) & block_ctrl::size_mask;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
         //The previous block changes size, so remove it from the tree
         //before reinserting it with the merged size below
         m_header.m_imultiset.erase(Imultiset::s_iterator_to(*block_to_insert));
      }
      //Merge if the next is free
      if(merge_with_next){
         block_to_insert->m_size = size_type(block_to_insert->m_size + next_block->m_size) & block_ctrl::size_mask;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
         const imultiset_iterator next_it = Imultiset::s_iterator_to(*next_block);
         m_header.m_imultiset.erase(next_it);
      }
   }
   //Publish the free state (also mirrors size/flag into the successor)
   //and register the coalesced block in the free-blocks tree
   priv_mark_as_free_block(block_to_insert);
   m_header.m_imultiset.insert(*block_to_insert);
}
- #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
- } //namespace interprocess {
- } //namespace boost {
- #include <boost/interprocess/detail/config_end.hpp>
- #endif //#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
|