pool.hpp

// Copyright (C) 2000, 2001 Stephen Cleary
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org for updates, documentation, and revision history.

#ifndef BOOST_POOL_HPP
#define BOOST_POOL_HPP

#include <boost/config.hpp>  // for workarounds

// std::less, std::less_equal, std::greater
#include <functional>
// new[], delete[], std::nothrow
#include <new>
// std::size_t, std::ptrdiff_t
#include <cstddef>
// std::malloc, std::free
#include <cstdlib>
// std::invalid_argument
#include <stdexcept>
// std::max
#include <algorithm>

#include <boost/pool/poolfwd.hpp>

// std::numeric_limits
#include <boost/limits.hpp>
// boost::integer::static_lcm
#include <boost/integer/common_factor_ct.hpp>
// boost::simple_segregated_storage
#include <boost/pool/simple_segregated_storage.hpp>
// boost::alignment_of
#include <boost/type_traits/alignment_of.hpp>
// BOOST_ASSERT
#include <boost/assert.hpp>

#ifdef BOOST_POOL_INSTRUMENT
#include <iostream>
#include <iomanip>
#endif
#ifdef BOOST_POOL_VALGRIND
#include <set>
#include <valgrind/memcheck.h>
#endif

#ifdef BOOST_NO_STDC_NAMESPACE
namespace std { using ::malloc; using ::free; }
#endif
// There are a few places in this file where the expression "this->m" is used.
// This expression is used to force instantiation-time name lookup, which I am
// informed is required for strict Standard compliance. It's only necessary
// if "m" is a member of a base class that is dependent on a template
// parameter.
// Thanks to Jens Maurer for pointing this out!

/*!
  \file
  \brief Provides class \ref pool: a fast memory allocator that guarantees proper alignment of all allocated chunks,
  and which extends and generalizes the framework provided by the simple segregated storage solution.
  Also provides two UserAllocator classes which can be used in conjunction with \ref pool.
*/

/*!
  \mainpage Boost.Pool Memory Allocation Scheme

  \section intro_sec Introduction

  Pool allocation is a memory allocation scheme that is very fast, but limited in its usage.

  This Doxygen-style documentation is complementary to the
  full Quickbook-generated html and pdf documentation at www.boost.org.
  This page is generated from the file pool.hpp.
*/
#ifdef BOOST_MSVC
#pragma warning(push)
#pragma warning(disable:4127)  // Conditional expression is constant
#endif

namespace boost
{

//! \brief Allocator used as the default template parameter for
//! a <a href="boost_pool/pool/pooling.html#boost_pool.pool.pooling.user_allocator">UserAllocator</a>
//! template parameter.  Uses new and delete.
struct default_user_allocator_new_delete
{
  typedef std::size_t size_type; //!< An unsigned integral type that can represent the size of the largest object to be allocated.
  typedef std::ptrdiff_t difference_type; //!< A signed integral type that can represent the difference of any two pointers.

  static char * malloc BOOST_PREVENT_MACRO_SUBSTITUTION(const size_type bytes)
  { //! Attempts to allocate n bytes from the system. Returns 0 if out-of-memory.
    return new (std::nothrow) char[bytes];
  }
  static void free BOOST_PREVENT_MACRO_SUBSTITUTION(char * const block)
  { //! Attempts to de-allocate block.
    //! \pre Block must have been previously returned from a call to UserAllocator::malloc.
    delete [] block;
  }
};

//! \brief <a href="boost_pool/pool/pooling.html#boost_pool.pool.pooling.user_allocator">UserAllocator</a>
//! used as template parameter for \ref pool and \ref object_pool.
//! Uses malloc and free internally.
struct default_user_allocator_malloc_free
{
  typedef std::size_t size_type; //!< An unsigned integral type that can represent the size of the largest object to be allocated.
  typedef std::ptrdiff_t difference_type; //!< A signed integral type that can represent the difference of any two pointers.

  static char * malloc BOOST_PREVENT_MACRO_SUBSTITUTION(const size_type bytes)
  { return static_cast<char *>((std::malloc)(bytes)); }
  static void free BOOST_PREVENT_MACRO_SUBSTITUTION(char * const block)
  { (std::free)(block); }
};
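
// --- Added editorial example (not part of the original header) ---------------
// The two structs above define the UserAllocator concept used by pool: two
// typedefs plus static malloc/free taking and returning char*. Below is a
// minimal sketch of a user-supplied allocator satisfying the same concept;
// the name tracking_allocator and its counter are hypothetical, for
// illustration only.
//
//   #include <cstddef>
//   #include <cstdlib>
//
//   struct tracking_allocator
//   {
//     typedef std::size_t    size_type;
//     typedef std::ptrdiff_t difference_type;
//
//     static size_type allocated;  // total bytes requested from the system
//
//     static char * malloc(const size_type bytes)
//     {
//       allocated += bytes;
//       return static_cast<char *>(std::malloc(bytes));  // 0 on failure
//     }
//     static void free(char * const block)
//     {
//       std::free(block);
//     }
//   };
//   tracking_allocator::size_type tracking_allocator::allocated = 0;
//
//   // usage: boost::pool<tracking_allocator> p(64);
// ------------------------------------------------------------------------------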
namespace details
{  //! Implementation only.

template <typename SizeType>
class PODptr
{ //! PODptr is a class that pretends to be a "pointer" to different class types
  //! that don't really exist.  It provides member functions to access the "data"
  //! of the "object" it points to.  Since these "class" types are of variable
  //! size, and contain some information at the *end* of their memory
  //! (for alignment reasons),
  //! PODptr must contain the size of this "class" as well as the pointer to this "object".

  /*! \details A PODptr holds the location and size of a memory block allocated from the system.
  Each memory block is split logically into three sections:

  <b>Chunk area</b>. This section may be different sizes. PODptr does not care what the size of the chunks is,
  but it does care about (and keep track of) the total size of the chunk area.

  <b>Next pointer</b>. This section is always the same size for a given SizeType. It holds a pointer
  to the location of the next memory block in the memory block list, or 0 if there is no such block.

  <b>Next size</b>. This section is always the same size for a given SizeType. It holds the size of the
  next memory block in the memory block list.

  The PODptr class just provides cleaner ways of dealing with raw memory blocks.

  A PODptr object is either valid or invalid.  An invalid PODptr is analogous to a null pointer.
  The default constructor for PODptr will result in an invalid object.
  Calling the member function invalidate will result in that object becoming invalid.
  The member function valid can be used to test for validity.
  */
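  // --- Added editorial sketch (not part of the original header) ---------------
  // The three sections described above sit consecutively in one system block;
  // total_size() covers all three, element_size() only the chunk area:
  //
  //   ptr                                ptr_next_ptr()   ptr_next_size()
  //    |                                       |                |
  //    v                                       v                v
  //    +---------------------------------+-----------------+-----------+
  //    | chunk area (element_size bytes) |  next pointer   | next size |
  //    +---------------------------------+-----------------+-----------+
  //    |<------------------- total_size (sz) bytes ------------------->|
  //
  // The "next pointer" field occupies lcm(sizeof(size_type), sizeof(void *))
  // bytes, matching ptr_next_ptr() below.
  // -----------------------------------------------------------------------------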
public:
  typedef SizeType size_type;

private:
  char * ptr;
  size_type sz;

  char * ptr_next_size() const
  {
    return (ptr + sz - sizeof(size_type));
  }
  char * ptr_next_ptr() const
  {
    return (ptr_next_size() -
        integer::static_lcm<sizeof(size_type), sizeof(void *)>::value);
  }

public:
  PODptr(char * const nptr, const size_type nsize)
  : ptr(nptr), sz(nsize)
  {
    //! A PODptr may be created to point to a memory block by passing
    //! the address and size of that memory block into the constructor.
    //! A PODptr constructed in this way is valid.
  }
  PODptr()
  : ptr(0), sz(0)
  { //! The default constructor for PODptr will result in an invalid object.
  }

  bool valid() const
  { //! A PODptr object is either valid or invalid.
    //! An invalid PODptr is analogous to a null pointer.
    //! \returns true if PODptr is valid, false if invalid.
    return (begin() != 0);
  }
  void invalidate()
  { //! Make the object invalid.
    begin() = 0;
  }
  char * & begin()
  { //! Each PODptr keeps the address and size of its memory block.
    //! \returns The address of its memory block.
    return ptr;
  }
  char * begin() const
  { //! Each PODptr keeps the address and size of its memory block.
    //! \return The address of its memory block.
    return ptr;
  }
  char * end() const
  { //! \returns begin() plus element_size() (a 'past the end' value).
    return ptr_next_ptr();
  }
  size_type total_size() const
  { //! Each PODptr keeps the address and size of its memory block.
    //! The address may be read or written by the member function begin().
    //! The size of the memory block may only be read.
    //! \returns the size of the memory block.
    return sz;
  }
  size_type element_size() const
  { //! \returns the size of the chunk area (total size minus the housekeeping data at the end).
    return static_cast<size_type>(sz - sizeof(size_type) -
        integer::static_lcm<sizeof(size_type), sizeof(void *)>::value);
  }
  size_type & next_size() const
  { //! \returns the size of the next memory block (read/write).
    return *(static_cast<size_type *>(static_cast<void*>((ptr_next_size()))));
  }
  char * & next_ptr() const
  { //! \returns a reference to the pointer to the next memory block.
    return *(static_cast<char **>(static_cast<void*>(ptr_next_ptr())));
  }

  PODptr next() const
  { //! \returns the next PODptr in the memory block list.
    return PODptr<size_type>(next_ptr(), next_size());
  }
  void next(const PODptr & arg) const
  { //! Sets the next PODptr in the memory block list.
    next_ptr() = arg.begin();
    next_size() = arg.total_size();
  }
}; // class PODptr
} // namespace details

#ifndef BOOST_POOL_VALGRIND
/*!
  \brief A fast memory allocator that guarantees proper alignment of all allocated chunks.

  \details Whenever an object of type pool needs memory from the system,
  it will request it from its UserAllocator template parameter.
  The amount requested is determined using a doubling algorithm;
  that is, each time more system memory is allocated,
  the amount of system memory requested is doubled.

  Users may control the doubling algorithm by using the following extensions:

  Users may pass an additional constructor parameter to pool.
  This parameter is of type size_type,
  and is the number of chunks to request from the system
  the first time that object needs to allocate system memory.
  The default is 32. This parameter may not be 0.

  Users may also pass an optional third parameter to pool's
  constructor.  This parameter is of type size_type,
  and sets a maximum size for allocated chunks.  When this
  parameter takes the default value of 0, then there is no upper
  limit on chunk size.

  Finally, if the doubling algorithm results in no memory
  being allocated, the pool will backtrack just once, halving
  the chunk size and trying again.

  <b>UserAllocator type</b> - the method that the Pool will use to allocate memory from the system.

  There are essentially two ways to use class pool: the client can call \ref malloc() and \ref free() to allocate
  and free single chunks of memory; this is the most efficient way to use a pool, but it does not allow for
  the efficient allocation of arrays of chunks.  Alternatively, the client may call \ref ordered_malloc() and \ref
  ordered_free(), in which case the free list is maintained in an ordered state, and efficient allocation of arrays
  of chunks is possible.  However, this latter option can suffer from poor performance when large numbers of
  allocations are performed.  (An illustrative usage sketch follows the class definition below.)
*/
template <typename UserAllocator>
class pool: protected simple_segregated_storage < typename UserAllocator::size_type >
{
  public:
    typedef UserAllocator user_allocator; //!< User allocator.
    typedef typename UserAllocator::size_type size_type;  //!< An unsigned integral type that can represent the size of the largest object to be allocated.
    typedef typename UserAllocator::difference_type difference_type;  //!< A signed integral type that can represent the difference of any two pointers.

  private:
    BOOST_STATIC_CONSTANT(size_type, min_alloc_size =
        (::boost::integer::static_lcm<sizeof(void *), sizeof(size_type)>::value) );
    BOOST_STATIC_CONSTANT(size_type, min_align =
        (::boost::integer::static_lcm< ::boost::alignment_of<void *>::value, ::boost::alignment_of<size_type>::value>::value) );

    //! \returns 0 if out-of-memory.
    //! Called if malloc/ordered_malloc needs to resize the free list.
    void * malloc_need_resize();          //! Called if malloc needs to resize the free list.
    void * ordered_malloc_need_resize();  //! Called if ordered_malloc needs to resize the free list.

  protected:
    details::PODptr<size_type> list; //!< List structure holding ordered blocks.

    simple_segregated_storage<size_type> & store()
    { //! \returns a reference to the underlying store.
      return *this;
    }
    const simple_segregated_storage<size_type> & store() const
    { //! \returns a const reference to the underlying store.
      return *this;
    }
    const size_type requested_size;
    size_type next_size;
    size_type start_size;
    size_type max_size;

    //! finds which POD in the list 'chunk' was allocated from.
    details::PODptr<size_type> find_POD(void * const chunk) const;

    // is_from() tests a chunk to determine if it belongs in a block.
    static bool is_from(void * const chunk, char * const i,
        const size_type sizeof_i)
    { //! \param chunk chunk to check if is from this pool.
      //! \param i memory chunk at i with element size sizeof_i.
      //! \param sizeof_i element size (size of the chunk area of that block, not the total size of that block).
      //! \returns true if chunk was allocated from the block at i, or may be
      //! returned as the result of a future allocation from that block.
      //!
      //! Returns false if chunk was allocated from some other pool,
      //! or may be returned as the result of a future allocation from some other pool.
      //! Otherwise, the return value is meaningless.
      //!
      //! Note that this function may not be used to reliably test random pointer values.

      // We use std::less_equal and std::less to test 'chunk'
      //  against the array bounds because standard operators
      //  may return unspecified results.
      // This is to ensure portability.  The operators < <= > >= are only
      //  defined for pointers to objects that are 1) in the same array, or
      //  2) subobjects of the same object [5.9/2].
      // The functor objects guarantee a total order for any pointer [20.3.3/8].
      std::less_equal<void *> lt_eq;
      std::less<void *> lt;
      return (lt_eq(i, chunk) && lt(chunk, i + sizeof_i));
    }

    size_type alloc_size() const
    { //! Calculated size of the memory chunks that will be allocated by this Pool.
      //! \returns the allocated size.
      // For alignment reasons, this used to be defined to be lcm(requested_size, sizeof(void *), sizeof(size_type)),
      // but is now more parsimonious: just rounding up to the minimum required alignment of our housekeeping data
      // when required.  This works provided all alignments are powers of two.
      size_type s = (std::max)(requested_size, min_alloc_size);
      size_type rem = s % min_align;
      if(rem)
         s += min_align - rem;
      BOOST_ASSERT(s >= min_alloc_size);
      BOOST_ASSERT(s % min_align == 0);
      return s;
    }
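
    // --- Added editorial example (not part of the original header) -------------
    // A worked instance of the rounding above, assuming a 64-bit platform where
    // sizeof(void *) == sizeof(size_type) == 8, so min_alloc_size == min_align == 8:
    //   requested_size = 13  ->  s = max(13, 8) = 13; rem = 13 % 8 = 5;
    //                            s += 8 - 5      ->  alloc_size() == 16
    //   requested_size = 8   ->  s = 8; rem = 0   ->  alloc_size() == 8
    //   requested_size = 3   ->  s = max(3, 8) = 8 -> alloc_size() == 8
    // ---------------------------------------------------------------------------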
    size_type max_chunks() const
    { //! Calculated maximum number of memory chunks that can be allocated in a single call by this Pool.
      size_type POD_size = integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
      return ((std::numeric_limits<size_type>::max)() - POD_size) / alloc_size();
    }

    static void * & nextof(void * const ptr)
    { //! \returns Pointer dereferenced.
      //! (Provided and used for the sake of code readability :)
      return *(static_cast<void **>(ptr));
    }
  public:
    // pre: nrequested_size != 0 && nnext_size != 0
    explicit pool(const size_type nrequested_size,
        const size_type nnext_size = 32,
        const size_type nmax_size = 0)
    :
        list(0, 0), requested_size(nrequested_size), next_size(nnext_size), start_size(nnext_size), max_size(nmax_size)
    { //! Constructs a new empty Pool that can be used to allocate chunks of size RequestedSize.
      //! \param nrequested_size Requested chunk size.
      //! \param nnext_size parameter is of type size_type,
      //!   and is the number of chunks to request from the system
      //!   the first time that object needs to allocate system memory.
      //!   The default is 32. This parameter may not be 0.
      //! \param nmax_size is the maximum number of chunks to allocate in one block.
      set_next_size(nnext_size);
      set_max_size(nmax_size);
    }

    ~pool()
    { //! Destructs the Pool, freeing its list of memory blocks.
      purge_memory();
    }

    // Releases memory blocks that don't have chunks allocated
    // pre: lists are ordered
    //  Returns true if memory was actually deallocated
    bool release_memory();

    // Releases *all* memory blocks, even if chunks are still allocated
    //  Returns true if memory was actually deallocated
    bool purge_memory();

    size_type get_next_size() const
    { //! Number of chunks to request from the system the next time that object needs to allocate system memory. This value should never be 0.
      //! \returns next_size.
      return next_size;
    }
    void set_next_size(const size_type nnext_size)
    { //! Set the number of chunks to request from the system the next time that object needs to allocate system memory. This value should never be set to 0.
      BOOST_USING_STD_MIN();
      next_size = start_size = min BOOST_PREVENT_MACRO_SUBSTITUTION(nnext_size, max_chunks());
    }
    size_type get_max_size() const
    { //! \returns max_size.
      return max_size;
    }
    void set_max_size(const size_type nmax_size)
    { //! Set max_size.
      BOOST_USING_STD_MIN();
      max_size = min BOOST_PREVENT_MACRO_SUBSTITUTION(nmax_size, max_chunks());
    }
    size_type get_requested_size() const
    { //! \returns the requested size passed into the constructor.
      //! (This value will not change during the lifetime of a Pool object.)
      return requested_size;
    }
    // Both malloc and ordered_malloc do a quick inlined check first for any
    //  free chunks.  Only if we need to get another memory block do we call
    //  the non-inlined *_need_resize() functions.
    // Returns 0 if out-of-memory
    void * malloc BOOST_PREVENT_MACRO_SUBSTITUTION()
    { //! Allocates a chunk of memory.  Searches in the list of memory blocks
      //! for a block that has a free chunk, and returns that free chunk if found.
      //! Otherwise, creates a new memory block, adds its free list to pool's free list,
      //! \returns a free chunk from that block.
      //! If a new memory block cannot be allocated, returns 0.  Amortized O(1).
      // Look for a non-empty storage
      if (!store().empty())
        return (store().malloc)();
      return malloc_need_resize();
    }

    void * ordered_malloc()
    { //! Same as malloc, only merges the free lists, to preserve order.  Amortized O(1).
      //! \returns a free chunk from that block.
      //! If a new memory block cannot be allocated, returns 0.
      // Look for a non-empty storage
      if (!store().empty())
        return (store().malloc)();
      return ordered_malloc_need_resize();
    }

    // Returns 0 if out-of-memory
    // Allocate a contiguous section of n chunks
    void * ordered_malloc(size_type n);
      //! Same as malloc, only allocates enough contiguous chunks to cover n * requested_size bytes.  Amortized O(n).
      //! \returns the address of a contiguous section of n chunks.
      //! If a new memory block cannot be allocated, returns 0.
    // pre: 'chunk' must have been previously
    //        returned by *this.malloc().
    void free BOOST_PREVENT_MACRO_SUBSTITUTION(void * const chunk)
    { //! Deallocates a chunk of memory.  Note that chunk may not be 0.  O(1).
      //!
      //! Chunk must have been previously returned by t.malloc() or t.ordered_malloc().
      (store().free)(chunk);
    }

    // pre: 'chunk' must have been previously
    //        returned by *this.malloc().
    void ordered_free(void * const chunk)
    { //! Same as above, but is order-preserving.
      //!
      //! Note that chunk may not be 0.  O(N) with respect to the size of the free list.
      //! chunk must have been previously returned by t.malloc() or t.ordered_malloc().
      store().ordered_free(chunk);
    }

    // pre: 'chunks' must have been previously
    //        returned by *this.ordered_malloc(n).
    void free BOOST_PREVENT_MACRO_SUBSTITUTION(void * const chunks, const size_type n)
    { //! Assumes that chunks actually refers to a block of chunks
      //! spanning n * partition_sz bytes;
      //! deallocates each chunk in that block.
      //!
      //! chunks must have been previously returned by t.ordered_malloc(n).
      //! Note that chunks may not be 0.  O(n).
      const size_type partition_size = alloc_size();
      const size_type total_req_size = n * requested_size;
      const size_type num_chunks = total_req_size / partition_size +
          ((total_req_size % partition_size) ? true : false);

      store().free_n(chunks, num_chunks, partition_size);
    }

    // pre: 'chunks' must have been previously
    //        returned by *this.ordered_malloc(n).
    void ordered_free(void * const chunks, const size_type n)
    { //! Assumes that chunks actually refers to a block of chunks spanning n * partition_sz bytes;
      //! deallocates each chunk in that block.
      //!
      //! chunks must have been previously returned by t.ordered_malloc(n).
      //! Note that chunks may not be 0.  Order-preserving.  O(N + n) where N is the size of the free list.
      const size_type partition_size = alloc_size();
      const size_type total_req_size = n * requested_size;
      const size_type num_chunks = total_req_size / partition_size +
          ((total_req_size % partition_size) ? true : false);

      store().ordered_free_n(chunks, num_chunks, partition_size);
    }
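
    // --- Added editorial example (not part of the original header) -------------
    // The num_chunks computation above is a ceiling division: it recovers how
    // many partition_size-sized chunks were handed out for a request of
    // n * requested_size bytes.  A worked instance, assuming requested_size == 13
    // and alloc_size() == 16 (see the alloc_size() example earlier):
    //   n = 4: total_req_size = 52; 52 / 16 = 3, 52 % 16 = 4 (nonzero)
    //          -> num_chunks = 3 + 1 = 4
    // ---------------------------------------------------------------------------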
    // is_from() tests a chunk to determine if it was allocated from *this
    bool is_from(void * const chunk) const
    { //! \returns true if chunk was allocated from this pool or
      //! may be returned as the result of a future allocation from this pool.
      //! Returns false if chunk was allocated from some other pool or
      //! may be returned as the result of a future allocation from some other pool.
      //! Otherwise, the return value is meaningless.
      //! Note that this function may not be used to reliably test random pointer values.
      return (find_POD(chunk).valid());
    }
};

#ifndef BOOST_NO_INCLASS_MEMBER_INITIALIZATION
template <typename UserAllocator>
typename pool<UserAllocator>::size_type const pool<UserAllocator>::min_alloc_size;
template <typename UserAllocator>
typename pool<UserAllocator>::size_type const pool<UserAllocator>::min_align;
#endif
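
// --- Added editorial example (not part of the original header) ---------------
// A minimal usage sketch of the two styles described in the class comment.
// Single-chunk malloc/free is the fast path; the ordered_* family keeps the
// free list sorted so that contiguous runs and release_memory() work.
//
//   #include <boost/pool/pool.hpp>
//
//   int main()
//   {
//     boost::pool<> p(sizeof(int));       // chunks of sizeof(int) bytes
//
//     void * single = (p.malloc)();       // fast path, unordered free list
//     if (single)
//       (p.free)(single);
//
//     void * run = p.ordered_malloc(8);   // 8 contiguous chunks
//     if (run)
//       p.ordered_free(run, 8);           // keeps the free list ordered
//
//     p.release_memory();                 // requires an ordered free list, so
//                                         // prefer the ordered_* calls when
//                                         // this will be used
//   }                                     // ~pool() runs purge_memory()
// ------------------------------------------------------------------------------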
template <typename UserAllocator>
bool pool<UserAllocator>::release_memory()
{ //! pool must be ordered.  Frees every memory block that doesn't have any allocated chunks.
  //! \returns true if at least one memory block was freed.

  // ret is the return value: it will be set to true when we actually call
  //  UserAllocator::free(..)
  bool ret = false;

  // This is a current & previous iterator pair over the memory block list
  details::PODptr<size_type> ptr = list;
  details::PODptr<size_type> prev;

  // This is a current & previous iterator pair over the free memory chunk list
  //  Note that "prev_free" in this case does NOT point to the previous memory
  //  chunk in the free list, but rather the last free memory chunk before the
  //  current block.
  void * free_p = this->first;
  void * prev_free_p = 0;

  const size_type partition_size = alloc_size();

  // Search through all the allocated memory blocks
  while (ptr.valid())
  {
    // At this point:
    //  ptr points to a valid memory block
    //  free_p points to either:
    //    0 if there are no more free chunks
    //    the first free chunk in this or some next memory block
    //  prev_free_p points to either:
    //    the last free chunk in some previous memory block
    //    0 if there is no such free chunk
    //  prev is either:
    //    the PODptr whose next() is ptr
    //    !valid() if there is no such PODptr

    // If there are no more free memory chunks, then every remaining
    //  block is allocated out to its fullest capacity, and we can't
    //  release any more memory
    if (free_p == 0)
      break;

    // We have to check all the chunks.  If they are *all* free (i.e., present
    //  in the free list), then we can free the block.
    bool all_chunks_free = true;

    // Iterate 'i' through all chunks in the memory block
    // if free starts in the memory block, be careful to keep it there
    void * saved_free = free_p;
    for (char * i = ptr.begin(); i != ptr.end(); i += partition_size)
    {
      // If this chunk is not free
      if (i != free_p)
      {
        // We won't be able to free this block
        all_chunks_free = false;

        // free_p might have travelled outside ptr
        free_p = saved_free;
        // Abort searching the chunks; we won't be able to free this
        //  block because a chunk is not free.
        break;
      }

      // We do not increment prev_free_p because we are in the same block
      free_p = nextof(free_p);
    }

    // post: if the memory block has any chunks, free_p points to one of them
    //  otherwise, our assertions above are still valid

    const details::PODptr<size_type> next = ptr.next();

    if (!all_chunks_free)
    {
      if (is_from(free_p, ptr.begin(), ptr.element_size()))
      {
        std::less<void *> lt;
        void * const end = ptr.end();
        do
        {
          prev_free_p = free_p;
          free_p = nextof(free_p);
        } while (free_p && lt(free_p, end));
      }

      // This invariant is now restored:
      //  free_p points to the first free chunk in some next memory block, or
      //   0 if there is no such chunk.
      //  prev_free_p points to the last free chunk in this memory block.

      // We are just about to advance ptr.  Maintain the invariant:
      //  prev is the PODptr whose next() is ptr, or !valid()
      //   if there is no such PODptr
      prev = ptr;
    }
    else
    {
      // All chunks from this block are free

      // Remove block from list
      if (prev.valid())
        prev.next(next);
      else
        list = next;

      // Remove all entries in the free list from this block
      if (prev_free_p != 0)
        nextof(prev_free_p) = free_p;
      else
        this->first = free_p;

      // And release memory
      (UserAllocator::free)(ptr.begin());
      ret = true;
    }

    // Increment ptr
    ptr = next;
  }

  next_size = start_size;
  return ret;
}
template <typename UserAllocator>
bool pool<UserAllocator>::purge_memory()
{ //! pool must be ordered.
  //! Frees every memory block.
  //!
  //! This function invalidates any pointers previously returned
  //! by allocation functions of t.
  //! \returns true if at least one memory block was freed.
  details::PODptr<size_type> iter = list;

  if (!iter.valid())
    return false;

  do
  {
    // hold "next" pointer
    const details::PODptr<size_type> next = iter.next();

    // delete the storage
    (UserAllocator::free)(iter.begin());

    // increment iter
    iter = next;
  } while (iter.valid());

  list.invalidate();
  this->first = 0;
  next_size = start_size;

  return true;
}
template <typename UserAllocator>
void * pool<UserAllocator>::malloc_need_resize()
{ //! No memory in any of our storages; make a new storage and
  //! allocate a chunk from it.
  //! \returns a pointer to the new chunk, or 0 if out-of-memory.
  size_type partition_size = alloc_size();
  size_type POD_size = static_cast<size_type>(next_size * partition_size +
      integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
  char * ptr = (UserAllocator::malloc)(POD_size);
  if (ptr == 0)
  {
     if(next_size > 4)
     {
        next_size >>= 1;
        partition_size = alloc_size();
        POD_size = static_cast<size_type>(next_size * partition_size +
            integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
        ptr = (UserAllocator::malloc)(POD_size);
     }
     if(ptr == 0)
        return 0;
  }
  const details::PODptr<size_type> node(ptr, POD_size);

  BOOST_USING_STD_MIN();
  if(!max_size)
    set_next_size(next_size << 1);
  else if(next_size * partition_size / requested_size < max_size)
    set_next_size(min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size * requested_size / partition_size));

  //  initialize it,
  store().add_block(node.begin(), node.element_size(), partition_size);

  //  insert it into the list,
  node.next(list);
  list = node;

  //  and return a chunk from it.
  return (store().malloc)();
}
template <typename UserAllocator>
void * pool<UserAllocator>::ordered_malloc_need_resize()
{ //! No memory in any of our storages; make a new storage and
  //! allocate a chunk from it, keeping the block list ordered.
  //! \returns a pointer to the new chunk, or 0 if out-of-memory.
  size_type partition_size = alloc_size();
  size_type POD_size = static_cast<size_type>(next_size * partition_size +
      integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
  char * ptr = (UserAllocator::malloc)(POD_size);
  if (ptr == 0)
  {
     if(next_size > 4)
     {
       next_size >>= 1;
       partition_size = alloc_size();
       POD_size = static_cast<size_type>(next_size * partition_size +
           integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
       ptr = (UserAllocator::malloc)(POD_size);
     }
     if(ptr == 0)
       return 0;
  }
  const details::PODptr<size_type> node(ptr, POD_size);

  BOOST_USING_STD_MIN();
  if(!max_size)
    set_next_size(next_size << 1);
  else if(next_size * partition_size / requested_size < max_size)
    set_next_size(min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size * requested_size / partition_size));

  //  initialize it,
  //  (the free list is empty at this point, so the ordered insertion
  //  performed by add_ordered_block is trivially cheap here)
  store().add_ordered_block(node.begin(), node.element_size(), partition_size);

  //  insert it into the list,
  //   handle border case.
  if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
  {
    node.next(list);
    list = node;
  }
  else
  {
    details::PODptr<size_type> prev = list;

    while (true)
    {
      // if we're about to hit the end, or
      //  if we've found where "node" goes.
      if (prev.next_ptr() == 0
          || std::greater<void *>()(prev.next_ptr(), node.begin()))
        break;

      prev = prev.next();
    }

    node.next(prev.next());
    prev.next(node);
  }
  //  and return a chunk from it.
  return (store().malloc)();
}
template <typename UserAllocator>
void * pool<UserAllocator>::ordered_malloc(const size_type n)
{ //! Allocates a contiguous section of n chunks, requesting new memory
  //! from the system if not enough is already available.
  //! \returns the address of the section if allocation succeeded.
  //! \returns 0 if there is not enough memory for n chunks.
  if (n > max_chunks())
    return 0;

  const size_type partition_size = alloc_size();
  const size_type total_req_size = n * requested_size;
  const size_type num_chunks = total_req_size / partition_size +
      ((total_req_size % partition_size) ? true : false);

  void * ret = store().malloc_n(num_chunks, partition_size);

#ifdef BOOST_POOL_INSTRUMENT
  std::cout << "Allocating " << n << " chunks from pool of size " << partition_size << std::endl;
#endif
  if ((ret != 0) || (n == 0))
    return ret;

#ifdef BOOST_POOL_INSTRUMENT
  std::cout << "Cache miss, allocating another chunk...\n";
#endif

  // Not enough memory in our storages; make a new storage,
  BOOST_USING_STD_MAX();
  next_size = max BOOST_PREVENT_MACRO_SUBSTITUTION(next_size, num_chunks);
  size_type POD_size = static_cast<size_type>(next_size * partition_size +
      integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
  char * ptr = (UserAllocator::malloc)(POD_size);
  if (ptr == 0)
  {
     if(num_chunks < next_size)
     {
        // Try again with just enough memory to do the job, or at least whatever we
        // allocated last time:
        next_size >>= 1;
        next_size = max BOOST_PREVENT_MACRO_SUBSTITUTION(next_size, num_chunks);
        POD_size = static_cast<size_type>(next_size * partition_size +
            integer::static_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type));
        ptr = (UserAllocator::malloc)(POD_size);
     }
     if(ptr == 0)
       return 0;
  }
  const details::PODptr<size_type> node(ptr, POD_size);

  // Split up the block so we can use what wasn't requested.
  if (next_size > num_chunks)
    store().add_ordered_block(node.begin() + num_chunks * partition_size,
        node.element_size() - num_chunks * partition_size, partition_size);

  BOOST_USING_STD_MIN();
  if(!max_size)
    set_next_size(next_size << 1);
  else if(next_size * partition_size / requested_size < max_size)
    set_next_size(min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size * requested_size / partition_size));

  //  insert it into the list,
  //   handle border case.
  if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
  {
    node.next(list);
    list = node;
  }
  else
  {
    details::PODptr<size_type> prev = list;

    while (true)
    {
      // if we're about to hit the end, or if we've found where "node" goes.
      if (prev.next_ptr() == 0
          || std::greater<void *>()(prev.next_ptr(), node.begin()))
        break;

      prev = prev.next();
    }

    node.next(prev.next());
    prev.next(node);
  }

  //  and return it.
  return node.begin();
}

template <typename UserAllocator>
details::PODptr<typename pool<UserAllocator>::size_type>
pool<UserAllocator>::find_POD(void * const chunk) const
{ //! find which PODptr storage memory that this chunk is from.
  //! \returns the PODptr that holds this chunk.
  // Iterate down list to find which storage this chunk is from.
  details::PODptr<size_type> iter = list;
  while (iter.valid())
  {
    if (is_from(chunk, iter.begin(), iter.element_size()))
      return iter;
    iter = iter.next();
  }
  return iter;
}
#else // BOOST_POOL_VALGRIND

template<typename UserAllocator>
class pool
{
public:
  // types
  typedef UserAllocator user_allocator;                             // User allocator.
  typedef typename UserAllocator::size_type size_type;              // An unsigned integral type that can represent the size of the largest object to be allocated.
  typedef typename UserAllocator::difference_type difference_type;  // A signed integral type that can represent the difference of any two pointers.

  // construct/copy/destruct
  explicit pool(const size_type s, const size_type = 32, const size_type m = 0) : chunk_size(s), max_alloc_size(m) {}
  ~pool()
  {
    purge_memory();
  }

  bool release_memory()
  {
    bool ret = !free_list.empty();
    for(std::set<void*>::iterator pos = free_list.begin(); pos != free_list.end(); ++pos)
    {
      (user_allocator::free)(static_cast<char*>(*pos));
    }
    free_list.clear();
    return ret;
  }
  bool purge_memory()
  {
    bool ret = !(free_list.empty() && used_list.empty());
    for(std::set<void*>::iterator pos = free_list.begin(); pos != free_list.end(); ++pos)
    {
      (user_allocator::free)(static_cast<char*>(*pos));
    }
    free_list.clear();
    for(std::set<void*>::iterator pos = used_list.begin(); pos != used_list.end(); ++pos)
    {
      (user_allocator::free)(static_cast<char*>(*pos));
    }
    used_list.clear();
    return ret;
  }
  size_type get_next_size() const
  {
    return 1;
  }
  void set_next_size(const size_type) {}
  size_type get_max_size() const
  {
    return max_alloc_size;
  }
  void set_max_size(const size_type s)
  {
    max_alloc_size = s;
  }
  size_type get_requested_size() const
  {
    return chunk_size;
  }
  void * malloc BOOST_PREVENT_MACRO_SUBSTITUTION()
  {
    void* ret;
    if(free_list.empty())
    {
      ret = (user_allocator::malloc)(chunk_size);
      VALGRIND_MAKE_MEM_UNDEFINED(ret, chunk_size);
    }
    else
    {
      ret = *free_list.begin();
      free_list.erase(free_list.begin());
      VALGRIND_MAKE_MEM_UNDEFINED(ret, chunk_size);
    }
    used_list.insert(ret);
    return ret;
  }
  void * ordered_malloc()
  {
    return (this->malloc)();
  }
  void * ordered_malloc(size_type n)
  {
    if(max_alloc_size && (n > max_alloc_size))
      return 0;
    void* ret = (user_allocator::malloc)(chunk_size * n);
    used_list.insert(ret);
    return ret;
  }
  void free BOOST_PREVENT_MACRO_SUBSTITUTION(void *const chunk)
  {
    BOOST_ASSERT(used_list.count(chunk) == 1);
    BOOST_ASSERT(free_list.count(chunk) == 0);
    used_list.erase(chunk);
    free_list.insert(chunk);
    VALGRIND_MAKE_MEM_NOACCESS(chunk, chunk_size);
  }
  void ordered_free(void *const chunk)
  {
    return (this->free)(chunk);
  }
  void free BOOST_PREVENT_MACRO_SUBSTITUTION(void *const chunk, const size_type)
  {
    BOOST_ASSERT(used_list.count(chunk) == 1);
    BOOST_ASSERT(free_list.count(chunk) == 0);
    used_list.erase(chunk);
    (user_allocator::free)(static_cast<char*>(chunk));
  }
  void ordered_free(void *const chunk, const size_type n)
  {
    (this->free)(chunk, n);
  }
  bool is_from(void *const chunk) const
  {
    return used_list.count(chunk) || free_list.count(chunk);
  }

protected:
  size_type chunk_size, max_alloc_size;
  std::set<void*> free_list, used_list;
};

#endif
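
// --- Added editorial note (not part of the original header) -------------------
// The class above replaces the segregated-storage pool with a Valgrind-friendly
// implementation: every chunk is a separate system allocation tracked in
// free_list/used_list, and VALGRIND_MAKE_MEM_* marks chunk lifetimes for
// memcheck.  A minimal way to select it (illustrative; requires the valgrind
// headers to be on the include path):
//
//   #define BOOST_POOL_VALGRIND
//   #include <boost/pool/pool.hpp>
// ------------------------------------------------------------------------------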
} // namespace boost

#ifdef BOOST_MSVC
#pragma warning(pop)
#endif

#endif // #ifndef BOOST_POOL_HPP