//
// detail/thread_info_base.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2023 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef ASIO_DETAIL_THREAD_INFO_BASE_HPP
#define ASIO_DETAIL_THREAD_INFO_BASE_HPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include "asio/detail/config.hpp"

#include <climits>
#include <cstddef>
#include "asio/detail/memory.hpp"
#include "asio/detail/noncopyable.hpp"

#if !defined(ASIO_NO_EXCEPTIONS)
# include <exception>
# include "asio/multiple_exceptions.hpp"
#endif // !defined(ASIO_NO_EXCEPTIONS)

#include "asio/detail/push_options.hpp"

namespace asio {
namespace detail {

#ifndef ASIO_RECYCLING_ALLOCATOR_CACHE_SIZE
# define ASIO_RECYCLING_ALLOCATOR_CACHE_SIZE 2
#endif // ASIO_RECYCLING_ALLOCATOR_CACHE_SIZE
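
// Base class for the thread-specific state kept by threads that run an
// io_context/scheduler. It provides a small per-thread cache of reusable
// memory blocks (used by the recycling allocator) and storage for an
// exception captured while running handlers. Illustrative sketch only,
// assuming a valid this_thread pointer obtained from the per-thread context:
//
//   void* p = thread_info_base::allocate(
//       thread_info_base::awaitable_frame_tag(), this_thread, 64);
//   // ... use the 64 bytes at p ...
//   thread_info_base::deallocate(
//       thread_info_base::awaitable_frame_tag(), this_thread, p, 64);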
class thread_info_base
  : private noncopyable
{
public:
  struct default_tag
  {
    enum
    {
      cache_size = ASIO_RECYCLING_ALLOCATOR_CACHE_SIZE,
      begin_mem_index = 0,
      end_mem_index = cache_size
    };
  };
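
  // Each remaining tag reserves its own contiguous, non-overlapping range of
  // cache slots, [begin_mem_index, end_mem_index), starting where the
  // previous tag's range ends. This keeps blocks recycled for one purpose
  // (e.g. awaitable frames) separate from blocks recycled for another.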
  struct awaitable_frame_tag
  {
    enum
    {
      cache_size = ASIO_RECYCLING_ALLOCATOR_CACHE_SIZE,
      begin_mem_index = default_tag::end_mem_index,
      end_mem_index = begin_mem_index + cache_size
    };
  };

  struct executor_function_tag
  {
    enum
    {
      cache_size = ASIO_RECYCLING_ALLOCATOR_CACHE_SIZE,
      begin_mem_index = awaitable_frame_tag::end_mem_index,
      end_mem_index = begin_mem_index + cache_size
    };
  };

  struct cancellation_signal_tag
  {
    enum
    {
      cache_size = ASIO_RECYCLING_ALLOCATOR_CACHE_SIZE,
      begin_mem_index = executor_function_tag::end_mem_index,
      end_mem_index = begin_mem_index + cache_size
    };
  };

  struct parallel_group_tag
  {
    enum
    {
      cache_size = ASIO_RECYCLING_ALLOCATOR_CACHE_SIZE,
      begin_mem_index = cancellation_signal_tag::end_mem_index,
      end_mem_index = begin_mem_index + cache_size
    };
  };
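
  // Total number of cache slots, i.e. the size of reusable_memory_.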
  enum { max_mem_index = parallel_group_tag::end_mem_index };

  thread_info_base()
#if !defined(ASIO_NO_EXCEPTIONS)
    : has_pending_exception_(0)
#endif // !defined(ASIO_NO_EXCEPTIONS)
  {
    for (int i = 0; i < max_mem_index; ++i)
      reusable_memory_[i] = 0;
  }

  ~thread_info_base()
  {
    for (int i = 0; i < max_mem_index; ++i)
    {
      // The following test for non-null pointers is technically redundant, but
      // it is significantly faster when using a tight io_context::poll() loop
      // in latency sensitive applications.
      if (reusable_memory_[i])
        aligned_delete(reusable_memory_[i]);
    }
  }
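
  // Allocate and deallocate through the default_tag slots. These overloads
  // are used when the caller has no more specific purpose for the memory.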
  static void* allocate(thread_info_base* this_thread,
      std::size_t size, std::size_t align = ASIO_DEFAULT_ALIGN)
  {
    return allocate(default_tag(), this_thread, size, align);
  }

  static void deallocate(thread_info_base* this_thread,
      void* pointer, std::size_t size)
  {
    deallocate(default_tag(), this_thread, pointer, size);
  }
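
  // Allocate at least size bytes with the requested alignment, preferring a
  // block cached in one of Purpose's slots. The request is rounded up to a
  // whole number of chunks, and a block's capacity (in chunks) is recorded in
  // the byte just past the caller-visible region: mem[size] while in use,
  // mem[0] while cached. A cached block is reused only if it is large enough
  // and already satisfies the alignment; otherwise one cached block is freed
  // to make room and a fresh block is obtained from aligned_new, with one
  // extra byte to hold the capacity. A capacity too large to fit in a single
  // byte is recorded as 0, and such blocks are never recycled.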
  template <typename Purpose>
  static void* allocate(Purpose, thread_info_base* this_thread,
      std::size_t size, std::size_t align = ASIO_DEFAULT_ALIGN)
  {
    std::size_t chunks = (size + chunk_size - 1) / chunk_size;

    if (this_thread)
    {
      for (int mem_index = Purpose::begin_mem_index;
          mem_index < Purpose::end_mem_index; ++mem_index)
      {
        if (this_thread->reusable_memory_[mem_index])
        {
          void* const pointer = this_thread->reusable_memory_[mem_index];
          unsigned char* const mem = static_cast<unsigned char*>(pointer);
          if (static_cast<std::size_t>(mem[0]) >= chunks
              && reinterpret_cast<std::size_t>(pointer) % align == 0)
          {
            this_thread->reusable_memory_[mem_index] = 0;
            mem[size] = mem[0];
            return pointer;
          }
        }
      }

      for (int mem_index = Purpose::begin_mem_index;
          mem_index < Purpose::end_mem_index; ++mem_index)
      {
        if (this_thread->reusable_memory_[mem_index])
        {
          void* const pointer = this_thread->reusable_memory_[mem_index];
          this_thread->reusable_memory_[mem_index] = 0;
          aligned_delete(pointer);
          break;
        }
      }
    }

    void* const pointer = aligned_new(align, chunks * chunk_size + 1);
    unsigned char* const mem = static_cast<unsigned char*>(pointer);
    mem[size] = (chunks <= UCHAR_MAX) ? static_cast<unsigned char>(chunks) : 0;
    return pointer;
  }
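
  // Return a block to one of Purpose's empty cache slots, moving its recorded
  // capacity back to mem[0]. If the block is too large for its capacity to
  // fit in one byte, no per-thread state is available, or every slot is
  // already occupied, the block is simply freed.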
  template <typename Purpose>
  static void deallocate(Purpose, thread_info_base* this_thread,
      void* pointer, std::size_t size)
  {
    if (size <= chunk_size * UCHAR_MAX)
    {
      if (this_thread)
      {
        for (int mem_index = Purpose::begin_mem_index;
            mem_index < Purpose::end_mem_index; ++mem_index)
        {
          if (this_thread->reusable_memory_[mem_index] == 0)
          {
            unsigned char* const mem = static_cast<unsigned char*>(pointer);
            mem[0] = mem[size];
            this_thread->reusable_memory_[mem_index] = pointer;
            return;
          }
        }
      }
    }

    aligned_delete(pointer);
  }
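
  // Capture the exception currently being handled so that it can be rethrown
  // later, on the same thread, by rethrow_pending_exception(). If a second
  // exception is captured before the first is rethrown, the pending exception
  // is replaced by a multiple_exceptions error carrying the first one; any
  // further captures are ignored. Rethrowing clears the pending state.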
  void capture_current_exception()
  {
#if !defined(ASIO_NO_EXCEPTIONS)
    switch (has_pending_exception_)
    {
    case 0:
      has_pending_exception_ = 1;
      pending_exception_ = std::current_exception();
      break;
    case 1:
      has_pending_exception_ = 2;
      pending_exception_ =
        std::make_exception_ptr<multiple_exceptions>(
          multiple_exceptions(pending_exception_));
      break;
    default:
      break;
    }
#endif // !defined(ASIO_NO_EXCEPTIONS)
  }

  void rethrow_pending_exception()
  {
#if !defined(ASIO_NO_EXCEPTIONS)
    if (has_pending_exception_ > 0)
    {
      has_pending_exception_ = 0;
      std::exception_ptr ex(
          static_cast<std::exception_ptr&&>(
            pending_exception_));
      std::rethrow_exception(ex);
    }
#endif // !defined(ASIO_NO_EXCEPTIONS)
  }

private:
#if defined(ASIO_HAS_IO_URING)
  enum { chunk_size = 8 };
#else // defined(ASIO_HAS_IO_URING)
  enum { chunk_size = 4 };
#endif // defined(ASIO_HAS_IO_URING)
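
  // One cached block (or null) per slot. Slots are grouped into ranges by the
  // purpose tags above, and block capacities are measured in chunk_size-byte
  // chunks so that they fit in a single byte.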
  void* reusable_memory_[max_mem_index];

#if !defined(ASIO_NO_EXCEPTIONS)
  int has_pending_exception_;
  std::exception_ptr pending_exception_;
#endif // !defined(ASIO_NO_EXCEPTIONS)
};

} // namespace detail
} // namespace asio

#include "asio/detail/pop_options.hpp"

#endif // ASIO_DETAIL_THREAD_INFO_BASE_HPP