iopool.hpp

  1. /*
  2. * Copyright (c) 2017-2023 zhllxt
  3. *
  4. * author : zhllxt
  5. * email : 37792738@qq.com
  6. *
  7. * Distributed under the Boost Software License, Version 1.0. (See accompanying
  8. * file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
  9. */
  10. #ifndef __ASIO2_IOPOOL_HPP__
  11. #define __ASIO2_IOPOOL_HPP__
  12. #if defined(_MSC_VER) && (_MSC_VER >= 1200)
  13. #pragma once
  14. #endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
  15. #include <vector>
  16. #include <thread>
  17. #include <mutex>
  18. #include <chrono>
  19. #include <type_traits>
  20. #include <memory>
  21. #include <algorithm>
  22. #include <atomic>
  23. #include <unordered_set>
  24. #include <map>
  25. #include <functional>
  26. #include <asio2/base/error.hpp>
  27. #include <asio2/base/define.hpp>
  28. #include <asio2/base/log.hpp>
  29. #include <asio2/base/detail/util.hpp>
  30. #include <asio2/base/detail/shared_mutex.hpp>
  31. namespace asio2::detail
  32. {
  33. using io_context_work_guard = asio::executor_work_guard<asio::io_context::executor_type>;
  34. /* the below sfinae will cause compile error on gcc 7.3
  35. // unbelievable :
  36. // the 1 sfinae need use std::declval<std::decay_t<T>>()
  37. // the 2 sfinae need use (std::declval<std::decay_t<T>>())
  38. // the 3 sfinae need use ((std::declval<std::decay_t<T>>()))
  39. //-----------------------------------------------------------------------------------
  40. template<class T, class R = void>
  41. struct is_io_context_pointer : std::false_type {};
  42. template<class T>
  43. struct is_io_context_pointer<T, std::void_t<decltype(
  44. std::declval<std::decay_t<T>>()->~io_context()), void>> : std::true_type {};
  45. template<class T, class R = void>
  46. struct is_io_context_object : std::false_type {};
  47. template<class T>
  48. struct is_io_context_object<T, std::void_t<decltype(
  49. std::declval<std::decay_t<T>>().~io_context()), void>> : std::true_type {};
  50. //-----------------------------------------------------------------------------------
  51. template<class T, class R = void>
  52. struct is_executor_work_guard_pointer : std::false_type {};
  53. template<class T>
  54. struct is_executor_work_guard_pointer<T, std::void_t<decltype(
  55. (std::declval<std::decay_t<T>>())->~executor_work_guard()), void>> : std::true_type {};
  56. template<class T, class R = void>
  57. struct is_executor_work_guard_object : std::false_type {};
  58. template<class T>
  59. struct is_executor_work_guard_object<T, std::void_t<decltype(
  60. (std::declval<std::decay_t<T>>()).~executor_work_guard()), void>> : std::true_type {};
  61. //-----------------------------------------------------------------------------------
  62. #if defined(ASIO2_ENABLE_LOG)
  63. static_assert(is_io_context_pointer<asio::io_context* >::value);
  64. static_assert(is_io_context_pointer<asio::io_context*& >::value);
  65. static_assert(is_io_context_pointer<asio::io_context*&&>::value);
  66. static_assert(is_io_context_pointer<std::shared_ptr<asio::io_context> >::value);
  67. static_assert(is_io_context_pointer<std::shared_ptr<asio::io_context>& >::value);
  68. static_assert(is_io_context_pointer<std::shared_ptr<asio::io_context>&&>::value);
  69. static_assert(is_io_context_pointer<std::shared_ptr<asio::io_context>const&>::value);
  70. static_assert(is_io_context_object<asio::io_context >::value);
  71. static_assert(is_io_context_object<asio::io_context& >::value);
  72. static_assert(is_io_context_object<asio::io_context&&>::value);
  73. #endif
  74. */
  75. //-----------------------------------------------------------------------------------
  76. class iopool;
  77. template<class, class> class iopool_cp;
  78. class io_t
  79. {
  80. friend class iopool;
  81. template<class, class> friend class iopool_cp;
  82. public:
  83. io_t(std::shared_ptr<asio::io_context> ioc_ptr) noexcept : context_(std::move(ioc_ptr))
  84. {
  85. }
  86. ~io_t() noexcept
  87. {
  88. }
  89. inline asio::io_context & context() noexcept { return (*(this->context_)) ; }
  90. inline std::atomic<std::size_t> & pending() noexcept { return this->pending_ ; }
  91. inline std::unordered_set<asio::steady_timer*> & timers () noexcept { return this->timers_ ; }
  92. inline asio::io_context const& context() const noexcept { return (*(this->context_)) ; }
  93. inline std::atomic<std::size_t> const& pending() const noexcept { return this->pending_ ; }
  94. inline std::unordered_set<asio::steady_timer*> const& timers () const noexcept { return this->timers_ ; }
  95. template<class Object>
  96. inline void regobj(Object* p)
  97. {
  98. if (!p)
  99. return;
  100. // should hold an io_context work guard to ensure that unregobj will be called; otherwise
  101. // the objects_ map may not be empty and unregobj may never be called.
  102. asio::dispatch(this->context(), [this, p, optr = p->derived().selfptr()]() mutable
  103. {
  104. std::size_t k = reinterpret_cast<std::size_t>(p);
  105. io_context_work_guard iocg(this->context_->get_executor());
  106. this->objects_[k] = [p, optr = std::move(optr), iocg = std::move(iocg)]() mutable
  107. {
  108. detail::ignore_unused(optr, iocg);
  109. p->stop();
  110. };
  111. });
  112. }
  113. template<class Object>
  114. inline void unregobj(Object* p)
  115. {
  116. if (!p)
  117. return;
  118. // must use post, because the "for each objects_" loop is executed in iopool.stop:
  119. // object->stop is called inside that loop, and unregobj is called
  120. // inside object->stop; if we erased the element of objects_ directly here,
  121. // the iterator would be invalidated while the "for each objects_" loop is still running.
  122. asio::post(this->context(), [this, p, optr = p->derived().selfptr()]() mutable
  123. {
  124. detail::ignore_unused(optr);
  125. this->objects_.erase(reinterpret_cast<std::size_t>(p));
  126. });
  127. }
  128. /**
  129. * @brief cancel all registered timers and invoke the stop functions of all registered objects.
  130. */
  131. inline void cancel()
  132. {
  133. // must read/write the timers_ in the io_context thread via "post".
  134. // when the code runs to here, the io_context may already be stopped.
  135. asio::post(this->context(), [this]() mutable
  136. {
  137. for (asio::steady_timer* timer : this->timers_)
  138. {
  139. // when the timer is canceled, it will erase itself from timers_.
  140. detail::cancel_timer(*timer);
  141. }
  142. for (auto&[ptr, fun] : this->objects_)
  143. {
  144. detail::ignore_unused(ptr);
  145. if (fun)
  146. {
  147. fun();
  148. }
  149. }
  150. this->timers_.clear();
  151. this->objects_.clear();
  152. });
  153. }
  154. /**
  155. * @brief initialize the thread id to "std::this_thread::get_id()"
  156. */
  157. inline void init_thread_id() noexcept
  158. {
  159. if (this->thread_id_ != std::this_thread::get_id())
  160. {
  161. this->thread_id_ = std::this_thread::get_id();
  162. }
  163. }
  164. /**
  165. * @brief uninitialize the thread id to empty.
  166. */
  167. inline void fini_thread_id() noexcept
  168. {
  169. this->thread_id_ = std::thread::id{};
  170. }
  171. /**
  172. * @brief return the thread id of the current io_context running in.
  173. */
  174. inline std::thread::id get_thread_id() const noexcept
  175. {
  176. return this->thread_id_;
  177. }
  178. /**
  179. * @brief Determine whether the current io_context is running in the current thread.
  180. */
  181. inline bool running_in_this_thread() const noexcept
  182. {
  183. return (std::this_thread::get_id() == this->thread_id_);
  184. }
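// e.g. (illustrative sketch, not part of this header): a handler posted to this
// io_t's context can verify that it runs on the owning thread; "io" below is a
// hypothetical std::shared_ptr<io_t> obtained from an iopool.
//
// asio::post(io->context(), [io]() mutable
// {
//     ASIO2_ASSERT(io->running_in_this_thread());
// });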
  185. protected:
  186. //
  187. std::shared_ptr<asio::io_context> context_;
  188. // the strand causes some problems when used across a dll boundary.
  189. // 1. when a strand is declared in a dll and exported, and the exe that imports the dll
  190. // uses that strand, strand.running_in_this_thread() returns false, even if it
  191. // is called in the io_context thread.
  192. // 2. when a strand is declared in a dll and exported, and the exe uses
  193. // asio::bind_executor(strand, ...), it causes a deadlock.
  194. // eg: async_connect(endpoint, asio::bind_executor(strand, callback)); the callback
  195. // will never be called.
  196. //asio::io_context::strand strand_;
  197. // Use this variable to ensure async_send function was executed correctly.
  198. // see : send_cp.hpp "# issue x:"
  199. std::atomic<std::size_t> pending_{};
  200. // Use this variable to save the timers that have not been closed properly.
  201. // If we don't do this, the following problem occurs:
  202. // the user calls client.stop, and just before the code reaches the iopool's
  203. // wait_for_io_context_stopped, the user calls client.start_timer on another
  204. // thread; this causes wait_for_io_context_stopped to block
  205. // until the timer expires.
  206. // e.g:
  207. // {
  208. // asio2::timer timer;
  209. // timer.post([&]()
  210. // {
  211. // timer.start_timer(1, std::chrono::seconds(1), []() {});
  212. // });
  213. // } // the timer's destructor will be called here.
  214. // when the timer's destructor is called, it calls the "stop_all_timers"
  215. // function, which posts an event; that posted event
  216. // is executed before "timer.start_timer(1,...)", so when
  217. // "timer.start_timer(1,...)" is executed, nobody has a chance to cancel it,
  218. // and this causes the iopool's wait_for_io_context_stopped function
  219. // to block forever.
  220. std::unordered_set<asio::steady_timer*> timers_;
  221. // Used to save the server, client or other objects; when iopool.stop is called,
  222. // the stop function of each object will be called automatically.
  223. std::map<std::size_t, std::function<void()>> objects_;
  224. // the thread id of the current io_context running in.
  225. std::thread::id thread_id_{};
  226. };
  227. //-----------------------------------------------------------------------------------
  228. template<class T, class R = void>
  229. struct is_io_t_pointer : std::false_type {};
  230. template<class T>
  231. struct is_io_t_pointer<T, std::void_t<decltype(
  232. ((std::declval<std::decay_t<T>>()))->~io_t()), void>> : std::true_type {};
  233. template<class T, class R = void>
  234. struct is_io_t_object : std::false_type {};
  235. template<class T>
  236. struct is_io_t_object<T, std::void_t<decltype(
  237. ((std::declval<std::decay_t<T>>())).~io_t()), void>> : std::true_type {};
  238. //-----------------------------------------------------------------------------------
  239. /**
  240. * io_context pool
  241. */
  242. class iopool
  243. {
  244. template<class, class> friend class iopool_cp;
  245. // used to fix a compile error under vs2017
  246. template<class R, class P, class F, class T>
  247. struct post_lambda_1
  248. {
  249. std::atomic<std::size_t>& pending;
  250. P p;
  251. F f;
  252. T t;
  253. template<class X = P, class Y = F, class Z = T>
  254. explicit post_lambda_1(std::atomic<std::size_t>& pd, X&& x, Y&& y, Z&& z)
  255. : pending(pd), p(std::forward<X>(x)), f(std::forward<Y>(y)), t(std::forward<Z>(z))
  256. {
  257. }
  258. template<class U = R>
  259. void operator()()
  260. {
  261. if constexpr (std::is_void_v<R>)
  262. {
  263. std::apply(std::move(f), std::move(t));
  264. p.set_value();
  265. }
  266. else
  267. {
  268. p.set_value(std::apply(std::move(f), std::move(t)));
  269. }
  270. pending--;
  271. }
  272. };
  273. // used to fix a compile error under vs2017
  274. template<class R, class P, class F, class T>
  275. struct post_lambda_2
  276. {
  277. P p;
  278. F f;
  279. T t;
  280. template<class X = P, class Y = F, class Z = T>
  281. explicit post_lambda_2(X&& x, Y&& y, Z&& z)
  282. : p(std::forward<X>(x)), f(std::forward<Y>(y)), t(std::forward<Z>(z))
  283. {
  284. }
  285. template<class U = R>
  286. void operator()()
  287. {
  288. if constexpr (std::is_void_v<U>)
  289. {
  290. std::apply(std::move(f), std::move(t));
  291. p.set_value();
  292. }
  293. else
  294. {
  295. p.set_value(std::apply(std::move(f), std::move(t)));
  296. }
  297. }
  298. };
  299. public:
  300. /**
  301. * @brief constructor
  302. * @param concurrency - the pool size, default is double the number of CPU cores
  303. */
  304. explicit iopool(std::size_t concurrency = default_concurrency()) : state_(state_t::stopped), next_(0)
  305. {
  306. if (concurrency == 0)
  307. {
  308. concurrency = default_concurrency();
  309. }
  310. for (std::size_t i = 0; i < concurrency; ++i)
  311. {
  312. this->iocs_.emplace_back(std::make_shared<asio::io_context>(1));
  313. }
  314. for (std::size_t i = 0; i < concurrency; ++i)
  315. {
  316. this->iots_.emplace_back(std::make_shared<io_t>(this->iocs_[i]));
  317. }
  318. this->threads_.reserve(this->iots_.size());
  319. this->guards_ .reserve(this->iots_.size());
  320. }
  321. /**
  322. * @brief destructor
  323. */
  324. ~iopool()
  325. {
  326. this->stop();
  327. // calling the object's stop function only from an io_context thread, and never from a
  328. // non io_context thread, may cause this problem:
  329. // the io_context runs a task, and at this time the shared ptr object's reference
  330. // counter is 1, held by the task;
  331. // when the task finishes, the shared ptr object is destroyed, and when
  332. // it is destroyed, the iopool is destroyed too, but at this time the iopool's
  333. // stop is running in the io_context thread, so the iopool and the iopool thread
  334. // can not be stopped, and this causes a crash.
  335. // so we must ensure that we hold a shared ptr to the object manually, to avoid
  336. // the shared ptr object being destroyed in the io_context thread; one way to do this is
  337. // to call stop from a non io_context thread.
  338. #if defined(ASIO2_ENABLE_LOG)
  339. if (!this->threads_.empty())
  340. {
  341. ASIO2_LOG_FATAL(
  342. "fatal error: the object is destroyed in the io_context thread. {}",
  343. this->threads_.size());
  344. }
  345. #endif
  346. // You should call the stop function manually yourself to avoid this problem.
  347. // eg:
  348. // asio2::tcp_client client;
  349. // ...
  350. // client.stop();
  351. ASIO2_ASSERT(!this->running_in_threads());
  352. ASIO2_ASSERT(this->threads_.empty());
  353. }
  354. /**
  355. * @brief run all io_context objects in the pool.
  356. */
  357. bool start()
  358. {
  359. clear_last_error();
  360. // use read lock to check the state, to avoid deadlock.
  361. {
  362. asio2::shared_locker guard(this->mutex_);
  363. if (this->state_ != state_t::stopped)
  364. {
  365. set_last_error(asio::error::already_started);
  366. return true;
  367. }
  368. if (!this->guards_.empty() || !this->threads_.empty())
  369. {
  370. set_last_error(asio::error::already_started);
  371. return true;
  372. }
  373. }
  374. // then we must re-check the state under the write lock.
  375. asio2::unique_locker guard(this->mutex_);
  376. if (this->state_ != state_t::stopped)
  377. {
  378. set_last_error(asio::error::already_started);
  379. return true;
  380. }
  381. if (!this->guards_.empty() || !this->threads_.empty())
  382. {
  383. set_last_error(asio::error::already_started);
  384. return true;
  385. }
  386. this->state_ = state_t::starting;
  387. std::vector<std::promise<void>> promises(this->iots_.size());
  388. // Create a pool of threads to run all of the io_contexts.
  389. for (std::size_t i = 0; i < this->iots_.size(); ++i)
  390. {
  391. auto& iot = this->iots_[i];
  392. std::promise<void>& promise = promises[i];
  393. /// Restart the io_context in preparation for a subsequent run() invocation.
  394. /**
  395. * This function must be called prior to any second or later set of
  396. * invocations of the run(), run_one(), poll() or poll_one() functions when a
  397. * previous invocation of these functions returned due to the io_context
  398. * being stopped or running out of work. After a call to restart(), the
  399. * io_context object's stopped() function will return @c false.
  400. *
  401. * This function must not be called while there are any unfinished calls to
  402. * the run(), run_one(), poll() or poll_one() functions.
  403. */
  404. iot->context().restart();
  405. this->guards_.emplace_back(iot->context().get_executor());
  406. // start work thread
  407. this->threads_.emplace_back([this, &iot, &promise]() mutable
  408. {
  409. detail::ignore_unused(this);
  410. iot->thread_id_ = std::this_thread::get_id();
  411. // after the thread id has been set, we fulfill the promise
  412. promise.set_value();
  413. // should we catch the exception ?
  414. // If an exception occurs here, what should we do ?
  415. // We should handle exceptions in other business functions to ensure that
  416. // exceptions will not be triggered here.
  417. // You can define ASIO_NO_EXCEPTIONS in /asio2/config.hpp to disable
  418. // exceptions, so that when an exception occurs you can check the stack trace.
  419. #if !defined(ASIO_NO_EXCEPTIONS) && !defined(BOOST_ASIO_NO_EXCEPTIONS)
  420. try
  421. {
  422. #endif
  423. iot->context().run();
  424. #if !defined(ASIO_NO_EXCEPTIONS) && !defined(BOOST_ASIO_NO_EXCEPTIONS)
  425. }
  426. catch (system_error const& e)
  427. {
  428. std::ignore = e;
  429. ASIO2_LOG_ERROR("fatal exception in io_context::run:1: {}", e.what());
  430. ASIO2_ASSERT(false);
  431. }
  432. catch (std::exception const& e)
  433. {
  434. std::ignore = e;
  435. ASIO2_LOG_ERROR("fatal exception in io_context::run:2: {}", e.what());
  436. ASIO2_ASSERT(false);
  437. }
  438. catch (...)
  439. {
  440. ASIO2_LOG_ERROR("fatal exception in io_context::run:3");
  441. ASIO2_ASSERT(false);
  442. }
  443. #endif
  444. // memory leaks occur when SSL is used in multithreading
  445. // https://github.com/chriskohlhoff/asio/issues/368
  446. #if defined(ASIO2_ENABLE_SSL) || defined(ASIO2_USE_SSL)
  447. OPENSSL_thread_stop();
  448. #endif
  449. });
  450. }
  451. for (std::size_t i = 0; i < this->iots_.size(); ++i)
  452. {
  453. promises[i].get_future().wait();
  454. }
  455. #if defined(_DEBUG) || defined(DEBUG)
  456. for (std::size_t i = 0; i < this->iots_.size(); ++i)
  457. {
  458. ASIO2_ASSERT(this->iots_[i]->get_thread_id() == this->threads_[i].get_id());
  459. }
  460. #endif
  461. this->state_ = state_t::started;
  462. return true;
  463. }
  464. /**
  465. * @brief stop all io_context objects in the pool
  466. * blocking until all posted event has completed already.
  467. * After we call iog.reset(), when an asio::post(io_context,...) execution ends, the count
  468. * of the io_context will be checked. If the count equals 0, the io_context will be closed. Then
  469. * the subsequent call of asio:: post(io_context,...) will fail, and the post event will not
  470. * be executed. When our program exits, it will nest call asio:: post (io_context...) to post
  471. * many events, so when an asio::post(io_context,...) inside someone asio::post(io_context,...)
  472. * has not yet been executed, the io_context may have been closed, which will result in the
  473. * nested asio::post(io_context,...) never being executed.
  474. */
  475. void stop()
  476. {
  477. // split read and write to avoid deadlock caused by iopool.post([&iopool]() {iopool.stop(); });
  478. {
  479. asio2::shared_locker guard(this->mutex_);
  480. if (this->state_ != state_t::started)
  481. return;
  482. if (this->guards_.empty() && this->threads_.empty())
  483. return;
  484. if (this->running_in_threads_impl())
  485. return this->cancel_impl();
  486. }
  487. {
  488. asio2::unique_locker guard(this->mutex_);
  489. if (this->state_ != state_t::started)
  490. return;
  491. this->state_ = state_t::stopping;
  492. }
  493. // Waiting for all nested events to complete.
  494. // The mutex_ must be released while waiting; otherwise, the stop function may be called
  495. // in the communication thread and request the lock, which is already held here,
  496. // leading to deadlock.
  497. this->wait_for_io_context_stopped();
  498. {
  499. asio2::unique_locker guard(this->mutex_);
  500. // call executor_work_guard::reset, and then the io_context worker thread will exit.
  501. // In fact, reset has already been called on the guards, but repeated calls are harmless.
  502. for (auto & iog : this->guards_)
  503. {
  504. ASIO2_ASSERT(iog.owns_work() == false);
  505. iog.reset();
  506. }
  507. // Wait for all threads to exit.
  508. for (auto & thread : this->threads_)
  509. {
  510. thread.join();
  511. }
  512. this->guards_ .clear();
  513. this->threads_.clear();
  514. #if defined(_DEBUG) || defined(DEBUG)
  515. for (std::size_t i = 0; i < this->iots_.size(); ++i)
  516. {
  517. ASIO2_ASSERT(this->iots_[i]->objects_.empty());
  518. }
  519. #endif
  520. this->state_ = state_t::stopped;
  521. }
  522. }
  523. /**
  524. * @brief check whether the io_context pool is started
  525. */
  526. inline bool started() const noexcept
  527. {
  528. asio2::shared_locker guard(this->mutex_);
  529. return (this->state_ == state_t::started);
  530. }
  531. /**
  532. * @brief check whether the io_context pool is stopped
  533. */
  534. inline bool stopped() const noexcept
  535. {
  536. asio2::shared_locker guard(this->mutex_);
  537. return (this->state_ == state_t::stopped);
  538. }
  539. /**
  540. * @brief get an io_t to use
  541. */
  542. inline std::shared_ptr<io_t> get(std::size_t index = static_cast<std::size_t>(-1)) noexcept
  543. {
  544. asio2::shared_locker guard(this->mutex_);
  545. ASIO2_ASSERT(!this->iots_.empty());
  546. return this->iots_[this->next_impl(index)];
  547. }
  548. /**
  549. * @brief get an io_context to use
  550. */
  551. inline asio::io_context& get_context(std::size_t index = static_cast<std::size_t>(-1)) noexcept
  552. {
  553. asio2::shared_locker guard(this->mutex_);
  554. ASIO2_ASSERT(!this->iots_.empty());
  555. return this->iots_[this->next_impl(index)]->context();
  556. }
  557. /**
  558. * @brief get an io_context shared_ptr to use
  559. */
  560. inline std::shared_ptr<asio::io_context> get_context_ptr(std::size_t index = std::size_t(-1)) noexcept
  561. {
  562. asio2::shared_locker guard(this->mutex_);
  563. ASIO2_ASSERT(!this->iocs_.empty());
  564. return this->iocs_[this->next_impl(index)];
  565. }
  566. /**
  567. * @brief Determine whether current code is running in the io_context pool threads.
  568. */
  569. inline bool running_in_threads() const noexcept
  570. {
  571. asio2::shared_locker guard(this->mutex_);
  572. return this->running_in_threads_impl();
  573. }
  574. /**
  575. * @brief Determine whether current code is running in the io_context thread by index
  576. */
  577. inline bool running_in_thread(std::size_t index) const noexcept
  578. {
  579. asio2::shared_locker guard(this->mutex_);
  580. ASIO2_ASSERT(index < this->threads_.size());
  581. if (!(index < this->threads_.size()))
  582. return false;
  583. return (std::this_thread::get_id() == this->threads_[index].get_id());
  584. }
  585. /**
  586. * @brief get io_context pool size.
  587. */
  588. inline std::size_t size() const noexcept
  589. {
  590. asio2::shared_locker guard(this->mutex_);
  591. return this->iots_.size();
  592. }
  593. /**
  594. * @brief Get the thread id of the specified thread index.
  595. */
  596. inline std::thread::id get_thread_id(std::size_t index) const noexcept
  597. {
  598. asio2::shared_locker guard(this->mutex_);
  599. return this->threads_[index % this->threads_.size()].get_id();
  600. }
  601. /**
  602. * @brief Get the thread native handle of the specified thread index.
  603. * @note tested on Windows:
  604. * this fails:
  605. * SetThreadPriority((HANDLE)thread.native_handle(), THREAD_PRIORITY_HIGHEST);
  606. * this succeeds:
  607. * SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
  608. */
  609. inline std::thread::native_handle_type get_thread_handle(std::size_t index) noexcept
  610. {
  611. asio2::shared_locker guard(this->mutex_);
  612. return this->threads_[index % this->threads_.size()].native_handle();
  613. }
  614. /**
  615. * Used to ensure that all nested asio::post(...) events are fully invoked.
  616. */
  617. inline void wait_for_io_context_stopped() ASIO2_NO_THREAD_SAFETY_ANALYSIS
  618. {
  619. // split read and write to avoid deadlock caused by iopool.post([&iopool]() {iopool.stop(); });
  620. {
  621. //asio2::shared_locker guard(this->mutex_);
  622. if (this->running_in_threads_impl())
  623. return this->cancel_impl();
  624. // wait for all pending events to complete.
  625. for (auto& iot : this->iots_)
  626. {
  627. while (iot->pending() > std::size_t(0))
  628. std::this_thread::sleep_for(std::chrono::milliseconds(0));
  629. }
  630. }
  631. {
  632. asio2::unique_locker guard(this->mutex_);
  633. // first reset the acceptor io_context work guard
  634. if (!this->guards_.empty())
  635. this->guards_.front().reset();
  636. }
  637. constexpr auto max = std::chrono::milliseconds(10);
  638. constexpr auto min = std::chrono::milliseconds(1);
  639. {
  640. // no lock needed here; locking could cause a deadlock when the client starts the iopool
  641. //asio2::shared_locker guard(this->mutex_);
  642. // second, wait indefinitely until the acceptor io_context is stopped
  643. for (std::size_t i = 0; i < std::size_t(1) && i < this->iocs_.size(); ++i)
  644. {
  645. auto t1 = std::chrono::steady_clock::now();
  646. auto& ioc = this->iocs_[i];
  647. auto& iot = this->iots_[i];
  648. while (!ioc->stopped())
  649. {
  650. // the timer may not be canceled successfully when using a visual
  651. // studio breakpoint for debugging, so cancel it on every loop iteration.
  652. // we must cancel all iots, otherwise we may get a deadlock like the one below:
  653. // the client_ptr->bind_recv callback holds the session_ptr, and the session_ptr
  654. // lives in the iot with index 1 (not index 0), so if we only called iot->cancel,
  655. // the cancel function of the index-1 iot would not be called, so the stop function
  656. // of client_ptr would not be called either, so the session_ptr held by the
  657. // client_ptr could not be destroyed, so the server's acceptor io could
  658. // not be stopped (this means the index-0 io can't be stopped).
  659. //server.bind_accept([](std::shared_ptr<asio2::tcp_session>& session_ptr)
  660. //{
  661. // std::shared_ptr<asio2::tcp_client> client_ptr = std::make_shared<asio2::tcp_client>(
  662. // 512, 1024, session_ptr->io());
  663. //
  664. // client_ptr->bind_recv([session_ptr](std::string_view data) mutable
  665. // {
  666. // });
  667. //
  668. // client_ptr->async_start("127.0.0.1", 8888);
  669. //});
  670. this->cancel_impl();
  671. auto t2 = std::chrono::steady_clock::now();
  672. auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1);
  673. std::this_thread::sleep_for(std::clamp(ms, min, max));
  674. }
  675. iot->thread_id_ = std::thread::id{};
  676. ASIO2_ASSERT(iot->timers().empty());
  677. ASIO2_ASSERT(iot->objects_.empty());
  678. }
  679. }
  680. {
  681. asio2::unique_locker guard(this->mutex_);
  682. for (std::size_t i = 1; i < this->guards_.size(); ++i)
  683. {
  684. this->guards_[i].reset();
  685. }
  686. }
  687. {
  688. // no lock needed here; locking could cause a deadlock when the client starts the iopool
  689. //asio2::shared_locker guard(this->mutex_);
  690. for (std::size_t i = 1; i < this->iocs_.size(); ++i)
  691. {
  692. auto t1 = std::chrono::steady_clock::now();
  693. auto& ioc = this->iocs_[i];
  694. auto& iot = this->iots_[i];
  695. while (!ioc->stopped())
  696. {
  697. // the timer may not be canceled successfully when using a visual
  698. // studio breakpoint for debugging, so cancel it on every loop iteration
  699. this->cancel_impl();
  700. auto t2 = std::chrono::steady_clock::now();
  701. auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1);
  702. std::this_thread::sleep_for(std::clamp(ms, min, max));
  703. }
  704. iot->thread_id_ = std::thread::id{};
  705. ASIO2_ASSERT(iot->timers().empty());
  706. ASIO2_ASSERT(iot->objects_.empty());
  707. }
  708. }
  709. }
  710. /**
  711. * @brief cancel all timers and registered objects of every io_context in the pool.
  712. */
  713. inline void cancel()
  714. {
  715. asio2::shared_locker guard(this->mutex_);
  716. return this->cancel_impl();
  717. }
  718. /**
  719. * @brief destroy the content of all member variables; this is used to solve memory leaks.
  720. * After this function is called, this class object cannot be used again.
  721. */
  722. inline void destroy() noexcept
  723. {
  724. asio2::unique_locker guard(this->mutex_);
  725. this->threads_.clear();
  726. this->iocs_.clear();
  727. this->iots_.clear();
  728. this->guards_.clear();
  729. #if defined(_DEBUG) || defined(DEBUG)
  730. this->derive_pointer_ = []() {};
  731. #endif
  732. }
  733. /**
  734. * @brief map the given index to an io_context index (round-robin when index is -1).
  735. */
  736. inline std::size_t next(std::size_t index) noexcept
  737. {
  738. asio2::shared_locker guard(this->mutex_);
  739. return this->next_impl(index);
  740. }
  741. /**
  742. * The wait_for() function blocks until the specified duration has elapsed.
  743. *
  744. * @param rel_time - The duration for which the call may block.
  745. */
  746. template <typename Rep, typename Period>
  747. void wait_for(const std::chrono::duration<Rep, Period>& rel_time)
  748. {
  749. if (this->running_in_threads())
  750. {
  751. set_last_error(asio::error::operation_not_supported);
  752. return;
  753. }
  754. clear_last_error();
  755. io_t* iot = nullptr;
  756. {
  757. asio2::shared_locker guard(this->mutex_);
  758. iot = this->iots_[0].get();
  759. }
  760. asio::steady_timer timer(iot->context());
  761. timer.expires_after(rel_time);
  762. timer.wait(get_last_error());
  763. }
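// e.g. (illustrative sketch): block the calling thread for roughly one second
// using the pool's first io_context; must not be called from a pool thread.
//
// asio2::iopool pool;
// pool.start();
// pool.wait_for(std::chrono::seconds(1));
// pool.stop();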
  764. /**
  765. * The wait_until() function blocks until the specified time has been reached.
  766. *
  767. * @param abs_time - The time point until which the call may block.
  768. */
  769. template <typename Clock, typename Duration>
  770. void wait_until(const std::chrono::time_point<Clock, Duration>& abs_time)
  771. {
  772. if (this->running_in_threads())
  773. {
  774. set_last_error(asio::error::operation_not_supported);
  775. return;
  776. }
  777. clear_last_error();
  778. io_t* iot = nullptr;
  779. {
  780. asio2::shared_locker guard(this->mutex_);
  781. iot = this->iots_[0].get();
  782. }
  783. asio::steady_timer timer(iot->context());
  784. timer.expires_at(abs_time);
  785. timer.wait(get_last_error());
  786. }
  787. /**
  788. * The wait_signal() function blocks until some signal is delivered.
  789. *
  790. * @return The delivered signal number. May be an invalid value if some exception occurred.
  791. */
  792. template <class... Ints>
  793. int wait_signal(Ints... signal_number)
  794. {
  795. if (this->running_in_threads())
  796. {
  797. set_last_error(asio::error::operation_not_supported);
  798. return 0;
  799. }
  800. clear_last_error();
  801. io_t* iot = nullptr;
  802. {
  803. asio2::shared_locker guard(this->mutex_);
  804. iot = this->iots_[0].get();
  805. }
  806. // note: the variable name "signals" would conflict with Qt's "signals" macro
  807. asio::signal_set signalset(iot->context());
  808. (signalset.add(signal_number), ...);
  809. std::promise<int> promise;
  810. std::future<int> future = promise.get_future();
  811. signalset.async_wait([&](const error_code& /*ec*/, int signo)
  812. {
  813. promise.set_value(signo);
  814. });
  815. return future.get();
  816. }
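// e.g. (illustrative sketch): block the calling thread until Ctrl+C or a
// termination request arrives; SIGINT/SIGTERM come from <csignal>.
//
// asio2::iopool pool;
// pool.start();
// int signo = pool.wait_signal(SIGINT, SIGTERM);
// pool.stop();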
  817. /**
  818. * @brief post a function object into the thread pool, then return immediately,
  819. * the function object will never be executed inside this function. Instead, it will
  820. * be executed asynchronously in the thread pool.
  821. * @param fun - global function, static function, lambda, member function, std::function.
  822. * @return std::future<fun_return_type>
  823. */
  824. template<class Fun, class... Args>
  825. auto post(Fun&& fun, Args&&... args) -> std::future<std::invoke_result_t<Fun, Args...>>
  826. {
  827. asio2::shared_locker guard(this->mutex_);
  828. using return_type = std::invoke_result_t<Fun, Args...>;
  829. std::size_t index = 0, num = (std::numeric_limits<std::size_t>::max)();
  830. for (std::size_t i = 0, n = this->iots_.size(); i < n; ++i)
  831. {
  832. std::size_t pending = this->iots_[i]->pending().load();
  833. if (pending == 0)
  834. {
  835. index = i;
  836. break;
  837. }
  838. if (pending < num)
  839. {
  840. num = pending;
  841. index = i;
  842. }
  843. }
  844. std::atomic<std::size_t>& pending = this->iots_[index]->pending();
  845. pending++;
  846. std::promise<return_type> promise;
  847. std::future<return_type> future = promise.get_future();
  848. using lambda_t = post_lambda_1<
  849. return_type,
  850. std::promise<return_type>,
  851. detail::remove_cvref_t<Fun>,
  852. std::tuple<detail::remove_cvref_t<Args>...>>;
  853. asio::post(*(this->iocs_[index]), lambda_t
  854. {
  855. pending,
  856. std::move(promise),
  857. std::forward<Fun>(fun),
  858. std::tuple(std::forward<Args>(args) ...)
  859. });
  860. return future;
  861. }
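// e.g. (illustrative sketch): the returned future can be used to collect the
// result of the function executed on the least-loaded io_context.
//
// asio2::iopool pool(4);
// pool.start();
// std::future<int> f = pool.post([](int a, int b) { return a + b; }, 1, 2);
// int sum = f.get(); // sum == 3
// pool.stop();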
  862. /**
  863. * @brief post a function object into the thread pool with specified thread index,
  864. * then return immediately, the function object will never be executed inside this
  865. * function. Instead, it will be executed asynchronously in the thread pool.
  866. * @param thread_index - which thread to execute the function.
  867. * @param fun - global function, static function, lambda, member function, std::function.
  868. * @return std::future<fun_return_type>
  869. */
  870. template<class IntegerT, class Fun, class... Args,
  871. std::enable_if_t<std::is_integral_v<std::remove_cv_t<std::remove_reference_t<IntegerT>>>, int> = 0>
  872. auto post(IntegerT thread_index, Fun&& fun, Args&&... args) -> std::future<std::invoke_result_t<Fun, Args...>>
  873. {
  874. asio2::shared_locker guard(this->mutex_);
  875. using return_type = std::invoke_result_t<Fun, Args...>;
  876. std::promise<return_type> promise;
  877. std::future<return_type> future = promise.get_future();
  878. using lambda_t = post_lambda_2<
  879. return_type,
  880. std::promise<return_type>,
  881. detail::remove_cvref_t<Fun>,
  882. std::tuple<detail::remove_cvref_t<Args>...>>;
  883. asio::post(*(this->iocs_[thread_index % this->iocs_.size()]), lambda_t
  884. {
  885. std::move(promise),
  886. std::forward<Fun>(fun),
  887. std::tuple(std::forward<Args>(args) ...)
  888. });
  889. return future;
  890. }
  891. protected:
  892. inline bool running_in_threads_impl() const noexcept ASIO2_NO_THREAD_SAFETY_ANALYSIS
  893. {
  894. std::thread::id curr_tid = std::this_thread::get_id();
  895. for (auto& thread : this->threads_)
  896. {
  897. if (curr_tid == thread.get_id())
  898. return true;
  899. }
  900. return false;
  901. }
  902. inline void cancel_impl() ASIO2_NO_THREAD_SAFETY_ANALYSIS
  903. {
  904. for (std::size_t i = 0; i < this->iocs_.size(); ++i)
  905. {
  906. auto& ioc = this->iocs_[i];
  907. auto& iot = this->iots_[i];
  908. if (!ioc->stopped())
  909. {
  910. iot->cancel();
  911. }
  912. }
  913. }
  914. inline std::size_t next_impl(std::size_t index) noexcept ASIO2_NO_THREAD_SAFETY_ANALYSIS
  915. {
  916. // Use a round-robin scheme to choose the next io_context to use.
  917. return (index == static_cast<std::size_t>(-1) ?
  918. ((++(this->next_)) % this->iots_.size()) : (index % this->iots_.size()));
  919. }
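// illustrative mapping, assuming a pool of 4 io_contexts and next_ starting at 0:
//   next_impl(std::size_t(-1)) -> 1, 2, 3, 0, 1, ...  (round robin)
//   next_impl(7)               -> 3                   (7 % 4)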
  920. protected:
  921. ///
  922. mutable asio2::shared_mutexer mutex_;
  923. /// threads to run all of the io_context
  924. std::vector<std::thread> threads_ ASIO2_GUARDED_BY(mutex_);
  925. /// The pool of io_context.
  926. std::vector<std::shared_ptr<asio::io_context>> iocs_ ASIO2_GUARDED_BY(mutex_);
  927. /// The pool of io_context.
  928. std::vector<std::shared_ptr<io_t>> iots_ ASIO2_GUARDED_BY(mutex_);
  929. /// Flag whether the io_context pool has stopped already
  930. detail::state_t state_ ASIO2_GUARDED_BY(mutex_);
  931. /// The next io_context to use for a connection.
  932. std::size_t next_;
  933. // Give all the io_contexts executor_work_guard to do so that their run() functions will not
  934. // exit until they are explicitly stopped.
  935. std::vector<io_context_work_guard> guards_ ASIO2_GUARDED_BY(mutex_);
  936. // for debug, to see the derived object details.
  937. #if defined(_DEBUG) || defined(DEBUG)
  938. std::function<void()> derive_pointer_;
  939. #endif
  940. };
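// e.g. (illustrative sketch): a standalone pool driving plain asio handlers.
//
// asio2::iopool pool;                  // default_concurrency() io_contexts
// pool.start();
// asio::post(pool.get_context(), []()
// {
//     // runs in one of the pool's worker threads
// });
// pool.stop();                         // blocks until all posted events finish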
  941. class iopool_base
  942. {
  943. public:
  944. iopool_base() = default;
  945. virtual ~iopool_base() {}
  946. virtual bool start () = 0;
  947. virtual void stop () = 0;
  948. virtual bool started() noexcept = 0;
  949. virtual bool stopped() noexcept = 0;
  950. virtual void destroy() noexcept = 0;
  951. virtual std::shared_ptr<io_t> get (std::size_t index) noexcept = 0;
  952. virtual std::size_t size () noexcept = 0;
  953. virtual bool running_in_threads() noexcept = 0;
  954. };
  955. class default_iopool : public iopool_base
  956. {
  957. template<class, class> friend class iopool_cp;
  958. public:
  959. explicit default_iopool(std::size_t concurrency) : impl_(concurrency)
  960. {
  961. }
  962. /**
  963. * @brief destructor
  964. */
  965. virtual ~default_iopool()
  966. {
  967. this->impl_.stop();
  968. }
  969. /**
  970. * @brief run all io_context objects in the pool.
  971. */
  972. virtual bool start() override
  973. {
  974. return this->impl_.start();
  975. }
  976. /**
  977. * @brief stop all io_context objects in the pool
  978. */
  979. virtual void stop() override
  980. {
  981. this->impl_.stop();
  982. }
  983. /**
  984. * @brief check whether the io_context pool is started
  985. */
  986. virtual bool started() noexcept override
  987. {
  988. return this->impl_.started();
  989. }
  990. /**
  991. * @brief check whether the io_context pool is stopped
  992. */
  993. virtual bool stopped() noexcept override
  994. {
  995. return this->impl_.stopped();
  996. }
  997. /**
  998. * @brief destroy the content of all member variables; this is used to solve memory leaks.
  999. * After this function is called, this class object cannot be used again.
  1000. */
  1001. virtual void destroy() noexcept override
  1002. {
  1003. return this->impl_.destroy();
  1004. }
  1005. /**
  1006. * @brief get an io_t to use
  1007. */
  1008. virtual std::shared_ptr<io_t> get(std::size_t index) noexcept override
  1009. {
  1010. return this->impl_.get(index);
  1011. }
  1012. /**
  1013. * @brief get io_context pool size.
  1014. */
  1015. virtual std::size_t size() noexcept override
  1016. {
  1017. return this->impl_.size();
  1018. }
  1019. /**
  1020. * @brief Determine whether current code is running in the io_context pool threads.
  1021. */
  1022. virtual bool running_in_threads() noexcept override
  1023. {
  1024. return this->impl_.running_in_threads();
  1025. }
  1026. protected:
  1027. detail::iopool impl_;
  1028. };
  1029. /**
  1030. * This io_context pool is passed in by the user
  1031. */
  1032. class user_iopool : public iopool_base
  1033. {
  1034. template<class, class> friend class iopool_cp;
  1035. public:
  1036. /**
  1037. * @brief constructor
  1038. */
  1039. explicit user_iopool(std::vector<std::shared_ptr<io_t>> iots) : iots_(std::move(iots)), stopped_(true), next_(0)
  1040. {
  1041. }
  1042. /**
  1043. * @brief destructor
  1044. */
  1045. virtual ~user_iopool()
  1046. {
  1047. this->stop();
  1048. }
  1049. /**
  1050. * @brief run all io_context objects in the pool.
  1051. */
  1052. virtual bool start() override
  1053. {
  1054. clear_last_error();
  1055. asio2::unique_locker guard(this->mutex_);
  1056. if (!this->stopped_)
  1057. {
  1058. set_last_error(asio::error::already_started);
  1059. return true;
  1060. }
  1061. this->stopped_ = false;
  1062. return true;
  1063. }
  1064. /**
  1065. * @brief stop all io_context objects in the pool
  1066. */
  1067. virtual void stop() override
  1068. {
  1069. {
  1070. asio2::shared_locker guard(this->mutex_);
  1071. if (this->stopped_)
  1072. return;
  1073. // wait for all pending events to complete.
  1074. for (auto& iot : this->iots_)
  1075. {
  1076. while (iot->pending() > std::size_t(0))
  1077. std::this_thread::sleep_for(std::chrono::milliseconds(0));
  1078. }
  1079. }
  1080. {
  1081. asio2::unique_locker guard(this->mutex_);
  1082. if (this->stopped_)
  1083. return;
  1084. this->stopped_ = true;
  1085. }
  1086. }
  1087. /**
  1088. * @brief check whether the io_context pool is started
  1089. */
  1090. virtual bool started() noexcept override
  1091. {
  1092. asio2::shared_locker guard(this->mutex_);
  1093. return (!this->stopped_);
  1094. }
  1095. /**
  1096. * @brief check whether the io_context pool is stopped
  1097. */
  1098. virtual bool stopped() noexcept override
  1099. {
  1100. asio2::shared_locker guard(this->mutex_);
  1101. return (this->stopped_);
  1102. }
  1103. /**
  1104. * @brief destroy the content of all member variables; this is used to solve memory leaks.
  1105. * After this function is called, this class object cannot be used again.
  1106. */
  1107. virtual void destroy() noexcept override
  1108. {
  1109. asio2::unique_locker guard(this->mutex_);
  1110. this->iots_.clear();
  1111. }
  1112. /**
  1113. * @brief get an io_t to use
  1114. */
  1115. virtual std::shared_ptr<io_t> get(std::size_t index) noexcept override
  1116. {
  1117. asio2::shared_locker guard(this->mutex_);
  1118. return this->iots_[this->next_impl(index)];
  1119. }
  1120. /**
  1121. * @brief get io_context pool size.
  1122. */
  1123. virtual std::size_t size() noexcept override
  1124. {
  1125. asio2::shared_locker guard(this->mutex_);
  1126. return this->iots_.size();
  1127. }
  1128. /**
  1129. * @brief map the given index to an io_context index (round-robin when index is -1).
  1130. */
  1131. inline std::size_t next(std::size_t index) noexcept
  1132. {
  1133. asio2::shared_locker guard(this->mutex_);
  1134. return this->next_impl(index);
  1135. }
  1136. /**
  1137. * @brief Determine whether current code is running in the io_context pool threads.
  1138. */
  1139. virtual bool running_in_threads() noexcept override
  1140. {
  1141. asio2::shared_locker guard(this->mutex_);
  1142. return this->running_in_threads_impl();
  1143. }
  1144. protected:
  1145. inline bool running_in_threads_impl() noexcept ASIO2_NO_THREAD_SAFETY_ANALYSIS
  1146. {
  1147. std::thread::id curr_tid = std::this_thread::get_id();
  1148. for (auto& iot : this->iots_)
  1149. {
  1150. if (curr_tid == iot->get_thread_id())
  1151. return true;
  1152. }
  1153. return false;
  1154. }
  1155. inline std::size_t next_impl(std::size_t index) noexcept ASIO2_NO_THREAD_SAFETY_ANALYSIS
  1156. {
  1157. // Use a round-robin scheme to choose the next io_context to use.
  1158. // Use this->iots_.size() instead of this->size() to avoid calling the virtual function.
  1159. return (index == static_cast<std::size_t>(-1) ?
  1160. ((++(this->next_)) % this->iots_.size()) : (index % this->iots_.size()));
  1161. }
  1162. protected:
  1163. ///
  1164. mutable asio2::shared_mutexer mutex_;
  1165. /// The pool of io_t.
  1166. std::vector<std::shared_ptr<io_t>> iots_ ASIO2_GUARDED_BY(mutex_);
  1167. /// Flag whether the io_context pool has stopped already
  1168. bool stopped_ ASIO2_GUARDED_BY(mutex_);
  1169. /// The next io_context to use for a connection.
  1170. std::size_t next_;
  1171. };
  1172. template<class derived_t, class args_t = void>
  1173. class iopool_cp
  1174. {
  1175. public:
  1176. template<class T>
  1177. explicit iopool_cp(T&& v) : next_(0)
  1178. {
  1179. using type = typename detail::remove_cvref_t<T>;
  1180. if /**/ constexpr (std::is_integral_v<type>)
  1181. {
  1182. this->iopool_ = std::make_unique<default_iopool>(v);
  1183. #if defined(_DEBUG) || defined(DEBUG)
  1184. derived_t& derive = static_cast<derived_t&>(*this);
  1185. static_cast<default_iopool*>(this->iopool_.get())->impl_.derive_pointer_ =
  1186. [&derive]() { detail::ignore_unused(derive); };
  1187. #endif
  1188. }
  1189. else
  1190. {
  1191. this->iopool_ = std::make_unique<user_iopool>(this->to_iots(std::forward<T>(v)));
  1192. }
  1193. for (std::size_t i = 0, size = iopool_->size(); i < size; ++i)
  1194. {
  1195. iots_.emplace_back(iopool_->get(i));
  1196. }
  1197. }
  1198. ~iopool_cp() = default;
  1199. /**
  1200. * The wait_stop() function blocks until the stop() function has been called.
  1201. */
  1202. void wait_stop()
  1203. {
  1204. if (this->iopool().running_in_threads())
  1205. {
  1206. set_last_error(asio::error::operation_not_supported);
  1207. return;
  1208. }
  1209. clear_last_error();
  1210. derived_t& derive = static_cast<derived_t&>(*this);
  1211. std::promise<error_code> promise;
  1212. std::future<error_code> future = promise.get_future();
  1213. std::shared_ptr<io_t>& iot = this->iots_[0];
  1214. // We must use asio::post to ensure the wait_stop_timer_ is read and written in the
  1215. // same thread.
  1216. asio::post(iot->context(), [this, iot, this_ptr = derive.selfptr(), promise = std::move(promise)]
  1217. () mutable
  1218. {
  1219. if (this->wait_stop_timer_)
  1220. {
  1221. iot->timers().erase(this->wait_stop_timer_.get());
  1222. detail::cancel_timer(*(this->wait_stop_timer_));
  1223. }
  1224. this->wait_stop_timer_ = std::make_unique<asio::steady_timer>(iot->context());
  1225. iot->timers().emplace(this->wait_stop_timer_.get());
  1226. this->wait_stop_timer_->expires_after((std::chrono::nanoseconds::max)());
  1227. this->wait_stop_timer_->async_wait(
  1228. [this_ptr = std::move(this_ptr), promise = std::move(promise)]
  1229. (const error_code&) mutable
  1230. {
  1231. detail::ignore_unused(this_ptr);
  1232. promise.set_value(error_code{});
  1233. });
  1234. });
  1235. set_last_error(future.get());
  1236. }
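// e.g. (illustrative sketch, assuming a derived class such as asio2::tcp_server
// that mixes in this component): keep main() alive until stop() is called from
// another thread or a signal handler.
//
// asio2::tcp_server server;
// server.start("0.0.0.0", 8080);
// server.wait_stop();   // returns after someone calls server.stop()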
  1237. /**
  1238. * The wait_for() function blocks until the specified duration has elapsed.
  1239. *
  1240. * @param rel_time - The duration for which the call may block.
  1241. */
  1242. template <typename Rep, typename Period>
  1243. void wait_for(const std::chrono::duration<Rep, Period>& rel_time)
  1244. {
  1245. if (this->iopool().running_in_threads())
  1246. {
  1247. set_last_error(asio::error::operation_not_supported);
  1248. return;
  1249. }
  1250. clear_last_error();
  1251. asio::steady_timer timer(this->iots_[0]->context());
  1252. timer.expires_after(rel_time);
  1253. timer.wait(get_last_error());
  1254. }
  1255. /**
  1256. * The wait_until() function blocks until the specified time has been reached.
  1257. *
  1258. * @param abs_time - The time point until which the call may block.
  1259. */
  1260. template <typename Clock, typename Duration>
  1261. void wait_until(const std::chrono::time_point<Clock, Duration>& abs_time)
  1262. {
  1263. if (this->iopool().running_in_threads())
  1264. {
  1265. set_last_error(asio::error::operation_not_supported);
  1266. return;
  1267. }
  1268. clear_last_error();
  1269. asio::steady_timer timer(this->iots_[0]->context());
  1270. timer.expires_at(abs_time);
  1271. timer.wait(get_last_error());
  1272. }
  1273. /**
  1274. * The wait_signal() function blocks until some signal is delivered.
  1275. *
  1276. * @return The delivered signal number. May be an invalid value if some exception occurred.
  1277. */
  1278. template <class... Ints>
  1279. int wait_signal(Ints... signal_number)
  1280. {
  1281. if (this->iopool().running_in_threads())
  1282. {
  1283. set_last_error(asio::error::operation_not_supported);
  1284. return 0;
  1285. }
  1286. clear_last_error();
  1287. // note: the variable name "signals" would conflict with Qt's "signals" macro
  1288. asio::signal_set signalset(this->iots_[0]->context());
  1289. (signalset.add(signal_number), ...);
  1290. std::promise<int> promise;
  1291. std::future<int> future = promise.get_future();
  1292. signalset.async_wait([&](const error_code& /*ec*/, int signo)
  1293. {
  1294. promise.set_value(signo);
  1295. });
  1296. return future.get();
  1297. }
  1298. /**
  1299. * Get the iopool_base interface reference.
  1300. */
  1301. inline iopool_base& iopool() noexcept { return (*(this->iopool_)); }
  1302. /**
  1303. * Get the iopool_base interface reference.
  1304. */
  1305. inline iopool_base const& iopool() const noexcept { return (*(this->iopool_)); }
  1306. protected:
  1307. inline std::shared_ptr<io_t> _get_io(std::size_t index = static_cast<std::size_t>(-1)) noexcept
  1308. {
  1309. ASIO2_ASSERT(!iots_.empty());
  1310. std::size_t n = (index == static_cast<std::size_t>(-1) ?
  1311. ((++(this->next_)) % this->iots_.size()) : (index % this->iots_.size()));
  1312. return this->iots_[n];
  1313. }
  1314. inline bool is_iopool_started() const noexcept
  1315. {
  1316. return this->iopool_->started();
  1317. }
  1318. inline bool is_iopool_stopped() const noexcept
  1319. {
  1320. return this->iopool_->stopped();
  1321. }
  1322. inline bool start_iopool()
  1323. {
  1324. bool ret = this->iopool_->start();
  1325. // if the io_context is a custom one passed in by the user, then when the server
  1326. // accepts a new session, the session's fire init will be called, but at
  1327. // this time, the session io_t's thread id is not initialized; if the user calls the thread
  1328. // id function in the fire init callback, it will fail, so we initialize the thread id
  1329. // of all ios here first.
  1330. if (ret)
  1331. {
  1332. for (std::shared_ptr<io_t>& iot : this->iots_)
  1333. {
  1334. asio::dispatch(iot->context(), [iot]() mutable
  1335. {
  1336. iot->init_thread_id();
  1337. });
  1338. }
  1339. }
  1340. return ret;
  1341. }
  1342. inline void stop_iopool()
  1343. {
  1344. if (this->is_iopool_stopped())
  1345. return;
  1346. derived_t& derive = static_cast<derived_t&>(*this);
  1347. std::shared_ptr<io_t>& iot = this->iots_[0];
  1348. // if the server's or client's iopool is a user_iopool, then when server.stop
  1349. // or client.stop is called, we need to notify the wait_stop timer to cancel;
  1350. // otherwise the wait_stop function will block forever.
  1351. // We must use asio::post to ensure the wait_stop_timer_ is read and written in the
  1352. // same thread.
  1353. asio::post(iot->context(), [this, iot, this_ptr = derive.selfptr()]() mutable
  1354. {
  1355. detail::ignore_unused(this_ptr);
  1356. if (this->wait_stop_timer_)
  1357. {
  1358. iot->timers().erase(this->wait_stop_timer_.get());
  1359. detail::cancel_timer(*(this->wait_stop_timer_));
  1360. }
  1361. });
  1362. this->iopool_->stop();
  1363. }
  1364. /**
  1365. * @brief destroy the content of all member variables; this is used to solve memory leaks.
  1366. * After this function is called, this class object cannot be used again.
  1367. */
  1368. void destroy_iopool() noexcept
  1369. {
  1370. this->iots_.clear();
  1371. this->wait_stop_timer_.reset();
  1372. this->iopool_->destroy();
  1373. // can't reset the iopool pointer to nullptr, because the derived class destructor will call
  1374. // the iopool.is_stopped() function.
  1375. //this->iopool_.reset();
  1376. }
  1377. protected:
  1378. template<class T>
  1379. std::vector<std::shared_ptr<io_t>> to_iots(T&& v)
  1380. {
  1381. using type = typename detail::remove_cvref_t<T>;
  1382. std::vector<std::shared_ptr<io_t>> iots{};
  1383. if constexpr (std::is_same_v<type, detail::iopool>)
  1384. {
  1385. ASIO2_ASSERT(v.size() && "The iopool is empty.");
  1386. for (std::size_t i = 0; i < v.size(); ++i)
  1387. {
  1388. iots.emplace_back(v.get(i));
  1389. }
  1390. }
  1391. else if constexpr (std::is_same_v<type, std::shared_ptr<asio::io_context>>)
  1392. {
  1393. ASIO2_ASSERT(v && "The io_context pointer is nullptr.");
  1394. iots.emplace_back(std::make_shared<io_t>(std::forward<T>(v)));
  1395. }
  1396. else if constexpr (std::is_same_v<type, asio::io_context*>)
  1397. {
  1398. ASIO2_ASSERT(v && "The io_context pointer is nullptr.");
  1399. std::shared_ptr<asio::io_context> iop(v, [](asio::io_context*) {});
  1400. iots.emplace_back(std::make_shared<io_t>(std::move(iop)));
  1401. }
  1402. else if constexpr (std::is_same_v<type, asio::io_context>)
  1403. {
  1404. static_assert(std::is_reference_v<std::remove_cv_t<T>>);
  1405. std::shared_ptr<asio::io_context> iop(std::addressof(v), [](asio::io_context*) {});
  1406. iots.emplace_back(std::make_shared<io_t>(std::move(iop)));
  1407. }
  1408. else if constexpr (std::is_same_v<type, std::shared_ptr<io_t>>)
  1409. {
  1410. ASIO2_ASSERT(v && "The io_t pointer is nullptr.");
  1411. iots.emplace_back(std::forward<T>(v));
  1412. }
  1413. else if constexpr (std::is_same_v<type, io_t*>)
  1414. {
  1415. ASIO2_ASSERT(v && "The io_t pointer is nullptr.");
  1416. iots.emplace_back(std::shared_ptr<io_t>(v, [](io_t*) {}));
  1417. }
  1418. else if constexpr (std::is_same_v<type, io_t>)
  1419. {
  1420. static_assert(std::is_reference_v<std::remove_cv_t<T>>);
  1421. iots.emplace_back(std::shared_ptr<io_t>(std::addressof(v), [](io_t*) {}));
  1422. }
  1423. else
  1424. {
  1425. // std::vector<...> std::list<...>
  1426. ASIO2_ASSERT(!v.empty() && "The container is empty.");
  1427. for (auto& e : v)
  1428. {
  1429. std::vector<std::shared_ptr<io_t>> tmps = this->to_iots(e);
  1430. for (std::shared_ptr<io_t>& iot : tmps)
  1431. {
  1432. iots.emplace_back(std::move(iot));
  1433. }
  1434. }
  1435. }
  1436. return iots;
  1437. }
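// e.g. (illustrative sketch of the forms accepted above, assuming a derived class
// "some_asio2_object" exposes a constructor that forwards its argument here):
//
// asio::io_context ioc(1);
// some_asio2_object obj1(ioc);                                   // io_context reference
// some_asio2_object obj2(std::make_shared<asio::io_context>(1)); // io_context shared_ptr
// // ... the user is responsible for running the supplied io_context(s).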
  1438. protected:
  1439. /// the io_context pool for socket event
  1440. std::unique_ptr<iopool_base> iopool_;
  1441. /// Use a copy to avoid calling the virtual function "iopool_base::get"
  1442. std::vector<std::shared_ptr<io_t>> iots_;
  1443. /// The next io_context to use for a connection.
  1444. std::size_t next_;
  1445. /// the timer used for wait_stop function.
  1446. std::unique_ptr<asio::steady_timer> wait_stop_timer_;
  1447. };
  1448. }
  1449. namespace asio2
  1450. {
  1451. using io_t = detail::io_t;
  1452. using iopool = detail::iopool;
  1453. }
  1454. #endif // !__ASIO2_IOPOOL_HPP__