//
// detail/impl/scheduler.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2023 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef ASIO_DETAIL_IMPL_SCHEDULER_IPP
#define ASIO_DETAIL_IMPL_SCHEDULER_IPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include "asio/detail/config.hpp"
#include "asio/detail/concurrency_hint.hpp"
#include "asio/detail/event.hpp"
#include "asio/detail/limits.hpp"
#include "asio/detail/scheduler.hpp"
#include "asio/detail/scheduler_thread_info.hpp"
#include "asio/detail/signal_blocker.hpp"

#if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
# include "asio/detail/io_uring_service.hpp"
#else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
# include "asio/detail/reactor.hpp"
#endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)

#include "asio/detail/push_options.hpp"

namespace asio {
namespace detail {
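
// Function object executed by the thread that the scheduler optionally owns
// (see the constructor's own_thread parameter). It simply drives run() until
// the scheduler is stopped, discarding the resulting error code.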
class scheduler::thread_function
{
public:
  explicit thread_function(scheduler* s)
    : this_(s)
  {
  }

  void operator()()
  {
    asio::error_code ec;
    this_->run(ec);
  }

private:
  scheduler* this_;
};
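
// Scope guard active while the task (reactor or io_uring service) runs
// outside the lock. On destruction it publishes any privately counted work
// to the shared outstanding_work_ counter, then reacquires the lock to
// enqueue the completed operations and the task sentinel.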
struct scheduler::task_cleanup
{
  ~task_cleanup()
  {
    if (this_thread_->private_outstanding_work > 0)
    {
      asio::detail::increment(
          scheduler_->outstanding_work_,
          this_thread_->private_outstanding_work);
    }
    this_thread_->private_outstanding_work = 0;

    // Enqueue the completed operations and reinsert the task at the end of
    // the operation queue.
    lock_->lock();
    scheduler_->task_interrupted_ = true;
    scheduler_->op_queue_.push(this_thread_->private_op_queue);
    scheduler_->op_queue_.push(&scheduler_->task_operation_);
  }

  scheduler* scheduler_;
  mutex::scoped_lock* lock_;
  thread_info* this_thread_;
};
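
// Scope guard active while a handler executes outside the lock. The handler
// itself consumes one unit of outstanding work, so on destruction the
// private work count is reconciled against that single unit: a surplus is
// published to the shared counter, a count of zero means the net work has
// finished, and a count of exactly one cancels out with no global update.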
struct scheduler::work_cleanup
{
  ~work_cleanup()
  {
    if (this_thread_->private_outstanding_work > 1)
    {
      asio::detail::increment(
          scheduler_->outstanding_work_,
          this_thread_->private_outstanding_work - 1);
    }
    else if (this_thread_->private_outstanding_work < 1)
    {
      scheduler_->work_finished();
    }
    this_thread_->private_outstanding_work = 0;

#if defined(ASIO_HAS_THREADS)
    if (!this_thread_->private_op_queue.empty())
    {
      lock_->lock();
      scheduler_->op_queue_.push(this_thread_->private_op_queue);
    }
#endif // defined(ASIO_HAS_THREADS)
  }

  scheduler* scheduler_;
  mutex::scoped_lock* lock_;
  thread_info* this_thread_;
};
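
// The one_thread_ flag selects a single-threaded fast path: when the
// concurrency hint promises that locking is unnecessary for the scheduler
// or for reactor I/O, cross-thread wakeups can be skipped and operations
// can stay on thread-private queues.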
scheduler::scheduler(asio::execution_context& ctx,
    int concurrency_hint, bool own_thread, get_task_func_type get_task)
  : asio::detail::execution_context_service_base<scheduler>(ctx),
    one_thread_(concurrency_hint == 1
        || !ASIO_CONCURRENCY_HINT_IS_LOCKING(
          SCHEDULER, concurrency_hint)
        || !ASIO_CONCURRENCY_HINT_IS_LOCKING(
          REACTOR_IO, concurrency_hint)),
    mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING(
          SCHEDULER, concurrency_hint)),
    task_(0),
    get_task_(get_task),
    task_interrupted_(true),
    outstanding_work_(0),
    stopped_(false),
    shutdown_(false),
    concurrency_hint_(concurrency_hint),
    thread_(0)
{
  ASIO_HANDLER_TRACKING_INIT;

  if (own_thread)
  {
    ++outstanding_work_;
    asio::detail::signal_blocker sb;
    thread_ = new asio::detail::thread(thread_function(this));
  }
}

scheduler::~scheduler()
{
  if (thread_)
  {
    mutex::scoped_lock lock(mutex_);
    shutdown_ = true;
    stop_all_threads(lock);
    lock.unlock();
    thread_->join();
    delete thread_;
  }
}

void scheduler::shutdown()
{
  mutex::scoped_lock lock(mutex_);
  shutdown_ = true;
  if (thread_)
    stop_all_threads(lock);
  lock.unlock();

  // Join thread to ensure task operation is returned to queue.
  if (thread_)
  {
    thread_->join();
    delete thread_;
    thread_ = 0;
  }

  // Destroy handler objects.
  while (!op_queue_.empty())
  {
    operation* o = op_queue_.front();
    op_queue_.pop();
    if (o != &task_operation_)
      o->destroy();
  }

  // Reset to initial state.
  task_ = 0;
}
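
// Lazily obtain the task and insert its sentinel operation into the queue.
// The task_operation_ marker tells a worker thread to run the task rather
// than complete a handler.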
void scheduler::init_task()
{
  mutex::scoped_lock lock(mutex_);
  if (!shutdown_ && !task_)
  {
    task_ = get_task_(this->context());
    op_queue_.push(&task_operation_);
    wake_one_thread_and_unlock(lock);
  }
}
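
// Each public entry point below registers the calling thread on the
// thread_call_stack, so that can_dispatch() and the private-queue fast
// paths can detect re-entry into this scheduler.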
std::size_t scheduler::run(asio::error_code& ec)
{
  ec = asio::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  std::size_t n = 0;
  for (; do_run_one(lock, this_thread, ec); lock.lock())
    if (n != (std::numeric_limits<std::size_t>::max)())
      ++n;
  return n;
}

std::size_t scheduler::run_one(asio::error_code& ec)
{
  ec = asio::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  return do_run_one(lock, this_thread, ec);
}

std::size_t scheduler::wait_one(long usec, asio::error_code& ec)
{
  ec = asio::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  return do_wait_one(lock, this_thread, usec, ec);
}
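
// poll() and poll_one() execute only handlers that are already ready to
// run; the task is invoked with a zero timeout so these calls never block.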
std::size_t scheduler::poll(asio::error_code& ec)
{
  ec = asio::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

#if defined(ASIO_HAS_THREADS)
  // We want to support nested calls to poll() and poll_one(), so any handlers
  // that are already on a thread-private queue need to be put on to the main
  // queue now.
  if (one_thread_)
    if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
      op_queue_.push(outer_info->private_op_queue);
#endif // defined(ASIO_HAS_THREADS)

  std::size_t n = 0;
  for (; do_poll_one(lock, this_thread, ec); lock.lock())
    if (n != (std::numeric_limits<std::size_t>::max)())
      ++n;
  return n;
}

std::size_t scheduler::poll_one(asio::error_code& ec)
{
  ec = asio::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

#if defined(ASIO_HAS_THREADS)
  // We want to support nested calls to poll() and poll_one(), so any handlers
  // that are already on a thread-private queue need to be put on to the main
  // queue now.
  if (one_thread_)
    if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
      op_queue_.push(outer_info->private_op_queue);
#endif // defined(ASIO_HAS_THREADS)

  return do_poll_one(lock, this_thread, ec);
}

void scheduler::stop()
{
  mutex::scoped_lock lock(mutex_);
  stop_all_threads(lock);
}

bool scheduler::stopped() const
{
  mutex::scoped_lock lock(mutex_);
  return stopped_;
}

void scheduler::restart()
{
  mutex::scoped_lock lock(mutex_);
  stopped_ = false;
}

void scheduler::compensating_work_started()
{
  thread_info_base* this_thread = thread_call_stack::contains(this);
  ASIO_ASSUME(this_thread != 0); // Only called from inside scheduler.
  ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
}

bool scheduler::can_dispatch()
{
  return thread_call_stack::contains(this) != 0;
}

void scheduler::capture_current_exception()
{
  if (thread_info_base* this_thread = thread_call_stack::contains(this))
    this_thread->capture_current_exception();
}
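
// Fast path: if the posting thread is already running this scheduler, and
// either the scheduler is effectively single-threaded or the operation is a
// continuation of the current handler, the operation goes onto the
// thread-private queue with no locking or wakeup required.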
void scheduler::post_immediate_completion(
    scheduler::operation* op, bool is_continuation)
{
#if defined(ASIO_HAS_THREADS)
  if (one_thread_ || is_continuation)
  {
    if (thread_info_base* this_thread = thread_call_stack::contains(this))
    {
      ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
      static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
      return;
    }
  }
#else // defined(ASIO_HAS_THREADS)
  (void)is_continuation;
#endif // defined(ASIO_HAS_THREADS)

  work_started();
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

void scheduler::post_immediate_completions(std::size_t n,
    op_queue<scheduler::operation>& ops, bool is_continuation)
{
#if defined(ASIO_HAS_THREADS)
  if (one_thread_ || is_continuation)
  {
    if (thread_info_base* this_thread = thread_call_stack::contains(this))
    {
      static_cast<thread_info*>(this_thread)->private_outstanding_work
        += static_cast<long>(n);
      static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
      return;
    }
  }
#else // defined(ASIO_HAS_THREADS)
  (void)is_continuation;
#endif // defined(ASIO_HAS_THREADS)

  increment(outstanding_work_, static_cast<long>(n));
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(ops);
  wake_one_thread_and_unlock(lock);
}
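
// The deferred variants assume the work was already counted via
// work_started(), so only the queue push and the wakeup remain.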
void scheduler::post_deferred_completion(scheduler::operation* op)
{
#if defined(ASIO_HAS_THREADS)
  if (one_thread_)
  {
    if (thread_info_base* this_thread = thread_call_stack::contains(this))
    {
      static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
      return;
    }
  }
#endif // defined(ASIO_HAS_THREADS)

  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

void scheduler::post_deferred_completions(
    op_queue<scheduler::operation>& ops)
{
  if (!ops.empty())
  {
#if defined(ASIO_HAS_THREADS)
    if (one_thread_)
    {
      if (thread_info_base* this_thread = thread_call_stack::contains(this))
      {
        static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
        return;
      }
    }
#endif // defined(ASIO_HAS_THREADS)

    mutex::scoped_lock lock(mutex_);
    op_queue_.push(ops);
    wake_one_thread_and_unlock(lock);
  }
}

void scheduler::do_dispatch(
    scheduler::operation* op)
{
  work_started();
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}
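
// Ownership of abandoned operations is transferred to a local queue, whose
// destructor destroys the operations without invoking their handlers.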
void scheduler::abandon_operations(
    op_queue<scheduler::operation>& ops)
{
  op_queue<scheduler::operation> ops2;
  ops2.push(ops);
}
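
// Core of the event loop: pop one operation from the queue. If it is the
// task sentinel, run the task (blocking only when no other handlers are
// queued); otherwise complete the handler. Returns 1 if a handler ran,
// or 0 once the scheduler has been stopped.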
std::size_t scheduler::do_run_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread,
    const asio::error_code& ec)
{
  while (!stopped_)
  {
    if (!op_queue_.empty())
    {
      // Prepare to execute first handler from queue.
      operation* o = op_queue_.front();
      op_queue_.pop();
      bool more_handlers = (!op_queue_.empty());

      if (o == &task_operation_)
      {
        task_interrupted_ = more_handlers;

        if (more_handlers && !one_thread_)
          wakeup_event_.unlock_and_signal_one(lock);
        else
          lock.unlock();

        task_cleanup on_exit = { this, &lock, &this_thread };
        (void)on_exit;

        // Run the task. May throw an exception. Only block if the operation
        // queue is empty and we're not polling, otherwise we want to return
        // as soon as possible.
        task_->run(more_handlers ? 0 : -1, this_thread.private_op_queue);
      }
      else
      {
        std::size_t task_result = o->task_result_;

        if (more_handlers && !one_thread_)
          wake_one_thread_and_unlock(lock);
        else
          lock.unlock();

        // Ensure the count of outstanding work is decremented on block exit.
        work_cleanup on_exit = { this, &lock, &this_thread };
        (void)on_exit;

        // Complete the operation. May throw an exception. Deletes the object.
        o->complete(this, ec, task_result);
        this_thread.rethrow_pending_exception();

        return 1;
      }
    }
    else
    {
      wakeup_event_.clear(lock);
      wakeup_event_.wait(lock);
    }
  }

  return 0;
}
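
// Timed variant of do_run_one(): waits at most usec microseconds for an
// operation to become available before giving up.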
std::size_t scheduler::do_wait_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread, long usec,
    const asio::error_code& ec)
{
  if (stopped_)
    return 0;

  operation* o = op_queue_.front();
  if (o == 0)
  {
    wakeup_event_.clear(lock);
    wakeup_event_.wait_for_usec(lock, usec);
    usec = 0; // Wait at most once.
    o = op_queue_.front();
  }

  if (o == &task_operation_)
  {
    op_queue_.pop();
    bool more_handlers = (!op_queue_.empty());

    task_interrupted_ = more_handlers;

    if (more_handlers && !one_thread_)
      wakeup_event_.unlock_and_signal_one(lock);
    else
      lock.unlock();

    {
      task_cleanup on_exit = { this, &lock, &this_thread };
      (void)on_exit;

      // Run the task. May throw an exception. Only block if the operation
      // queue is empty and we're not polling, otherwise we want to return
      // as soon as possible.
      task_->run(more_handlers ? 0 : usec, this_thread.private_op_queue);
    }

    o = op_queue_.front();
    if (o == &task_operation_)
    {
      if (!one_thread_)
        wakeup_event_.maybe_unlock_and_signal_one(lock);
      return 0;
    }
  }

  if (o == 0)
    return 0;

  op_queue_.pop();
  bool more_handlers = (!op_queue_.empty());

  std::size_t task_result = o->task_result_;

  if (more_handlers && !one_thread_)
    wake_one_thread_and_unlock(lock);
  else
    lock.unlock();

  // Ensure the count of outstanding work is decremented on block exit.
  work_cleanup on_exit = { this, &lock, &this_thread };
  (void)on_exit;

  // Complete the operation. May throw an exception. Deletes the object.
  o->complete(this, ec, task_result);
  this_thread.rethrow_pending_exception();

  return 1;
}
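
// Non-blocking variant: runs the task in polling mode and completes at most
// one ready handler.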
std::size_t scheduler::do_poll_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread,
    const asio::error_code& ec)
{
  if (stopped_)
    return 0;

  operation* o = op_queue_.front();
  if (o == &task_operation_)
  {
    op_queue_.pop();
    lock.unlock();

    {
      task_cleanup c = { this, &lock, &this_thread };
      (void)c;

      // Run the task with a zero timeout so that it polls rather than
      // blocks. May throw an exception.
      task_->run(0, this_thread.private_op_queue);
    }

    o = op_queue_.front();
    if (o == &task_operation_)
    {
      wakeup_event_.maybe_unlock_and_signal_one(lock);
      return 0;
    }
  }

  if (o == 0)
    return 0;

  op_queue_.pop();
  bool more_handlers = (!op_queue_.empty());

  std::size_t task_result = o->task_result_;

  if (more_handlers && !one_thread_)
    wake_one_thread_and_unlock(lock);
  else
    lock.unlock();

  // Ensure the count of outstanding work is decremented on block exit.
  work_cleanup on_exit = { this, &lock, &this_thread };
  (void)on_exit;

  // Complete the operation. May throw an exception. Deletes the object.
  o->complete(this, ec, task_result);
  this_thread.rethrow_pending_exception();

  return 1;
}
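
// Called with the lock held: marks the scheduler as stopped, wakes all
// threads waiting on the event, and interrupts the task so that a thread
// blocked inside it also returns.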
void scheduler::stop_all_threads(
    mutex::scoped_lock& lock)
{
  stopped_ = true;
  wakeup_event_.signal_all(lock);

  if (!task_interrupted_ && task_)
  {
    task_interrupted_ = true;
    task_->interrupt();
  }
}
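
// Wake exactly one thread: prefer signalling a thread waiting on the event;
// failing that, interrupt the task so the thread running it returns to the
// queue and picks up the new work.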
void scheduler::wake_one_thread_and_unlock(
    mutex::scoped_lock& lock)
{
  if (!wakeup_event_.maybe_unlock_and_signal_one(lock))
  {
    if (!task_interrupted_ && task_)
    {
      task_interrupted_ = true;
      task_->interrupt();
    }
    lock.unlock();
  }
}
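
// The default task is the platform's reactor (e.g. an epoll- or
// kqueue-based implementation), or the io_uring service when
// ASIO_HAS_IO_URING_AS_DEFAULT is defined.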
scheduler_task* scheduler::get_default_task(asio::execution_context& ctx)
{
#if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
  return &use_service<io_uring_service>(ctx);
#else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
  return &use_service<reactor>(ctx);
#endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
}

} // namespace detail
} // namespace asio

#include "asio/detail/pop_options.hpp"

#endif // ASIO_DETAIL_IMPL_SCHEDULER_IPP