gather.hpp

//
// Copyright (c) 2022 Klemens Morgenstern (klemens.morgenstern@gmx.net)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef BOOST_COBALT_DETAIL_GATHER_HPP
#define BOOST_COBALT_DETAIL_GATHER_HPP

#include <boost/cobalt/detail/await_result_helper.hpp>
#include <boost/cobalt/detail/exception.hpp>
#include <boost/cobalt/detail/fork.hpp>
#include <boost/cobalt/detail/forward_cancellation.hpp>
#include <boost/cobalt/detail/util.hpp>
#include <boost/cobalt/detail/wrapper.hpp>
#include <boost/cobalt/task.hpp>
#include <boost/cobalt/this_thread.hpp>

#include <boost/asio/associated_cancellation_slot.hpp>
#include <boost/asio/bind_cancellation_slot.hpp>
#include <boost/asio/cancellation_signal.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/intrusive_ptr.hpp>
#include <boost/system/result.hpp>
#include <boost/variant2/variant.hpp>

#include <array>
#include <coroutine>
#include <algorithm>
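
// Implementation machinery for cobalt::gather: awaits a fixed set (variadic) or a
// range of awaitables concurrently and reports each outcome individually, storing
// either the produced value or the exception it threw.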

namespace boost::cobalt::detail
{

template<typename ... Args>
struct gather_variadic_impl
{
  using tuple_type = std::tuple<decltype(get_awaitable_type(std::declval<Args&&>()))...>;

  gather_variadic_impl(Args && ... args)
      : args{std::forward<Args>(args)...}
  {
  }

  std::tuple<Args...> args;

  constexpr static std::size_t tuple_size = sizeof...(Args);
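
  // Awaitable returned by operator co_await(). It holds the child awaitables, one
  // cancellation signal and one result slot per element, and derives from
  // fork::static_shared_state to provide a fixed-size buffer for the forked
  // helper coroutines.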
  struct awaitable : fork::static_shared_state<256 * tuple_size>
  {
    template<std::size_t ... Idx>
    awaitable(std::tuple<Args...> & args, std::index_sequence<Idx...>)
        : aws(awaitable_type_getter<Args>(std::get<Idx>(args))...)
    {
    }

    tuple_type aws;

    std::array<asio::cancellation_signal, tuple_size> cancel;

    template<typename T>
    using result_store_part = variant2::variant<
        variant2::monostate,
        void_as_monostate<co_await_result_t<T>>,
        std::exception_ptr>;

    std::tuple<result_store_part<Args>...> result;

    template<std::size_t Idx>
    void interrupt_await_step()
    {
      using type = std::tuple_element_t<Idx, std::tuple<Args...>>;
      using t = std::conditional_t<
          std::is_reference_v<std::tuple_element_t<Idx, decltype(aws)>>,
          co_awaitable_type<type> &,
          co_awaitable_type<type> &&>;

      if constexpr (interruptible<t>)
        static_cast<t>(std::get<Idx>(aws)).interrupt_await();
    }

    void interrupt_await()
    {
      mp11::mp_for_each<mp11::mp_iota_c<sizeof...(Args)>>
          ([&](auto idx)
          {
            interrupt_await_step<idx>();
          });
    }
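
    // One fork per element: await_impl<Idx> drives a single child awaitable and
    // stores its value (or the exception it threw) in the matching result slot.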
    // GCC doesn't like member funs
    template<std::size_t Idx>
    static detail::fork await_impl(awaitable & this_)
    BOOST_TRY
    {
      auto & aw = std::get<Idx>(this_.aws);
      // check manually if we're ready
      auto rd = aw.await_ready();
      if (!rd)
      {
        co_await this_.cancel[Idx].slot();
        // make sure the executor is set
        co_await detail::fork::wired_up;
        // do the await - this doesn't call await-ready again
        if constexpr (std::is_void_v<decltype(aw.await_resume())>)
        {
          co_await aw;
          std::get<Idx>(this_.result).template emplace<1u>();
        }
        else
          std::get<Idx>(this_.result).template emplace<1u>(co_await aw);
      }
      else
      {
        if constexpr (std::is_void_v<decltype(aw.await_resume())>)
        {
          aw.await_resume();
          std::get<Idx>(this_.result).template emplace<1u>();
        }
        else
          std::get<Idx>(this_.result).template emplace<1u>(aw.await_resume());
      }
    }
    BOOST_CATCH(...)
    {
      std::get<Idx>(this_.result).template emplace<2u>(std::current_exception());
    }
    BOOST_CATCH_END

    std::array<detail::fork(*)(awaitable&), tuple_size> impls {
        []<std::size_t ... Idx>(std::index_sequence<Idx...>)
        {
          return std::array<detail::fork(*)(awaitable&), tuple_size>{&await_impl<Idx>...};
        }(std::make_index_sequence<tuple_size>{})
    };

    detail::fork last_forked;
    std::size_t last_index = 0u;

    bool await_ready()
    {
      while (last_index < tuple_size)
      {
        last_forked = impls[last_index++](*this);
        if (!last_forked.done())
          return false; // one coro didn't immediately complete!
      }
      last_forked.release();
      return true;
    }
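
    // Resume the fork that did not complete synchronously, start the remaining
    // ones, and if work is still outstanding hook the awaiting coroutine's
    // cancellation slot up to every per-element cancellation signal.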
    template<typename H>
    auto await_suspend(
        std::coroutine_handle<H> h
#if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
        , const boost::source_location & loc = BOOST_CURRENT_LOCATION
#endif
        )
    {
#if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
      this->loc = loc;
#endif
      this->exec = &cobalt::detail::get_executor(h);
      last_forked.release().resume();

      while (last_index < tuple_size)
        impls[last_index++](*this).release();

      if (!this->outstanding_work()) // already done, resume right away.
        return false;

      // arm the cancel
      assign_cancellation(
          h,
          [&](asio::cancellation_type ct)
          {
            for (auto & cs : cancel)
              cs.emit(ct);
          });

      this->coro.reset(h.address());
      return true;
    }
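
    // Convert the variant storage into the final tuple of system::result values,
    // mapping stored monostate back to void and a captured exception into the
    // error alternative.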
    template<typename T>
    using result_part = system::result<co_await_result_t<T>, std::exception_ptr>;

#if _MSC_VER
    BOOST_NOINLINE
#endif
    std::tuple<result_part<Args> ...> await_resume()
    {
      return mp11::tuple_transform(
          []<typename T>(variant2::variant<variant2::monostate, T, std::exception_ptr> & var)
              -> system::result<monostate_as_void<T>, std::exception_ptr>
          {
            BOOST_ASSERT(var.index() != 0u);
            if (var.index() == 1u)
            {
              if constexpr (std::is_same_v<T, variant2::monostate>)
                return {system::in_place_value};
              else
                return {system::in_place_value, std::move(get<1>(var))};
            }
            else
              return {system::in_place_error, std::move(get<2>(var))};
          }, result);
    }
  };

  awaitable operator co_await() &&
  {
    return awaitable(args, std::make_index_sequence<sizeof...(Args)>{});
  }
};
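
// Range form of gather: same strategy as the variadic version, but the child
// awaitables, cancellation signals and result slots are kept in vectors sized
// from the range.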

template<typename Range>
struct gather_ranged_impl
{
  Range aws;

  using result_type = system::result<
      co_await_result_t<std::decay_t<decltype(*std::begin(std::declval<Range>()))>>,
      std::exception_ptr>;

  using result_storage_type = variant2::variant<
      variant2::monostate,
      void_as_monostate<
          co_await_result_t<std::decay_t<decltype(*std::begin(std::declval<Range>()))>>
      >,
      std::exception_ptr>;

  struct awaitable : fork::shared_state
  {
    using type = std::decay_t<decltype(*std::begin(std::declval<Range>()))>;

#if !defined(BOOST_COBALT_NO_PMR)
    pmr::polymorphic_allocator<void> alloc{&resource};

    std::conditional_t<awaitable_type<type>, Range &,
                       pmr::vector<co_awaitable_type<type>>> aws;

    pmr::vector<bool> ready{std::size(aws), alloc};
    pmr::vector<asio::cancellation_signal> cancel{std::size(aws), alloc};
    pmr::vector<result_storage_type> result{cancel.size(), alloc};
#else
    std::allocator<void> alloc{};

    std::conditional_t<awaitable_type<type>, Range &,
                       std::vector<co_awaitable_type<type>>> aws;

    std::vector<bool> ready{std::size(aws), alloc};
    std::vector<asio::cancellation_signal> cancel{std::size(aws), alloc};
    std::vector<result_storage_type> result{cancel.size(), alloc};
#endif

    awaitable(Range & aws_, std::false_type /* needs operator co_await */)
        : fork::shared_state((512 + sizeof(co_awaitable_type<type>)) * std::size(aws_))
        , aws{alloc}
        , ready{std::size(aws_), alloc}
        , cancel{std::size(aws_), alloc}
    {
      aws.reserve(std::size(aws_));
      for (auto && a : aws_)
      {
        using a_0 = std::decay_t<decltype(a)>;
        using a_t = std::conditional_t<
            std::is_lvalue_reference_v<Range>, a_0 &, a_0 &&>;
        aws.emplace_back(awaitable_type_getter<a_t>(static_cast<a_t>(a)));
      }

      std::transform(std::begin(this->aws),
                     std::end(this->aws),
                     std::begin(ready),
                     [](auto & aw) {return aw.await_ready();});
    }

    awaitable(Range & aws, std::true_type /* needs operator co_await */)
        : fork::shared_state((512 + sizeof(co_awaitable_type<type>)) * std::size(aws))
        , aws(aws)
    {
      std::transform(std::begin(aws), std::end(aws), std::begin(ready),
                     [](auto & aw) {return aw.await_ready();});
    }

    awaitable(Range & aws)
        : awaitable(aws, std::bool_constant<awaitable_type<type>>{})
    {
    }

    void interrupt_await()
    {
      using t = std::conditional_t<std::is_reference_v<Range>,
          co_awaitable_type<type> &,
          co_awaitable_type<type> &&>;

      if constexpr (interruptible<t>)
        for (auto & aw : aws)
          static_cast<t>(aw).interrupt_await();
    }
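
    // Same per-element fork as in the variadic case, selected by runtime index
    // rather than by template parameter.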
    static detail::fork await_impl(awaitable & this_, std::size_t idx)
    BOOST_TRY
    {
      auto & aw = *std::next(std::begin(this_.aws), idx);
      auto rd = aw.await_ready();
      if (!rd)
      {
        co_await this_.cancel[idx].slot();
        co_await detail::fork::wired_up;
        if constexpr (std::is_void_v<decltype(aw.await_resume())>)
        {
          co_await aw;
          this_.result[idx].template emplace<1u>();
        }
        else
          this_.result[idx].template emplace<1u>(co_await aw);
      }
      else
      {
        if constexpr (std::is_void_v<decltype(aw.await_resume())>)
        {
          aw.await_resume();
          this_.result[idx].template emplace<1u>();
        }
        else
          this_.result[idx].template emplace<1u>(aw.await_resume());
      }
    }
    BOOST_CATCH(...)
    {
      this_.result[idx].template emplace<2u>(std::current_exception());
    }
    BOOST_CATCH_END

    detail::fork last_forked;
    std::size_t last_index = 0u;

    bool await_ready()
    {
      while (last_index < cancel.size())
      {
        last_forked = await_impl(*this, last_index++);
        if (!last_forked.done())
          return false; // one coro didn't immediately complete!
      }
      last_forked.release();
      return true;
    }

    template<typename H>
    auto await_suspend(
        std::coroutine_handle<H> h
#if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
        , const boost::source_location & loc = BOOST_CURRENT_LOCATION
#endif
        )
    {
#if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
      this->loc = loc;
#endif
      exec = &detail::get_executor(h);
      last_forked.release().resume();

      while (last_index < cancel.size())
        await_impl(*this, last_index++).release();

      if (!this->outstanding_work()) // already done, resume right away.
        return false;

      // arm the cancel
      assign_cancellation(
          h,
          [&](asio::cancellation_type ct)
          {
            for (auto & cs : cancel)
              cs.emit(ct);
          });

      this->coro.reset(h.address());
      return true;
    }
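
    // Collect the results into a vector of system::result, one entry per element,
    // allocated from the current thread's allocator when PMR is enabled.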
#if _MSC_VER
    BOOST_NOINLINE
#endif
    auto await_resume()
    {
#if !defined(BOOST_COBALT_NO_PMR)
      pmr::vector<result_type> res{result.size(), this_thread::get_allocator()};
#else
      std::vector<result_type> res(result.size());
#endif

      std::transform(
          result.begin(), result.end(), res.begin(),
          [](result_storage_type & res) -> result_type
          {
            BOOST_ASSERT(res.index() != 0u);
            if (res.index() == 1u)
            {
              if constexpr (std::is_void_v<typename result_type::value_type>)
                return system::in_place_value;
              else
                return {system::in_place_value, std::move(get<1u>(res))};
            }
            else
              return {system::in_place_error, get<2u>(res)};
          });

      return res;
    }
  };

  awaitable operator co_await() && {return awaitable{aws};}
};

}

#endif //BOOST_COBALT_DETAIL_GATHER_HPP