io_uring_descriptor_service.hpp 23 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687
  1. //
  2. // detail/io_uring_descriptor_service.hpp
  3. // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  4. //
  5. // Copyright (c) 2003-2023 Christopher M. Kohlhoff (chris at kohlhoff dot com)
  6. //
  7. // Distributed under the Boost Software License, Version 1.0. (See accompanying
  8. // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
  9. //
  10. #ifndef ASIO_DETAIL_IO_URING_DESCRIPTOR_SERVICE_HPP
  11. #define ASIO_DETAIL_IO_URING_DESCRIPTOR_SERVICE_HPP
  12. #if defined(_MSC_VER) && (_MSC_VER >= 1200)
  13. # pragma once
  14. #endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
  15. #include "asio/detail/config.hpp"
  16. #if defined(ASIO_HAS_IO_URING)
  17. #include "asio/associated_cancellation_slot.hpp"
  18. #include "asio/buffer.hpp"
  19. #include "asio/cancellation_type.hpp"
  20. #include "asio/execution_context.hpp"
  21. #include "asio/detail/buffer_sequence_adapter.hpp"
  22. #include "asio/detail/descriptor_ops.hpp"
  23. #include "asio/detail/io_uring_descriptor_read_at_op.hpp"
  24. #include "asio/detail/io_uring_descriptor_read_op.hpp"
  25. #include "asio/detail/io_uring_descriptor_write_at_op.hpp"
  26. #include "asio/detail/io_uring_descriptor_write_op.hpp"
  27. #include "asio/detail/io_uring_null_buffers_op.hpp"
  28. #include "asio/detail/io_uring_service.hpp"
  29. #include "asio/detail/io_uring_wait_op.hpp"
  30. #include "asio/detail/memory.hpp"
  31. #include "asio/detail/noncopyable.hpp"
  32. #include "asio/posix/descriptor_base.hpp"
  33. #include "asio/detail/push_options.hpp"
  34. namespace asio {
  35. namespace detail {
class io_uring_descriptor_service :
  public execution_context_service_base<io_uring_descriptor_service>
{
public:
  // The native type of a descriptor.
  typedef int native_handle_type;

  // The implementation type of the descriptor.
  class implementation_type
    : private asio::detail::noncopyable
  {
  public:
    // Default constructor: represents a closed descriptor (-1) with a
    // cleared state bitmask.
    implementation_type()
      : descriptor_(-1),
        state_(0)
    {
    }

  private:
    // Only this service will have access to the internal values.
    friend class io_uring_descriptor_service;

    // The native descriptor representation (-1 when not open).
    int descriptor_;

    // The current state of the descriptor (bitmask of descriptor_ops
    // flags, e.g. the user/internal non-blocking bits).
    descriptor_ops::state_type state_;

    // Per I/O object data used by the io_uring_service.
    io_uring_service::per_io_object_data io_object_data_;
  };
  // Constructor.
  ASIO_DECL io_uring_descriptor_service(execution_context& context);

  // Destroy all user-defined handler objects owned by the service.
  ASIO_DECL void shutdown();

  // Construct a new descriptor implementation.
  ASIO_DECL void construct(implementation_type& impl);

  // Move-construct a new descriptor implementation.
  ASIO_DECL void move_construct(implementation_type& impl,
      implementation_type& other_impl) noexcept;

  // Move-assign from another descriptor implementation.
  ASIO_DECL void move_assign(implementation_type& impl,
      io_uring_descriptor_service& other_service,
      implementation_type& other_impl);

  // Destroy a descriptor implementation.
  ASIO_DECL void destroy(implementation_type& impl);

  // Assign a native descriptor to a descriptor implementation.
  ASIO_DECL asio::error_code assign(implementation_type& impl,
      const native_handle_type& native_descriptor,
      asio::error_code& ec);

  // Determine whether the descriptor is open (-1 marks a closed one).
  bool is_open(const implementation_type& impl) const
  {
    return impl.descriptor_ != -1;
  }

  // Destroy a descriptor implementation.
  ASIO_DECL asio::error_code close(implementation_type& impl,
      asio::error_code& ec);

  // Get the native descriptor representation.
  native_handle_type native_handle(const implementation_type& impl) const
  {
    return impl.descriptor_;
  }

  // Release ownership of the native descriptor representation.
  ASIO_DECL native_handle_type release(implementation_type& impl);

  // Release ownership of the native descriptor representation, reporting
  // success via ec (uses the cached success value to avoid touching the
  // error category singleton).
  native_handle_type release(implementation_type& impl,
      asio::error_code& ec)
  {
    ec = success_ec_;
    return release(impl);
  }

  // Cancel all operations associated with the descriptor.
  ASIO_DECL asio::error_code cancel(implementation_type& impl,
      asio::error_code& ec);
  // Perform an IO control command on the descriptor. Returns ec, which
  // carries the result of the underlying ioctl() call.
  template <typename IO_Control_Command>
  asio::error_code io_control(implementation_type& impl,
      IO_Control_Command& command, asio::error_code& ec)
  {
    descriptor_ops::ioctl(impl.descriptor_, impl.state_,
        command.name(), static_cast<ioctl_arg_type*>(command.data()), ec);
    ASIO_ERROR_LOCATION(ec);
    return ec;
  }

  // Gets the non-blocking mode of the descriptor, as requested by the
  // user (distinct from the internal non-blocking state below).
  bool non_blocking(const implementation_type& impl) const
  {
    return (impl.state_ & descriptor_ops::user_set_non_blocking) != 0;
  }

  // Sets the non-blocking mode of the descriptor.
  asio::error_code non_blocking(implementation_type& impl,
      bool mode, asio::error_code& ec)
  {
    descriptor_ops::set_user_non_blocking(
        impl.descriptor_, impl.state_, mode, ec);
    ASIO_ERROR_LOCATION(ec);
    return ec;
  }

  // Gets the non-blocking mode of the native descriptor implementation.
  bool native_non_blocking(const implementation_type& impl) const
  {
    return (impl.state_ & descriptor_ops::internal_non_blocking) != 0;
  }

  // Sets the non-blocking mode of the native descriptor implementation.
  asio::error_code native_non_blocking(implementation_type& impl,
      bool mode, asio::error_code& ec)
  {
    descriptor_ops::set_internal_non_blocking(
        impl.descriptor_, impl.state_, mode, ec);
    ASIO_ERROR_LOCATION(ec);
    return ec;
  }
  145. // Wait for the descriptor to become ready to read, ready to write, or to have
  146. // pending error conditions.
  147. asio::error_code wait(implementation_type& impl,
  148. posix::descriptor_base::wait_type w, asio::error_code& ec)
  149. {
  150. switch (w)
  151. {
  152. case posix::descriptor_base::wait_read:
  153. descriptor_ops::poll_read(impl.descriptor_, impl.state_, ec);
  154. break;
  155. case posix::descriptor_base::wait_write:
  156. descriptor_ops::poll_write(impl.descriptor_, impl.state_, ec);
  157. break;
  158. case posix::descriptor_base::wait_error:
  159. descriptor_ops::poll_error(impl.descriptor_, impl.state_, ec);
  160. break;
  161. default:
  162. ec = asio::error::invalid_argument;
  163. break;
  164. }
  165. ASIO_ERROR_LOCATION(ec);
  166. return ec;
  167. }
  168. // Asynchronously wait for the descriptor to become ready to read, ready to
  169. // write, or to have pending error conditions.
  170. template <typename Handler, typename IoExecutor>
  171. void async_wait(implementation_type& impl,
  172. posix::descriptor_base::wait_type w,
  173. Handler& handler, const IoExecutor& io_ex)
  174. {
  175. bool is_continuation =
  176. asio_handler_cont_helpers::is_continuation(handler);
  177. associated_cancellation_slot_t<Handler> slot
  178. = asio::get_associated_cancellation_slot(handler);
  179. int op_type;
  180. int poll_flags;
  181. switch (w)
  182. {
  183. case posix::descriptor_base::wait_read:
  184. op_type = io_uring_service::read_op;
  185. poll_flags = POLLIN;
  186. break;
  187. case posix::descriptor_base::wait_write:
  188. op_type = io_uring_service::write_op;
  189. poll_flags = POLLOUT;
  190. break;
  191. case posix::descriptor_base::wait_error:
  192. op_type = io_uring_service::except_op;
  193. poll_flags = POLLPRI | POLLERR | POLLHUP;
  194. break;
  195. default:
  196. op_type = -1;
  197. poll_flags = -1;
  198. return;
  199. }
  200. // Allocate and construct an operation to wrap the handler.
  201. typedef io_uring_wait_op<Handler, IoExecutor> op;
  202. typename op::ptr p = { asio::detail::addressof(handler),
  203. op::ptr::allocate(handler), 0 };
  204. p.p = new (p.v) op(success_ec_, impl.descriptor_,
  205. poll_flags, handler, io_ex);
  206. // Optionally register for per-operation cancellation.
  207. if (slot.is_connected() && op_type != -1)
  208. {
  209. p.p->cancellation_key_ =
  210. &slot.template emplace<io_uring_op_cancellation>(
  211. &io_uring_service_, &impl.io_object_data_, op_type);
  212. }
  213. ASIO_HANDLER_CREATION((io_uring_service_.context(), *p.p,
  214. "descriptor", &impl, impl.descriptor_, "async_wait"));
  215. start_op(impl, op_type, p.p, is_continuation, op_type == -1);
  216. p.v = p.p = 0;
  217. }
  218. // Write some data to the descriptor.
  219. template <typename ConstBufferSequence>
  220. size_t write_some(implementation_type& impl,
  221. const ConstBufferSequence& buffers, asio::error_code& ec)
  222. {
  223. typedef buffer_sequence_adapter<asio::const_buffer,
  224. ConstBufferSequence> bufs_type;
  225. size_t n;
  226. if (bufs_type::is_single_buffer)
  227. {
  228. n = descriptor_ops::sync_write1(impl.descriptor_,
  229. impl.state_, bufs_type::first(buffers).data(),
  230. bufs_type::first(buffers).size(), ec);
  231. }
  232. else
  233. {
  234. bufs_type bufs(buffers);
  235. n = descriptor_ops::sync_write(impl.descriptor_, impl.state_,
  236. bufs.buffers(), bufs.count(), bufs.all_empty(), ec);
  237. }
  238. ASIO_ERROR_LOCATION(ec);
  239. return n;
  240. }
  // Wait until data can be written without blocking.
  size_t write_some(implementation_type& impl,
      const null_buffers&, asio::error_code& ec)
  {
    // Wait for descriptor to become ready.
    descriptor_ops::poll_write(impl.descriptor_, impl.state_, ec);
    ASIO_ERROR_LOCATION(ec);

    // A null_buffers "write" transfers no data.
    return 0;
  }
  // Start an asynchronous write. The data being sent must be valid for the
  // lifetime of the asynchronous operation.
  template <typename ConstBufferSequence, typename Handler, typename IoExecutor>
  void async_write_some(implementation_type& impl,
      const ConstBufferSequence& buffers, Handler& handler,
      const IoExecutor& io_ex)
  {
    bool is_continuation =
      asio_handler_cont_helpers::is_continuation(handler);

    associated_cancellation_slot_t<Handler> slot
      = asio::get_associated_cancellation_slot(handler);

    // Allocate and construct an operation to wrap the handler.
    typedef io_uring_descriptor_write_op<
        ConstBufferSequence, Handler, IoExecutor> op;
    typename op::ptr p = { asio::detail::addressof(handler),
      op::ptr::allocate(handler), 0 };
    p.p = new (p.v) op(success_ec_, impl.descriptor_,
        impl.state_, buffers, handler, io_ex);

    // Optionally register for per-operation cancellation.
    if (slot.is_connected())
    {
      p.p->cancellation_key_ =
        &slot.template emplace<io_uring_op_cancellation>(&io_uring_service_,
            &impl.io_object_data_, io_uring_service::write_op);
    }

    ASIO_HANDLER_CREATION((io_uring_service_.context(), *p.p,
          "descriptor", &impl, impl.descriptor_, "async_write_some"));

    // Start the operation; an all-empty buffer sequence completes
    // immediately as a no-op. Ownership of the op passes to the service,
    // so the smart pointer is cleared to prevent deallocation here.
    start_op(impl, io_uring_service::write_op, p.p, is_continuation,
        buffer_sequence_adapter<asio::const_buffer,
          ConstBufferSequence>::all_empty(buffers));
    p.v = p.p = 0;
  }
  // Start an asynchronous wait until data can be written without blocking.
  template <typename Handler, typename IoExecutor>
  void async_write_some(implementation_type& impl,
      const null_buffers&, Handler& handler, const IoExecutor& io_ex)
  {
    bool is_continuation =
      asio_handler_cont_helpers::is_continuation(handler);

    associated_cancellation_slot_t<Handler> slot
      = asio::get_associated_cancellation_slot(handler);

    // Allocate and construct an operation to wrap the handler. A
    // null_buffers operation polls for writability (POLLOUT) and
    // transfers no data.
    typedef io_uring_null_buffers_op<Handler, IoExecutor> op;
    typename op::ptr p = { asio::detail::addressof(handler),
      op::ptr::allocate(handler), 0 };
    p.p = new (p.v) op(success_ec_, impl.descriptor_, POLLOUT, handler, io_ex);

    // Optionally register for per-operation cancellation.
    if (slot.is_connected())
    {
      p.p->cancellation_key_ =
        &slot.template emplace<io_uring_op_cancellation>(&io_uring_service_,
            &impl.io_object_data_, io_uring_service::write_op);
    }

    ASIO_HANDLER_CREATION((io_uring_service_.context(),
          *p.p, "descriptor", &impl, impl.descriptor_,
          "async_write_some(null_buffers)"));

    // Start the operation. Ownership of the op passes to the service.
    start_op(impl, io_uring_service::write_op, p.p, is_continuation, false);
    p.v = p.p = 0;
  }
  309. // Write some data to the descriptor at the specified offset.
  310. template <typename ConstBufferSequence>
  311. size_t write_some_at(implementation_type& impl, uint64_t offset,
  312. const ConstBufferSequence& buffers, asio::error_code& ec)
  313. {
  314. typedef buffer_sequence_adapter<asio::const_buffer,
  315. ConstBufferSequence> bufs_type;
  316. size_t n;
  317. if (bufs_type::is_single_buffer)
  318. {
  319. n = descriptor_ops::sync_write_at1(impl.descriptor_,
  320. impl.state_, offset, bufs_type::first(buffers).data(),
  321. bufs_type::first(buffers).size(), ec);
  322. }
  323. else
  324. {
  325. bufs_type bufs(buffers);
  326. n = descriptor_ops::sync_write_at(impl.descriptor_, impl.state_,
  327. offset, bufs.buffers(), bufs.count(), bufs.all_empty(), ec);
  328. }
  329. ASIO_ERROR_LOCATION(ec);
  330. return n;
  331. }
  // Wait until data can be written without blocking. The offset is ignored
  // for a null_buffers wait, which transfers no data.
  size_t write_some_at(implementation_type& impl, uint64_t,
      const null_buffers& buffers, asio::error_code& ec)
  {
    return write_some(impl, buffers, ec);
  }
  // Start an asynchronous write at the specified offset. The data being sent
  // must be valid for the lifetime of the asynchronous operation.
  template <typename ConstBufferSequence, typename Handler, typename IoExecutor>
  void async_write_some_at(implementation_type& impl, uint64_t offset,
      const ConstBufferSequence& buffers, Handler& handler,
      const IoExecutor& io_ex)
  {
    bool is_continuation =
      asio_handler_cont_helpers::is_continuation(handler);

    associated_cancellation_slot_t<Handler> slot
      = asio::get_associated_cancellation_slot(handler);

    // Allocate and construct an operation to wrap the handler.
    typedef io_uring_descriptor_write_at_op<
        ConstBufferSequence, Handler, IoExecutor> op;
    typename op::ptr p = { asio::detail::addressof(handler),
      op::ptr::allocate(handler), 0 };
    p.p = new (p.v) op(success_ec_, impl.descriptor_,
        impl.state_, offset, buffers, handler, io_ex);

    // Optionally register for per-operation cancellation.
    if (slot.is_connected())
    {
      p.p->cancellation_key_ =
        &slot.template emplace<io_uring_op_cancellation>(&io_uring_service_,
            &impl.io_object_data_, io_uring_service::write_op);
    }

    ASIO_HANDLER_CREATION((io_uring_service_.context(), *p.p,
          "descriptor", &impl, impl.descriptor_, "async_write_some"));

    // Start the operation; an all-empty buffer sequence completes
    // immediately as a no-op. Ownership of the op passes to the service.
    start_op(impl, io_uring_service::write_op, p.p, is_continuation,
        buffer_sequence_adapter<asio::const_buffer,
          ConstBufferSequence>::all_empty(buffers));
    p.v = p.p = 0;
  }
  // Start an asynchronous wait until data can be written without blocking.
  // NOTE(review): unlike async_read_some_at below, this overload takes no
  // offset argument — presumably the front-end caller never supplies one on
  // the null_buffers write path; confirm against the descriptor front-end
  // before changing the signature.
  template <typename Handler, typename IoExecutor>
  void async_write_some_at(implementation_type& impl,
      const null_buffers& buffers, Handler& handler, const IoExecutor& io_ex)
  {
    return async_write_some(impl, buffers, handler, io_ex);
  }
  377. // Read some data from the stream. Returns the number of bytes read.
  378. template <typename MutableBufferSequence>
  379. size_t read_some(implementation_type& impl,
  380. const MutableBufferSequence& buffers, asio::error_code& ec)
  381. {
  382. typedef buffer_sequence_adapter<asio::mutable_buffer,
  383. MutableBufferSequence> bufs_type;
  384. size_t n;
  385. if (bufs_type::is_single_buffer)
  386. {
  387. n = descriptor_ops::sync_read1(impl.descriptor_,
  388. impl.state_, bufs_type::first(buffers).data(),
  389. bufs_type::first(buffers).size(), ec);
  390. }
  391. else
  392. {
  393. bufs_type bufs(buffers);
  394. n = descriptor_ops::sync_read(impl.descriptor_, impl.state_,
  395. bufs.buffers(), bufs.count(), bufs.all_empty(), ec);
  396. }
  397. ASIO_ERROR_LOCATION(ec);
  398. return n;
  399. }
  // Wait until data can be read without blocking.
  size_t read_some(implementation_type& impl,
      const null_buffers&, asio::error_code& ec)
  {
    // Wait for descriptor to become ready.
    descriptor_ops::poll_read(impl.descriptor_, impl.state_, ec);
    ASIO_ERROR_LOCATION(ec);

    // A null_buffers "read" transfers no data.
    return 0;
  }
  // Start an asynchronous read. The buffer for the data being read must be
  // valid for the lifetime of the asynchronous operation.
  template <typename MutableBufferSequence,
      typename Handler, typename IoExecutor>
  void async_read_some(implementation_type& impl,
      const MutableBufferSequence& buffers,
      Handler& handler, const IoExecutor& io_ex)
  {
    bool is_continuation =
      asio_handler_cont_helpers::is_continuation(handler);

    associated_cancellation_slot_t<Handler> slot
      = asio::get_associated_cancellation_slot(handler);

    // Allocate and construct an operation to wrap the handler.
    typedef io_uring_descriptor_read_op<
        MutableBufferSequence, Handler, IoExecutor> op;
    typename op::ptr p = { asio::detail::addressof(handler),
      op::ptr::allocate(handler), 0 };
    p.p = new (p.v) op(success_ec_, impl.descriptor_,
        impl.state_, buffers, handler, io_ex);

    // Optionally register for per-operation cancellation.
    if (slot.is_connected())
    {
      p.p->cancellation_key_ =
        &slot.template emplace<io_uring_op_cancellation>(&io_uring_service_,
            &impl.io_object_data_, io_uring_service::read_op);
    }

    ASIO_HANDLER_CREATION((io_uring_service_.context(), *p.p,
          "descriptor", &impl, impl.descriptor_, "async_read_some"));

    // Start the operation; an all-empty buffer sequence completes
    // immediately as a no-op. Ownership of the op passes to the service.
    start_op(impl, io_uring_service::read_op, p.p, is_continuation,
        buffer_sequence_adapter<asio::mutable_buffer,
          MutableBufferSequence>::all_empty(buffers));
    p.v = p.p = 0;
  }
  // Wait until data can be read without blocking.
  template <typename Handler, typename IoExecutor>
  void async_read_some(implementation_type& impl,
      const null_buffers&, Handler& handler, const IoExecutor& io_ex)
  {
    bool is_continuation =
      asio_handler_cont_helpers::is_continuation(handler);

    associated_cancellation_slot_t<Handler> slot
      = asio::get_associated_cancellation_slot(handler);

    // Allocate and construct an operation to wrap the handler. A
    // null_buffers operation polls for readability (POLLIN) and transfers
    // no data.
    typedef io_uring_null_buffers_op<Handler, IoExecutor> op;
    typename op::ptr p = { asio::detail::addressof(handler),
      op::ptr::allocate(handler), 0 };
    p.p = new (p.v) op(success_ec_, impl.descriptor_, POLLIN, handler, io_ex);

    // Optionally register for per-operation cancellation.
    if (slot.is_connected())
    {
      p.p->cancellation_key_ =
        &slot.template emplace<io_uring_op_cancellation>(&io_uring_service_,
            &impl.io_object_data_, io_uring_service::read_op);
    }

    ASIO_HANDLER_CREATION((io_uring_service_.context(),
          *p.p, "descriptor", &impl, impl.descriptor_,
          "async_read_some(null_buffers)"));

    // Start the operation. Ownership of the op passes to the service.
    start_op(impl, io_uring_service::read_op, p.p, is_continuation, false);
    p.v = p.p = 0;
  }
  469. // Read some data at the specified offset. Returns the number of bytes read.
  470. template <typename MutableBufferSequence>
  471. size_t read_some_at(implementation_type& impl, uint64_t offset,
  472. const MutableBufferSequence& buffers, asio::error_code& ec)
  473. {
  474. typedef buffer_sequence_adapter<asio::mutable_buffer,
  475. MutableBufferSequence> bufs_type;
  476. if (bufs_type::is_single_buffer)
  477. {
  478. return descriptor_ops::sync_read_at1(impl.descriptor_,
  479. impl.state_, offset, bufs_type::first(buffers).data(),
  480. bufs_type::first(buffers).size(), ec);
  481. }
  482. else
  483. {
  484. bufs_type bufs(buffers);
  485. return descriptor_ops::sync_read_at(impl.descriptor_, impl.state_,
  486. offset, bufs.buffers(), bufs.count(), bufs.all_empty(), ec);
  487. }
  488. }
  // Wait until data can be read without blocking. The offset is ignored
  // for a null_buffers wait, which transfers no data.
  size_t read_some_at(implementation_type& impl, uint64_t,
      const null_buffers& buffers, asio::error_code& ec)
  {
    return read_some(impl, buffers, ec);
  }
  // Start an asynchronous read at the specified offset. The buffer for the
  // data being read must be valid for the lifetime of the asynchronous
  // operation.
  template <typename MutableBufferSequence,
      typename Handler, typename IoExecutor>
  void async_read_some_at(implementation_type& impl,
      uint64_t offset, const MutableBufferSequence& buffers,
      Handler& handler, const IoExecutor& io_ex)
  {
    bool is_continuation =
      asio_handler_cont_helpers::is_continuation(handler);

    associated_cancellation_slot_t<Handler> slot
      = asio::get_associated_cancellation_slot(handler);

    // Allocate and construct an operation to wrap the handler.
    typedef io_uring_descriptor_read_at_op<
        MutableBufferSequence, Handler, IoExecutor> op;
    typename op::ptr p = { asio::detail::addressof(handler),
      op::ptr::allocate(handler), 0 };
    p.p = new (p.v) op(success_ec_, impl.descriptor_,
        impl.state_, offset, buffers, handler, io_ex);

    // Optionally register for per-operation cancellation.
    if (slot.is_connected())
    {
      p.p->cancellation_key_ =
        &slot.template emplace<io_uring_op_cancellation>(&io_uring_service_,
            &impl.io_object_data_, io_uring_service::read_op);
    }

    ASIO_HANDLER_CREATION((io_uring_service_.context(), *p.p,
          "descriptor", &impl, impl.descriptor_, "async_read_some"));

    // Start the operation; an all-empty buffer sequence completes
    // immediately as a no-op. Ownership of the op passes to the service.
    start_op(impl, io_uring_service::read_op, p.p, is_continuation,
        buffer_sequence_adapter<asio::mutable_buffer,
          MutableBufferSequence>::all_empty(buffers));
    p.v = p.p = 0;
  }
  // Wait until data can be read without blocking. The offset is ignored
  // for a null_buffers wait.
  template <typename Handler, typename IoExecutor>
  void async_read_some_at(implementation_type& impl, uint64_t,
      const null_buffers& buffers, Handler& handler, const IoExecutor& io_ex)
  {
    return async_read_some(impl, buffers, handler, io_ex);
  }
private:
  // Start the asynchronous operation.
  ASIO_DECL void start_op(implementation_type& impl, int op_type,
      io_uring_operation* op, bool is_continuation, bool noop);

  // Helper class used to implement per-operation cancellation. An instance
  // is emplaced into the handler's cancellation slot and acts as the key
  // identifying the operation to cancel.
  class io_uring_op_cancellation
  {
  public:
    io_uring_op_cancellation(io_uring_service* s,
        io_uring_service::per_io_object_data* p, int o)
      : io_uring_service_(s),
        io_object_data_(p),
        op_type_(o)
    {
    }

    // Invoked by the cancellation slot. Only terminal, partial and total
    // cancellation types are honoured; any other type is ignored.
    void operator()(cancellation_type_t type)
    {
      if (!!(type &
            (cancellation_type::terminal
              | cancellation_type::partial
              | cancellation_type::total)))
      {
        io_uring_service_->cancel_ops_by_key(*io_object_data_, op_type_, this);
      }
    }

  private:
    // The service that owns the operation queues.
    io_uring_service* io_uring_service_;

    // Identifies the I/O object whose operation is to be cancelled.
    io_uring_service::per_io_object_data* io_object_data_;

    // The operation queue (read/write/except) the cancellation applies to.
    int op_type_;
  };

  // The io_uring_service that performs event demultiplexing for the service.
  io_uring_service& io_uring_service_;

  // Cached success value to avoid accessing category singleton.
  const asio::error_code success_ec_;
};
  570. } // namespace detail
  571. } // namespace asio
  572. #include "asio/detail/pop_options.hpp"
  573. #if defined(ASIO_HEADER_ONLY)
  574. # include "asio/detail/impl/io_uring_descriptor_service.ipp"
  575. #endif // defined(ASIO_HEADER_ONLY)
  576. #endif // defined(ASIO_HAS_IO_URING)
  577. #endif // ASIO_DETAIL_IO_URING_DESCRIPTOR_SERVICE_HPP