1 // SPDX-License-Identifier: GPL-2.0
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
6 * A note on the read/write ordering memory barriers that are matched between
7 * the application and kernel side.
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqring (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
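 *
 * As a purely illustrative sketch of that application-side sequence
 * (hand-written here, with invented names rather than code taken from
 * liburing), consuming CQ entries might look roughly like:
 *
 *	head = *cq->khead;
 *	tail = smp_load_acquire(cq->ktail);
 *	while (head != tail) {
 *		cqe = &cq->cqes[head & *cq->kring_mask];
 *		handle_cqe(cqe);
 *		head++;
 *	}
 *	smp_store_release(cq->khead, head);
 *
 * The acquire load of the tail pairs with the kernel's tail update, and
 * the release store of the head pairs with the head read in
 * io_get_cqring().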
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
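 *
 * A rough sketch of that submission-side sequence (illustrative only,
 * again with invented names rather than code taken from liburing):
 *
 *	smp_store_release(sq->ktail, local_tail);
 *	smp_mb();
 *	if (READ_ONCE(*sq->kflags) & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP, NULL);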
30 * Also see the examples in the liburing library:
32 * git://git.kernel.dk/liburing
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes and to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
39 * Copyright (C) 2018-2019 Jens Axboe
40 * Copyright (c) 2018-2019 Christoph Hellwig
42 #include <linux/kernel.h>
43 #include <linux/init.h>
44 #include <linux/errno.h>
45 #include <linux/syscalls.h>
46 #include <linux/compat.h>
47 #include <linux/refcount.h>
48 #include <linux/uio.h>
50 #include <linux/sched/signal.h>
52 #include <linux/file.h>
53 #include <linux/fdtable.h>
55 #include <linux/mman.h>
56 #include <linux/mmu_context.h>
57 #include <linux/percpu.h>
58 #include <linux/slab.h>
59 #include <linux/workqueue.h>
60 #include <linux/kthread.h>
61 #include <linux/blkdev.h>
62 #include <linux/bvec.h>
63 #include <linux/net.h>
65 #include <net/af_unix.h>
67 #include <linux/anon_inodes.h>
68 #include <linux/sched/mm.h>
69 #include <linux/uaccess.h>
70 #include <linux/nospec.h>
71 #include <linux/sizes.h>
72 #include <linux/hugetlb.h>
74 #include <uapi/linux/io_uring.h>
78 #define IORING_MAX_ENTRIES 32768
79 #define IORING_MAX_FIXED_FILES 1024
82 u32 head ____cacheline_aligned_in_smp;
83 u32 tail ____cacheline_aligned_in_smp;
87 * This data is shared with the application through the mmap at offsets
88 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
90 * The offsets to the member fields are published through struct
91 * io_sqring_offsets when calling io_uring_setup.
95 * Head and tail offsets into the ring; the offsets need to be
96 * masked to get valid indices.
98 * The kernel controls head of the sq ring and the tail of the cq ring,
99 * and the application controls tail of the sq ring and the head of the
102 struct io_uring sq, cq;
104 * Bitmasks to apply to head and tail offsets (constant, equals
107 u32 sq_ring_mask, cq_ring_mask;
108 /* Ring sizes (constant, power of 2) */
109 u32 sq_ring_entries, cq_ring_entries;
111 * Number of invalid entries dropped by the kernel due to
112 * invalid index stored in array
114 * Written by the kernel, shouldn't be modified by the
115 * application (i.e. get number of "new events" by comparing to
118 * After a new SQ head value was read by the application this
119 * counter includes all submissions that were dropped reaching
120 * the new SQ head (and possibly more).
126 * Written by the kernel, shouldn't be modified by the
129 * The application needs a full memory barrier before checking
130 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
134 * Number of completion events lost because the queue was full;
135 * this should be avoided by the application by making sure
136 * there are not more requests pending than there is space in
137 * the completion queue.
139 * Written by the kernel, shouldn't be modified by the
140 * application (i.e. get number of "new events" by comparing to
143 * As completion events come in out of order this counter is not
144 * ordered with any other data.
148 * Ring buffer of completion events.
150 * The kernel writes completion events fresh every time they are
151 * produced, so the application is allowed to modify pending
154 struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
157 struct io_mapped_ubuf {
160 struct bio_vec *bvec;
161 unsigned int nr_bvecs;
167 struct list_head list;
176 struct percpu_ref refs;
177 } ____cacheline_aligned_in_smp;
185 * Ring buffer of indices into array of io_uring_sqe, which is
186 * mmapped by the application using the IORING_OFF_SQES offset.
188 * This indirection could e.g. be used to assign fixed
189 * io_uring_sqe entries to operations and only submit them to
190 * the queue when needed.
192 * The kernel modifies neither the indices array nor the entries
196 unsigned cached_sq_head;
199 unsigned sq_thread_idle;
200 struct io_uring_sqe *sq_sqes;
202 struct list_head defer_list;
203 struct list_head timeout_list;
204 } ____cacheline_aligned_in_smp;
207 struct workqueue_struct *sqo_wq[2];
208 struct task_struct *sqo_thread; /* if using sq thread polling */
209 struct mm_struct *sqo_mm;
210 wait_queue_head_t sqo_wait;
211 struct completion sqo_thread_started;
214 unsigned cached_cq_tail;
217 struct wait_queue_head cq_wait;
218 struct fasync_struct *cq_fasync;
219 struct eventfd_ctx *cq_ev_fd;
220 atomic_t cq_timeouts;
221 } ____cacheline_aligned_in_smp;
223 struct io_rings *rings;
226 * If used, fixed file set. Writers must ensure that ->refs is dead,
227 * readers must ensure that ->refs is alive as long as the file* is
228 * used. Only updated through io_uring_register(2).
230 struct file **user_files;
231 unsigned nr_user_files;
233 /* if used, fixed mapped user buffers */
234 unsigned nr_user_bufs;
235 struct io_mapped_ubuf *user_bufs;
237 struct user_struct *user;
239 struct completion ctx_done;
242 struct mutex uring_lock;
243 wait_queue_head_t wait;
244 } ____cacheline_aligned_in_smp;
247 spinlock_t completion_lock;
248 bool poll_multi_file;
250 * ->poll_list is protected by the ctx->uring_lock for
251 * io_uring instances that don't use IORING_SETUP_SQPOLL.
252 * For SQPOLL, only the single threaded io_sq_thread() will
253 * manipulate the list, hence no extra locking is needed there.
255 struct list_head poll_list;
256 struct list_head cancel_list;
257 } ____cacheline_aligned_in_smp;
259 struct async_list pending_async[2];
261 #if defined(CONFIG_UNIX)
262 struct socket *ring_sock;
267 const struct io_uring_sqe *sqe;
268 unsigned short index;
272 bool needs_fixed_file;
276 * First field must be the file pointer in all the
277 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
279 struct io_poll_iocb {
281 struct wait_queue_head *head;
285 struct wait_queue_entry wait;
290 struct hrtimer timer;
294 * NOTE! Each of the iocb union members has the file pointer
295 * as the first entry in their struct definition. So you can
296 * access the file pointer through any of the sub-structs,
297 * or directly as just 'ki_filp' in this struct.
303 struct io_poll_iocb poll;
304 struct io_timeout timeout;
307 struct sqe_submit submit;
309 struct io_ring_ctx *ctx;
310 struct list_head list;
311 struct list_head link_list;
314 #define REQ_F_NOWAIT 1 /* must not punt to workers */
315 #define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */
316 #define REQ_F_FIXED_FILE 4 /* ctx owns file */
317 #define REQ_F_SEQ_PREV 8 /* sequential with previous */
318 #define REQ_F_IO_DRAIN 16 /* drain existing IO first */
319 #define REQ_F_IO_DRAINED 32 /* drain done */
320 #define REQ_F_LINK 64 /* linked sqes */
321 #define REQ_F_LINK_DONE 128 /* linked sqes done */
322 #define REQ_F_FAIL_LINK 256 /* fail rest of links */
323 #define REQ_F_SHADOW_DRAIN 512 /* link-drain shadow req */
324 #define REQ_F_TIMEOUT 1024 /* timeout request */
329 struct work_struct work;
332 #define IO_PLUG_THRESHOLD 2
333 #define IO_IOPOLL_BATCH 8
335 struct io_submit_state {
336 struct blk_plug plug;
339 * io_kiocb alloc cache
341 void *reqs[IO_IOPOLL_BATCH];
342 unsigned int free_reqs;
343 unsigned int cur_req;
346 * File reference cache
350 unsigned int has_refs;
351 unsigned int used_refs;
352 unsigned int ios_left;
355 static void io_sq_wq_submit_work(struct work_struct *work);
356 static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
358 static void __io_free_req(struct io_kiocb *req);
360 static struct kmem_cache *req_cachep;
362 static const struct file_operations io_uring_fops;
364 struct sock *io_uring_get_socket(struct file *file)
366 #if defined(CONFIG_UNIX)
367 if (file->f_op == &io_uring_fops) {
368 struct io_ring_ctx *ctx = file->private_data;
370 return ctx->ring_sock->sk;
375 EXPORT_SYMBOL(io_uring_get_socket);
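/*
 * Called when the final percpu reference to the ctx is dropped; wakes up
 * anyone waiting on ctx_done.
 */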
377 static void io_ring_ctx_ref_free(struct percpu_ref *ref)
379 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
381 complete(&ctx->ctx_done);
384 static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
386 struct io_ring_ctx *ctx;
389 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
393 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
394 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
399 ctx->flags = p->flags;
400 init_waitqueue_head(&ctx->cq_wait);
401 init_completion(&ctx->ctx_done);
402 init_completion(&ctx->sqo_thread_started);
403 mutex_init(&ctx->uring_lock);
404 init_waitqueue_head(&ctx->wait);
405 for (i = 0; i < ARRAY_SIZE(ctx->pending_async); i++) {
406 spin_lock_init(&ctx->pending_async[i].lock);
407 INIT_LIST_HEAD(&ctx->pending_async[i].list);
408 atomic_set(&ctx->pending_async[i].cnt, 0);
410 spin_lock_init(&ctx->completion_lock);
411 INIT_LIST_HEAD(&ctx->poll_list);
412 INIT_LIST_HEAD(&ctx->cancel_list);
413 INIT_LIST_HEAD(&ctx->defer_list);
414 INIT_LIST_HEAD(&ctx->timeout_list);
418 static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
419 struct io_kiocb *req)
421 /* timeout requests always honor sequence */
422 if (!(req->flags & REQ_F_TIMEOUT) &&
423 (req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
426 return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped;
429 static struct io_kiocb *__io_get_deferred_req(struct io_ring_ctx *ctx,
430 struct list_head *list)
432 struct io_kiocb *req;
434 if (list_empty(list))
437 req = list_first_entry(list, struct io_kiocb, list);
438 if (!io_sequence_defer(ctx, req)) {
439 list_del_init(&req->list);
446 static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
448 return __io_get_deferred_req(ctx, &ctx->defer_list);
451 static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
453 return __io_get_deferred_req(ctx, &ctx->timeout_list);
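/*
 * Publish the locally cached CQ tail to the shared ring (release semantics,
 * so the CQE stores are visible first) and wake up any ring pollers.
 */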
456 static void __io_commit_cqring(struct io_ring_ctx *ctx)
458 struct io_rings *rings = ctx->rings;
460 if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) {
461 /* order cqe stores with ring update */
462 smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
464 if (wq_has_sleeper(&ctx->cq_wait)) {
465 wake_up_interruptible(&ctx->cq_wait);
466 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
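/*
 * Punt a request to async context. Buffered (non-O_DIRECT) writes are sent
 * to the second workqueue so they don't stall other async work.
 */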
471 static inline void io_queue_async_work(struct io_ring_ctx *ctx,
472 struct io_kiocb *req)
476 if (req->submit.sqe) {
477 switch (req->submit.sqe->opcode) {
478 case IORING_OP_WRITEV:
479 case IORING_OP_WRITE_FIXED:
480 rw = !(req->rw.ki_flags & IOCB_DIRECT);
485 queue_work(ctx->sqo_wq[rw], &req->work);
488 static void io_kill_timeout(struct io_kiocb *req)
492 ret = hrtimer_try_to_cancel(&req->timeout.timer);
494 atomic_inc(&req->ctx->cq_timeouts);
495 list_del(&req->list);
496 io_cqring_fill_event(req->ctx, req->user_data, 0);
501 static void io_kill_timeouts(struct io_ring_ctx *ctx)
503 struct io_kiocb *req, *tmp;
505 spin_lock_irq(&ctx->completion_lock);
506 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
507 io_kill_timeout(req);
508 spin_unlock_irq(&ctx->completion_lock);
511 static void io_commit_cqring(struct io_ring_ctx *ctx)
513 struct io_kiocb *req;
515 while ((req = io_get_timeout_req(ctx)) != NULL)
516 io_kill_timeout(req);
518 __io_commit_cqring(ctx);
520 while ((req = io_get_deferred_req(ctx)) != NULL) {
521 if (req->flags & REQ_F_SHADOW_DRAIN) {
522 /* Just for drain, free it. */
526 req->flags |= REQ_F_IO_DRAINED;
527 io_queue_async_work(ctx, req);
531 static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
533 struct io_rings *rings = ctx->rings;
536 tail = ctx->cached_cq_tail;
538 * writes to the cq entry need to come after reading head; the
539 * control dependency is enough as we're using WRITE_ONCE to
542 if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
545 ctx->cached_cq_tail++;
546 return &rings->cqes[tail & ctx->cq_mask];
549 static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
552 struct io_uring_cqe *cqe;
555 * If we can't get a cq entry, userspace overflowed the
556 * submission (by quite a lot). Increment the overflow count in
559 cqe = io_get_cqring(ctx);
561 WRITE_ONCE(cqe->user_data, ki_user_data);
562 WRITE_ONCE(cqe->res, res);
563 WRITE_ONCE(cqe->flags, 0);
565 unsigned overflow = READ_ONCE(ctx->rings->cq_overflow);
567 WRITE_ONCE(ctx->rings->cq_overflow, overflow + 1);
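/* Wake up CQ ring waiters, the SQPOLL thread and any registered eventfd. */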
571 static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
573 if (waitqueue_active(&ctx->wait))
575 if (waitqueue_active(&ctx->sqo_wait))
576 wake_up(&ctx->sqo_wait);
578 eventfd_signal(ctx->cq_ev_fd, 1);
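/* Fill and commit a CQE under the completion lock, then notify waiters. */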
581 static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
586 spin_lock_irqsave(&ctx->completion_lock, flags);
587 io_cqring_fill_event(ctx, user_data, res);
588 io_commit_cqring(ctx);
589 spin_unlock_irqrestore(&ctx->completion_lock, flags);
591 io_cqring_ev_posted(ctx);
594 static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
596 percpu_ref_put_many(&ctx->refs, refs);
598 if (waitqueue_active(&ctx->wait))
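/*
 * Allocate a request. When a submit state is given, bulk allocate a small
 * batch and hand requests out of that cache; otherwise fall back to a
 * plain kmem_cache allocation.
 */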
602 static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
603 struct io_submit_state *state)
605 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
606 struct io_kiocb *req;
608 if (!percpu_ref_tryget(&ctx->refs))
612 req = kmem_cache_alloc(req_cachep, gfp);
615 } else if (!state->free_reqs) {
619 sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
620 ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
623 * Bulk alloc is all-or-nothing. If we fail to get a batch,
624 * retry single alloc to be on the safe side.
626 if (unlikely(ret <= 0)) {
627 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
632 state->free_reqs = ret - 1;
634 req = state->reqs[0];
636 req = state->reqs[state->cur_req];
644 /* one is dropped after submission, the other at completion */
645 refcount_set(&req->refs, 2);
649 io_ring_drop_ctx_refs(ctx, 1);
653 static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
656 kmem_cache_free_bulk(req_cachep, *nr, reqs);
657 io_ring_drop_ctx_refs(ctx, *nr);
662 static void __io_free_req(struct io_kiocb *req)
664 if (req->file && !(req->flags & REQ_F_FIXED_FILE))
666 io_ring_drop_ctx_refs(req->ctx, 1);
667 kmem_cache_free(req_cachep, req);
670 static void io_req_link_next(struct io_kiocb *req)
672 struct io_kiocb *nxt;
675 * The list should never be empty when we are called here. But it could
676 * potentially happen if the chain is messed up, so check to be on the
679 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
681 list_del(&nxt->list);
682 if (!list_empty(&req->link_list)) {
683 INIT_LIST_HEAD(&nxt->link_list);
684 list_splice(&req->link_list, &nxt->link_list);
685 nxt->flags |= REQ_F_LINK;
688 nxt->flags |= REQ_F_LINK_DONE;
689 INIT_WORK(&nxt->work, io_sq_wq_submit_work);
690 io_queue_async_work(req->ctx, nxt);
695 * Called if REQ_F_LINK is set, and we fail the head request
697 static void io_fail_links(struct io_kiocb *req)
699 struct io_kiocb *link;
701 while (!list_empty(&req->link_list)) {
702 link = list_first_entry(&req->link_list, struct io_kiocb, list);
703 list_del(&link->list);
705 io_cqring_add_event(req->ctx, link->user_data, -ECANCELED);
710 static void io_free_req(struct io_kiocb *req)
713 * If LINK is set, we have dependent requests in this chain. If we
714 * didn't fail this request, queue the first one up, moving any other
715 * dependencies to the next request. In case of failure, fail the rest
718 if (req->flags & REQ_F_LINK) {
719 if (req->flags & REQ_F_FAIL_LINK)
722 io_req_link_next(req);
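/* Drop a reference to the request; the last reference frees it. */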
728 static void io_put_req(struct io_kiocb *req)
730 if (refcount_dec_and_test(&req->refs))
734 static unsigned io_cqring_events(struct io_rings *rings)
736 /* See comment at the top of this file */
738 return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
742 * Find and free completed poll iocbs
744 static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
745 struct list_head *done)
747 void *reqs[IO_IOPOLL_BATCH];
748 struct io_kiocb *req;
752 while (!list_empty(done)) {
753 req = list_first_entry(done, struct io_kiocb, list);
754 list_del(&req->list);
756 io_cqring_fill_event(ctx, req->user_data, req->result);
759 if (refcount_dec_and_test(&req->refs)) {
760 /* If we're not using fixed files, we have to pair the
761 * completion part with the file put. Use regular
762 * completions for those, only batch free for fixed
763 * file and non-linked commands.
765 if ((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
767 reqs[to_free++] = req;
768 if (to_free == ARRAY_SIZE(reqs))
769 io_free_req_many(ctx, reqs, &to_free);
776 io_commit_cqring(ctx);
777 io_free_req_many(ctx, reqs, &to_free);
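/*
 * One pass over ->poll_list: move already-completed requests to a local
 * done list, poll the rest via ->iopoll(), then post CQEs for everything
 * that finished.
 */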
780 static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
783 struct io_kiocb *req, *tmp;
789 * Only spin for completions if we don't have multiple devices hanging
790 * off our complete list, and we're under the requested amount.
792 spin = !ctx->poll_multi_file && *nr_events < min;
795 list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
796 struct kiocb *kiocb = &req->rw;
799 * Move completed entries to our local list. If we find a
800 * request that requires polling, break out and complete
801 * the done list first, if we have entries there.
803 if (req->flags & REQ_F_IOPOLL_COMPLETED) {
804 list_move_tail(&req->list, &done);
807 if (!list_empty(&done))
810 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
819 if (!list_empty(&done))
820 io_iopoll_complete(ctx, nr_events, &done);
826 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
827 * non-spinning poll check - we'll still enter the driver poll loop, but only
828 * as a non-spinning completion check.
830 static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
833 while (!list_empty(&ctx->poll_list) && !need_resched()) {
836 ret = io_do_iopoll(ctx, nr_events, min);
839 if (!min || *nr_events >= min)
847 * We can't just wait for polled events to come to us, we have to actively
848 * find and complete them.
850 static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
852 if (!(ctx->flags & IORING_SETUP_IOPOLL))
855 mutex_lock(&ctx->uring_lock);
856 while (!list_empty(&ctx->poll_list)) {
857 unsigned int nr_events = 0;
859 io_iopoll_getevents(ctx, &nr_events, 1);
862 * Ensure we allow local-to-the-cpu processing to take place,
863 * in this case we need to ensure that we reap all events.
867 mutex_unlock(&ctx->uring_lock);
870 static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
876 * We disallow the app entering submit/complete with polling, but we
877 * still need to lock the ring to prevent racing with polled issue
878 * that got punted to a workqueue.
880 mutex_lock(&ctx->uring_lock);
887 * Don't enter poll loop if we already have events pending.
888 * If we do, we can potentially be spinning for commands that
889 * already triggered a CQE (eg in error).
891 if (io_cqring_events(ctx->rings))
895 * If a submit got punted to a workqueue, we can have the
896 * application entering polling for a command before it gets
897 * issued. That app will hold the uring_lock for the duration
898 * of the poll right here, so we need to take a breather every
899 * now and then to ensure that the issue has a chance to add
900 * the poll to the issued list. Otherwise we can spin here
901 * forever, while the workqueue is stuck trying to acquire the
904 if (!(++iters & 7)) {
905 mutex_unlock(&ctx->uring_lock);
906 mutex_lock(&ctx->uring_lock);
909 if (*nr_events < min)
910 tmin = min - *nr_events;
912 ret = io_iopoll_getevents(ctx, nr_events, tmin);
916 } while (min && !*nr_events && !need_resched());
918 mutex_unlock(&ctx->uring_lock);
922 static void kiocb_end_write(struct kiocb *kiocb)
924 if (kiocb->ki_flags & IOCB_WRITE) {
925 struct inode *inode = file_inode(kiocb->ki_filp);
928 * Tell lockdep we inherited freeze protection from submission
931 if (S_ISREG(inode->i_mode))
932 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
933 file_end_write(kiocb->ki_filp);
937 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
939 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
941 kiocb_end_write(kiocb);
943 if ((req->flags & REQ_F_LINK) && res != req->result)
944 req->flags |= REQ_F_FAIL_LINK;
945 io_cqring_add_event(req->ctx, req->user_data, res);
949 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
951 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
953 kiocb_end_write(kiocb);
955 if ((req->flags & REQ_F_LINK) && res != req->result)
956 req->flags |= REQ_F_FAIL_LINK;
959 req->flags |= REQ_F_IOPOLL_COMPLETED;
963 * After the iocb has been issued, it's safe to be found on the poll list.
964 * Adding the kiocb to the list AFTER submission ensures that we don't
965 * find it from a io_iopoll_getevents() thread before the issuer is done
966 * accessing the kiocb cookie.
968 static void io_iopoll_req_issued(struct io_kiocb *req)
970 struct io_ring_ctx *ctx = req->ctx;
973 * Track whether we have multiple files in our lists. This will impact
974 * how we do polling eventually, not spinning if we're on potentially
977 if (list_empty(&ctx->poll_list)) {
978 ctx->poll_multi_file = false;
979 } else if (!ctx->poll_multi_file) {
980 struct io_kiocb *list_req;
982 list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
984 if (list_req->rw.ki_filp != req->rw.ki_filp)
985 ctx->poll_multi_file = true;
989 * For fast devices, IO may have already completed. If it has, add
990 * it to the front so we find it first.
992 if (req->flags & REQ_F_IOPOLL_COMPLETED)
993 list_add(&req->list, &ctx->poll_list);
995 list_add_tail(&req->list, &ctx->poll_list);
998 static void io_file_put(struct io_submit_state *state)
1001 int diff = state->has_refs - state->used_refs;
1004 fput_many(state->file, diff);
1010 * Get as many references to a file as we have IOs left in this submission,
1011 * assuming most submissions are for one file, or at least that each file
1012 * has more than one submission.
1014 static struct file *io_file_get(struct io_submit_state *state, int fd)
1020 if (state->fd == fd) {
1027 state->file = fget_many(fd, state->ios_left);
1032 state->has_refs = state->ios_left;
1033 state->used_refs = 1;
1039 * If we tracked the file through the SCM inflight mechanism, we could support
1040 * any file. For now, just ensure that anything potentially problematic is done
1043 static bool io_file_supports_async(struct file *file)
1045 umode_t mode = file_inode(file)->i_mode;
1047 if (S_ISBLK(mode) || S_ISCHR(mode))
1049 if (S_ISREG(mode) && file->f_op != &io_uring_fops)
1055 static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
1056 bool force_nonblock)
1058 const struct io_uring_sqe *sqe = s->sqe;
1059 struct io_ring_ctx *ctx = req->ctx;
1060 struct kiocb *kiocb = &req->rw;
1067 if (force_nonblock && !io_file_supports_async(req->file))
1068 force_nonblock = false;
1070 kiocb->ki_pos = READ_ONCE(sqe->off);
1071 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
1072 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
1074 ioprio = READ_ONCE(sqe->ioprio);
1076 ret = ioprio_check_cap(ioprio);
1080 kiocb->ki_ioprio = ioprio;
1082 kiocb->ki_ioprio = get_current_ioprio();
1084 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
1088 /* don't allow async punt if RWF_NOWAIT was requested */
1089 if (kiocb->ki_flags & IOCB_NOWAIT)
1090 req->flags |= REQ_F_NOWAIT;
1093 kiocb->ki_flags |= IOCB_NOWAIT;
1095 if (ctx->flags & IORING_SETUP_IOPOLL) {
1096 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
1097 !kiocb->ki_filp->f_op->iopoll)
1100 kiocb->ki_flags |= IOCB_HIPRI;
1101 kiocb->ki_complete = io_complete_rw_iopoll;
1103 if (kiocb->ki_flags & IOCB_HIPRI)
1105 kiocb->ki_complete = io_complete_rw;
1110 static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
1116 case -ERESTARTNOINTR:
1117 case -ERESTARTNOHAND:
1118 case -ERESTART_RESTARTBLOCK:
1120 * We can't just restart the syscall, since previously
1121 * submitted sqes may already be in progress. Just fail this
1127 kiocb->ki_complete(kiocb, ret, 0);
1131 static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
1132 const struct io_uring_sqe *sqe,
1133 struct iov_iter *iter)
1135 size_t len = READ_ONCE(sqe->len);
1136 struct io_mapped_ubuf *imu;
1137 unsigned index, buf_index;
1141 /* attempt to use fixed buffers without having provided iovecs */
1142 if (unlikely(!ctx->user_bufs))
1145 buf_index = READ_ONCE(sqe->buf_index);
1146 if (unlikely(buf_index >= ctx->nr_user_bufs))
1149 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
1150 imu = &ctx->user_bufs[index];
1151 buf_addr = READ_ONCE(sqe->addr);
1154 if (buf_addr + len < buf_addr)
1156 /* not inside the mapped region */
1157 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
1161 * May not be the start of the buffer; set the size appropriately
1162 * and advance us to the beginning.
1164 offset = buf_addr - imu->ubuf;
1165 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
1169 * Don't use iov_iter_advance() here, as it's really slow for
1170 * using the latter parts of a big fixed buffer - it iterates
1171 * over each segment manually. We can cheat a bit here, because
1174 * 1) it's a BVEC iter, we set it up
1175 * 2) all bvecs are PAGE_SIZE in size, except potentially the
1176 * first and last bvec
1178 * So just find our index, and adjust the iterator afterwards.
1179 * If the offset is within the first bvec (or the whole first
1180 * bvec), just use iov_iter_advance(). This makes it easier
1181 * since we can just skip the first segment, which may not
1182 * be PAGE_SIZE aligned.
1184 const struct bio_vec *bvec = imu->bvec;
1186 if (offset <= bvec->bv_len) {
1187 iov_iter_advance(iter, offset);
1189 unsigned long seg_skip;
1191 /* skip first vec */
1192 offset -= bvec->bv_len;
1193 seg_skip = 1 + (offset >> PAGE_SHIFT);
1195 iter->bvec = bvec + seg_skip;
1196 iter->nr_segs -= seg_skip;
1197 iter->count -= bvec->bv_len + offset;
1198 iter->iov_offset = offset & ~PAGE_MASK;
1205 static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
1206 const struct sqe_submit *s, struct iovec **iovec,
1207 struct iov_iter *iter)
1209 const struct io_uring_sqe *sqe = s->sqe;
1210 void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1211 size_t sqe_len = READ_ONCE(sqe->len);
1215 * We're reading ->opcode for the second time, but the first read
1216 * doesn't care whether it's _FIXED or not, so it doesn't matter
1217 * whether ->opcode changes concurrently. The first read does care
1218 * about whether it is a READ or a WRITE, so we don't trust this read
1219 * for that purpose and instead let the caller pass in the read/write
1222 opcode = READ_ONCE(sqe->opcode);
1223 if (opcode == IORING_OP_READ_FIXED ||
1224 opcode == IORING_OP_WRITE_FIXED) {
1225 ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
1233 #ifdef CONFIG_COMPAT
1235 return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
1239 return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
1242 static inline bool io_should_merge(struct async_list *al, struct kiocb *kiocb)
1244 if (al->file == kiocb->ki_filp) {
1248 * Allow merging if we're anywhere in the range of the same
1249 * page. Generally this happens for sub-page reads or writes,
1250 * and it's beneficial to allow the first worker to bring the
1251 * page in and the piggybacked work can then work on the
1254 start = al->io_start & PAGE_MASK;
1255 end = (al->io_start + al->io_len + PAGE_SIZE - 1) & PAGE_MASK;
1256 if (kiocb->ki_pos >= start && kiocb->ki_pos <= end)
1265 * Make a note of the last file/offset/direction we punted to async
1267 * context. We'll use this information to see if we can piggyback a
1268 * sequential request onto the previous one, if it still hasn't been
1268 * completed by the async worker.
1270 static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
1272 struct async_list *async_list = &req->ctx->pending_async[rw];
1273 struct kiocb *kiocb = &req->rw;
1274 struct file *filp = kiocb->ki_filp;
1276 if (io_should_merge(async_list, kiocb)) {
1277 unsigned long max_bytes;
1279 /* Use 8x RA size as a decent limiter for both reads/writes */
1280 max_bytes = filp->f_ra.ra_pages << (PAGE_SHIFT + 3);
1282 max_bytes = VM_READAHEAD_PAGES << (PAGE_SHIFT + 3);
1284 /* If the max length is exceeded, reset the state */
1285 if (async_list->io_len + len <= max_bytes) {
1286 req->flags |= REQ_F_SEQ_PREV;
1287 async_list->io_len += len;
1289 async_list->file = NULL;
1293 /* New file? Reset state. */
1294 if (async_list->file != filp) {
1295 async_list->io_start = kiocb->ki_pos;
1296 async_list->io_len = len;
1297 async_list->file = filp;
1302 * For files that don't have ->read_iter() and ->write_iter(), handle them
1303 * by looping over ->read() or ->write() manually.
1305 static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
1306 struct iov_iter *iter)
1311 * Don't support polled IO through this interface, and we can't
1312 * support non-blocking either. For the latter, this just causes
1313 * the kiocb to be handled from an async context.
1315 if (kiocb->ki_flags & IOCB_HIPRI)
1317 if (kiocb->ki_flags & IOCB_NOWAIT)
1320 while (iov_iter_count(iter)) {
1321 struct iovec iovec = iov_iter_iovec(iter);
1325 nr = file->f_op->read(file, iovec.iov_base,
1326 iovec.iov_len, &kiocb->ki_pos);
1328 nr = file->f_op->write(file, iovec.iov_base,
1329 iovec.iov_len, &kiocb->ki_pos);
1338 if (nr != iovec.iov_len)
1340 iov_iter_advance(iter, nr);
1346 static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
1347 bool force_nonblock)
1349 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1350 struct kiocb *kiocb = &req->rw;
1351 struct iov_iter iter;
1354 ssize_t read_size, ret;
1356 ret = io_prep_rw(req, s, force_nonblock);
1359 file = kiocb->ki_filp;
1361 if (unlikely(!(file->f_mode & FMODE_READ)))
1364 ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
1369 if (req->flags & REQ_F_LINK)
1370 req->result = read_size;
1372 iov_count = iov_iter_count(&iter);
1373 ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
1377 if (file->f_op->read_iter)
1378 ret2 = call_read_iter(file, kiocb, &iter);
1380 ret2 = loop_rw_iter(READ, file, kiocb, &iter);
1383 * In case of a short read, punt to async. This can happen
1384 * if we have data partially cached. Alternatively we can
1385 * return the short read, in which case the application will
1386 * need to issue another SQE and wait for it. That SQE will
1387 * need async punt anyway, so it's more efficient to do it
1390 if (force_nonblock && ret2 > 0 && ret2 < read_size)
1392 /* Catch -EAGAIN return for forced non-blocking submission */
1393 if (!force_nonblock || ret2 != -EAGAIN) {
1394 io_rw_done(kiocb, ret2);
1397 * If ->needs_lock is true, we're already in async
1401 io_async_list_note(READ, req, iov_count);
1409 static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
1410 bool force_nonblock)
1412 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1413 struct kiocb *kiocb = &req->rw;
1414 struct iov_iter iter;
1419 ret = io_prep_rw(req, s, force_nonblock);
1423 file = kiocb->ki_filp;
1424 if (unlikely(!(file->f_mode & FMODE_WRITE)))
1427 ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
1431 if (req->flags & REQ_F_LINK)
1434 iov_count = iov_iter_count(&iter);
1437 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) {
1438 /* If ->needs_lock is true, we're already in async context. */
1440 io_async_list_note(WRITE, req, iov_count);
1444 ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
1449 * Open-code file_start_write here to grab freeze protection,
1450 * which will be released by another thread in
1451 * io_complete_rw(). Fool lockdep by telling it the lock got
1452 * released so that it doesn't complain about the held lock when
1453 * we return to userspace.
1455 if (S_ISREG(file_inode(file)->i_mode)) {
1456 __sb_start_write(file_inode(file)->i_sb,
1457 SB_FREEZE_WRITE, true);
1458 __sb_writers_release(file_inode(file)->i_sb,
1461 kiocb->ki_flags |= IOCB_WRITE;
1463 if (file->f_op->write_iter)
1464 ret2 = call_write_iter(file, kiocb, &iter);
1466 ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
1467 if (!force_nonblock || ret2 != -EAGAIN) {
1468 io_rw_done(kiocb, ret2);
1471 * If ->needs_lock is true, we're already in async
1475 io_async_list_note(WRITE, req, iov_count);
1485 * IORING_OP_NOP just posts a completion event, nothing else.
1487 static int io_nop(struct io_kiocb *req, u64 user_data)
1489 struct io_ring_ctx *ctx = req->ctx;
1492 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1495 io_cqring_add_event(ctx, user_data, err);
1500 static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1502 struct io_ring_ctx *ctx = req->ctx;
1507 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1509 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
1515 static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1516 bool force_nonblock)
1518 loff_t sqe_off = READ_ONCE(sqe->off);
1519 loff_t sqe_len = READ_ONCE(sqe->len);
1520 loff_t end = sqe_off + sqe_len;
1521 unsigned fsync_flags;
1524 fsync_flags = READ_ONCE(sqe->fsync_flags);
1525 if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
1528 ret = io_prep_fsync(req, sqe);
1532 /* fsync always requires a blocking context */
1536 ret = vfs_fsync_range(req->rw.ki_filp, sqe_off,
1537 end > 0 ? end : LLONG_MAX,
1538 fsync_flags & IORING_FSYNC_DATASYNC);
1540 if (ret < 0 && (req->flags & REQ_F_LINK))
1541 req->flags |= REQ_F_FAIL_LINK;
1542 io_cqring_add_event(req->ctx, sqe->user_data, ret);
1547 static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1549 struct io_ring_ctx *ctx = req->ctx;
1555 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1557 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
1563 static int io_sync_file_range(struct io_kiocb *req,
1564 const struct io_uring_sqe *sqe,
1565 bool force_nonblock)
1572 ret = io_prep_sfr(req, sqe);
1576 /* sync_file_range always requires a blocking context */
1580 sqe_off = READ_ONCE(sqe->off);
1581 sqe_len = READ_ONCE(sqe->len);
1582 flags = READ_ONCE(sqe->sync_range_flags);
1584 ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags);
1586 if (ret < 0 && (req->flags & REQ_F_LINK))
1587 req->flags |= REQ_F_FAIL_LINK;
1588 io_cqring_add_event(req->ctx, sqe->user_data, ret);
1593 #if defined(CONFIG_NET)
1594 static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1595 bool force_nonblock,
1596 long (*fn)(struct socket *, struct user_msghdr __user *,
1599 struct socket *sock;
1602 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1605 sock = sock_from_file(req->file, &ret);
1607 struct user_msghdr __user *msg;
1610 flags = READ_ONCE(sqe->msg_flags);
1611 if (flags & MSG_DONTWAIT)
1612 req->flags |= REQ_F_NOWAIT;
1613 else if (force_nonblock)
1614 flags |= MSG_DONTWAIT;
1616 msg = (struct user_msghdr __user *) (unsigned long)
1617 READ_ONCE(sqe->addr);
1619 ret = fn(sock, msg, flags);
1620 if (force_nonblock && ret == -EAGAIN)
1624 io_cqring_add_event(req->ctx, sqe->user_data, ret);
1630 static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1631 bool force_nonblock)
1633 #if defined(CONFIG_NET)
1634 return io_send_recvmsg(req, sqe, force_nonblock, __sys_sendmsg_sock);
1640 static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1641 bool force_nonblock)
1643 #if defined(CONFIG_NET)
1644 return io_send_recvmsg(req, sqe, force_nonblock, __sys_recvmsg_sock);
1650 static void io_poll_remove_one(struct io_kiocb *req)
1652 struct io_poll_iocb *poll = &req->poll;
1654 spin_lock(&poll->head->lock);
1655 WRITE_ONCE(poll->canceled, true);
1656 if (!list_empty(&poll->wait.entry)) {
1657 list_del_init(&poll->wait.entry);
1658 io_queue_async_work(req->ctx, req);
1660 spin_unlock(&poll->head->lock);
1662 list_del_init(&req->list);
1665 static void io_poll_remove_all(struct io_ring_ctx *ctx)
1667 struct io_kiocb *req;
1669 spin_lock_irq(&ctx->completion_lock);
1670 while (!list_empty(&ctx->cancel_list)) {
1671 req = list_first_entry(&ctx->cancel_list, struct io_kiocb,list);
1672 io_poll_remove_one(req);
1674 spin_unlock_irq(&ctx->completion_lock);
1678 * Find a running poll command that matches one specified in sqe->addr,
1679 * and remove it if found.
1681 static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1683 struct io_ring_ctx *ctx = req->ctx;
1684 struct io_kiocb *poll_req, *next;
1687 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1689 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
1693 spin_lock_irq(&ctx->completion_lock);
1694 list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) {
1695 if (READ_ONCE(sqe->addr) == poll_req->user_data) {
1696 io_poll_remove_one(poll_req);
1701 spin_unlock_irq(&ctx->completion_lock);
1703 io_cqring_add_event(req->ctx, sqe->user_data, ret);
1708 static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
1711 req->poll.done = true;
1712 io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask));
1713 io_commit_cqring(ctx);
1716 static void io_poll_complete_work(struct work_struct *work)
1718 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1719 struct io_poll_iocb *poll = &req->poll;
1720 struct poll_table_struct pt = { ._key = poll->events };
1721 struct io_ring_ctx *ctx = req->ctx;
1724 if (!READ_ONCE(poll->canceled))
1725 mask = vfs_poll(poll->file, &pt) & poll->events;
1728 * Note that ->ki_cancel callers also delete iocb from active_reqs after
1729 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
1730 * synchronize with them. In the cancellation case the list_del_init
1731 * itself is not actually needed, but harmless so we keep it in to
1732 * avoid further branches in the fast path.
1734 spin_lock_irq(&ctx->completion_lock);
1735 if (!mask && !READ_ONCE(poll->canceled)) {
1736 add_wait_queue(poll->head, &poll->wait);
1737 spin_unlock_irq(&ctx->completion_lock);
1740 list_del_init(&req->list);
1741 io_poll_complete(ctx, req, mask);
1742 spin_unlock_irq(&ctx->completion_lock);
1744 io_cqring_ev_posted(ctx);
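/*
 * Waitqueue callback for a pending poll request: complete it inline if we
 * can take the completion lock, otherwise punt completion to async context.
 */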
1748 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1751 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
1753 struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
1754 struct io_ring_ctx *ctx = req->ctx;
1755 __poll_t mask = key_to_poll(key);
1756 unsigned long flags;
1758 /* for instances that support it, check for an event match first: */
1759 if (mask && !(mask & poll->events))
1762 list_del_init(&poll->wait.entry);
1764 if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
1765 list_del(&req->list);
1766 io_poll_complete(ctx, req, mask);
1767 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1769 io_cqring_ev_posted(ctx);
1772 io_queue_async_work(ctx, req);
1778 struct io_poll_table {
1779 struct poll_table_struct pt;
1780 struct io_kiocb *req;
1784 static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1785 struct poll_table_struct *p)
1787 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
1789 if (unlikely(pt->req->poll.head)) {
1790 pt->error = -EINVAL;
1795 pt->req->poll.head = head;
1796 add_wait_queue(head, &pt->req->poll.wait);
1799 static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1801 struct io_poll_iocb *poll = &req->poll;
1802 struct io_ring_ctx *ctx = req->ctx;
1803 struct io_poll_table ipt;
1804 bool cancel = false;
1808 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1810 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
1815 req->submit.sqe = NULL;
1816 INIT_WORK(&req->work, io_poll_complete_work);
1817 events = READ_ONCE(sqe->poll_events);
1818 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
1822 poll->canceled = false;
1824 ipt.pt._qproc = io_poll_queue_proc;
1825 ipt.pt._key = poll->events;
1827 ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1829 /* initialize the list so that we can do list_empty checks */
1830 INIT_LIST_HEAD(&poll->wait.entry);
1831 init_waitqueue_func_entry(&poll->wait, io_poll_wake);
1833 INIT_LIST_HEAD(&req->list);
1835 mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
1837 spin_lock_irq(&ctx->completion_lock);
1838 if (likely(poll->head)) {
1839 spin_lock(&poll->head->lock);
1840 if (unlikely(list_empty(&poll->wait.entry))) {
1846 if (mask || ipt.error)
1847 list_del_init(&poll->wait.entry);
1849 WRITE_ONCE(poll->canceled, true);
1850 else if (!poll->done) /* actually waiting for an event */
1851 list_add_tail(&req->list, &ctx->cancel_list);
1852 spin_unlock(&poll->head->lock);
1854 if (mask) { /* no async, we'd stolen it */
1856 io_poll_complete(ctx, req, mask);
1858 spin_unlock_irq(&ctx->completion_lock);
1861 io_cqring_ev_posted(ctx);
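/* hrtimer callback: the timeout fired, so post -ETIME and remove the request. */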
1867 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
1869 struct io_ring_ctx *ctx;
1870 struct io_kiocb *req;
1871 unsigned long flags;
1873 req = container_of(timer, struct io_kiocb, timeout.timer);
1875 atomic_inc(&ctx->cq_timeouts);
1877 spin_lock_irqsave(&ctx->completion_lock, flags);
1878 list_del(&req->list);
1880 io_cqring_fill_event(ctx, req->user_data, -ETIME);
1881 io_commit_cqring(ctx);
1882 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1884 io_cqring_ev_posted(ctx);
1887 return HRTIMER_NORESTART;
1890 static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1892 unsigned count, req_dist, tail_index;
1893 struct io_ring_ctx *ctx = req->ctx;
1894 struct list_head *entry;
1895 struct timespec64 ts;
1897 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1899 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags ||
1903 if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
1907 * sqe->off holds how many events need to occur for this
1908 * timeout event to be satisfied.
1910 count = READ_ONCE(sqe->off);
1914 req->sequence = ctx->cached_sq_head + count - 1;
1915 req->flags |= REQ_F_TIMEOUT;
1918 * Insertion sort, ensuring the first entry in the list is always
1919 * the one we need first.
1921 tail_index = ctx->cached_cq_tail - ctx->rings->sq_dropped;
1922 req_dist = req->sequence - tail_index;
1923 spin_lock_irq(&ctx->completion_lock);
1924 list_for_each_prev(entry, &ctx->timeout_list) {
1925 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
1928 dist = nxt->sequence - tail_index;
1929 if (req_dist >= dist)
1932 list_add(&req->list, entry);
1933 spin_unlock_irq(&ctx->completion_lock);
1935 hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1936 req->timeout.timer.function = io_timeout_fn;
1937 hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts),
1942 static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
1943 const struct io_uring_sqe *sqe)
1945 struct io_uring_sqe *sqe_copy;
1947 if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
1950 sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
1954 spin_lock_irq(&ctx->completion_lock);
1955 if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) {
1956 spin_unlock_irq(&ctx->completion_lock);
1961 memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
1962 req->submit.sqe = sqe_copy;
1964 INIT_WORK(&req->work, io_sq_wq_submit_work);
1965 list_add_tail(&req->list, &ctx->defer_list);
1966 spin_unlock_irq(&ctx->completion_lock);
1967 return -EIOCBQUEUED;
1970 static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
1971 const struct sqe_submit *s, bool force_nonblock)
1975 req->user_data = READ_ONCE(s->sqe->user_data);
1977 if (unlikely(s->index >= ctx->sq_entries))
1980 opcode = READ_ONCE(s->sqe->opcode);
1983 ret = io_nop(req, req->user_data);
1985 case IORING_OP_READV:
1986 if (unlikely(s->sqe->buf_index))
1988 ret = io_read(req, s, force_nonblock);
1990 case IORING_OP_WRITEV:
1991 if (unlikely(s->sqe->buf_index))
1993 ret = io_write(req, s, force_nonblock);
1995 case IORING_OP_READ_FIXED:
1996 ret = io_read(req, s, force_nonblock);
1998 case IORING_OP_WRITE_FIXED:
1999 ret = io_write(req, s, force_nonblock);
2001 case IORING_OP_FSYNC:
2002 ret = io_fsync(req, s->sqe, force_nonblock);
2004 case IORING_OP_POLL_ADD:
2005 ret = io_poll_add(req, s->sqe);
2007 case IORING_OP_POLL_REMOVE:
2008 ret = io_poll_remove(req, s->sqe);
2010 case IORING_OP_SYNC_FILE_RANGE:
2011 ret = io_sync_file_range(req, s->sqe, force_nonblock);
2013 case IORING_OP_SENDMSG:
2014 ret = io_sendmsg(req, s->sqe, force_nonblock);
2016 case IORING_OP_RECVMSG:
2017 ret = io_recvmsg(req, s->sqe, force_nonblock);
2019 case IORING_OP_TIMEOUT:
2020 ret = io_timeout(req, s->sqe);
2030 if (ctx->flags & IORING_SETUP_IOPOLL) {
2031 if (req->result == -EAGAIN)
2034 /* workqueue context doesn't hold uring_lock, grab it now */
2036 mutex_lock(&ctx->uring_lock);
2037 io_iopoll_req_issued(req);
2039 mutex_unlock(&ctx->uring_lock);
2045 static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx,
2046 const struct io_uring_sqe *sqe)
2048 switch (sqe->opcode) {
2049 case IORING_OP_READV:
2050 case IORING_OP_READ_FIXED:
2051 return &ctx->pending_async[READ];
2052 case IORING_OP_WRITEV:
2053 case IORING_OP_WRITE_FIXED:
2054 return &ctx->pending_async[WRITE];
2060 static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
2062 u8 opcode = READ_ONCE(sqe->opcode);
2064 return !(opcode == IORING_OP_READ_FIXED ||
2065 opcode == IORING_OP_WRITE_FIXED);
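/*
 * Async punt worker: replays queued requests from workqueue context,
 * attaching ->sqo_mm when the SQE needs access to user memory, and keeps
 * draining the per-direction async_list while more work is queued there.
 */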
2068 static void io_sq_wq_submit_work(struct work_struct *work)
2070 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2071 struct io_ring_ctx *ctx = req->ctx;
2072 struct mm_struct *cur_mm = NULL;
2073 struct async_list *async_list;
2074 LIST_HEAD(req_list);
2075 mm_segment_t old_fs;
2078 async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
2081 struct sqe_submit *s = &req->submit;
2082 const struct io_uring_sqe *sqe = s->sqe;
2083 unsigned int flags = req->flags;
2085 /* Ensure we clear previously set non-block flag */
2086 req->rw.ki_flags &= ~IOCB_NOWAIT;
2089 if (io_sqe_needs_user(sqe) && !cur_mm) {
2090 if (!mmget_not_zero(ctx->sqo_mm)) {
2093 cur_mm = ctx->sqo_mm;
2101 s->has_user = cur_mm != NULL;
2102 s->needs_lock = true;
2104 ret = __io_submit_sqe(ctx, req, s, false);
2106 * We can get EAGAIN for polled IO even though
2107 * we're forcing a sync submission from here,
2108 * since we can't wait for request slots on the
2117 /* drop submission reference */
2121 io_cqring_add_event(ctx, sqe->user_data, ret);
2125 /* async context always uses a copy of the sqe */
2128 /* reqs from the defer and link lists needn't decrease the async cnt */
2129 if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
2134 if (!list_empty(&req_list)) {
2135 req = list_first_entry(&req_list, struct io_kiocb,
2137 list_del(&req->list);
2140 if (list_empty(&async_list->list))
2144 spin_lock(&async_list->lock);
2145 if (list_empty(&async_list->list)) {
2146 spin_unlock(&async_list->lock);
2149 list_splice_init(&async_list->list, &req_list);
2150 spin_unlock(&async_list->lock);
2152 req = list_first_entry(&req_list, struct io_kiocb, list);
2153 list_del(&req->list);
2157 * Rare case of racing with a submitter. If we find the count has
2158 * dropped to zero AND we have pending work items, then restart
2159 * the processing. This is a tiny race window.
2162 ret = atomic_dec_return(&async_list->cnt);
2163 while (!ret && !list_empty(&async_list->list)) {
2164 spin_lock(&async_list->lock);
2165 atomic_inc(&async_list->cnt);
2166 list_splice_init(&async_list->list, &req_list);
2167 spin_unlock(&async_list->lock);
2169 if (!list_empty(&req_list)) {
2170 req = list_first_entry(&req_list,
2171 struct io_kiocb, list);
2172 list_del(&req->list);
2175 ret = atomic_dec_return(&async_list->cnt);
2188 * See if we can piggyback onto previously submitted work that is still
2189 * running. We currently only allow this if the new request is sequential
2190 * to the previous one we punted.
2192 static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
2198 if (!(req->flags & REQ_F_SEQ_PREV))
2200 if (!atomic_read(&list->cnt))
2204 spin_lock(&list->lock);
2205 list_add_tail(&req->list, &list->list);
2207 * Ensure we see a simultaneous modification from io_sq_wq_submit_work()
2210 if (!atomic_read(&list->cnt)) {
2211 list_del_init(&req->list);
2214 spin_unlock(&list->lock);
2218 static bool io_op_needs_file(const struct io_uring_sqe *sqe)
2220 int op = READ_ONCE(sqe->opcode);
2224 case IORING_OP_POLL_REMOVE:
2231 static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
2232 struct io_submit_state *state, struct io_kiocb *req)
2237 flags = READ_ONCE(s->sqe->flags);
2238 fd = READ_ONCE(s->sqe->fd);
2240 if (flags & IOSQE_IO_DRAIN)
2241 req->flags |= REQ_F_IO_DRAIN;
2243 * All IO needs to record the previous position. For LINK or DRAIN,
2244 * it can be used to mark the position of the first IO in the
2247 req->sequence = s->sequence;
2249 if (!io_op_needs_file(s->sqe))
2252 if (flags & IOSQE_FIXED_FILE) {
2253 if (unlikely(!ctx->user_files ||
2254 (unsigned) fd >= ctx->nr_user_files))
2256 req->file = ctx->user_files[fd];
2257 req->flags |= REQ_F_FIXED_FILE;
2259 if (s->needs_fixed_file)
2261 req->file = io_file_get(state, fd);
2262 if (unlikely(!req->file))
2269 static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2270 struct sqe_submit *s, bool force_nonblock)
2274 ret = __io_submit_sqe(ctx, req, s, force_nonblock);
2275 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
2276 struct io_uring_sqe *sqe_copy;
2278 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
2280 struct async_list *list;
2283 memcpy(&req->submit, s, sizeof(*s));
2284 list = io_async_list_from_sqe(ctx, s->sqe);
2285 if (!io_add_to_prev_work(list, req)) {
2287 atomic_inc(&list->cnt);
2288 INIT_WORK(&req->work, io_sq_wq_submit_work);
2289 io_queue_async_work(ctx, req);
2293 * Queued up for async execution, worker will release
2294 * submit reference when the iocb is actually submitted.
2300 /* drop submission reference */
2303 /* and drop final reference, if we failed */
2305 io_cqring_add_event(ctx, req->user_data, ret);
2306 if (req->flags & REQ_F_LINK)
2307 req->flags |= REQ_F_FAIL_LINK;
2314 static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2315 struct sqe_submit *s, bool force_nonblock)
2319 ret = io_req_defer(ctx, req, s->sqe);
2321 if (ret != -EIOCBQUEUED) {
2323 io_cqring_add_event(ctx, s->sqe->user_data, ret);
2328 return __io_queue_sqe(ctx, req, s, force_nonblock);
2331 static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
2332 struct sqe_submit *s, struct io_kiocb *shadow,
2333 bool force_nonblock)
2336 int need_submit = false;
2339 return io_queue_sqe(ctx, req, s, force_nonblock);
2342 * Mark the first IO in the link list as DRAIN; let all the following
2343 * IOs enter the defer list. All IO needs to be completed before the link
2346 req->flags |= REQ_F_IO_DRAIN;
2347 ret = io_req_defer(ctx, req, s->sqe);
2349 if (ret != -EIOCBQUEUED) {
2351 io_cqring_add_event(ctx, s->sqe->user_data, ret);
2356 * If ret == 0, all IOs in front of the link IO have already
2357 * completed; let's queue the link head.
2362 /* Insert shadow req to defer_list, blocking next IOs */
2363 spin_lock_irq(&ctx->completion_lock);
2364 list_add_tail(&shadow->list, &ctx->defer_list);
2365 spin_unlock_irq(&ctx->completion_lock);
2368 return __io_queue_sqe(ctx, req, s, force_nonblock);
2373 #define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
2375 static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
2376 struct io_submit_state *state, struct io_kiocb **link,
2377 bool force_nonblock)
2379 struct io_uring_sqe *sqe_copy;
2380 struct io_kiocb *req;
2383 /* enforce forwards compatibility on users */
2384 if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
2389 req = io_get_req(ctx, state);
2390 if (unlikely(!req)) {
2395 ret = io_req_set_file(ctx, s, state, req);
2396 if (unlikely(ret)) {
2400 io_cqring_add_event(ctx, s->sqe->user_data, ret);
2405 * If we already have a head request, queue this one for async
2406 * submittal once the head completes. If we don't have a head but
2407 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
2408 * submitted sync once the chain is complete. If none of those
2409 * conditions are true (normal request), then just queue it.
2412 struct io_kiocb *prev = *link;
2414 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
2421 memcpy(&req->submit, s, sizeof(*s));
2422 list_add_tail(&req->list, &prev->link_list);
2423 } else if (s->sqe->flags & IOSQE_IO_LINK) {
2424 req->flags |= REQ_F_LINK;
2426 memcpy(&req->submit, s, sizeof(*s));
2427 INIT_LIST_HEAD(&req->link_list);
2430 io_queue_sqe(ctx, req, s, force_nonblock);
2435 * Batched submission is done, ensure local IO is flushed out.
2437 static void io_submit_state_end(struct io_submit_state *state)
2439 blk_finish_plug(&state->plug);
2441 if (state->free_reqs)
2442 kmem_cache_free_bulk(req_cachep, state->free_reqs,
2443 &state->reqs[state->cur_req]);
2447 * Start submission side cache.
2449 static void io_submit_state_start(struct io_submit_state *state,
2450 struct io_ring_ctx *ctx, unsigned max_ios)
2452 blk_start_plug(&state->plug);
2453 state->free_reqs = 0;
2455 state->ios_left = max_ios;
2458 static void io_commit_sqring(struct io_ring_ctx *ctx)
2460 struct io_rings *rings = ctx->rings;
2462 if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) {
2464 * Ensure any loads from the SQEs are done at this point,
2465 * since once we write the new head, the application could
2466 * write new data to them.
2468 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2473 * Fetch an sqe, if one is available. Note that s->sqe will point to memory
2474 * that is mapped by userspace. This means that care needs to be taken to
2475 * ensure that reads are stable, as we cannot rely on userspace always
2476 * being a good citizen. If members of the sqe are validated and then later
2477 * used, it's important that those reads are done through READ_ONCE() to
2478 * prevent a re-load down the line.
2480 static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
2482 struct io_rings *rings = ctx->rings;
2483 u32 *sq_array = ctx->sq_array;
2487 * The cached sq head (or cq tail) serves two purposes:
2489 * 1) allows us to batch the cost of updating the user visible
2491 * 2) allows the kernel side to track the head on its own, even
2492 * though the application is the one updating it.
2494 head = ctx->cached_sq_head;
2495 /* make sure SQ entry isn't read before tail */
2496 if (head == smp_load_acquire(&rings->sq.tail))
2499 head = READ_ONCE(sq_array[head & ctx->sq_mask]);
2500 if (head < ctx->sq_entries) {
2502 s->sqe = &ctx->sq_sqes[head];
2503 s->sequence = ctx->cached_sq_head;
2504 ctx->cached_sq_head++;
2508 /* drop invalid entries */
2509 ctx->cached_sq_head++;
2510 rings->sq_dropped++;
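/*
 * Submit a batch of SQEs fetched by the SQPOLL thread, handling link and
 * drain chains along the way.
 */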
2514 static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
2515 unsigned int nr, bool has_user, bool mm_fault)
2517 struct io_submit_state state, *statep = NULL;
2518 struct io_kiocb *link = NULL;
2519 struct io_kiocb *shadow_req = NULL;
2520 bool prev_was_link = false;
2521 int i, submitted = 0;
2523 if (nr > IO_PLUG_THRESHOLD) {
2524 io_submit_state_start(&state, ctx, nr);
2528 for (i = 0; i < nr; i++) {
2530 * If previous wasn't linked and we have a linked command,
2531 * that's the end of the chain. Submit the previous link.
2533 if (!prev_was_link && link) {
2534 io_queue_link_head(ctx, link, &link->submit, shadow_req,
2539 prev_was_link = (sqes[i].sqe->flags & IOSQE_IO_LINK) != 0;
2541 if (link && (sqes[i].sqe->flags & IOSQE_IO_DRAIN)) {
2543 shadow_req = io_get_req(ctx, NULL);
2544 if (unlikely(!shadow_req))
2546 shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
2547 refcount_dec(&shadow_req->refs);
2549 shadow_req->sequence = sqes[i].sequence;
2553 if (unlikely(mm_fault)) {
2554 io_cqring_add_event(ctx, sqes[i].sqe->user_data,
2557 sqes[i].has_user = has_user;
2558 sqes[i].needs_lock = true;
2559 sqes[i].needs_fixed_file = true;
2560 io_submit_sqe(ctx, &sqes[i], statep, &link, true);
2566 io_queue_link_head(ctx, link, &link->submit, shadow_req, true);
2568 io_submit_state_end(&state);
2573 static int io_sq_thread(void *data)
2575 struct sqe_submit sqes[IO_IOPOLL_BATCH];
2576 struct io_ring_ctx *ctx = data;
2577 struct mm_struct *cur_mm = NULL;
2578 mm_segment_t old_fs;
2581 unsigned long timeout;
2583 complete(&ctx->sqo_thread_started);
2588 timeout = inflight = 0;
2589 while (!kthread_should_park()) {
2590 bool all_fixed, mm_fault = false;
2594 unsigned nr_events = 0;
2596 if (ctx->flags & IORING_SETUP_IOPOLL) {
2597 io_iopoll_check(ctx, &nr_events, 0);
2600 * Normal IO; just pretend everything completed.
2601 * We don't have to poll completions for that.
2603 nr_events = inflight;
2606 inflight -= nr_events;
2608 timeout = jiffies + ctx->sq_thread_idle;
2611 if (!io_get_sqring(ctx, &sqes[0])) {
2613 * We're polling. If we're within the defined idle
2614 * period, then let us spin without work before going to sleep.
2617 if (inflight || !time_after(jiffies, timeout)) {
2623 * Drop cur_mm before scheduling; we can't hold it for
2624 * long periods (or over schedule()). Do this before
2625 * adding ourselves to the waitqueue, as the unuse/drop may sleep.
2634 prepare_to_wait(&ctx->sqo_wait, &wait,
2635 TASK_INTERRUPTIBLE);
2637 /* Tell userspace we may need a wakeup call */
2638 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
2639 /* make sure to read SQ tail after writing flags */
2642 if (!io_get_sqring(ctx, &sqes[0])) {
2643 if (kthread_should_park()) {
2644 finish_wait(&ctx->sqo_wait, &wait);
2647 if (signal_pending(current))
2648 flush_signals(current);
2650 finish_wait(&ctx->sqo_wait, &wait);
2652 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
2655 finish_wait(&ctx->sqo_wait, &wait);
2657 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
2663 if (all_fixed && io_sqe_needs_user(sqes[i].sqe))
2667 if (i == ARRAY_SIZE(sqes))
2669 } while (io_get_sqring(ctx, &sqes[i]));
2671 /* Unless all new commands are FIXED regions, grab mm */
2672 if (!all_fixed && !cur_mm) {
2673 mm_fault = !mmget_not_zero(ctx->sqo_mm);
2675 use_mm(ctx->sqo_mm);
2676 cur_mm = ctx->sqo_mm;
2680 inflight += io_submit_sqes(ctx, sqes, i, cur_mm != NULL,
2683 /* Commit SQ ring head once we've consumed all SQEs */
2684 io_commit_sqring(ctx);
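/*
 * A userspace-side sketch of the IORING_SQ_NEED_WAKEUP handshake with the
 * flag writes above: after publishing new SQ tail entries, the application
 * issues a full barrier and only enters the kernel with
 * IORING_ENTER_SQ_WAKEUP if the poll thread has flagged that it is going to
 * sleep. Raw __NR_io_uring_enter usage and the helper name are assumptions
 * for illustration; error handling is omitted.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static void app_sqpoll_submit(int ring_fd, const unsigned *kflags)
{
	/* the tail store(s) for the new SQEs happened before this point */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	if (__atomic_load_n(kflags, __ATOMIC_RELAXED) & IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}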
2698 static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
2699 bool block_for_last)
2701 struct io_submit_state state, *statep = NULL;
2702 struct io_kiocb *link = NULL;
2703 struct io_kiocb *shadow_req = NULL;
2704 bool prev_was_link = false;
2707 if (to_submit > IO_PLUG_THRESHOLD) {
2708 io_submit_state_start(&state, ctx, to_submit);
2712 for (i = 0; i < to_submit; i++) {
2713 bool force_nonblock = true;
2714 struct sqe_submit s;
2716 if (!io_get_sqring(ctx, &s))
2720 * If previous wasn't linked and we have a linked command,
2721 * that's the end of the chain. Submit the previous link.
2723 if (!prev_was_link && link) {
2724 io_queue_link_head(ctx, link, &link->submit, shadow_req,
2729 prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
2731 if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
2733 shadow_req = io_get_req(ctx, NULL);
2734 if (unlikely(!shadow_req))
2736 shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
2737 refcount_dec(&shadow_req->refs);
2739 shadow_req->sequence = s.sequence;
2744 s.needs_lock = false;
2745 s.needs_fixed_file = false;
2749 * The caller will block for events after submit, so submit the
2750 * last IO non-blocking. This is either the only IO it's
2751 * submitting, or it already submitted the previous ones. This
2752 * improves performance by avoiding an async punt that we don't need to do.
2755 if (block_for_last && submit == to_submit)
2756 force_nonblock = false;
2758 io_submit_sqe(ctx, &s, statep, &link, force_nonblock);
2760 io_commit_sqring(ctx);
2763 io_queue_link_head(ctx, link, &link->submit, shadow_req,
2766 io_submit_state_end(statep);
2771 struct io_wait_queue {
2772 struct wait_queue_entry wq;
2773 struct io_ring_ctx *ctx;
2775 unsigned nr_timeouts;
2778 static inline bool io_should_wake(struct io_wait_queue *iowq)
2780 struct io_ring_ctx *ctx = iowq->ctx;
2783 * Wake up if we have enough events, or if a timeout occurred since we
2784 * started waiting. For timeouts, we always want to return to userspace,
2785 * regardless of event count.
2787 return io_cqring_events(ctx->rings) >= iowq->to_wait ||
2788 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
2791 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
2792 int wake_flags, void *key)
2794 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
2797 if (!io_should_wake(iowq))
2800 return autoremove_wake_function(curr, mode, wake_flags, key);
2804 * Wait until events become available, if we don't already have some. The
2805 * application must reap them itself, as they reside on the shared cq ring.
2807 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
2808 const sigset_t __user *sig, size_t sigsz)
2810 struct io_wait_queue iowq = {
2813 .func = io_wake_function,
2814 .entry = LIST_HEAD_INIT(iowq.wq.entry),
2817 .to_wait = min_events,
2819 struct io_rings *rings = ctx->rings;
2822 if (io_cqring_events(rings) >= min_events)
2826 #ifdef CONFIG_COMPAT
2827 if (in_compat_syscall())
2828 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
2832 ret = set_user_sigmask(sig, sigsz);
2839 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
2841 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
2842 TASK_INTERRUPTIBLE);
2843 if (io_should_wake(&iowq))
2846 if (signal_pending(current)) {
2851 finish_wait(&ctx->wait, &iowq.wq);
2853 restore_saved_sigmask_unless(ret == -ERESTARTSYS);
2854 if (ret == -ERESTARTSYS)
2857 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
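/*
 * A userspace-side sketch of the reaping the comment above refers to: the
 * application loads the CQ tail with acquire semantics, walks the new
 * entries, then publishes the updated head with a release store so the
 * kernel may reuse the slots. The helper name is an assumption for
 * illustration; the pointers are presumed to come from the mmap'ed CQ ring.
 */
#include <linux/io_uring.h>

static unsigned app_cq_reap(struct io_uring_cqe *cqes, unsigned *khead,
			    const unsigned *ktail, unsigned ring_mask)
{
	unsigned head = *khead;		/* only the application writes the head */
	unsigned tail = __atomic_load_n(ktail, __ATOMIC_ACQUIRE);
	unsigned seen = 0;

	while (head != tail) {
		struct io_uring_cqe *cqe = &cqes[head & ring_mask];

		/* cqe->user_data and cqe->res would be consumed here */
		(void)cqe;
		head++;
		seen++;
	}

	/* publish the new head so the kernel can post into the freed slots */
	__atomic_store_n(khead, head, __ATOMIC_RELEASE);
	return seen;
}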
2860 static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
2862 #if defined(CONFIG_UNIX)
2863 if (ctx->ring_sock) {
2864 struct sock *sock = ctx->ring_sock->sk;
2865 struct sk_buff *skb;
2867 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
2873 for (i = 0; i < ctx->nr_user_files; i++)
2874 fput(ctx->user_files[i]);
2878 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
2880 if (!ctx->user_files)
2883 __io_sqe_files_unregister(ctx);
2884 kfree(ctx->user_files);
2885 ctx->user_files = NULL;
2886 ctx->nr_user_files = 0;
2890 static void io_sq_thread_stop(struct io_ring_ctx *ctx)
2892 if (ctx->sqo_thread) {
2893 wait_for_completion(&ctx->sqo_thread_started);
2895 * The park is a bit of a workaround; without it we get
2896 * warning spew on shutdown with SQPOLL set and affinity
2897 * set to a single CPU.
2899 kthread_park(ctx->sqo_thread);
2900 kthread_stop(ctx->sqo_thread);
2901 ctx->sqo_thread = NULL;
2905 static void io_finish_async(struct io_ring_ctx *ctx)
2909 io_sq_thread_stop(ctx);
2911 for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++) {
2912 if (ctx->sqo_wq[i]) {
2913 destroy_workqueue(ctx->sqo_wq[i]);
2914 ctx->sqo_wq[i] = NULL;
2919 #if defined(CONFIG_UNIX)
2920 static void io_destruct_skb(struct sk_buff *skb)
2922 struct io_ring_ctx *ctx = skb->sk->sk_user_data;
2924 io_finish_async(ctx);
2925 unix_destruct_scm(skb);
2929 * Ensure the UNIX gc is aware of our file set, so we are certain that
2930 * the io_uring can be safely unregistered on process exit, even if we have
2931 * loops in the file referencing.
2933 static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
2935 struct sock *sk = ctx->ring_sock->sk;
2936 struct scm_fp_list *fpl;
2937 struct sk_buff *skb;
2940 if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
2941 unsigned long inflight = ctx->user->unix_inflight + nr;
2943 if (inflight > task_rlimit(current, RLIMIT_NOFILE))
2947 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
2951 skb = alloc_skb(0, GFP_KERNEL);
2958 skb->destructor = io_destruct_skb;
2960 fpl->user = get_uid(ctx->user);
2961 for (i = 0; i < nr; i++) {
2962 fpl->fp[i] = get_file(ctx->user_files[i + offset]);
2963 unix_inflight(fpl->user, fpl->fp[i]);
2966 fpl->max = fpl->count = nr;
2967 UNIXCB(skb).fp = fpl;
2968 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
2969 skb_queue_head(&sk->sk_receive_queue, skb);
2971 for (i = 0; i < nr; i++)
2978 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
2979 * causes regular reference counting to break down. We rely on the UNIX
2980 * garbage collection to take care of this problem for us.
2982 static int io_sqe_files_scm(struct io_ring_ctx *ctx)
2984 unsigned left, total;
2988 left = ctx->nr_user_files;
2990 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
2992 ret = __io_sqe_files_scm(ctx, this_files, total);
2996 total += this_files;
3002 while (total < ctx->nr_user_files) {
3003 fput(ctx->user_files[total]);
3010 static int io_sqe_files_scm(struct io_ring_ctx *ctx)
3016 static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
3019 __s32 __user *fds = (__s32 __user *) arg;
3023 if (ctx->user_files)
3027 if (nr_args > IORING_MAX_FIXED_FILES)
3030 ctx->user_files = kcalloc(nr_args, sizeof(struct file *), GFP_KERNEL);
3031 if (!ctx->user_files)
3034 for (i = 0; i < nr_args; i++) {
3036 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
3039 ctx->user_files[i] = fget(fd);
3042 if (!ctx->user_files[i])
3045 * Don't allow io_uring instances to be registered. If UNIX
3046 * isn't enabled, then this causes a reference cycle and this
3047 * instance can never get freed. If UNIX is enabled we'll
3048 * handle it just fine, but there's still no point in allowing
3049 * a ring fd as it doesn't support regular read/write anyway.
3051 if (ctx->user_files[i]->f_op == &io_uring_fops) {
3052 fput(ctx->user_files[i]);
3055 ctx->nr_user_files++;
3060 for (i = 0; i < ctx->nr_user_files; i++)
3061 fput(ctx->user_files[i]);
3063 kfree(ctx->user_files);
3064 ctx->user_files = NULL;
3065 ctx->nr_user_files = 0;
3069 ret = io_sqe_files_scm(ctx);
3071 io_sqe_files_unregister(ctx);
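/*
 * A userspace-side sketch of how the table registered above is consumed: an
 * fd array is handed to IORING_REGISTER_FILES once, after which an SQE can
 * set IOSQE_FIXED_FILE and carry the array index (not an fd) in sqe->fd.
 * Raw syscall usage and the helper names are assumptions for illustration;
 * error handling is omitted.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <string.h>
#include <linux/io_uring.h>

static int app_register_files(int ring_fd, const int *fds, unsigned nr)
{
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_FILES, fds, nr);
}

static void app_prep_fixed_readv(struct io_uring_sqe *sqe, unsigned file_index,
				 const struct iovec *iov, unsigned nr_iov,
				 __u64 offset)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_READV;
	sqe->flags = IOSQE_FIXED_FILE;
	sqe->fd = file_index;		/* index into the registered table */
	sqe->addr = (unsigned long) iov;
	sqe->len = nr_iov;
	sqe->off = offset;
}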
3076 static int io_sq_offload_start(struct io_ring_ctx *ctx,
3077 struct io_uring_params *p)
3081 init_waitqueue_head(&ctx->sqo_wait);
3082 mmgrab(current->mm);
3083 ctx->sqo_mm = current->mm;
3085 if (ctx->flags & IORING_SETUP_SQPOLL) {
3087 if (!capable(CAP_SYS_ADMIN))
3090 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
3091 if (!ctx->sq_thread_idle)
3092 ctx->sq_thread_idle = HZ;
3094 if (p->flags & IORING_SETUP_SQ_AFF) {
3095 int cpu = p->sq_thread_cpu;
3098 if (cpu >= nr_cpu_ids)
3100 if (!cpu_online(cpu))
3103 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
3107 ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
3110 if (IS_ERR(ctx->sqo_thread)) {
3111 ret = PTR_ERR(ctx->sqo_thread);
3112 ctx->sqo_thread = NULL;
3115 wake_up_process(ctx->sqo_thread);
3116 } else if (p->flags & IORING_SETUP_SQ_AFF) {
3117 /* Can't have SQ_AFF without SQPOLL */
3122 /* Do QD, or 2 * CPUS, whatever is smallest */
3123 ctx->sqo_wq[0] = alloc_workqueue("io_ring-wq",
3124 WQ_UNBOUND | WQ_FREEZABLE,
3125 min(ctx->sq_entries - 1, 2 * num_online_cpus()));
3126 if (!ctx->sqo_wq[0]) {
3132 * This is for buffered writes, where we want to limit the parallelism
3133 * due to file locking in file systems. As "normal" buffered writes
3134 * should parallelize on writeout quite nicely, limit us to having 2
3135 * pending. This avoids massive contention on the inode when doing
3136 * buffered async writes.
3138 ctx->sqo_wq[1] = alloc_workqueue("io_ring-write-wq",
3139 WQ_UNBOUND | WQ_FREEZABLE, 2);
3140 if (!ctx->sqo_wq[1]) {
3147 io_finish_async(ctx);
3148 mmdrop(ctx->sqo_mm);
3153 static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
3155 atomic_long_sub(nr_pages, &user->locked_vm);
3158 static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
3160 unsigned long page_limit, cur_pages, new_pages;
3162 /* Don't allow more pages than we can safely lock */
3163 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
3166 cur_pages = atomic_long_read(&user->locked_vm);
3167 new_pages = cur_pages + nr_pages;
3168 if (new_pages > page_limit)
3170 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
3171 new_pages) != cur_pages);
3176 static void io_mem_free(void *ptr)
3183 page = virt_to_head_page(ptr);
3184 if (put_page_testzero(page))
3185 free_compound_page(page);
3188 static void *io_mem_alloc(size_t size)
3190 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
3193 return (void *) __get_free_pages(gfp_flags, get_order(size));
3196 static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
3199 struct io_rings *rings;
3200 size_t off, sq_array_size;
3202 off = struct_size(rings, cqes, cq_entries);
3203 if (off == SIZE_MAX)
3207 off = ALIGN(off, SMP_CACHE_BYTES);
3212 sq_array_size = array_size(sizeof(u32), sq_entries);
3213 if (sq_array_size == SIZE_MAX)
3216 if (check_add_overflow(off, sq_array_size, &off))
3225 static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
3229 pages = (size_t)1 << get_order(
3230 rings_size(sq_entries, cq_entries, NULL));
3231 pages += (size_t)1 << get_order(
3232 array_size(sizeof(struct io_uring_sqe), sq_entries));
3237 static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
3241 if (!ctx->user_bufs)
3244 for (i = 0; i < ctx->nr_user_bufs; i++) {
3245 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
3247 for (j = 0; j < imu->nr_bvecs; j++)
3248 put_user_page(imu->bvec[j].bv_page);
3250 if (ctx->account_mem)
3251 io_unaccount_mem(ctx->user, imu->nr_bvecs);
3256 kfree(ctx->user_bufs);
3257 ctx->user_bufs = NULL;
3258 ctx->nr_user_bufs = 0;
3262 static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
3263 void __user *arg, unsigned index)
3265 struct iovec __user *src;
3267 #ifdef CONFIG_COMPAT
3269 struct compat_iovec __user *ciovs;
3270 struct compat_iovec ciov;
3272 ciovs = (struct compat_iovec __user *) arg;
3273 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
3276 dst->iov_base = (void __user *) (unsigned long) ciov.iov_base;
3277 dst->iov_len = ciov.iov_len;
3281 src = (struct iovec __user *) arg;
3282 if (copy_from_user(dst, &src[index], sizeof(*dst)))
3287 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
3290 struct vm_area_struct **vmas = NULL;
3291 struct page **pages = NULL;
3292 int i, j, got_pages = 0;
3297 if (!nr_args || nr_args > UIO_MAXIOV)
3300 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
3302 if (!ctx->user_bufs)
3305 for (i = 0; i < nr_args; i++) {
3306 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
3307 unsigned long off, start, end, ubuf;
3312 ret = io_copy_iov(ctx, &iov, arg, i);
3317 * Don't impose further limits on the size and buffer
3318 * constraints here; we'll return -EINVAL later when IO is
3319 * submitted if they are wrong.
3322 if (!iov.iov_base || !iov.iov_len)
3325 /* arbitrary limit, but we need something */
3326 if (iov.iov_len > SZ_1G)
3329 ubuf = (unsigned long) iov.iov_base;
3330 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3331 start = ubuf >> PAGE_SHIFT;
3332 nr_pages = end - start;
3334 if (ctx->account_mem) {
3335 ret = io_account_mem(ctx->user, nr_pages);
3341 if (!pages || nr_pages > got_pages) {
3344 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
3346 vmas = kvmalloc_array(nr_pages,
3347 sizeof(struct vm_area_struct *),
3349 if (!pages || !vmas) {
3351 if (ctx->account_mem)
3352 io_unaccount_mem(ctx->user, nr_pages);
3355 got_pages = nr_pages;
3358 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
3362 if (ctx->account_mem)
3363 io_unaccount_mem(ctx->user, nr_pages);
3368 down_read(&current->mm->mmap_sem);
3369 pret = get_user_pages(ubuf, nr_pages,
3370 FOLL_WRITE | FOLL_LONGTERM,
3372 if (pret == nr_pages) {
3373 /* don't support file backed memory */
3374 for (j = 0; j < nr_pages; j++) {
3375 struct vm_area_struct *vma = vmas[j];
3378 !is_file_hugepages(vma->vm_file)) {
3384 ret = pret < 0 ? pret : -EFAULT;
3386 up_read(&current->mm->mmap_sem);
3389 * if we did partial map, or found file backed vmas,
3390 * release any pages we did get
3393 put_user_pages(pages, pret);
3394 if (ctx->account_mem)
3395 io_unaccount_mem(ctx->user, nr_pages);
3400 off = ubuf & ~PAGE_MASK;
3402 for (j = 0; j < nr_pages; j++) {
3405 vec_len = min_t(size_t, size, PAGE_SIZE - off);
3406 imu->bvec[j].bv_page = pages[j];
3407 imu->bvec[j].bv_len = vec_len;
3408 imu->bvec[j].bv_offset = off;
3412 /* store original address for later verification */
3414 imu->len = iov.iov_len;
3415 imu->nr_bvecs = nr_pages;
3417 ctx->nr_user_bufs++;
3425 io_sqe_buffer_unregister(ctx);
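/*
 * A userspace-side sketch of the other end of the registration above: an
 * iovec array is pinned once via IORING_REGISTER_BUFFERS, after which
 * IORING_OP_READ_FIXED (or WRITE_FIXED) SQEs reference a registered buffer
 * by buf_index and avoid the per-IO page pinning cost. Raw syscall usage
 * and the helper names are assumptions for illustration; error handling is
 * omitted.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <string.h>
#include <linux/io_uring.h>

static int app_register_buffers(int ring_fd, const struct iovec *iovs,
				unsigned nr)
{
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_BUFFERS, iovs, nr);
}

static void app_prep_read_fixed(struct io_uring_sqe *sqe, int fd, void *buf,
				unsigned len, __u64 offset, __u16 buf_index)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_READ_FIXED;
	sqe->fd = fd;
	sqe->addr = (unsigned long) buf;	/* must lie inside iovs[buf_index] */
	sqe->len = len;
	sqe->off = offset;
	sqe->buf_index = buf_index;
}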
3429 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
3431 __s32 __user *fds = arg;
3437 if (copy_from_user(&fd, fds, sizeof(*fds)))
3440 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
3441 if (IS_ERR(ctx->cq_ev_fd)) {
3442 int ret = PTR_ERR(ctx->cq_ev_fd);
3443 ctx->cq_ev_fd = NULL;
3450 static int io_eventfd_unregister(struct io_ring_ctx *ctx)
3452 if (ctx->cq_ev_fd) {
3453 eventfd_ctx_put(ctx->cq_ev_fd);
3454 ctx->cq_ev_fd = NULL;
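/*
 * A userspace-side sketch of pairing with io_eventfd_register() above: once
 * an eventfd is registered, every posted CQE also signals the eventfd, so
 * the application can wait for completions in an ordinary poll/epoll loop.
 * Raw syscall usage and the helper name are assumptions for illustration;
 * error handling is minimal.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/eventfd.h>
#include <linux/io_uring.h>

static int app_register_eventfd(int ring_fd)
{
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;
	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_EVENTFD, &efd, 1) < 0)
		return -1;
	return efd;	/* read() an 8-byte counter from it after completions */
}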
3461 static void io_ring_ctx_free(struct io_ring_ctx *ctx)
3463 io_finish_async(ctx);
3465 mmdrop(ctx->sqo_mm);
3467 io_iopoll_reap_events(ctx);
3468 io_sqe_buffer_unregister(ctx);
3469 io_sqe_files_unregister(ctx);
3470 io_eventfd_unregister(ctx);
3472 #if defined(CONFIG_UNIX)
3473 if (ctx->ring_sock) {
3474 ctx->ring_sock->file = NULL; /* so that iput() is called */
3475 sock_release(ctx->ring_sock);
3479 io_mem_free(ctx->rings);
3480 io_mem_free(ctx->sq_sqes);
3482 percpu_ref_exit(&ctx->refs);
3483 if (ctx->account_mem)
3484 io_unaccount_mem(ctx->user,
3485 ring_pages(ctx->sq_entries, ctx->cq_entries));
3486 free_uid(ctx->user);
3490 static __poll_t io_uring_poll(struct file *file, poll_table *wait)
3492 struct io_ring_ctx *ctx = file->private_data;
3495 poll_wait(file, &ctx->cq_wait, wait);
3497 * synchronizes with barrier from wq_has_sleeper call in io_commit_cqring()
3501 if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
3502 ctx->rings->sq_ring_entries)
3503 mask |= EPOLLOUT | EPOLLWRNORM;
3504 if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail)
3505 mask |= EPOLLIN | EPOLLRDNORM;
3510 static int io_uring_fasync(int fd, struct file *file, int on)
3512 struct io_ring_ctx *ctx = file->private_data;
3514 return fasync_helper(fd, file, on, &ctx->cq_fasync);
3517 static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
3519 mutex_lock(&ctx->uring_lock);
3520 percpu_ref_kill(&ctx->refs);
3521 mutex_unlock(&ctx->uring_lock);
3523 io_kill_timeouts(ctx);
3524 io_poll_remove_all(ctx);
3525 io_iopoll_reap_events(ctx);
3526 wait_for_completion(&ctx->ctx_done);
3527 io_ring_ctx_free(ctx);
3530 static int io_uring_release(struct inode *inode, struct file *file)
3532 struct io_ring_ctx *ctx = file->private_data;
3534 file->private_data = NULL;
3535 io_ring_ctx_wait_and_kill(ctx);
3539 static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3541 loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
3542 unsigned long sz = vma->vm_end - vma->vm_start;
3543 struct io_ring_ctx *ctx = file->private_data;
3549 case IORING_OFF_SQ_RING:
3550 case IORING_OFF_CQ_RING:
3553 case IORING_OFF_SQES:
3560 page = virt_to_head_page(ptr);
3561 if (sz > page_size(page))
3564 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
3565 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
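/*
 * A userspace-side sketch of the mmap()s served above: the SQ ring, CQ ring
 * and SQE array are mapped at the fixed IORING_OFF_* offsets, and the ring
 * fields are then located through the sq_off/cq_off values returned by
 * io_uring_setup(). With IORING_FEAT_SINGLE_MMAP the SQ and CQ rings can
 * share one mapping; that is ignored here for brevity. The struct and
 * helper names are assumptions for illustration.
 */
#include <sys/mman.h>
#include <linux/io_uring.h>

struct app_rings {
	void *sq_ptr, *cq_ptr;
	struct io_uring_sqe *sqes;
	unsigned *sq_head, *sq_tail, *sq_mask, *sq_array;
	unsigned *cq_head, *cq_tail, *cq_mask;
	struct io_uring_cqe *cqes;
};

static int app_mmap_rings(int ring_fd, const struct io_uring_params *p,
			  struct app_rings *r)
{
	size_t sq_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
	size_t cq_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);

	r->sq_ptr = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
	r->cq_ptr = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_CQ_RING);
	r->sqes = mmap(NULL, p->sq_entries * sizeof(struct io_uring_sqe),
		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		       ring_fd, IORING_OFF_SQES);
	if (r->sq_ptr == MAP_FAILED || r->cq_ptr == MAP_FAILED ||
	    r->sqes == (void *) MAP_FAILED)
		return -1;

	r->sq_head  = (unsigned *)((char *)r->sq_ptr + p->sq_off.head);
	r->sq_tail  = (unsigned *)((char *)r->sq_ptr + p->sq_off.tail);
	r->sq_mask  = (unsigned *)((char *)r->sq_ptr + p->sq_off.ring_mask);
	r->sq_array = (unsigned *)((char *)r->sq_ptr + p->sq_off.array);
	r->cq_head  = (unsigned *)((char *)r->cq_ptr + p->cq_off.head);
	r->cq_tail  = (unsigned *)((char *)r->cq_ptr + p->cq_off.tail);
	r->cq_mask  = (unsigned *)((char *)r->cq_ptr + p->cq_off.ring_mask);
	r->cqes     = (struct io_uring_cqe *)((char *)r->cq_ptr + p->cq_off.cqes);
	return 0;
}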
3568 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3569 u32, min_complete, u32, flags, const sigset_t __user *, sig,
3572 struct io_ring_ctx *ctx;
3577 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
3585 if (f.file->f_op != &io_uring_fops)
3589 ctx = f.file->private_data;
3590 if (!percpu_ref_tryget(&ctx->refs))
3594 * For SQ polling, the thread will do all submissions and completions.
3595 * Just return the requested submit count, and wake the thread if we were asked to.
3599 if (ctx->flags & IORING_SETUP_SQPOLL) {
3600 if (flags & IORING_ENTER_SQ_WAKEUP)
3601 wake_up(&ctx->sqo_wait);
3602 submitted = to_submit;
3603 } else if (to_submit) {
3604 bool block_for_last = false;
3606 to_submit = min(to_submit, ctx->sq_entries);
3609 * Allow last submission to block in a series, IFF the caller
3610 * asked to wait for events and we don't currently have
3611 * enough. This potentially avoids an async punt.
3613 if (to_submit == min_complete &&
3614 io_cqring_events(ctx->rings) < min_complete)
3615 block_for_last = true;
3617 mutex_lock(&ctx->uring_lock);
3618 submitted = io_ring_submit(ctx, to_submit, block_for_last);
3619 mutex_unlock(&ctx->uring_lock);
3621 if (flags & IORING_ENTER_GETEVENTS) {
3622 unsigned nr_events = 0;
3624 min_complete = min(min_complete, ctx->cq_entries);
3626 if (ctx->flags & IORING_SETUP_IOPOLL) {
3627 ret = io_iopoll_check(ctx, &nr_events, min_complete);
3629 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
3633 io_ring_drop_ctx_refs(ctx, 1);
3636 return submitted ? submitted : ret;
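/*
 * A userspace-side sketch of the "submit and wait" pattern the code above
 * optimizes for: one io_uring_enter() call both submits the pending SQEs
 * and, with IORING_ENTER_GETEVENTS, waits for min_complete completions.
 * Raw syscall usage and the helper name are assumptions for illustration.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int app_submit_and_wait(int ring_fd, unsigned to_submit,
			       unsigned min_complete)
{
	return syscall(__NR_io_uring_enter, ring_fd, to_submit, min_complete,
		       IORING_ENTER_GETEVENTS, NULL, 0);
}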
3639 static const struct file_operations io_uring_fops = {
3640 .release = io_uring_release,
3641 .mmap = io_uring_mmap,
3642 .poll = io_uring_poll,
3643 .fasync = io_uring_fasync,
3646 static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
3647 struct io_uring_params *p)
3649 struct io_rings *rings;
3650 size_t size, sq_array_offset;
3652 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
3653 if (size == SIZE_MAX)
3656 rings = io_mem_alloc(size);
3661 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
3662 rings->sq_ring_mask = p->sq_entries - 1;
3663 rings->cq_ring_mask = p->cq_entries - 1;
3664 rings->sq_ring_entries = p->sq_entries;
3665 rings->cq_ring_entries = p->cq_entries;
3666 ctx->sq_mask = rings->sq_ring_mask;
3667 ctx->cq_mask = rings->cq_ring_mask;
3668 ctx->sq_entries = rings->sq_ring_entries;
3669 ctx->cq_entries = rings->cq_ring_entries;
3671 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
3672 if (size == SIZE_MAX)
3675 ctx->sq_sqes = io_mem_alloc(size);
3683 * Allocate an anonymous fd; this is what constitutes the application
3684 * visible backing of an io_uring instance. The application mmaps this
3685 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
3686 * we have to tie this fd to a socket for file garbage collection purposes.
3688 static int io_uring_get_fd(struct io_ring_ctx *ctx)
3693 #if defined(CONFIG_UNIX)
3694 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
3700 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
3704 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
3705 O_RDWR | O_CLOEXEC);
3708 ret = PTR_ERR(file);
3712 #if defined(CONFIG_UNIX)
3713 ctx->ring_sock->file = file;
3714 ctx->ring_sock->sk->sk_user_data = ctx;
3716 fd_install(ret, file);
3719 #if defined(CONFIG_UNIX)
3720 sock_release(ctx->ring_sock);
3721 ctx->ring_sock = NULL;
3726 static int io_uring_create(unsigned entries, struct io_uring_params *p)
3728 struct user_struct *user = NULL;
3729 struct io_ring_ctx *ctx;
3733 if (!entries || entries > IORING_MAX_ENTRIES)
3737 * Use twice as many entries for the CQ ring. It's possible for the
3738 * application to drive a higher depth than the size of the SQ ring,
3739 * since the sqes are only used at submission time. This allows for
3740 * some flexibility in overcommitting a bit.
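* For example, asking for 100 entries yields sq_entries == 128 and
* cq_entries == 256.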
3742 p->sq_entries = roundup_pow_of_two(entries);
3743 p->cq_entries = 2 * p->sq_entries;
3745 user = get_uid(current_user());
3746 account_mem = !capable(CAP_IPC_LOCK);
3749 ret = io_account_mem(user,
3750 ring_pages(p->sq_entries, p->cq_entries));
3757 ctx = io_ring_ctx_alloc(p);
3760 io_unaccount_mem(user, ring_pages(p->sq_entries,
3765 ctx->compat = in_compat_syscall();
3766 ctx->account_mem = account_mem;
3769 ret = io_allocate_scq_urings(ctx, p);
3773 ret = io_sq_offload_start(ctx, p);
3777 ret = io_uring_get_fd(ctx);
3781 memset(&p->sq_off, 0, sizeof(p->sq_off));
3782 p->sq_off.head = offsetof(struct io_rings, sq.head);
3783 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
3784 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
3785 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
3786 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
3787 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
3788 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
3790 memset(&p->cq_off, 0, sizeof(p->cq_off));
3791 p->cq_off.head = offsetof(struct io_rings, cq.head);
3792 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
3793 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
3794 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
3795 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
3796 p->cq_off.cqes = offsetof(struct io_rings, cqes);
3798 p->features = IORING_FEAT_SINGLE_MMAP;
3801 io_ring_ctx_wait_and_kill(ctx);
3806 * Sets up an io_uring context and returns the fd. The application asks for a
3807 * ring size, and we return the actual sq/cq ring sizes (among other things) in the
3808 * params structure passed in.
3810 static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
3812 struct io_uring_params p;
3816 if (copy_from_user(&p, params, sizeof(p)))
3818 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
3823 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
3824 IORING_SETUP_SQ_AFF))
3827 ret = io_uring_create(entries, &p);
3831 if (copy_to_user(params, &p, sizeof(p)))
3837 SYSCALL_DEFINE2(io_uring_setup, u32, entries,
3838 struct io_uring_params __user *, params)
3840 return io_uring_setup(entries, params);
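/*
 * A userspace-side sketch of calling the syscall defined above: the params
 * structure is zeroed, optional setup flags are set, and on return the
 * kernel has filled in sq_entries/cq_entries, the sq_off/cq_off layout and
 * the feature bits along with handing back the ring fd. Raw syscall usage
 * and the helper name are assumptions for illustration; error handling is
 * omitted.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <string.h>
#include <linux/io_uring.h>

static int app_ring_setup(unsigned entries, struct io_uring_params *p)
{
	int fd;

	memset(p, 0, sizeof(*p));
	/* p->flags |= IORING_SETUP_SQPOLL, IORING_SETUP_IOPOLL, ... if desired */
	fd = syscall(__NR_io_uring_setup, entries, p);
	/* on success, p->features & IORING_FEAT_SINGLE_MMAP is now meaningful */
	return fd;
}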
3843 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
3844 void __user *arg, unsigned nr_args)
3845 __releases(ctx->uring_lock)
3846 __acquires(ctx->uring_lock)
3851 * We're inside the ring mutex; if the ref is already dying, then
3852 * someone else killed the ctx or is already going through
3853 * io_uring_register().
3855 if (percpu_ref_is_dying(&ctx->refs))
3858 percpu_ref_kill(&ctx->refs);
3861 * Drop uring mutex before waiting for references to exit. If another
3862 * thread is currently inside io_uring_enter() it might need to grab
3863 * the uring_lock to make progress. If we hold it here across the drain
3864 * wait, then we can deadlock. It's safe to drop the mutex here, since
3865 * no new references will come in after we've killed the percpu ref.
3867 mutex_unlock(&ctx->uring_lock);
3868 wait_for_completion(&ctx->ctx_done);
3869 mutex_lock(&ctx->uring_lock);
3872 case IORING_REGISTER_BUFFERS:
3873 ret = io_sqe_buffer_register(ctx, arg, nr_args);
3875 case IORING_UNREGISTER_BUFFERS:
3879 ret = io_sqe_buffer_unregister(ctx);
3881 case IORING_REGISTER_FILES:
3882 ret = io_sqe_files_register(ctx, arg, nr_args);
3884 case IORING_UNREGISTER_FILES:
3888 ret = io_sqe_files_unregister(ctx);
3890 case IORING_REGISTER_EVENTFD:
3894 ret = io_eventfd_register(ctx, arg);
3896 case IORING_UNREGISTER_EVENTFD:
3900 ret = io_eventfd_unregister(ctx);
3907 /* bring the ctx back to life */
3908 reinit_completion(&ctx->ctx_done);
3909 percpu_ref_reinit(&ctx->refs);
3913 SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
3914 void __user *, arg, unsigned int, nr_args)
3916 struct io_ring_ctx *ctx;
3925 if (f.file->f_op != &io_uring_fops)
3928 ctx = f.file->private_data;
3930 mutex_lock(&ctx->uring_lock);
3931 ret = __io_uring_register(ctx, opcode, arg, nr_args);
3932 mutex_unlock(&ctx->uring_lock);
3938 static int __init io_uring_init(void)
3940 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
3943 __initcall(io_uring_init);