// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side. When the application reads the CQ ring
 * tail, it must use an appropriate smp_rmb() to order with the smp_wmb()
 * the kernel uses after writing the tail. Failure to do so could cause a
 * delay in when the application notices that completion events are
 * available. This isn't a fatal condition. Likewise, the application must
 * use an appropriate smp_wmb() both before writing the SQ tail, and after
 * writing the SQ tail. The first one orders the sqe writes with the tail
 * write, and the latter is paired with the smp_rmb() the kernel will issue
 * before reading the SQ tail on submission.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * on data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
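/*
 * Illustrative userspace reap loop for the barrier pairing described above.
 * This is a sketch, not part of this file: the pointer names (cq_head,
 * cq_tail, cq_mask, cqes) are assumptions standing in for the mmap'ed ring
 * fields, and a real application would use the liburing helpers instead:
 *
 *	unsigned head = *cq_head;
 *	unsigned tail = *cq_tail;
 *
 *	smp_rmb();	// order the tail read before the cqe reads; pairs
 *			// with the kernel smp_wmb() after it writes the tail
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head++ & *cq_mask];
 *
 *		handle(cqe->user_data, cqe->res, cqe->flags);
 *	}
 *	smp_store_release(cq_head, head);	// hand entries back to the kernel
 */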
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>

#include <linux/sched/signal.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/af_unix.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>

#include <uapi/linux/io_uring.h>

#define IORING_MAX_ENTRIES	4096
#define IORING_MAX_FIXED_FILES	1024
	u32			head ____cacheline_aligned_in_smp;
	u32			tail ____cacheline_aligned_in_smp;

	struct io_uring_cqe	cqes[];

struct io_mapped_ubuf {
	unsigned int	nr_bvecs;

	struct percpu_ref	refs;
} ____cacheline_aligned_in_smp;

	struct io_sq_ring	*sq_ring;
	unsigned		cached_sq_head;
	struct io_uring_sqe	*sq_sqes;
} ____cacheline_aligned_in_smp;

	struct workqueue_struct	*sqo_wq;
	struct mm_struct	*sqo_mm;

	struct io_cq_ring	*cq_ring;
	unsigned		cached_cq_tail;
	struct wait_queue_head	cq_wait;
	struct fasync_struct	*cq_fasync;
} ____cacheline_aligned_in_smp;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct file		**user_files;
	unsigned		nr_user_files;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	struct completion	ctx_done;

	struct mutex		uring_lock;
	wait_queue_head_t	wait;
} ____cacheline_aligned_in_smp;

	spinlock_t		completion_lock;
	bool			poll_multi_file;
	/*
	 * ->poll_list is protected by the ctx->uring_lock for
	 * io_uring instances that don't use IORING_SETUP_SQPOLL.
	 * For SQPOLL, only the single threaded io_sq_thread() will
	 * manipulate the list, hence no extra locking is needed there.
	 */
	struct list_head	poll_list;
} ____cacheline_aligned_in_smp;

#if defined(CONFIG_UNIX)
	struct socket		*ring_sock;
#endif

	const struct io_uring_sqe	*sqe;
	unsigned short			index;

	struct sqe_submit	submit;

	struct io_ring_ctx	*ctx;
	struct list_head	list;

#define REQ_F_FORCE_NONBLOCK	1	/* inline submission attempt */
#define REQ_F_IOPOLL_COMPLETED	2	/* polled IO has completed */
#define REQ_F_FIXED_FILE	4	/* ctx owns file */

	struct work_struct	work;

#define IO_PLUG_THRESHOLD		2
#define IO_IOPOLL_BATCH			8

struct io_submit_state {
	struct blk_plug		plug;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_IOPOLL_BATCH];
	unsigned int		free_reqs;
	unsigned int		cur_req;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		has_refs;
	unsigned int		used_refs;
	unsigned int		ios_left;
};

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;
struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ctx_done);
}
static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free, 0, GFP_KERNEL)) {
		kfree(ctx);
		return NULL;
	}

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->cq_wait);
	init_completion(&ctx->ctx_done);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->wait);
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->poll_list);
	return ctx;
}
static void io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_cq_ring *ring = ctx->cq_ring;

	if (ctx->cached_cq_tail != READ_ONCE(ring->r.tail)) {
		/* order cqe stores with ring update */
		smp_store_release(&ring->r.tail, ctx->cached_cq_tail);

		/*
		 * Write side barrier of tail update, app has read side. See
		 * comment at the top of this file.
		 */
		smp_wmb();

		if (wq_has_sleeper(&ctx->cq_wait)) {
			wake_up_interruptible(&ctx->cq_wait);
			kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
		}
	}
}
static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
	struct io_cq_ring *ring = ctx->cq_ring;
	unsigned tail;

	tail = ctx->cached_cq_tail;
	/* See comment at the top of the file */
	smp_rmb();
	if (tail + 1 == READ_ONCE(ring->r.head))
		return NULL;

	ctx->cached_cq_tail++;
	return &ring->cqes[tail & ctx->cq_mask];
}
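/*
 * Worked example of the masking above (illustrative numbers, not from this
 * file): with cq ring_entries == 8 the mask is 7, head and tail are
 * free-running counters, and a cached tail of 10 lands in
 * cqes[10 & 7] == cqes[2]. The ring counts as full when bumping the tail
 * would catch up with the head.
 */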
static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
				 long res, unsigned ev_flags)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqring(ctx);
	if (cqe) {
		WRITE_ONCE(cqe->user_data, ki_user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, ev_flags);
	} else {
		unsigned overflow = READ_ONCE(ctx->cq_ring->overflow);

		WRITE_ONCE(ctx->cq_ring->overflow, overflow + 1);
	}
}

static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 ki_user_data,
				long res, unsigned ev_flags)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	io_cqring_fill_event(ctx, ki_user_data, res, ev_flags);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
}
static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
{
	percpu_ref_put_many(&ctx->refs, refs);

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
}
static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
				   struct io_submit_state *state)
{
	struct io_kiocb *req;

	if (!percpu_ref_tryget(&ctx->refs))
		return NULL;

	if (!state) {
		req = kmem_cache_alloc(req_cachep, __GFP_NOWARN);
		if (unlikely(!req))
			goto out;
	} else if (!state->free_reqs) {
		size_t sz;
		int ret;

		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
		ret = kmem_cache_alloc_bulk(req_cachep, __GFP_NOWARN, sz,
						state->reqs);
		if (unlikely(ret <= 0))
			goto out;
		state->free_reqs = ret - 1;
		state->cur_req = 1;
		req = state->reqs[0];
	} else {
		req = state->reqs[state->cur_req];
		state->free_reqs--;
		state->cur_req++;
	}
	return req;
out:
	io_ring_drop_ctx_refs(ctx, 1);
	return NULL;
}
static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
{
	if (*nr) {
		kmem_cache_free_bulk(req_cachep, *nr, reqs);
		io_ring_drop_ctx_refs(ctx, *nr);
		*nr = 0;
	}
}

static void io_free_req(struct io_kiocb *req)
{
	io_ring_drop_ctx_refs(req->ctx, 1);
	kmem_cache_free(req_cachep, req);
}
/*
 * Find and free completed poll iocbs
 */
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
			       struct list_head *done)
{
	void *reqs[IO_IOPOLL_BATCH];
	int file_count, to_free;
	struct file *file = NULL;
	struct io_kiocb *req;

	file_count = to_free = 0;
	while (!list_empty(done)) {
		req = list_first_entry(done, struct io_kiocb, list);
		list_del(&req->list);

		io_cqring_fill_event(ctx, req->user_data, req->error, 0);

		reqs[to_free++] = req;
		(*nr_events)++;

		/*
		 * Batched puts of the same file, to avoid dirtying the
		 * file usage count multiple times, if avoidable.
		 */
		if (!(req->flags & REQ_F_FIXED_FILE)) {
			if (!file) {
				file = req->rw.ki_filp;
				file_count = 1;
			} else if (file == req->rw.ki_filp) {
				file_count++;
			} else {
				fput_many(file, file_count);
				file = req->rw.ki_filp;
				file_count = 1;
			}
		}

		if (to_free == ARRAY_SIZE(reqs))
			io_free_req_many(ctx, reqs, &to_free);
	}
	io_commit_cqring(ctx);

	if (file)
		fput_many(file, file_count);
	io_free_req_many(ctx, reqs, &to_free);
}
static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
			long min)
{
	struct io_kiocb *req, *tmp;
	LIST_HEAD(done);
	bool spin;
	int ret;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list, and we're under the requested amount.
	 */
	spin = !ctx->poll_multi_file && *nr_events < min;

	ret = 0;
	list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
		struct kiocb *kiocb = &req->rw;

		/*
		 * Move completed entries to our local list. If we find a
		 * request that requires polling, break out and complete
		 * the done list first, if we have entries there.
		 */
		if (req->flags & REQ_F_IOPOLL_COMPLETED) {
			list_move_tail(&req->list, &done);
			continue;
		}
		if (!list_empty(&done))
			break;

		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
		if (ret < 0)
			break;

		if (ret && spin)
			spin = false;
		ret = 0;
	}

	if (!list_empty(&done))
		io_iopoll_complete(ctx, nr_events, &done);

	return ret;
}
/*
 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
 * non-spinning poll check - we'll still enter the driver poll loop, but only
 * as a non-spinning completion check.
 */
static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
			       long min)
{
	while (!list_empty(&ctx->poll_list)) {
		int ret;

		ret = io_do_iopoll(ctx, nr_events, min);
		if (ret < 0)
			return ret;
		if (!min || *nr_events >= min)
			return 0;
	}

	return 1;
}
/*
 * We can't just wait for polled events to come to us, we have to actively
 * find and complete them.
 */
static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_IOPOLL))
		return;

	mutex_lock(&ctx->uring_lock);
	while (!list_empty(&ctx->poll_list)) {
		unsigned int nr_events = 0;

		io_iopoll_getevents(ctx, &nr_events, 1);
	}
	mutex_unlock(&ctx->uring_lock);
}
static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
			   long min)
{
	int ret = 0;

	do {
		int tmin = 0;

		if (*nr_events < min)
			tmin = min - *nr_events;

		ret = io_iopoll_getevents(ctx, nr_events, tmin);
		if (ret <= 0)
			break;
		ret = 0;
	} while (min && !*nr_events && !need_resched());

	return ret;
}
static void kiocb_end_write(struct kiocb *kiocb)
{
	if (kiocb->ki_flags & IOCB_WRITE) {
		struct inode *inode = file_inode(kiocb->ki_filp);

		/*
		 * Tell lockdep we inherited freeze protection from submission
		 * thread.
		 */
		if (S_ISREG(inode->i_mode))
			__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
		file_end_write(kiocb->ki_filp);
	}
}

static void io_fput(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE))
		fput(req->rw.ki_filp);
}
static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);

	kiocb_end_write(kiocb);

	io_fput(req);
	io_cqring_add_event(req->ctx, req->user_data, res, 0);
	io_free_req(req);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);

	kiocb_end_write(kiocb);

	req->error = res;
	if (res != -EAGAIN)
		req->flags |= REQ_F_IOPOLL_COMPLETED;
}
/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from a io_iopoll_getevents() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	/*
	 * Track whether we have multiple files in our lists. This will impact
	 * how we do polling eventually, not spinning if we're on potentially
	 * different devices.
	 */
	if (list_empty(&ctx->poll_list)) {
		ctx->poll_multi_file = false;
	} else if (!ctx->poll_multi_file) {
		struct io_kiocb *list_req;

		list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
						list);
		if (list_req->rw.ki_filp != req->rw.ki_filp)
			ctx->poll_multi_file = true;
	}

	/*
	 * For fast devices, IO may have already completed. If it has, add
	 * it to the front so we find it first.
	 */
	if (req->flags & REQ_F_IOPOLL_COMPLETED)
		list_add(&req->list, &ctx->poll_list);
	else
		list_add_tail(&req->list, &ctx->poll_list);
}
static void io_file_put(struct io_submit_state *state, struct file *file)
{
	if (!state) {
		fput(file);
	} else if (state->file) {
		int diff = state->has_refs - state->used_refs;

		if (diff)
			fput_many(state->file, diff);
		state->file = NULL;
	}
}

/*
 * Get as many references to a file as we have IOs left in this submission,
 * assuming most submissions are for one file, or at least that each file
 * has more than one submission.
 */
static struct file *io_file_get(struct io_submit_state *state, int fd)
{
	if (!state)
		return fget(fd);

	if (state->fd == fd) {
		state->used_refs++;
		state->ios_left--;
		return state->file;
	}
	io_file_put(state, NULL);
	state->file = fget_many(fd, state->ios_left);
	if (!state->file)
		return NULL;

	state->fd = fd;
	state->has_refs = state->ios_left;
	state->used_refs = 1;
	state->ios_left--;
	return state->file;
}
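/*
 * Illustrative ref accounting for the cache above (assumed numbers, not
 * from this file): a batch of 8 sqes against one fd does a single
 * fget_many(fd, 8), so has_refs == 8 and used_refs == 1 after the first
 * get. Later gets of the same fd just bump used_refs, and io_file_put()
 * returns the unused has_refs - used_refs references in one fput_many()
 * call instead of 8 separate fget()/fput() pairs.
 */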
/*
 * If we tracked the file through the SCM inflight mechanism, we could support
 * any file. For now, just ensure that anything potentially problematic is done
 * inline.
 */
static bool io_file_supports_async(struct file *file)
{
	umode_t mode = file_inode(file)->i_mode;

	if (S_ISBLK(mode) || S_ISCHR(mode))
		return true;
	if (S_ISREG(mode) && file->f_op != &io_uring_fops)
		return true;

	return false;
}
static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      bool force_nonblock, struct io_submit_state *state)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct kiocb *kiocb = &req->rw;
	unsigned ioprio, flags;
	int fd, ret;

	/* For -EAGAIN retry, everything is already prepped */
	if (kiocb->ki_filp)
		return 0;

	flags = READ_ONCE(sqe->flags);
	fd = READ_ONCE(sqe->fd);

	if (flags & IOSQE_FIXED_FILE) {
		if (unlikely(!ctx->user_files ||
		    (unsigned) fd >= ctx->nr_user_files))
			return -EBADF;
		kiocb->ki_filp = ctx->user_files[fd];
		req->flags |= REQ_F_FIXED_FILE;
	} else {
		kiocb->ki_filp = io_file_get(state, fd);
		if (unlikely(!kiocb->ki_filp))
			return -EBADF;
		if (force_nonblock && !io_file_supports_async(kiocb->ki_filp))
			force_nonblock = false;
	}
	kiocb->ki_pos = READ_ONCE(sqe->off);
	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			goto out_fput;

		kiocb->ki_ioprio = ioprio;
	} else
		kiocb->ki_ioprio = get_current_ioprio();

	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
	if (unlikely(ret))
		goto out_fput;
	if (force_nonblock) {
		kiocb->ki_flags |= IOCB_NOWAIT;
		req->flags |= REQ_F_FORCE_NONBLOCK;
	}
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		ret = -EOPNOTSUPP;
		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
		    !kiocb->ki_filp->f_op->iopoll)
			goto out_fput;

		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI) {
			ret = -EINVAL;
			goto out_fput;
		}
		kiocb->ki_complete = io_complete_rw;
	}
	return 0;
out_fput:
	if (!(flags & IOSQE_FIXED_FILE)) {
		/*
		 * in case of error, we didn't use this file reference. drop it.
		 */
		io_file_put(state, kiocb->ki_filp);
		kiocb->ki_filp = NULL;
	}
	return ret;
}
static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		/* fall through */
	default:
		kiocb->ki_complete(kiocb, ret, 0);
	}
}
static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
			   const struct io_uring_sqe *sqe,
			   struct iov_iter *iter)
{
	size_t len = READ_ONCE(sqe->len);
	struct io_mapped_ubuf *imu;
	unsigned index, buf_index;
	size_t offset;
	u64 buf_addr;

	/* attempt to use fixed buffers without having provided iovecs */
	if (unlikely(!ctx->user_bufs))
		return -EFAULT;

	buf_index = READ_ONCE(sqe->buf_index);
	if (unlikely(buf_index >= ctx->nr_user_bufs))
		return -EFAULT;

	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
	imu = &ctx->user_bufs[index];
	buf_addr = READ_ONCE(sqe->addr);

	if (buf_addr + len < buf_addr)
		return -EFAULT;
	/* not inside the mapped region */
	if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
		return -EFAULT;

	/*
	 * May not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
	if (offset)
		iov_iter_advance(iter, offset);

	return 0;
}
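/*
 * Worked example of the offset math above (illustrative numbers): with a
 * registered buffer at ubuf == 0x10000 of len 16384, an sqe with
 * addr == 0x10800 and len == 4096 passes both bounds checks, yields
 * offset == 0x800, sizes the iter to offset + len == 0x1800, and then
 * advances past the leading 0x800 bytes so exactly 4096 bytes remain.
 */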
static int io_import_iovec(struct io_ring_ctx *ctx, int rw,
			   const struct sqe_submit *s, struct iovec **iovec,
			   struct iov_iter *iter)
{
	const struct io_uring_sqe *sqe = s->sqe;
	void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	size_t sqe_len = READ_ONCE(sqe->len);
	u8 opcode;

	/*
	 * We're reading ->opcode for the second time, but the first read
	 * doesn't care whether it's _FIXED or not, so it doesn't matter
	 * whether ->opcode changes concurrently. The first read does care
	 * about whether it is a READ or a WRITE, so we don't trust this read
	 * for that purpose and instead let the caller pass in the read/write
	 * flag.
	 */
	opcode = READ_ONCE(sqe->opcode);
	if (opcode == IORING_OP_READ_FIXED ||
	    opcode == IORING_OP_WRITE_FIXED) {
		ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
		*iovec = NULL;
		return ret;
	}

#ifdef CONFIG_COMPAT
	if (ctx->compat)
		return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
						iovec, iter);
#endif

	return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
}
static ssize_t io_read(struct io_kiocb *req, const struct sqe_submit *s,
		       bool force_nonblock, struct io_submit_state *state)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw;
	struct iov_iter iter;
	struct file *file;
	ssize_t ret;

	ret = io_prep_rw(req, s->sqe, force_nonblock, state);
	if (ret)
		return ret;
	file = kiocb->ki_filp;

	ret = -EBADF;
	if (unlikely(!(file->f_mode & FMODE_READ)))
		goto out_fput;
	ret = -EINVAL;
	if (unlikely(!file->f_op->read_iter))
		goto out_fput;

	ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
	if (ret)
		goto out_fput;

	ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_iter_count(&iter));
	if (!ret) {
		ssize_t ret2;

		/* Catch -EAGAIN return for forced non-blocking submission */
		ret2 = call_read_iter(file, kiocb, &iter);
		if (!force_nonblock || ret2 != -EAGAIN)
			io_rw_done(kiocb, ret2);
		else
			ret = -EAGAIN;
	}
	kfree(iovec);
out_fput:
	/* Hold on to the file for -EAGAIN */
	if (unlikely(ret && ret != -EAGAIN))
		io_fput(req);
	return ret;
}
static ssize_t io_write(struct io_kiocb *req, const struct sqe_submit *s,
			bool force_nonblock, struct io_submit_state *state)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw;
	struct iov_iter iter;
	struct file *file;
	ssize_t ret;

	ret = io_prep_rw(req, s->sqe, force_nonblock, state);
	if (ret)
		return ret;
	/* Hold on to the file for -EAGAIN */
	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) {
		ret = -EAGAIN;
		goto out_fput;
	}

	file = kiocb->ki_filp;
	ret = -EBADF;
	if (unlikely(!(file->f_mode & FMODE_WRITE)))
		goto out_fput;
	ret = -EINVAL;
	if (unlikely(!file->f_op->write_iter))
		goto out_fput;

	ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
	if (ret)
		goto out_fput;

	ret = rw_verify_area(WRITE, file, &kiocb->ki_pos,
				iov_iter_count(&iter));
	if (!ret) {
		/*
		 * Open-code file_start_write here to grab freeze protection,
		 * which will be released by another thread in
		 * io_complete_rw(). Fool lockdep by telling it the lock got
		 * released so that it doesn't complain about the held lock when
		 * we return to userspace.
		 */
		if (S_ISREG(file_inode(file)->i_mode)) {
			__sb_start_write(file_inode(file)->i_sb,
						SB_FREEZE_WRITE, true);
			__sb_writers_release(file_inode(file)->i_sb,
						SB_FREEZE_WRITE);
		}
		kiocb->ki_flags |= IOCB_WRITE;
		io_rw_done(kiocb, call_write_iter(file, kiocb, &iter));
	}
	kfree(iovec);
out_fput:
	if (unlikely(ret))
		io_fput(req);
	return ret;
}
/*
 * IORING_OP_NOP just posts a completion event, nothing else.
 */
static int io_nop(struct io_kiocb *req, u64 user_data)
{
	struct io_ring_ctx *ctx = req->ctx;
	long err = 0;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	/*
	 * Twilight zone - it's possible that someone issued an opcode that
	 * has a file attached, then got -EAGAIN on submission, and changed
	 * the sqe before we retried it from async context. Avoid dropping
	 * a file reference for this malicious case, and flag the error.
	 */
	if (req->rw.ki_filp) {
		err = -EBADF;
		io_fput(req);
	}
	io_cqring_add_event(ctx, user_data, err, 0);
	io_free_req(req);
	return 0;
}
static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned flags;
	int fd;

	/* Prep already done */
	if (req->rw.ki_filp)
		return 0;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	fd = READ_ONCE(sqe->fd);
	flags = READ_ONCE(sqe->flags);

	if (flags & IOSQE_FIXED_FILE) {
		if (unlikely(!ctx->user_files || fd >= ctx->nr_user_files))
			return -EBADF;
		req->rw.ki_filp = ctx->user_files[fd];
		req->flags |= REQ_F_FIXED_FILE;
	} else {
		req->rw.ki_filp = fget(fd);
		if (unlikely(!req->rw.ki_filp))
			return -EBADF;
	}

	return 0;
}
static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		    bool force_nonblock)
{
	loff_t sqe_off = READ_ONCE(sqe->off);
	loff_t sqe_len = READ_ONCE(sqe->len);
	loff_t end = sqe_off + sqe_len;
	unsigned fsync_flags;
	int ret;

	fsync_flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	ret = io_prep_fsync(req, sqe);
	if (ret)
		return ret;

	/* fsync always requires a blocking context */
	if (force_nonblock)
		return -EAGAIN;

	ret = vfs_fsync_range(req->rw.ki_filp, sqe_off,
				end > 0 ? end : LLONG_MAX,
				fsync_flags & IORING_FSYNC_DATASYNC);

	io_fput(req);
	io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
	io_free_req(req);
	return 0;
}
static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
			   const struct sqe_submit *s, bool force_nonblock,
			   struct io_submit_state *state)
{
	ssize_t ret;
	int opcode;

	if (unlikely(s->index >= ctx->sq_entries))
		return -EINVAL;
	req->user_data = READ_ONCE(s->sqe->user_data);

	opcode = READ_ONCE(s->sqe->opcode);
	switch (opcode) {
	case IORING_OP_NOP:
		ret = io_nop(req, req->user_data);
		break;
	case IORING_OP_READV:
		if (unlikely(s->sqe->buf_index))
			return -EINVAL;
		ret = io_read(req, s, force_nonblock, state);
		break;
	case IORING_OP_WRITEV:
		if (unlikely(s->sqe->buf_index))
			return -EINVAL;
		ret = io_write(req, s, force_nonblock, state);
		break;
	case IORING_OP_READ_FIXED:
		ret = io_read(req, s, force_nonblock, state);
		break;
	case IORING_OP_WRITE_FIXED:
		ret = io_write(req, s, force_nonblock, state);
		break;
	case IORING_OP_FSYNC:
		ret = io_fsync(req, s->sqe, force_nonblock);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (req->error == -EAGAIN)
			return -EAGAIN;

		/* workqueue context doesn't hold uring_lock, grab it now */
		if (s->needs_lock)
			mutex_lock(&ctx->uring_lock);
		io_iopoll_req_issued(req);
		if (s->needs_lock)
			mutex_unlock(&ctx->uring_lock);
	}

	return 0;
}
static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
{
	u8 opcode = READ_ONCE(sqe->opcode);

	return !(opcode == IORING_OP_READ_FIXED ||
		 opcode == IORING_OP_WRITE_FIXED);
}
static void io_sq_wq_submit_work(struct work_struct *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct sqe_submit *s = &req->submit;
	const struct io_uring_sqe *sqe = s->sqe;
	struct io_ring_ctx *ctx = req->ctx;
	mm_segment_t old_fs;
	bool needs_user;
	int ret;

	/* Ensure we clear previously set forced non-block flag */
	req->flags &= ~REQ_F_FORCE_NONBLOCK;
	req->rw.ki_flags &= ~IOCB_NOWAIT;

	s->needs_lock = true;
	s->has_user = false;

	/*
	 * If we're doing IO to fixed buffers, we don't need to get/set
	 * user access.
	 */
	needs_user = io_sqe_needs_user(s->sqe);
	if (needs_user) {
		if (!mmget_not_zero(ctx->sqo_mm)) {
			ret = -EFAULT;
			goto err;
		}
		use_mm(ctx->sqo_mm);
		old_fs = get_fs();
		set_fs(USER_DS);
		s->has_user = true;
	}

	do {
		ret = __io_submit_sqe(ctx, req, s, false, NULL);
		/*
		 * We can get EAGAIN for polled IO even though we're forcing
		 * a sync submission from here, since we can't wait for
		 * request slots on the block side.
		 */
		if (ret != -EAGAIN)
			break;
		cond_resched();
	} while (1);

	if (needs_user) {
		set_fs(old_fs);
		unuse_mm(ctx->sqo_mm);
		mmput(ctx->sqo_mm);
	}
err:
	if (ret) {
		io_cqring_add_event(ctx, sqe->user_data, ret, 0);
		io_free_req(req);
	}

	/* async context always uses a copy of the sqe */
	kfree(sqe);
}
static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
			 struct io_submit_state *state)
{
	struct io_kiocb *req;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(s->sqe->flags & ~IOSQE_FIXED_FILE))
		return -EINVAL;

	req = io_get_req(ctx, state);
	if (unlikely(!req))
		return -EAGAIN;

	req->rw.ki_filp = NULL;

	ret = __io_submit_sqe(ctx, req, s, true, state);
	if (ret == -EAGAIN) {
		struct io_uring_sqe *sqe_copy;

		sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
		if (sqe_copy) {
			memcpy(sqe_copy, s->sqe, sizeof(*sqe_copy));
			s->sqe = sqe_copy;

			memcpy(&req->submit, s, sizeof(*s));
			INIT_WORK(&req->work, io_sq_wq_submit_work);
			queue_work(ctx->sqo_wq, &req->work);
			ret = 0;
		}
	}
	if (ret)
		io_free_req(req);

	return ret;
}
/*
 * Batched submission is done, ensure local IO is flushed out.
 */
static void io_submit_state_end(struct io_submit_state *state)
{
	blk_finish_plug(&state->plug);
	io_file_put(state, NULL);
	if (state->free_reqs)
		kmem_cache_free_bulk(req_cachep, state->free_reqs,
					&state->reqs[state->cur_req]);
}

/*
 * Start submission side cache.
 */
static void io_submit_state_start(struct io_submit_state *state,
				  struct io_ring_ctx *ctx, unsigned max_ios)
{
	blk_start_plug(&state->plug);
	state->free_reqs = 0;
	state->file = NULL;
	state->ios_left = max_ios;
}
static void io_commit_sqring(struct io_ring_ctx *ctx)
{
	struct io_sq_ring *ring = ctx->sq_ring;

	if (ctx->cached_sq_head != READ_ONCE(ring->r.head)) {
		/*
		 * Ensure any loads from the SQEs are done at this point,
		 * since once we write the new head, the application could
		 * write new data to them.
		 */
		smp_store_release(&ring->r.head, ctx->cached_sq_head);

		/*
		 * write side barrier of head update, app has read side. See
		 * comment at the top of this file.
		 */
		smp_wmb();
	}
}
/*
 * Undo last io_get_sqring()
 */
static void io_drop_sqring(struct io_ring_ctx *ctx)
{
	ctx->cached_sq_head--;
}
/*
 * Fetch an sqe, if one is available. Note that s->sqe will point to memory
 * that is mapped by userspace. This means that care needs to be taken to
 * ensure that reads are stable, as we cannot rely on userspace always
 * being a good citizen. If members of the sqe are validated and then later
 * used, it's important that those reads are done through READ_ONCE() to
 * prevent a re-load down the line.
 */
static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
{
	struct io_sq_ring *ring = ctx->sq_ring;
	unsigned head;

	/*
	 * The cached sq head (or cq tail) serves two purposes:
	 *
	 * 1) allows us to batch the cost of updating the user visible
	 *    head updates.
	 * 2) allows the kernel side to track the head on its own, even
	 *    though the application is the one updating it.
	 */
	head = ctx->cached_sq_head;
	/* See comment at the top of this file */
	smp_rmb();
	if (head == READ_ONCE(ring->r.tail))
		return false;

	head = READ_ONCE(ring->array[head & ctx->sq_mask]);
	if (head < ctx->sq_entries) {
		s->index = head;
		s->sqe = &ctx->sq_sqes[head];
		ctx->cached_sq_head++;
		return true;
	}

	/* drop invalid entries */
	ctx->cached_sq_head++;
	ring->dropped++;
	/* See comment at the top of this file */
	smp_wmb();
	return false;
}
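/*
 * Illustrative userspace submission side matching the above (a sketch, not
 * part of this file; the pointer names are assumptions for the mmap'ed ring
 * fields, and liburing wraps all of this):
 *
 *	unsigned tail = *sq_tail;
 *	unsigned index = tail & *sq_mask;
 *	struct io_uring_sqe *sqe = &sqes[index];
 *
 *	... fill in *sqe ...
 *	sq_array[index] = index;
 *	smp_wmb();		// order sqe/array stores before the tail store
 *	*sq_tail = tail + 1;
 *	smp_wmb();		// pairs with the kernel smp_rmb() issued
 *				// before reading the tail on submission
 */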
static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
{
	struct io_submit_state state, *statep = NULL;
	int i, ret = 0, submit = 0;

	if (to_submit > IO_PLUG_THRESHOLD) {
		io_submit_state_start(&state, ctx, to_submit);
		statep = &state;
	}

	for (i = 0; i < to_submit; i++) {
		struct sqe_submit s;

		if (!io_get_sqring(ctx, &s))
			break;

		s.has_user = true;
		s.needs_lock = false;

		ret = io_submit_sqe(ctx, &s, statep);
		if (ret) {
			io_drop_sqring(ctx);
			break;
		}

		submit++;
	}
	io_commit_sqring(ctx);

	if (statep)
		io_submit_state_end(statep);

	return submit ? submit : ret;
}
static unsigned io_cqring_events(struct io_cq_ring *ring)
{
	return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
}
/*
 * Wait until events become available, if we don't already have some. The
 * application must reap them itself, as they reside on the shared cq ring.
 */
static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
			  const sigset_t __user *sig, size_t sigsz)
{
	struct io_cq_ring *ring = ctx->cq_ring;
	sigset_t ksigmask, sigsaved;
	DEFINE_WAIT(wait);
	int ret;

	/* See comment at the top of this file */
	smp_rmb();
	if (io_cqring_events(ring) >= min_events)
		return 0;

	if (sig) {
		ret = set_user_sigmask(sig, &ksigmask, &sigsaved, sigsz);
		if (ret)
			return ret;
	}

	do {
		prepare_to_wait(&ctx->wait, &wait, TASK_INTERRUPTIBLE);

		ret = 0;
		/* See comment at the top of this file */
		smp_rmb();
		if (io_cqring_events(ring) >= min_events)
			break;

		schedule();

		ret = -EINTR;
		if (signal_pending(current))
			break;
	} while (1);

	finish_wait(&ctx->wait, &wait);

	if (sig)
		restore_user_sigmask(sig, &sigsaved);

	return READ_ONCE(ring->r.head) == READ_ONCE(ring->r.tail) ? ret : 0;
}
static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		struct sock *sock = ctx->ring_sock->sk;
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
			kfree_skb(skb);
	}
#else
	int i;

	for (i = 0; i < ctx->nr_user_files; i++)
		fput(ctx->user_files[i]);
#endif
}

static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	if (!ctx->user_files)
		return -ENXIO;

	__io_sqe_files_unregister(ctx);
	kfree(ctx->user_files);
	ctx->user_files = NULL;
	ctx->nr_user_files = 0;
	return 0;
}
static void io_finish_async(struct io_ring_ctx *ctx)
{
	if (ctx->sqo_wq) {
		destroy_workqueue(ctx->sqo_wq);
		ctx->sqo_wq = NULL;
	}
}

#if defined(CONFIG_UNIX)
static void io_destruct_skb(struct sk_buff *skb)
{
	struct io_ring_ctx *ctx = skb->sk->sk_user_data;

	io_finish_async(ctx);
	unix_destruct_scm(skb);
}
/*
 * Ensure the UNIX gc is aware of our file set, so we are certain that
 * the io_uring can be safely unregistered on process exit, even if we have
 * loops in the file referencing.
 */
static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
{
	struct sock *sk = ctx->ring_sock->sk;
	struct scm_fp_list *fpl;
	struct sk_buff *skb;
	int i;

	if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
		unsigned long inflight = ctx->user->unix_inflight + nr;

		if (inflight > task_rlimit(current, RLIMIT_NOFILE))
			return -EMFILE;
	}

	fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
	if (!fpl)
		return -ENOMEM;

	skb = alloc_skb(0, GFP_KERNEL);
	if (!skb) {
		kfree(fpl);
		return -ENOMEM;
	}

	skb->sk = sk;
	skb->destructor = io_destruct_skb;

	fpl->user = get_uid(ctx->user);
	for (i = 0; i < nr; i++) {
		fpl->fp[i] = get_file(ctx->user_files[i + offset]);
		unix_inflight(fpl->user, fpl->fp[i]);
	}

	fpl->max = fpl->count = nr;
	UNIXCB(skb).fp = fpl;
	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
	skb_queue_head(&sk->sk_receive_queue, skb);

	for (i = 0; i < nr; i++)
		fput(fpl->fp[i]);

	return 0;
}
/*
 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
 * causes regular reference counting to break down. We rely on the UNIX
 * garbage collection to take care of this problem for us.
 */
static int io_sqe_files_scm(struct io_ring_ctx *ctx)
{
	unsigned left, total;
	int ret = 0;

	total = 0;
	left = ctx->nr_user_files;
	while (left) {
		unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);

		ret = __io_sqe_files_scm(ctx, this_files, total);
		if (ret)
			break;
		left -= this_files;
		total += this_files;
	}

	if (!ret)
		return 0;

	while (total < ctx->nr_user_files) {
		fput(ctx->user_files[total]);
		total++;
	}

	return ret;
}
#else
static int io_sqe_files_scm(struct io_ring_ctx *ctx)
{
	return 0;
}
#endif
static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
				 unsigned nr_args)
{
	__s32 __user *fds = (__s32 __user *) arg;
	int fd, ret = 0;
	unsigned i;

	if (ctx->user_files)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;

	ctx->user_files = kcalloc(nr_args, sizeof(struct file *), GFP_KERNEL);
	if (!ctx->user_files)
		return -ENOMEM;

	for (i = 0; i < nr_args; i++) {
		ret = -EFAULT;
		if (copy_from_user(&fd, &fds[i], sizeof(fd)))
			break;

		ctx->user_files[i] = fget(fd);

		ret = -EBADF;
		if (!ctx->user_files[i])
			break;
		/*
		 * Don't allow io_uring instances to be registered. If UNIX
		 * isn't enabled, then this causes a reference cycle and this
		 * instance can never get freed. If UNIX is enabled we'll
		 * handle it just fine, but there's still no point in allowing
		 * a ring fd as it doesn't support regular read/write anyway.
		 */
		if (ctx->user_files[i]->f_op == &io_uring_fops) {
			fput(ctx->user_files[i]);
			break;
		}
		ctx->nr_user_files++;
		ret = 0;
	}

	if (ret) {
		for (i = 0; i < ctx->nr_user_files; i++)
			fput(ctx->user_files[i]);

		kfree(ctx->user_files);
		ctx->user_files = NULL;
		ctx->nr_user_files = 0;
		return ret;
	}

	ret = io_sqe_files_scm(ctx);
	if (ret)
		io_sqe_files_unregister(ctx);

	return ret;
}
static int io_sq_offload_start(struct io_ring_ctx *ctx)
{
	int ret;

	mmgrab(current->mm);
	ctx->sqo_mm = current->mm;

	/* Do QD, or 2 * CPUS, whatever is smallest */
	ctx->sqo_wq = alloc_workqueue("io_ring-wq", WQ_UNBOUND | WQ_FREEZABLE,
			min(ctx->sq_entries - 1, 2 * num_online_cpus()));
	if (!ctx->sqo_wq) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	mmdrop(ctx->sqo_mm);
	ctx->sqo_mm = NULL;
	return ret;
}
static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}

static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	do {
		cur_pages = atomic_long_read(&user->locked_vm);
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
					new_pages) != cur_pages);

	return 0;
}
static void io_mem_free(void *ptr)
{
	struct page *page = virt_to_head_page(ptr);

	if (put_page_testzero(page))
		free_compound_page(page);
}

static void *io_mem_alloc(size_t size)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
				__GFP_NORETRY;

	return (void *) __get_free_pages(gfp_flags, get_order(size));
}
static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
{
	struct io_sq_ring *sq_ring;
	struct io_cq_ring *cq_ring;
	size_t bytes;

	bytes = struct_size(sq_ring, array, sq_entries);
	bytes += array_size(sizeof(struct io_uring_sqe), sq_entries);
	bytes += struct_size(cq_ring, cqes, cq_entries);

	return (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
}
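/*
 * Rough worked example of the sum above (illustrative only; exact totals
 * depend on struct layout and cacheline size): with sq_entries == 128 and
 * cq_entries == 256, the sqe array alone is 128 * 64 == 8192 bytes, the cq
 * ring roughly 256 * 16 bytes of cqes plus its header, and the sq ring a
 * header plus 128 * 4 bytes of array indices - about 13KB in total, which
 * rounds up to 4 pages of accounted memory.
 */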
static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
{
	int i, j;

	if (!ctx->user_bufs)
		return -ENXIO;

	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++)
			put_page(imu->bvec[j].bv_page);

		if (ctx->account_mem)
			io_unaccount_mem(ctx->user, imu->nr_bvecs);
		kfree(imu->bvec);
		imu->nr_bvecs = 0;
	}

	kfree(ctx->user_bufs);
	ctx->user_bufs = NULL;
	ctx->nr_user_bufs = 0;
	return 0;
}
static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = (void __user *) (unsigned long) ciov.iov_base;
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;

	return 0;
}
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
				  unsigned nr_args)
{
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	int i, j, got_pages = 0;
	int ret = -EINVAL;

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > UIO_MAXIOV)
		return -EINVAL;

	ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
					GFP_KERNEL);
	if (!ctx->user_bufs)
		return -ENOMEM;

	for (i = 0; i < nr_args; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
		unsigned long off, start, end, ubuf;
		int pret, nr_pages;
		struct iovec iov;
		size_t size;

		ret = io_copy_iov(ctx, &iov, arg, i);
		if (ret)
			goto err;

		/*
		 * Don't impose further limits on the size and buffer
		 * constraints here, we'll -EINVAL later when IO is
		 * submitted if they are wrong.
		 */
		ret = -EFAULT;
		if (!iov.iov_base || !iov.iov_len)
			goto err;

		/* arbitrary limit, but we need something */
		if (iov.iov_len > SZ_1G)
			goto err;

		ubuf = (unsigned long) iov.iov_base;
		end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		start = ubuf >> PAGE_SHIFT;
		nr_pages = end - start;

		if (ctx->account_mem) {
			ret = io_account_mem(ctx->user, nr_pages);
			if (ret)
				goto err;
		}

		ret = 0;
		if (!pages || nr_pages > got_pages) {
			kfree(vmas);
			kfree(pages);
			pages = kmalloc_array(nr_pages, sizeof(struct page *),
						GFP_KERNEL);
			vmas = kmalloc_array(nr_pages,
					sizeof(struct vm_area_struct *),
					GFP_KERNEL);
			if (!pages || !vmas) {
				ret = -ENOMEM;
				if (ctx->account_mem)
					io_unaccount_mem(ctx->user, nr_pages);
				goto err;
			}
			got_pages = nr_pages;
		}

		imu->bvec = kmalloc_array(nr_pages, sizeof(struct bio_vec),
						GFP_KERNEL);
		ret = -ENOMEM;
		if (!imu->bvec) {
			if (ctx->account_mem)
				io_unaccount_mem(ctx->user, nr_pages);
			goto err;
		}

		ret = 0;
		down_read(&current->mm->mmap_sem);
		pret = get_user_pages_longterm(ubuf, nr_pages, FOLL_WRITE,
						pages, vmas);
		if (pret == nr_pages) {
			/* don't support file backed memory */
			for (j = 0; j < nr_pages; j++) {
				struct vm_area_struct *vma = vmas[j];

				if (vma->vm_file &&
				    !is_file_hugepages(vma->vm_file)) {
					ret = -EOPNOTSUPP;
					break;
				}
			}
		} else {
			ret = pret < 0 ? pret : -EFAULT;
		}
		up_read(&current->mm->mmap_sem);
		if (ret) {
			/*
			 * if we did partial map, or found file backed vmas,
			 * release any pages we did get
			 */
			for (j = 0; j < pret; j++)
				put_page(pages[j]);
			if (ctx->account_mem)
				io_unaccount_mem(ctx->user, nr_pages);
			goto err;
		}

		off = ubuf & ~PAGE_MASK;
		size = iov.iov_len;
		for (j = 0; j < nr_pages; j++) {
			size_t vec_len;

			vec_len = min_t(size_t, size, PAGE_SIZE - off);
			imu->bvec[j].bv_page = pages[j];
			imu->bvec[j].bv_len = vec_len;
			imu->bvec[j].bv_offset = off;
			off = 0;
			size -= vec_len;
		}
		/* store original address for later verification */
		imu->ubuf = ubuf;
		imu->len = iov.iov_len;
		imu->nr_bvecs = nr_pages;

		ctx->nr_user_bufs++;
	}
	kfree(pages);
	kfree(vmas);
	return 0;
err:
	kfree(pages);
	kfree(vmas);
	io_sqe_buffer_unregister(ctx);
	return ret;
}
static void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
	io_finish_async(ctx);
	if (ctx->sqo_mm)
		mmdrop(ctx->sqo_mm);

	io_iopoll_reap_events(ctx);
	io_sqe_buffer_unregister(ctx);
	io_sqe_files_unregister(ctx);

#if defined(CONFIG_UNIX)
	if (ctx->ring_sock)
		sock_release(ctx->ring_sock);
#endif

	io_mem_free(ctx->sq_ring);
	io_mem_free(ctx->sq_sqes);
	io_mem_free(ctx->cq_ring);

	percpu_ref_exit(&ctx->refs);
	if (ctx->account_mem)
		io_unaccount_mem(ctx->user,
				ring_pages(ctx->sq_entries, ctx->cq_entries));
	free_uid(ctx->user);
	kfree(ctx);
}
static __poll_t io_uring_poll(struct file *file, poll_table *wait)
{
	struct io_ring_ctx *ctx = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &ctx->cq_wait, wait);
	/* See comment at the top of this file */
	smp_rmb();
	if (READ_ONCE(ctx->sq_ring->r.tail) + 1 != ctx->cached_sq_head)
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (READ_ONCE(ctx->cq_ring->r.head) != ctx->cached_cq_tail)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

static int io_uring_fasync(int fd, struct file *file, int on)
{
	struct io_ring_ctx *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->cq_fasync);
}
static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
	mutex_lock(&ctx->uring_lock);
	percpu_ref_kill(&ctx->refs);
	mutex_unlock(&ctx->uring_lock);

	io_iopoll_reap_events(ctx);
	wait_for_completion(&ctx->ctx_done);
	io_ring_ctx_free(ctx);
}

static int io_uring_release(struct inode *inode, struct file *file)
{
	struct io_ring_ctx *ctx = file->private_data;

	file->private_data = NULL;
	io_ring_ctx_wait_and_kill(ctx);
	return 0;
}
static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
	unsigned long sz = vma->vm_end - vma->vm_start;
	struct io_ring_ctx *ctx = file->private_data;
	unsigned long pfn;
	struct page *page;
	void *ptr;

	switch (offset) {
	case IORING_OFF_SQ_RING:
		ptr = ctx->sq_ring;
		break;
	case IORING_OFF_SQES:
		ptr = ctx->sq_sqes;
		break;
	case IORING_OFF_CQ_RING:
		ptr = ctx->cq_ring;
		break;
	default:
		return -EINVAL;
	}

	page = virt_to_head_page(ptr);
	if (sz > (PAGE_SIZE << compound_order(page)))
		return -EINVAL;

	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}
SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
		u32, min_complete, u32, flags, const sigset_t __user *, sig,
		size_t, sigsz)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	int submitted = 0;
	struct fd f;

	if (flags & ~IORING_ENTER_GETEVENTS)
		return -EINVAL;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ret = -ENXIO;
	ctx = f.file->private_data;
	if (!percpu_ref_tryget(&ctx->refs))
		goto out_fput;

	ret = 0;
	if (to_submit) {
		to_submit = min(to_submit, ctx->sq_entries);

		mutex_lock(&ctx->uring_lock);
		submitted = io_ring_submit(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		if (submitted < 0)
			goto out_ctx;
	}
	if (flags & IORING_ENTER_GETEVENTS) {
		unsigned nr_events = 0;

		min_complete = min(min_complete, ctx->cq_entries);

		/*
		 * The application could have included the 'to_submit' count
		 * in how many events it wanted to wait for. If we failed to
		 * submit the desired count, we may need to adjust the number
		 * of events to poll/wait for.
		 */
		if (submitted < to_submit)
			min_complete = min_t(unsigned, submitted, min_complete);

		if (ctx->flags & IORING_SETUP_IOPOLL) {
			mutex_lock(&ctx->uring_lock);
			ret = io_iopoll_check(ctx, &nr_events, min_complete);
			mutex_unlock(&ctx->uring_lock);
		} else {
			ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
		}
	}

out_ctx:
	io_ring_drop_ctx_refs(ctx, 1);
out_fput:
	fdput(f);
	return submitted ? submitted : ret;
}
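/*
 * Illustrative call from userspace (a sketch, not part of this file; the raw
 * syscall is shown since at this point there is no libc wrapper):
 *
 *	// submit everything queued and wait for at least one completion
 *	ret = syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
 *		      IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * A positive return is the number of sqes consumed; completions are then
 * reaped by the application directly from the CQ ring.
 */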
static const struct file_operations io_uring_fops = {
	.release	= io_uring_release,
	.mmap		= io_uring_mmap,
	.poll		= io_uring_poll,
	.fasync		= io_uring_fasync,
};
static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
				  struct io_uring_params *p)
{
	struct io_sq_ring *sq_ring;
	struct io_cq_ring *cq_ring;
	size_t size;

	sq_ring = io_mem_alloc(struct_size(sq_ring, array, p->sq_entries));
	if (!sq_ring)
		return -ENOMEM;

	ctx->sq_ring = sq_ring;
	sq_ring->ring_mask = p->sq_entries - 1;
	sq_ring->ring_entries = p->sq_entries;
	ctx->sq_mask = sq_ring->ring_mask;
	ctx->sq_entries = sq_ring->ring_entries;

	size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	ctx->sq_sqes = io_mem_alloc(size);
	if (!ctx->sq_sqes) {
		io_mem_free(ctx->sq_ring);
		return -ENOMEM;
	}

	cq_ring = io_mem_alloc(struct_size(cq_ring, cqes, p->cq_entries));
	if (!cq_ring) {
		io_mem_free(ctx->sq_ring);
		io_mem_free(ctx->sq_sqes);
		return -ENOMEM;
	}

	ctx->cq_ring = cq_ring;
	cq_ring->ring_mask = p->cq_entries - 1;
	cq_ring->ring_entries = p->cq_entries;
	ctx->cq_mask = cq_ring->ring_mask;
	ctx->cq_entries = cq_ring->ring_entries;
	return 0;
}
/*
 * Allocate an anonymous fd, this is what constitutes the application
 * visible backing of an io_uring instance. The application mmaps this
 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
 * we have to tie this fd to a socket for file garbage collection purposes.
 */
static int io_uring_get_fd(struct io_ring_ctx *ctx)
{
	struct file *file;
	int ret;

#if defined(CONFIG_UNIX)
	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
				&ctx->ring_sock);
	if (ret)
		return ret;
#endif

	ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (ret < 0)
		goto err;

	file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
					O_RDWR | O_CLOEXEC);
	if (IS_ERR(file)) {
		put_unused_fd(ret);
		ret = PTR_ERR(file);
		goto err;
	}

#if defined(CONFIG_UNIX)
	ctx->ring_sock->file = file;
	ctx->ring_sock->sk->sk_user_data = ctx;
#endif
	fd_install(ret, file);
	return ret;
err:
#if defined(CONFIG_UNIX)
	sock_release(ctx->ring_sock);
	ctx->ring_sock = NULL;
#endif
	return ret;
}
static int io_uring_create(unsigned entries, struct io_uring_params *p)
{
	struct user_struct *user = NULL;
	struct io_ring_ctx *ctx;
	bool account_mem;
	int ret;

	if (!entries || entries > IORING_MAX_ENTRIES)
		return -EINVAL;

	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	p->cq_entries = 2 * p->sq_entries;

	user = get_uid(current_user());
	account_mem = !capable(CAP_IPC_LOCK);

	if (account_mem) {
		ret = io_account_mem(user,
				ring_pages(p->sq_entries, p->cq_entries));
		if (ret) {
			free_uid(user);
			return ret;
		}
	}

	ctx = io_ring_ctx_alloc(p);
	if (!ctx) {
		if (account_mem)
			io_unaccount_mem(user, ring_pages(p->sq_entries,
								p->cq_entries));
		free_uid(user);
		return -ENOMEM;
	}
	ctx->compat = in_compat_syscall();
	ctx->account_mem = account_mem;
	ctx->user = user;

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_start(ctx);
	if (ret)
		goto err;

	ret = io_uring_get_fd(ctx);
	if (ret < 0)
		goto err;

	memset(&p->sq_off, 0, sizeof(p->sq_off));
	p->sq_off.head = offsetof(struct io_sq_ring, r.head);
	p->sq_off.tail = offsetof(struct io_sq_ring, r.tail);
	p->sq_off.ring_mask = offsetof(struct io_sq_ring, ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_sq_ring, ring_entries);
	p->sq_off.flags = offsetof(struct io_sq_ring, flags);
	p->sq_off.dropped = offsetof(struct io_sq_ring, dropped);
	p->sq_off.array = offsetof(struct io_sq_ring, array);

	memset(&p->cq_off, 0, sizeof(p->cq_off));
	p->cq_off.head = offsetof(struct io_cq_ring, r.head);
	p->cq_off.tail = offsetof(struct io_cq_ring, r.tail);
	p->cq_off.ring_mask = offsetof(struct io_cq_ring, ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_cq_ring, ring_entries);
	p->cq_off.overflow = offsetof(struct io_cq_ring, overflow);
	p->cq_off.cqes = offsetof(struct io_cq_ring, cqes);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
}
/*
 * Sets up an aio uring context, and returns the fd. The application asks for
 * a ring size, we return the actual sq/cq ring sizes (among other things) in
 * the params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	long ret;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~IORING_SETUP_IOPOLL)
		return -EINVAL;

	ret = io_uring_create(entries, &p);
	if (ret < 0)
		return ret;

	if (copy_to_user(params, &p, sizeof(p)))
		return -EFAULT;

	return ret;
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	return io_uring_setup(entries, params);
}
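/*
 * Illustrative userspace setup against the offsets filled in above (a
 * sketch, not part of this file; the raw syscall is shown since there is no
 * libc wrapper yet):
 *
 *	struct io_uring_params p = { };
 *	int fd = syscall(__NR_io_uring_setup, 128, &p);
 *
 *	sq = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *		  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
 *		  IORING_OFF_SQ_RING);
 *	sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
 *		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
 *		    IORING_OFF_SQES);
 *	cq = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
 *		  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
 *		  IORING_OFF_CQ_RING);
 */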
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
{
	int ret;

	percpu_ref_kill(&ctx->refs);
	wait_for_completion(&ctx->ctx_done);

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffer_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffer_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	/* bring the ctx back to life */
	reinit_completion(&ctx->ctx_done);
	percpu_ref_reinit(&ctx->refs);
	return ret;
}
SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
out_fput:
	fdput(f);
	return ret;
}
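/*
 * Illustrative buffer registration from userspace (a sketch, not part of
 * this file):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = buf_len };
 *
 *	ret = syscall(__NR_io_uring_register, ring_fd,
 *		      IORING_REGISTER_BUFFERS, &iov, 1);
 *
 * After this, IORING_OP_READ_FIXED/IORING_OP_WRITE_FIXED sqes can reference
 * the registered buffer with sqe->buf_index == 0.
 */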
static int __init io_uring_init(void)
{
	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
	return 0;
}
__initcall(io_uring_init);