+static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
+			struct sqe_submit *s, bool force_nonblock)
+{
+	int ret;
+
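+	/*
+	 * -EIOCBQUEUED from io_req_defer means the request was queued to
+	 * the defer list behind earlier IO; any other non-zero value is an
+	 * error, and the request is completed here with that result.
+	 */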
+	ret = io_req_defer(ctx, req, s->sqe);
+	if (ret) {
+		if (ret != -EIOCBQUEUED) {
+			io_free_req(req);
+			io_cqring_add_event(ctx, s->sqe->user_data, ret);
+		}
+		return 0;
+	}
+
+	return __io_queue_sqe(ctx, req, s, force_nonblock);
+}
+
+static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
+			      struct sqe_submit *s, struct io_kiocb *shadow,
+			      bool force_nonblock)
+{
+	int ret;
+	bool need_submit = false;
+
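+	/* No shadow request: the link carries no drain, queue it normally */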
+	if (!shadow)
+		return io_queue_sqe(ctx, req, s, force_nonblock);
+
+	/*
+	 * Mark the first request in the link as IO_DRAIN, so that every
+	 * following request lands on the defer list: all IO submitted
+	 * before the link must complete before the link itself runs.
+	 */
+	req->flags |= REQ_F_IO_DRAIN;
+	ret = io_req_defer(ctx, req, s->sqe);
+	if (ret) {
+		if (ret != -EIOCBQUEUED) {
+			io_free_req(req);
+			io_cqring_add_event(ctx, s->sqe->user_data, ret);
+			return 0;
+		}
+	} else {
+		/*
+		 * ret == 0 means every request ahead of this link has
+		 * already completed; queue the link head right away.
+		 */
+		need_submit = true;
+	}
+
+	/* Insert the shadow request into defer_list to block later IOs */
+	spin_lock_irq(&ctx->completion_lock);
+	list_add_tail(&shadow->list, &ctx->defer_list);
+	spin_unlock_irq(&ctx->completion_lock);
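+	/*
+	 * The shadow keeps defer_list non-empty, so requests submitted
+	 * after this link continue to be deferred until the link completes
+	 * and the shadow entry is reaped.
+	 */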
+
+	if (need_submit)
+		return __io_queue_sqe(ctx, req, s, force_nonblock);
+
+	return 0;
+}
+