return NULL;
}
-static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
+static inline bool __io_sequence_defer(struct io_kiocb *req)
{
+ struct io_ring_ctx *ctx = req->ctx;
+
return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
+ atomic_read(&ctx->cached_cq_overflow);
}
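/*
 * Editor's note (illustrative, not part of the patch): this hunk shows the
 * shape of the whole change -- every io_kiocb already carries a back-pointer
 * to its ring in req->ctx, so the extra ctx argument is redundant and each
 * converted function derives it locally instead:
 *
 *	before:	__io_sequence_defer(ctx, req);
 *	after:	__io_sequence_defer(req);	// ctx = req->ctx inside
 *
 * The sequence test itself holds a drain request back until everything
 * submitted before it has been accounted for: cached_cq_tail counts CQEs
 * posted, and dropped SQEs plus overflowed CQEs are added back in so that
 * requests which never produce a CQ ring entry still advance the count.
 */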
-static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
+static inline bool io_sequence_defer(struct io_kiocb *req)
{
if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
return false;
- return __io_sequence_defer(ctx, req);
+ return __io_sequence_defer(req);
}
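/*
 * Editor's note (illustrative, not part of the patch): the flag mask gates
 * the sequence check so it only runs while a drain is actually pending:
 *
 *	flags has REQ_F_IO_DRAIN only			-> check sequence
 *	flags has REQ_F_IO_DRAIN | REQ_F_IO_DRAINED	-> already drained
 *	flags without REQ_F_IO_DRAIN			-> never deferred
 */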
static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
{
struct io_kiocb *req;
req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
- if (req && !io_sequence_defer(ctx, req)) {
+ if (req && !io_sequence_defer(req)) {
list_del_init(&req->list);
return req;
}
return NULL;
}
static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
{
struct io_kiocb *req;
req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
- if (req && !__io_sequence_defer(ctx, req)) {
+ if (req && !__io_sequence_defer(req)) {
list_del_init(&req->list);
return req;
}
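/*
 * Editor's note (illustrative, not part of the patch): both helpers use the
 * same peek-then-pop pattern. list_first_entry_or_null() returns the head
 * entry, or NULL on an empty list, without unlinking it; the entry is only
 * removed with list_del_init() once its sequence says it no longer has to
 * wait, otherwise it stays queued for a later flush. A minimal sketch:
 *
 *	req = list_first_entry_or_null(&list, struct io_kiocb, list);
 *	if (req && !still_blocked(req)) {	// still_blocked: hypothetical
 *		list_del_init(&req->list);
 *		return req;
 *	}
 *	return NULL;
 */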
return do_hashed;
}
-static inline void io_queue_async_work(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
+static inline void io_queue_async_work(struct io_kiocb *req)
{
bool do_hashed = io_prep_async_work(req);
+ struct io_ring_ctx *ctx = req->ctx;
trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work,
req->flags);
continue;
}
req->flags |= REQ_F_IO_DRAINED;
- io_queue_async_work(ctx, req);
+ io_queue_async_work(req);
}
}
kmem_cache_free(req_cachep, req);
}
-static bool io_link_cancel_timeout(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
+static bool io_link_cancel_timeout(struct io_kiocb *req)
{
+ struct io_ring_ctx *ctx = req->ctx;
int ret;
ret = hrtimer_try_to_cancel(&req->timeout.timer);
* in this context instead of having to queue up new async work.
*/
if (req->flags & REQ_F_LINK_TIMEOUT) {
- wake_ev = io_link_cancel_timeout(ctx, nxt);
+ wake_ev = io_link_cancel_timeout(nxt);
/* we dropped this link, get next */
nxt = list_first_entry_or_null(&req->link_list,
*nxtptr = nxt;
break;
} else {
- io_queue_async_work(req->ctx, nxt);
+ io_queue_async_work(nxt);
break;
}
}
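/*
 * Editor's note (illustrative, not part of the patch): when a link head
 * completes, a linked timeout guarding the next request must be cancelled
 * before that request is started. hrtimer_try_to_cancel() returns -1 when
 * the timer callback is already running, meaning this path lost the race
 * and must not touch the timeout request; a return of 0 (never armed) or
 * 1 (cancelled) means it is safe to complete it here with -ECANCELED.
 */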
if ((req->flags & REQ_F_LINK_TIMEOUT) &&
link->submit.sqe->opcode == IORING_OP_LINK_TIMEOUT) {
- io_link_cancel_timeout(ctx, link);
+ io_link_cancel_timeout(link);
} else {
io_cqring_fill_event(link, -ECANCELED);
io_double_put_req(link);
if (nxtptr)
*nxtptr = nxt;
else
- io_queue_async_work(nxt->ctx, nxt);
+ io_queue_async_work(nxt);
}
}
WRITE_ONCE(poll->canceled, true);
if (!list_empty(&poll->wait.entry)) {
list_del_init(&poll->wait.entry);
- io_queue_async_work(req->ctx, req);
+ io_queue_async_work(req);
}
spin_unlock(&poll->head->lock);
return 0;
}
-static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
- __poll_t mask)
+static void io_poll_complete(struct io_kiocb *req, __poll_t mask)
{
+ struct io_ring_ctx *ctx = req->ctx;
+
req->poll.done = true;
io_cqring_fill_event(req, mangle_poll(mask));
io_commit_cqring(ctx);
return;
}
list_del_init(&req->list);
- io_poll_complete(ctx, req, mask);
+ io_poll_complete(req, mask);
spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
list_del(&req->list);
- io_poll_complete(ctx, req, mask);
+ io_poll_complete(req, mask);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
io_put_req(req, NULL);
} else {
- io_queue_async_work(ctx, req);
+ io_queue_async_work(req);
}
return 1;
}
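/*
 * Editor's note (illustrative, not part of the patch): the wakeup runs from
 * the waitqueue callback, potentially in IRQ context, so it cannot spin on
 * completion_lock. The trylock-or-punt pattern keeps the common case inline:
 *
 *	if (spin_trylock_irqsave(&lock, flags)) {
 *		complete_here();		// hypothetical helper
 *		spin_unlock_irqrestore(&lock, flags);
 *	} else {
 *		io_queue_async_work(req);	// retry from a worker
 *	}
 */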
if (mask) { /* no async, we'd stolen it */
ipt.error = 0;
- io_poll_complete(ctx, req, mask);
+ io_poll_complete(req, mask);
}
spin_unlock_irq(&ctx->completion_lock);
return 0;
}
-static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req)
+static int io_req_defer(struct io_kiocb *req)
{
const struct io_uring_sqe *sqe = req->submit.sqe;
struct io_uring_sqe *sqe_copy;
+ struct io_ring_ctx *ctx = req->ctx;
- if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
+ if (!io_sequence_defer(req) && list_empty(&ctx->defer_list))
return 0;
sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
if (!sqe_copy)
return -EAGAIN;
spin_lock_irq(&ctx->completion_lock);
- if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) {
+ if (!io_sequence_defer(req) && list_empty(&ctx->defer_list)) {
spin_unlock_irq(&ctx->completion_lock);
kfree(sqe_copy);
return 0;
return -EIOCBQUEUED;
}
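/*
 * Editor's note (illustrative, not part of the patch): io_req_defer() uses
 * a double-checked pattern -- a lock-free test first, then the allocation
 * (GFP_KERNEL may sleep, so it cannot happen under the spinlock), then a
 * re-check under completion_lock in case the sequence was reached in the
 * meantime. Returning -EIOCBQUEUED tells the caller the request has been
 * parked on ctx->defer_list and must not be submitted now.
 */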
-static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct io_kiocb **nxt, bool force_nonblock)
+static int __io_submit_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
{
int ret, opcode;
struct sqe_submit *s = &req->submit;
+ struct io_ring_ctx *ctx = req->ctx;
opcode = READ_ONCE(s->sqe->opcode);
switch (opcode) {
static void io_wq_submit_work(struct io_wq_work **workptr)
{
struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- struct io_ring_ctx *ctx = req->ctx;
struct sqe_submit *s = &req->submit;
const struct io_uring_sqe *sqe = s->sqe;
struct io_kiocb *nxt = NULL;
s->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
s->in_async = true;
do {
- ret = __io_submit_sqe(ctx, req, &nxt, false);
+ ret = __io_submit_sqe(req, &nxt, false);
/*
* We can get EAGAIN for polled IO even though we're
* forcing a sync submission from here, since we can't
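/*
 * Editor's note (illustrative, not part of the patch): the deleted ctx
 * local was only consumed by the __io_submit_sqe() call, so it disappears
 * entirely here. The request itself is recovered from the embedded work
 * item with container_of(), which steps from a member pointer back to the
 * containing structure:
 *
 *	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 */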
return table->files[index & IORING_FILE_TABLE_MASK];
}
-static int io_req_set_file(struct io_ring_ctx *ctx,
- struct io_submit_state *state, struct io_kiocb *req)
+static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
{
struct sqe_submit *s = &req->submit;
+ struct io_ring_ctx *ctx = req->ctx;
unsigned flags;
int fd;
return 0;
}
-static int io_grab_files(struct io_ring_ctx *ctx, struct io_kiocb *req)
+static int io_grab_files(struct io_kiocb *req)
{
int ret = -EBADF;
+ struct io_ring_ctx *ctx = req->ctx;
rcu_read_lock();
spin_lock_irq(&ctx->inflight_lock);
return NULL;
}
-static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
+static int __io_queue_sqe(struct io_kiocb *req)
{
struct io_kiocb *nxt;
int ret;
goto err;
}
- ret = __io_submit_sqe(ctx, req, NULL, true);
+ ret = __io_submit_sqe(req, NULL, true);
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
if (sqe_copy) {
s->sqe = sqe_copy;
if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
- ret = io_grab_files(ctx, req);
+ ret = io_grab_files(req);
if (ret) {
kfree(sqe_copy);
goto err;
* Queued up for async execution, worker will release
* submit reference when the iocb is actually submitted.
*/
- io_queue_async_work(ctx, req);
+ io_queue_async_work(req);
return 0;
}
}
return ret;
}
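/*
 * Editor's note (illustrative, not part of the patch): this is the async
 * punt path. When the nonblocking attempt returns -EAGAIN the original sqe
 * cannot be reused, since the SQ ring entry may be overwritten by userspace,
 * so it is copied to stable kernel memory, the submitter's files are pinned
 * if the request needs them, and the request is handed to io-wq; the worker
 * drops the submit reference once the iocb actually runs.
 */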
-static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
+static int io_queue_sqe(struct io_kiocb *req)
{
int ret;
- ret = io_req_defer(ctx, req);
+ ret = io_req_defer(req);
if (ret) {
if (ret != -EIOCBQUEUED) {
io_cqring_add_event(req, ret);
return 0;
}
- return __io_queue_sqe(ctx, req);
+ return __io_queue_sqe(req);
}
-static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct io_kiocb *shadow)
+static int io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
{
int ret;
int need_submit = false;
+ struct io_ring_ctx *ctx = req->ctx;
if (!shadow)
- return io_queue_sqe(ctx, req);
+ return io_queue_sqe(req);
/*
* Mark the first IO in link list as DRAIN, let all the following
* IOs enter the defer list. all IO needs to be completed before link
* list.
*/
req->flags |= REQ_F_IO_DRAIN;
- ret = io_req_defer(ctx, req);
+ ret = io_req_defer(req);
if (ret) {
if (ret != -EIOCBQUEUED) {
io_cqring_add_event(req, ret);
spin_unlock_irq(&ctx->completion_lock);
if (need_submit)
- return __io_queue_sqe(ctx, req);
+ return __io_queue_sqe(req);
return 0;
}
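/*
 * Editor's note (illustrative, not part of the patch): the shadow request
 * implements drain for a whole link chain. The head is marked
 * REQ_F_IO_DRAIN and the shadow is parked on ctx->defer_list to hold back
 * later sqes; if io_req_defer() reports the head itself has nothing to wait
 * for (returns 0), need_submit pushes it through __io_queue_sqe() at once.
 */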
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
-static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct io_submit_state *state, struct io_kiocb **link)
+static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
+ struct io_kiocb **link)
{
struct io_uring_sqe *sqe_copy;
struct sqe_submit *s = &req->submit;
+ struct io_ring_ctx *ctx = req->ctx;
int ret;
req->user_data = s->sqe->user_data;
goto err_req;
}
- ret = io_req_set_file(ctx, state, req);
+ ret = io_req_set_file(state, req);
if (unlikely(ret)) {
err_req:
io_cqring_add_event(req, ret);
ret = -EINVAL;
goto err_req;
} else {
- io_queue_sqe(ctx, req);
+ io_queue_sqe(req);
}
}
req->submit.needs_fixed_file = async;
trace_io_uring_submit_sqe(ctx, req->submit.sqe->user_data,
true, async);
- io_submit_sqe(ctx, req, statep, &link);
+ io_submit_sqe(req, statep, &link);
submitted++;
/*
* If previous wasn't linked and we have a linked command,
* that's the end of the chain. Submit the previous link.
*/
if (!(sqe_flags & IOSQE_IO_LINK) && link) {
- io_queue_link_head(ctx, link, shadow_req);
+ io_queue_link_head(link, shadow_req);
link = NULL;
shadow_req = NULL;
}
}
if (link)
- io_queue_link_head(ctx, link, shadow_req);
+ io_queue_link_head(link, shadow_req);
if (statep)
io_submit_state_end(&state);
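/*
 * Editor's note (illustrative, not part of the patch): the submission loop
 * accumulates IOSQE_IO_LINK sqes on 'link' and flushes the chain through
 * io_queue_link_head() as soon as an sqe without the link flag ends it; the
 * trailing check catches a chain still open when the batch runs out. The
 * loop-level helpers here keep their explicit ctx, while everything
 * downstream now derives it from req->ctx.
 */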