io_uring: remove struct io_tw_state::locked

ctx is always locked for task_work now, so get rid of struct
io_tw_state::locked. Note I'm stopping one step before removing
io_tw_state altogether, which is now empty, because it still serves the
purpose of indicating which functions are tw callbacks and forcing users
not to invoke them carelessly out of a wrong context. The removal can
always be done later.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/e95e1ea116d0bfa54b656076e6a977bc221392a4.1710799188.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Authored by Pavel Begunkov on 2024-03-18 22:00:30 +00:00; committed by Jens Axboe
parent 92219afb98
commit 8e5b3b89ec
8 changed files with 17 additions and 47 deletions
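The marker-type idea from the message is easy to demonstrate outside the kernel. Below is a minimal userspace sketch, assuming nothing from io_uring itself: tw_token, tw_only_callback and tw_executor are hypothetical names, and the zero-sized struct relies on the same GNU C extension the kernel uses.

#include <stdio.h>

/* Userspace sketch of the io_tw_state idea: an empty type whose only
 * job is to prove "we are in task_work context". Zero-sized structs
 * are a GNU C extension, which the kernel also builds with. */
struct tw_token {
};

/* May only run as a tw callback: the token parameter forces callers
 * to hold one, so the function cannot be invoked casually from a
 * random context. */
static void tw_only_callback(int res, struct tw_token *tok)
{
	(void)tok;	/* carries no data, only proof of context */
	printf("tw callback, res=%d\n", res);
}

/* The executor is the only place a token is minted. */
static void tw_executor(void)
{
	struct tw_token tok = {};

	tw_only_callback(0, &tok);
}

int main(void)
{
	tw_executor();
	return 0;
}

A caller without a token has nothing to pass, so misuse at least becomes glaringly visible at the call site.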

include/linux/io_uring_types.h

@@ -438,8 +438,6 @@ struct io_ring_ctx {
 };
 
 struct io_tw_state {
-	/* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */
-	bool locked;
 };
 
 enum {

io_uring/io_uring.c

@@ -247,14 +247,12 @@ static __cold void io_fallback_req_func(struct work_struct *work)
 						fallback_work.work);
 	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
 	struct io_kiocb *req, *tmp;
-	struct io_tw_state ts = { .locked = true, };
+	struct io_tw_state ts = {};
 
 	percpu_ref_get(&ctx->refs);
 	mutex_lock(&ctx->uring_lock);
 	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
 		req->io_task_work.func(req, &ts);
-	if (WARN_ON_ONCE(!ts.locked))
-		return;
 	io_submit_flush_completions(ctx);
 	mutex_unlock(&ctx->uring_lock);
 	percpu_ref_put(&ctx->refs);
@@ -1157,11 +1155,9 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
 		return;
 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
 		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
-	if (ts->locked) {
-		io_submit_flush_completions(ctx);
-		mutex_unlock(&ctx->uring_lock);
-		ts->locked = false;
-	}
+
+	io_submit_flush_completions(ctx);
+	mutex_unlock(&ctx->uring_lock);
 	percpu_ref_put(&ctx->refs);
 }
 
@@ -1185,8 +1181,6 @@ struct llist_node *io_handle_tw_list(struct llist_node *node,
 		if (req->ctx != ctx) {
 			ctx_flush_and_put(ctx, &ts);
 			ctx = req->ctx;
-			ts.locked = true;
 			mutex_lock(&ctx->uring_lock);
 			percpu_ref_get(&ctx->refs);
 		}
@@ -1459,22 +1453,16 @@ again:
 static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
 					   int min_events)
 {
-	struct io_tw_state ts = { .locked = true, };
-	int ret;
+	struct io_tw_state ts = {};
 
 	if (llist_empty(&ctx->work_llist))
 		return 0;
-
-	ret = __io_run_local_work(ctx, &ts, min_events);
-	/* shouldn't happen! */
-	if (WARN_ON_ONCE(!ts.locked))
-		mutex_lock(&ctx->uring_lock);
-	return ret;
+	return __io_run_local_work(ctx, &ts, min_events);
 }
 
 static int io_run_local_work(struct io_ring_ctx *ctx, int min_events)
 {
-	struct io_tw_state ts = { .locked = true };
+	struct io_tw_state ts = {};
 	int ret;
 
 	mutex_lock(&ctx->uring_lock);
@@ -1702,10 +1690,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 
 void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts)
 {
-	if (ts->locked)
-		io_req_complete_defer(req);
-	else
-		io_req_complete_post(req, IO_URING_F_UNLOCKED);
+	io_req_complete_defer(req);
 }
 
 /*

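Taken together, the io_uring.c changes leave one simple rule: the task_work executor owns ->uring_lock for the whole batch and flushes deferred completions before dropping it. A minimal userspace sketch of that shape, using pthreads and hypothetical names (tw_callback, run_task_work, a toy completion list), not the kernel API:

#include <pthread.h>
#include <stdio.h>

struct tw_state {
};				/* mirrors the now-empty io_tw_state */

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static int deferred[16];
static int ndeferred;

static void tw_callback(int res, struct tw_state *ts)
{
	(void)ts;
	deferred[ndeferred++] = res;	/* safe: ring_lock is held */
}

static void flush_completions(void)
{
	for (int i = 0; i < ndeferred; i++)
		printf("complete %d\n", deferred[i]);
	ndeferred = 0;
}

/* Pattern of io_fallback_req_func() after the patch: lock, run all
 * callbacks, flush, unlock. No "are we locked?" bookkeeping remains. */
static void run_task_work(const int *reqs, int n)
{
	struct tw_state ts = {};

	pthread_mutex_lock(&ring_lock);
	for (int i = 0; i < n; i++)
		tw_callback(reqs[i], &ts);
	flush_completions();
	pthread_mutex_unlock(&ring_lock);
}

int main(void)
{
	int reqs[] = { 1, 2, 3 };

	run_task_work(reqs, 3);
	return 0;
}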
io_uring/io_uring.h

@@ -351,10 +351,7 @@ static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
 
 static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
 {
-	if (!ts->locked) {
-		mutex_lock(&ctx->uring_lock);
-		ts->locked = true;
-	}
+	lockdep_assert_held(&ctx->uring_lock);
 }
 
 /*

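With the executor guaranteeing the lock, io_tw_lock() degenerates from "take the lock if we do not have it" into a pure debugging assertion via lockdep_assert_held(). A userspace analog under the same assumption, with a per-thread flag standing in for lockdep's tracking and all names hypothetical:

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static _Thread_local int ring_lock_held;	/* poor man's lockdep state */

static void ring_lock_acquire(void)
{
	pthread_mutex_lock(&ring_lock);
	ring_lock_held = 1;
}

static void ring_lock_release(void)
{
	ring_lock_held = 0;
	pthread_mutex_unlock(&ring_lock);
}

/* After this patch, the io_tw_lock() analog only asserts; it never locks. */
static void tw_lock(void)
{
	assert(ring_lock_held);	/* analog of lockdep_assert_held() */
}

int main(void)
{
	ring_lock_acquire();
	tw_lock();		/* fine: the executor locked for us */
	ring_lock_release();
	return 0;
}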
io_uring/poll.c

@@ -322,7 +322,7 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
 			__poll_t mask = mangle_poll(req->cqe.res &
 						    req->apoll_events);
 
-			if (!io_fill_cqe_req_aux(req, ts->locked, mask,
+			if (!io_fill_cqe_req_aux(req, true, mask,
 						 IORING_CQE_F_MORE)) {
 				io_req_set_res(req, mask, 0);
 				return IOU_POLL_REMOVE_POLL_USE_RES;

io_uring/rw.c

@@ -305,11 +305,9 @@ void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
 
 	io_req_io_end(req);
 
-	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
-		unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
-
-		req->cqe.flags |= io_put_kbuf(req, issue_flags);
-	}
+	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
+		req->cqe.flags |= io_put_kbuf(req, 0);
+
 	io_req_task_complete(req, ts);
 }

io_uring/timeout.c

@@ -72,10 +72,7 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
 	struct io_ring_ctx *ctx = req->ctx;
 
 	if (!io_timeout_finish(timeout, data)) {
-		bool filled;
-		filled = io_fill_cqe_req_aux(req, ts->locked, -ETIME,
-					     IORING_CQE_F_MORE);
-		if (filled) {
+		if (io_fill_cqe_req_aux(req, true, -ETIME, IORING_CQE_F_MORE)) {
 			/* re-arm timer */
 			spin_lock_irq(&ctx->timeout_lock);
 			list_add(&timeout->list, ctx->timeout_list.prev);
@@ -301,7 +298,6 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 
 static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *ts)
 {
-	unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
 	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
 	struct io_kiocb *prev = timeout->prev;
 	int ret = -ENOENT;
@@ -313,7 +309,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *ts)
 			.data		= prev->cqe.user_data,
 		};
 
-		ret = io_try_cancel(req->task->io_uring, &cd, issue_flags);
+		ret = io_try_cancel(req->task->io_uring, &cd, 0);
 	}
 	io_req_set_res(req, ret ?: -ETIME, 0);
 	io_req_task_complete(req, ts);

io_uring/uring_cmd.c

@@ -87,13 +87,9 @@ EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
 static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
 {
 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
-	unsigned issue_flags = IO_URING_F_UNLOCKED;
 
-	/* locked task_work executor checks the deffered list completion */
-	if (ts->locked)
-		issue_flags = IO_URING_F_COMPLETE_DEFER;
-
-	ioucmd->task_work_cb(ioucmd, issue_flags);
+	/* task_work executor checks the deffered list completion */
+	ioucmd->task_work_cb(ioucmd, IO_URING_F_COMPLETE_DEFER);
 }
 
 void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,

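The per-opcode hunks (poll.c, rw.c, timeout.c, uring_cmd.c above, and waitid.c below) are all the same mechanical fallout. With the executor guaranteeing ->uring_lock, each callback hard-codes the locked variant: true instead of ts->locked for io_fill_cqe_req_aux(), issue_flags of 0 instead of IO_URING_F_UNLOCKED, and an unconditional IO_URING_F_COMPLETE_DEFER for uring_cmd, rather than branching on a flag that can no longer be false.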
io_uring/waitid.c

@@ -118,7 +118,7 @@ static int io_waitid_finish(struct io_kiocb *req, int ret)
 static void io_waitid_complete(struct io_kiocb *req, int ret)
 {
 	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
-	struct io_tw_state ts = { .locked = true };
+	struct io_tw_state ts = {};
 
 	/* anyone completing better be holding a reference */
 	WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK));