io_uring: correct link-list traversal locking

As io_remove_next_linked() is now under ->timeout_lock (see
io_link_timeout_fn), we should update locking around io_for_each_link()
and io_match_task() to use the new lock.

Cc: stable@kernel.org # 5.15+
Fixes: 89850fce16 ("io_uring: run timeouts from task_work")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/b54541cedf7de59cb5ae36109e58529ca16e66aa.1637631883.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov 2021-11-23 01:45:35 +00:00 committed by Jens Axboe
parent f6f9b278f2
commit 674ee8e1b4

View File

@@ -1502,10 +1502,10 @@ static void io_prep_async_link(struct io_kiocb *req)
 	if (req->flags & REQ_F_LINK_TIMEOUT) {
 		struct io_ring_ctx *ctx = req->ctx;

-		spin_lock(&ctx->completion_lock);
+		spin_lock_irq(&ctx->timeout_lock);
 		io_for_each_link(cur, req)
 			io_prep_async_work(cur);
-		spin_unlock(&ctx->completion_lock);
+		spin_unlock_irq(&ctx->timeout_lock);
 	} else {
 		io_for_each_link(cur, req)
 			io_prep_async_work(cur);
@@ -5699,6 +5699,7 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
 	int posted = 0, i;

 	spin_lock(&ctx->completion_lock);
+	spin_lock_irq(&ctx->timeout_lock);
 	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
 		struct hlist_head *list;

@@ -5708,6 +5709,7 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
 			posted += io_poll_remove_one(req);
 		}
 	}
+	spin_unlock_irq(&ctx->timeout_lock);
 	spin_unlock(&ctx->completion_lock);

 	if (posted)
@@ -9568,9 +9570,9 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
 		struct io_ring_ctx *ctx = req->ctx;

 		/* protect against races with linked timeouts */
-		spin_lock(&ctx->completion_lock);
+		spin_lock_irq(&ctx->timeout_lock);
 		ret = io_match_task(req, cancel->task, cancel->all);
-		spin_unlock(&ctx->completion_lock);
+		spin_unlock_irq(&ctx->timeout_lock);
 	} else {
 		ret = io_match_task(req, cancel->task, cancel->all);
 	}
@@ -9585,12 +9587,14 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
 	LIST_HEAD(list);

 	spin_lock(&ctx->completion_lock);
+	spin_lock_irq(&ctx->timeout_lock);
 	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
 		if (io_match_task(de->req, task, cancel_all)) {
 			list_cut_position(&list, &ctx->defer_list, &de->list);
 			break;
 		}
 	}
+	spin_unlock_irq(&ctx->timeout_lock);
 	spin_unlock(&ctx->completion_lock);

 	if (list_empty(&list))