mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 14:11:52 +00:00
io_uring: fix complete_post races for linked req
Calling io_queue_next() after spin_unlock in io_req_complete_post() races with the other side extracting and reusing this request. Hand coded parts of io_req_find_next() considering that io_disarm_next() and io_req_task_queue() have (and safe) to be called with completion_lock held. It already does io_commit_cqring() and io_cqring_ev_posted(), so just reuse it for post io_disarm_next(). Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://lore.kernel.org/r/5672a62f3150ee7c55849f40c0037655c4f2840f.1615250156.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
33cc89a9fc
commit
7a612350a9
@@ -985,6 +985,7 @@ static const struct io_op_def io_op_defs[] = {
|
||||
[IORING_OP_UNLINKAT] = {},
|
||||
};
|
||||
|
||||
static bool io_disarm_next(struct io_kiocb *req);
|
||||
static void io_uring_del_task_file(unsigned long index);
|
||||
static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
|
||||
struct task_struct *task,
|
||||
@@ -1525,15 +1526,14 @@ static void io_cqring_fill_event(struct io_kiocb *req, long res)
|
||||
__io_cqring_fill_event(req, res, 0);
|
||||
}
|
||||
|
||||
static inline void io_req_complete_post(struct io_kiocb *req, long res,
|
||||
unsigned int cflags)
|
||||
static void io_req_complete_post(struct io_kiocb *req, long res,
|
||||
unsigned int cflags)
|
||||
{
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&ctx->completion_lock, flags);
|
||||
__io_cqring_fill_event(req, res, cflags);
|
||||
io_commit_cqring(ctx);
|
||||
/*
|
||||
* If we're the last reference to this request, add to our locked
|
||||
* free_list cache.
|
||||
@ -1541,19 +1541,26 @@ static inline void io_req_complete_post(struct io_kiocb *req, long res,
|
||||
if (refcount_dec_and_test(&req->refs)) {
|
||||
struct io_comp_state *cs = &ctx->submit_state.comp;
|
||||
|
||||
if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
|
||||
if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK))
|
||||
io_disarm_next(req);
|
||||
if (req->link) {
|
||||
io_req_task_queue(req->link);
|
||||
req->link = NULL;
|
||||
}
|
||||
}
|
||||
io_dismantle_req(req);
|
||||
io_put_task(req->task, 1);
|
||||
list_add(&req->compl.list, &cs->locked_free_list);
|
||||
cs->locked_free_nr++;
|
||||
} else
|
||||
req = NULL;
|
||||
io_commit_cqring(ctx);
|
||||
spin_unlock_irqrestore(&ctx->completion_lock, flags);
|
||||
|
||||
io_cqring_ev_posted(ctx);
|
||||
if (req) {
|
||||
io_queue_next(req);
|
||||
|
||||
if (req)
|
||||
percpu_ref_put(&ctx->refs);
|
||||
}
|
||||
}
|
||||
|
||||
static void io_req_complete_state(struct io_kiocb *req, long res,
|
||||
|
Loading…
Reference in New Issue
Block a user