io_uring: fix locked_free_list caches_free()

Don't forget to zero locked_free_nr. It's not a disaster, but a stale count
makes the code attempt to flush the list, taking the extra lock, even when
there is nothing in it. Also, don't traverse a potentially long list freeing
requests while holding the spinlock; splice the list out and free the
requests afterwards.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author:    Pavel Begunkov <asml.silence@gmail.com>, 2021-02-23 22:17:20 +00:00
Committer: Jens Axboe <axboe@kernel.dk>
parent 7c977a58dc
commit e5547d2c5e

fs/io_uring.c

@@ -8708,6 +8708,7 @@ static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
 static void io_req_caches_free(struct io_ring_ctx *ctx, struct task_struct *tsk)
 {
 	struct io_submit_state *submit_state = &ctx->submit_state;
+	struct io_comp_state *cs = &ctx->submit_state.comp;
 
 	mutex_lock(&ctx->uring_lock);
 
@@ -8717,12 +8718,13 @@ static void io_req_caches_free(struct io_ring_ctx *ctx, struct task_struct *tsk)
 		submit_state->free_reqs = 0;
 	}
 
-	io_req_cache_free(&submit_state->comp.free_list, NULL);
-
 	spin_lock_irq(&ctx->completion_lock);
-	io_req_cache_free(&submit_state->comp.locked_free_list, NULL);
+	list_splice_init(&cs->locked_free_list, &cs->free_list);
+	cs->locked_free_nr = 0;
 	spin_unlock_irq(&ctx->completion_lock);
 
+	io_req_cache_free(&cs->free_list, NULL);
+
 	mutex_unlock(&ctx->uring_lock);
 }
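
For reference, a minimal userspace sketch of the splice-then-free pattern the
message describes. This is not the kernel code: the struct and function names
below are made up for illustration, and a pthread mutex stands in for
completion_lock and the list_head/list_splice_init machinery used in the diff
above.

/*
 * Hypothetical userspace illustration: detach the shared list and zero its
 * counter inside the critical section, then free the entries with the lock
 * dropped, so a long list never extends the lock hold time.
 */
#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

struct req_cache {
	pthread_mutex_t lock;		/* stands in for ctx->completion_lock */
	struct node *locked_free_list;	/* filled by other threads under lock */
	int locked_free_nr;		/* the counter the commit zeroes */
};

static void req_cache_flush(struct req_cache *c)
{
	struct node *list, *next;

	pthread_mutex_lock(&c->lock);
	list = c->locked_free_list;	/* O(1) "splice" onto a private list */
	c->locked_free_list = NULL;
	c->locked_free_nr = 0;		/* keep the counter in sync */
	pthread_mutex_unlock(&c->lock);

	for (; list; list = next) {	/* walk and free without the lock */
		next = list->next;
		free(list);
	}
}

Zeroing the counter matters because, as the message notes, a stale
locked_free_nr makes later calls take the lock to flush a list that is
already empty; keeping it in sync avoids that pointless lock traffic.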