diff --git a/fs/io_uring.c b/fs/io_uring.c
index 584b36dcd0aa..3145c9cacee0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -269,6 +269,7 @@ struct io_buffer {
 	__u64 addr;
 	__u32 len;
 	__u16 bid;
+	__u16 bgid;
 };
 
 struct io_restriction {
@@ -1351,6 +1352,36 @@ static inline unsigned int io_put_kbuf(struct io_kiocb *req,
 	return cflags;
 }
 
+static void io_kbuf_recycle(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_buffer *head, *buf;
+
+	if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
+		return;
+
+	lockdep_assert_held(&ctx->uring_lock);
+
+	buf = req->kbuf;
+
+	head = xa_load(&ctx->io_buffers, buf->bgid);
+	if (head) {
+		list_add(&buf->list, &head->list);
+	} else {
+		int ret;
+
+		INIT_LIST_HEAD(&buf->list);
+
+		/* if we fail, just leave buffer attached */
+		ret = xa_insert(&ctx->io_buffers, buf->bgid, buf, GFP_KERNEL);
+		if (unlikely(ret < 0))
+			return;
+	}
+
+	req->flags &= ~REQ_F_BUFFER_SELECTED;
+	req->kbuf = NULL;
+}
+
 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
 			  bool cancel_all)
 	__must_hold(&req->ctx->timeout_lock)
@@ -4763,6 +4794,7 @@ static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
 		buf->addr = addr;
 		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
 		buf->bid = bid;
+		buf->bgid = pbuf->bgid;
 		addr += pbuf->len;
 		bid++;
 		if (!*head) {
@@ -7395,8 +7427,12 @@ static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
 		 * Queued up for async execution, worker will release
 		 * submit reference when the iocb is actually submitted.
 		 */
+		io_kbuf_recycle(req);
 		io_queue_async_work(req, NULL);
 		break;
+	case IO_APOLL_OK:
+		io_kbuf_recycle(req);
+		break;
 	}
 
 	if (linked_timeout)
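
Not part of the patch, only context for review: the buffers recycled here are the "provided buffers" that userspace hands to the kernel per buffer group id (bgid). Below is a minimal liburing sketch of the userspace flow this change affects; the socket fd, group id, buffer count, and buffer size are illustrative assumptions, not anything taken from the patch.

/* Sketch: provide a buffer group, then issue a buffer-selecting recv. */
#include <liburing.h>

#define BGID	7	/* arbitrary buffer group id for this example */
#define NR_BUFS	8
#define BUF_LEN	4096

int submit_example(struct io_uring *ring, int sock)
{
	static char bufs[NR_BUFS][BUF_LEN];
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int i, ret;

	/* hand NR_BUFS buffers to the kernel under group BGID */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, bufs, BUF_LEN, NR_BUFS, BGID, 0);

	/* recv that lets the kernel pick a buffer from group BGID */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv(sqe, sock, NULL, BUF_LEN, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;

	ret = io_uring_submit(ring);
	if (ret < 0)
		return ret;

	/* two CQEs: one for the provide-buffers op, one for the recv */
	for (i = 0; i < 2; i++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;
		if (cqe->flags & IORING_CQE_F_BUFFER) {
			int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;

			/* recv data landed in bufs[bid]; re-provide it once consumed */
			(void)bid;
		}
		io_uring_cqe_seen(ring, cqe);
	}
	return 0;
}

As I read the change, a buffer that was already selected for a request which then takes the poll-arm/async path is now returned to its group (looked up via the new bgid field) instead of staying attached to the suspended request, so other buffer-selecting requests can use it in the meantime.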