io_uring/napi: postpone napi timeout adjustment

Remove io_napi_adjust_timeout() and move the adjustments out of the
common path into __io_napi_busy_loop(). Now the limit is calculated
based on struct io_wait_queue::timeout, for which we query the current
time one more time. The overhead shouldn't be a problem as it's a
polling path; it can be optimised later by additionally saving the
delta time value in io_cqring_wait().
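
To illustrate, the adjustment now done in __io_napi_busy_loop() boils
down to clamping the configured NAPI budget to whatever remains of the
wait deadline. The following is a minimal userspace sketch of that
arithmetic, not kernel code: ktime_t is modelled as signed 64-bit
nanoseconds, clock_ns() stands in for ktime_get(), and napi_budget() is
an illustrative helper, not a real io_uring function.

/* Userspace sketch of the clamping in __io_napi_busy_loop(); illustrative only. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef int64_t ktime_t;	/* nanoseconds, matching the kernel's ktime_t */
#define KTIME_MAX INT64_MAX	/* sentinel for "no wait deadline" */

/* Stand-in for ktime_get(): monotonic time in nanoseconds. */
static ktime_t clock_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (ktime_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/*
 * Clamp the configured busy-poll budget to the time left until the
 * absolute deadline. The second clock query here is the extra cost
 * the commit message refers to. As with min_t(u64, ...), a negative
 * remainder wraps to a huge u64, so an already-expired deadline
 * leaves the configured budget untouched.
 */
static uint64_t napi_budget(uint64_t cfg_dt, ktime_t timeout)
{
	if (timeout != KTIME_MAX) {
		uint64_t dt = (uint64_t)(timeout - clock_ns());

		if (dt < cfg_dt)
			cfg_dt = dt;
	}
	return cfg_dt;
}

int main(void)
{
	ktime_t deadline = clock_ns() + 2000000;	/* wait expires in ~2ms */

	/* A 10ms configured budget is clamped to the ~2ms remaining. */
	printf("with deadline: %" PRIu64 " ns\n", napi_budget(10000000, deadline));
	/* Without a deadline (KTIME_MAX) the budget is used as-is. */
	printf("no deadline:   %" PRIu64 " ns\n", napi_budget(10000000, KTIME_MAX));
	return 0;
}

The optimisation mentioned in the last sentence of the message would
amount to reusing the relative dt already computed in io_cqring_wait()
instead of querying the clock a second time here.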

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/88e14686e245b3b42ff90a3c4d70895d48676206.1723039801.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit d5cce407e4
parent 489b80060c
Author:    Pavel Begunkov <asml.silence@gmail.com>
Date:      2024-08-07 15:18:12 +01:00
Committer: Jens Axboe <axboe@kernel.dk>

3 changed files with 7 additions and 38 deletions

io_uring/io_uring.c

@@ -2423,7 +2423,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
 		dt = timespec64_to_ktime(ts);
 		iowq.timeout = ktime_add(dt, ktime_get());
-		io_napi_adjust_timeout(ctx, &iowq, dt);
 	}
 
 	if (sig) {

io_uring/napi.c

@@ -269,27 +269,6 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
 	return 0;
 }
 
-/*
- * __io_napi_adjust_timeout() - adjust busy loop timeout
- * @ctx: pointer to io-uring context structure
- * @iowq: pointer to io wait queue
- * @ts: pointer to timespec or NULL
- *
- * Adjust the busy loop timeout according to timespec and busy poll timeout.
- * If the specified NAPI timeout is bigger than the wait timeout, then adjust
- * the NAPI timeout accordingly.
- */
-void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
-			      ktime_t to_wait)
-{
-	ktime_t poll_dt = READ_ONCE(ctx->napi_busy_poll_dt);
-
-	if (to_wait)
-		poll_dt = min(poll_dt, to_wait);
-
-	iowq->napi_busy_poll_dt = poll_dt;
-}
-
 /*
  * __io_napi_busy_loop() - execute busy poll loop
  * @ctx: pointer to io-uring context structure
@@ -302,6 +281,13 @@ void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
 	if (ctx->flags & IORING_SETUP_SQPOLL)
 		return;
 
+	iowq->napi_busy_poll_dt = READ_ONCE(ctx->napi_busy_poll_dt);
+	if (iowq->timeout != KTIME_MAX) {
+		ktime_t dt = ktime_sub(iowq->timeout, ktime_get());
+
+		iowq->napi_busy_poll_dt = min_t(u64, iowq->napi_busy_poll_dt, dt);
+	}
+
 	iowq->napi_prefer_busy_poll = READ_ONCE(ctx->napi_prefer_busy_poll);
 	io_napi_blocking_busy_loop(ctx, iowq);
 }

io_uring/napi.h

@@ -17,8 +17,6 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg);
 
 void __io_napi_add(struct io_ring_ctx *ctx, struct socket *sock);
 
-void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
-			      struct io_wait_queue *iowq, ktime_t to_wait);
 void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
 int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx);
 
@@ -27,15 +25,6 @@ static inline bool io_napi(struct io_ring_ctx *ctx)
 	return !list_empty(&ctx->napi_list);
 }
 
-static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
-					  struct io_wait_queue *iowq,
-					  ktime_t to_wait)
-{
-	if (!io_napi(ctx))
-		return;
-	__io_napi_adjust_timeout(ctx, iowq, to_wait);
-}
-
 static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
 				     struct io_wait_queue *iowq)
 {
@@ -86,11 +75,6 @@ static inline bool io_napi(struct io_ring_ctx *ctx)
 static inline void io_napi_add(struct io_kiocb *req)
 {
 }
-static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
-					  struct io_wait_queue *iowq,
-					  ktime_t to_wait)
-{
-}
 static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
 				     struct io_wait_queue *iowq)
 {