io_uring: don't inline __io_get_cqe()

__io_get_cqe() is not as hot as io_get_cqe(), so there is no need to
inline it; taking it out of line sheds ~500B from the binary.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/c1ac829198a881b7af8710926f99a3559b9f24c0.1655455613.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit faf88dde06 (parent d245bca637)
@@ -166,6 +166,11 @@ static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
 		__io_submit_flush_completions(ctx);
 }
 
+static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
+{
+	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
+}
+
 static bool io_match_linked(struct io_kiocb *head)
 {
 	struct io_kiocb *req;
@@ -676,6 +681,36 @@ bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 	return true;
 }
 
+/*
+ * writes to the cq entry need to come after reading head; the
+ * control dependency is enough as we're using WRITE_ONCE to
+ * fill the cq entry
+ */
+struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
+{
+	struct io_rings *rings = ctx->rings;
+	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
+	unsigned int shift = 0;
+	unsigned int free, queued, len;
+
+	if (ctx->flags & IORING_SETUP_CQE32)
+		shift = 1;
+
+	/* userspace may cheat modifying the tail, be safe and do min */
+	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
+	free = ctx->cq_entries - queued;
+	/* we need a contiguous range, limit based on the current array offset */
+	len = min(free, ctx->cq_entries - off);
+	if (!len)
+		return NULL;
+
+	ctx->cached_cq_tail++;
+	ctx->cqe_cached = &rings->cqes[off];
+	ctx->cqe_sentinel = ctx->cqe_cached + len;
+	ctx->cqe_cached++;
+	return &rings->cqes[off << shift];
+}
+
 static bool io_fill_cqe_aux(struct io_ring_ctx *ctx,
 			    u64 user_data, s32 res, u32 cflags)
 {
@@ -14,44 +14,10 @@ enum {
 	IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,
 };
 
+struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
 bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 			      u32 cflags, u64 extra1, u64 extra2);
 
-static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
-{
-	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
-}
-
-/*
- * writes to the cq entry need to come after reading head; the
- * control dependency is enough as we're using WRITE_ONCE to
- * fill the cq entry
- */
-static inline struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
-{
-	struct io_rings *rings = ctx->rings;
-	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
-	unsigned int shift = 0;
-	unsigned int free, queued, len;
-
-	if (ctx->flags & IORING_SETUP_CQE32)
-		shift = 1;
-
-	/* userspace may cheat modifying the tail, be safe and do min */
-	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
-	free = ctx->cq_entries - queued;
-	/* we need a contiguous range, limit based on the current array offset */
-	len = min(free, ctx->cq_entries - off);
-	if (!len)
-		return NULL;
-
-	ctx->cached_cq_tail++;
-	ctx->cqe_cached = &rings->cqes[off];
-	ctx->cqe_sentinel = ctx->cqe_cached + len;
-	ctx->cqe_cached++;
-	return &rings->cqes[off << shift];
-}
-
 static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
 {
 	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
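
The hunk above ends just as the inline fast path begins, so the relationship between the two functions is worth spelling out: io_get_cqe() stays inline in the header and only drops into the now out-of-line __io_get_cqe() when the cached CQE range is exhausted, which is why the slow path can afford a real function call. Below is a minimal sketch of that shape under a hypothetical name (io_get_cqe_sketch); the real io_get_cqe() body is truncated above and also adjusts the returned pointer for IORING_SETUP_CQE32, which is omitted here.

/*
 * Sketch only: the hot path stays inline; the cold path (recomputing a
 * contiguous free range) is the out-of-line __io_get_cqe(). CQE32
 * indexing is omitted for brevity.
 */
static inline struct io_uring_cqe *io_get_cqe_sketch(struct io_ring_ctx *ctx)
{
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		/* consume one entry from the cached contiguous range */
		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		return cqe;
	}
	/* cached range exhausted (or never set up): take the slow path */
	return __io_get_cqe(ctx);
}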
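
The "writes to the cq entry need to come after reading head" comment describes the callers: they fill the returned entry with WRITE_ONCE(), so the NULL check on the returned pointer supplies the control dependency that orders those stores after the head read. A rough, hypothetical illustration of that pattern follows, mirroring what io_fill_cqe_aux() (whose signature closes the second hunk) does; the helper name and exact sequence of stores are assumptions for illustration, not the verbatim kernel code.

/*
 * Hypothetical caller: get a CQE slot and fill it with WRITE_ONCE().
 * The branch on the NULL return is the control dependency the comment
 * in __io_get_cqe() relies on.
 */
static bool fill_cqe_sketch(struct io_ring_ctx *ctx,
			    u64 user_data, s32 res, u32 cflags)
{
	struct io_uring_cqe *cqe = io_get_cqe(ctx);

	if (!cqe)
		return false;	/* CQ ring full; caller handles overflow */

	WRITE_ONCE(cqe->user_data, user_data);
	WRITE_ONCE(cqe->res, res);
	WRITE_ONCE(cqe->flags, cflags);
	return true;
}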