io_uring: inline __tctx_task_work()
Inline __tctx_task_work() into tctx_task_work() in preparation for further optimisations.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/f9c05c4bc9763af7bd8e25ebc3c5f7b6f69148f8.1623949695.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
committed by Jens Axboe
parent a3dbdf54da
commit 3f18407dc6
fs/io_uring.c

@@ -1888,15 +1888,18 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx)
 	percpu_ref_put(&ctx->refs);
 }
 
-static bool __tctx_task_work(struct io_uring_task *tctx)
+static void tctx_task_work(struct callback_head *cb)
 {
+	struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
+						  task_work);
+
+	clear_bit(0, &tctx->task_state);
+
+	while (!wq_list_empty(&tctx->task_list)) {
 	struct io_ring_ctx *ctx = NULL;
 	struct io_wq_work_list list;
 	struct io_wq_work_node *node;
 
-	if (wq_list_empty(&tctx->task_list))
-		return false;
-
 	spin_lock_irq(&tctx->task_lock);
 	list = tctx->task_list;
 	INIT_WQ_LIST(&tctx->task_list);
@@ -1905,32 +1908,24 @@ static bool __tctx_task_work(struct io_uring_task *tctx)
 	node = list.first;
 	while (node) {
 		struct io_wq_work_node *next = node->next;
-		struct io_kiocb *req;
+		struct io_kiocb *req = container_of(node, struct io_kiocb,
+						    io_task_work.node);
 
-		req = container_of(node, struct io_kiocb, io_task_work.node);
 		if (req->ctx != ctx) {
 			ctx_flush_and_put(ctx);
 			ctx = req->ctx;
 			percpu_ref_get(&ctx->refs);
 		}
-
 		req->task_work.func(&req->task_work);
 		node = next;
 	}
 
 	ctx_flush_and_put(ctx);
-	return list.first != NULL;
-}
-
-static void tctx_task_work(struct callback_head *cb)
-{
-	struct io_uring_task *tctx = container_of(cb, struct io_uring_task, task_work);
-
-	clear_bit(0, &tctx->task_state);
-
-	while (__tctx_task_work(tctx))
+	if (!list.first)
+		break;
 		cond_resched();
 }
+}
 
 static int io_req_task_work_add(struct io_kiocb *req)
 {
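For readability, here is the resulting tctx_task_work() assembled from the hunks above. Treat it as a sketch rather than a verbatim copy of the tree: the rendered diff hides indentation-only changes, so the indentation of the lines shown as unchanged is inferred, and the spin_unlock_irq() plus blank line that fall in the unshown context between the two hunks are filled in from the old helper.

/* Sketch of the post-patch function, assembled from the hunks above. */
static void tctx_task_work(struct callback_head *cb)
{
	struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
						  task_work);

	clear_bit(0, &tctx->task_state);

	while (!wq_list_empty(&tctx->task_list)) {
		struct io_ring_ctx *ctx = NULL;
		struct io_wq_work_list list;
		struct io_wq_work_node *node;

		/* Splice all pending work off tctx->task_list under the lock. */
		spin_lock_irq(&tctx->task_lock);
		list = tctx->task_list;
		INIT_WQ_LIST(&tctx->task_list);
		spin_unlock_irq(&tctx->task_lock);	/* inferred from between-hunk context */

		node = list.first;
		while (node) {
			struct io_wq_work_node *next = node->next;
			struct io_kiocb *req = container_of(node, struct io_kiocb,
							    io_task_work.node);

			/* Batch ctx refs: flush/put only when the ring changes. */
			if (req->ctx != ctx) {
				ctx_flush_and_put(ctx);
				ctx = req->ctx;
				percpu_ref_get(&ctx->refs);
			}
			req->task_work.func(&req->task_work);
			node = next;
		}

		ctx_flush_and_put(ctx);
		if (!list.first)
			break;
		cond_resched();
	}
}

Functionally this matches the old __tctx_task_work()/tctx_task_work() pair: the pending list is spliced under task_lock, each request's task_work.func is run, ctx references are batched via ctx_flush_and_put(), and cond_resched() is called between passes.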