io_uring-5.14-2021-07-30

-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmEEE1IQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpm3jD/9Tx+VeLEmfapYdZXziDWrVecwm0iWfSoT8
ibKJGIYHmCH1XxIVT16+a1HcJAra7NGS60zW6JAvBh5ZWL/smEriOu+R6Twa1ez2
9Gby+39+V0PP3x9sKtRp7TsmsA0paIqVG4zwfyaUCyvYfiSTURoYde4lZwSGEMhb
8b0FPju/hmN/iyRGtu1eQvTbp252vahkGE8PKYkZWxNkTdJpvRax3kmbjH3A8/X3
rr0KDMgk4ePVap+3i/h94rXweaLCq9KiSm95Zvs63me6J2CbpKz/hGtvR5TiENiI
0mEqkA9PVE6LRbF4T6gk2gGYrkfEiAzca0r8BdoY0TxuEb2SfK5P3JOfYxS4VHek
lVc+lvm0YglRn6dWxdUMhXYTrlAx4cRnIM9Oqg/WzN4WCZREouE2J/MXHNy3lNfQ
zQQkmbNzeLQHaiq/JWAtu39LAeHWwEC/FernVV1i0wYOT6EacpVNM6OLUmHQGzkZ
mnQpc9AzCgLifNZb4DDlP0MyM1D+GXGm5tdozmUILFQoAnvA6+3EtnRxOH9cWLaa
mSNenp5kag/nbdkFTo3X+ptGYgLBWEluT/dKMsoqulPu+ZCV4zh9rAIgWqQUYwa2
5z/d6OAr3V8hjChiF4a7JrRUISu0f0Eh/GRCqTSp97Hys5rJcDxGB9WQKv7u+sTc
BArhbyoHcg==
=pzj2
-----END PGP SIGNATURE-----

Merge tag 'io_uring-5.14-2021-07-30' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:

 - A fix for block backed reissue (me)
 - Reissue context hardening (me)
 - Async link locking fix (Pavel)

* tag 'io_uring-5.14-2021-07-30' of git://git.kernel.dk/linux-block:
  io_uring: fix poll requests leaking second poll entries
  io_uring: don't block level reissue off completion path
  io_uring: always reissue from task_work context
  io_uring: fix race in unified task_work running
  io_uring: fix io_prep_async_link locking
commit 27eb687bcd

diff --git a/fs/io_uring.c b/fs/io_uring.c
@@ -1279,8 +1279,17 @@ static void io_prep_async_link(struct io_kiocb *req)
 {
         struct io_kiocb *cur;
 
-        io_for_each_link(cur, req)
-                io_prep_async_work(cur);
+        if (req->flags & REQ_F_LINK_TIMEOUT) {
+                struct io_ring_ctx *ctx = req->ctx;
+
+                spin_lock_irq(&ctx->completion_lock);
+                io_for_each_link(cur, req)
+                        io_prep_async_work(cur);
+                spin_unlock_irq(&ctx->completion_lock);
+        } else {
+                io_for_each_link(cur, req)
+                        io_prep_async_work(cur);
+        }
 }
 
 static void io_queue_async_work(struct io_kiocb *req)
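
Note: the io_prep_async_link() hunk above takes ctx->completion_lock around the link traversal only when the link carries REQ_F_LINK_TIMEOUT, since an armed linked timeout can complete concurrently and modify the link list; the common case stays lock-free. Below is a minimal userspace sketch of that same pattern in C with pthreads; all names (node, prep_one, prep_link, link_mutex) are illustrative, not kernel API.

/*
 * Illustrative analogue of the io_prep_async_link() change: traverse a
 * shared singly linked list under a lock only when a concurrent writer
 * (in the kernel: a firing linked timeout) may modify it.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
        int id;
        struct node *next;
};

static pthread_mutex_t link_mutex = PTHREAD_MUTEX_INITIALIZER;

static void prep_one(struct node *n)
{
        printf("prepping node %d\n", n->id);
}

static void prep_link(struct node *head, bool may_race)
{
        struct node *cur;

        if (may_race) {
                /* a concurrent remover exists: walk the list under the lock */
                pthread_mutex_lock(&link_mutex);
                for (cur = head; cur; cur = cur->next)
                        prep_one(cur);
                pthread_mutex_unlock(&link_mutex);
        } else {
                /* no concurrent writer possible: a lock-free walk is fine */
                for (cur = head; cur; cur = cur->next)
                        prep_one(cur);
        }
}

int main(void)
{
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

        prep_link(&a, true);    /* analogue of the REQ_F_LINK_TIMEOUT case */
        prep_link(&a, false);   /* analogue of the plain case */
        return 0;
}
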
@@ -1950,9 +1959,13 @@ static void tctx_task_work(struct callback_head *cb)
                         node = next;
                 }
                 if (wq_list_empty(&tctx->task_list)) {
+                        spin_lock_irq(&tctx->task_lock);
                         clear_bit(0, &tctx->task_state);
-                        if (wq_list_empty(&tctx->task_list))
+                        if (wq_list_empty(&tctx->task_list)) {
+                                spin_unlock_irq(&tctx->task_lock);
                                 break;
+                        }
+                        spin_unlock_irq(&tctx->task_lock);
                         /* another tctx_task_work() is enqueued, yield */
                         if (test_and_set_bit(0, &tctx->task_state))
                                 break;
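
Note: the tctx_task_work() hunk closes a race. Previously the worker cleared the task_state bit and re-checked the list without holding tctx->task_lock, so a request queued in between could be left on the list with nobody scheduled to run it. A rough userspace sketch of the pattern, using pthreads and C11 atomics; worklist, work_lock, scheduled, queue_work and run_work are all made-up names, not the kernel's.

/*
 * Sketch of the fixed hand-off: the "worker is scheduled" flag may only be
 * cleared, and the list re-checked, under the same lock the producer holds
 * while enqueueing.  Otherwise an item enqueued between the clear and the
 * re-check can be stranded.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct work { struct work *next; };

static struct work *worklist;
static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int scheduled;

/* Producer: enqueue under the lock; return true if the caller must kick
 * the worker (the 0 -> 1 transition). */
static bool queue_work(struct work *w)
{
        pthread_mutex_lock(&work_lock);
        w->next = worklist;
        worklist = w;
        pthread_mutex_unlock(&work_lock);
        return atomic_exchange(&scheduled, 1) == 0;
}

/* Worker: mirrors the structure of the patched loop. */
static void run_work(void)
{
        for (;;) {
                struct work *batch, *next;

                pthread_mutex_lock(&work_lock);
                batch = worklist;
                worklist = NULL;
                pthread_mutex_unlock(&work_lock);

                for (; batch; batch = next) {   /* "process" each item */
                        next = batch->next;
                        printf("processed %p\n", (void *)batch);
                }

                pthread_mutex_lock(&work_lock);
                /* clear the flag and re-check emptiness under the SAME lock
                 * the producer enqueues under -- this ordering is the fix */
                atomic_store(&scheduled, 0);
                if (!worklist) {
                        pthread_mutex_unlock(&work_lock);
                        break;                  /* truly idle */
                }
                pthread_mutex_unlock(&work_lock);

                /* a producer already re-kicked another run: yield to it */
                if (atomic_exchange(&scheduled, 1) == 1)
                        break;
        }
}

int main(void)
{
        struct work a, b;

        if (queue_work(&a))
                run_work();
        if (queue_work(&b))
                run_work();
        return 0;
}
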
@@ -2047,6 +2060,12 @@ static void io_req_task_queue(struct io_kiocb *req)
         io_req_task_work_add(req);
 }
 
+static void io_req_task_queue_reissue(struct io_kiocb *req)
+{
+        req->io_task_work.func = io_queue_async_work;
+        io_req_task_work_add(req);
+}
+
 static inline void io_queue_next(struct io_kiocb *req)
 {
         struct io_kiocb *nxt = io_req_find_next(req);
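
Note: io_req_task_queue_reissue() records io_queue_async_work as the request's task_work callback and queues it, so the actual reissue happens later in task context rather than directly from the completion path (the callers converted in the hunks below). A purely illustrative userspace analogue of that "set a callback, defer to the owning task" shape follows; struct request, req_task_queue_reissue and run_deferred are hypothetical names, not the kernel's task_work machinery.

/*
 * Rough analogue of io_req_task_queue_reissue(): record a callback on the
 * request and push it onto a deferred-work list instead of acting on it
 * directly from the (possibly atomic) completion path.
 */
#include <stdio.h>

struct request {
        void (*task_work_fn)(struct request *);
        struct request *next;
};

static struct request *deferred;        /* stand-in for the task_work list */

static void queue_async_work(struct request *req)
{
        printf("reissuing request %p from task context\n", (void *)req);
}

static void req_task_queue_reissue(struct request *req)
{
        req->task_work_fn = queue_async_work;   /* what to run later */
        req->next = deferred;                   /* where it will be run from */
        deferred = req;
}

static void run_deferred(void)          /* runs later, in safe task context */
{
        while (deferred) {
                struct request *req = deferred;

                deferred = req->next;
                req->task_work_fn(req);
        }
}

int main(void)
{
        struct request r = { 0 };

        req_task_queue_reissue(&r);
        run_deferred();
        return 0;
}
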
@@ -2235,7 +2254,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
                     !(req->flags & REQ_F_DONT_REISSUE)) {
                         req->iopoll_completed = 0;
                         req_ref_get(req);
-                        io_queue_async_work(req);
+                        io_req_task_queue_reissue(req);
                         continue;
                 }
 
@@ -2428,6 +2447,12 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
          */
         if (percpu_ref_is_dying(&ctx->refs))
                 return false;
+        /*
+         * Play it safe and assume not safe to re-import and reissue if we're
+         * not in the original thread group (or in task context).
+         */
+        if (!same_thread_group(req->task, current) || !in_task())
+                return false;
         return true;
 }
 #else
@@ -2758,7 +2783,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
         req->flags &= ~REQ_F_REISSUE;
         if (io_resubmit_prep(req)) {
                 req_ref_get(req);
-                io_queue_async_work(req);
+                io_req_task_queue_reissue(req);
         } else {
                 int cflags = 0;
 
@@ -4914,7 +4939,6 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
         if (req->poll.events & EPOLLONESHOT)
                 flags = 0;
         if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
-                io_poll_remove_waitqs(req);
                 req->poll.done = true;
                 flags = 0;
         }
@@ -4937,6 +4961,7 @@ static void io_poll_task_func(struct io_kiocb *req)
 
                 done = io_poll_complete(req, req->result);
                 if (done) {
+                        io_poll_remove_double(req);
                         hash_del(&req->hash_node);
                 } else {
                         req->result = 0;
@@ -5124,7 +5149,7 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
                 ipt->error = -EINVAL;
 
         spin_lock_irq(&ctx->completion_lock);
-        if (ipt->error)
+        if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
                 io_poll_remove_double(req);
         if (likely(poll->head)) {
                 spin_lock(&poll->head->lock);
@@ -5196,7 +5221,6 @@ static int io_arm_poll_handler(struct io_kiocb *req)
         ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
                                         io_async_wake);
         if (ret || ipt.error) {
-                io_poll_remove_double(req);
                 spin_unlock_irq(&ctx->completion_lock);
                 if (ret)
                         return IO_APOLL_READY;