io_uring-5.9-2020-10-02

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl93Z48QHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpmp4EACwxi4UVnL0zhaOBmXfqxDuaXViwkfVZNxx
 d40y+DcCewnpZMk2G9cES8OKG+Tu2GFX2yl1m2XdrIWJ6jpnGFKJOkNQGfPDQrT3
 fI7qFrEDeSVeLUMMBxtvZLW8w2D0KcNCgla4h/ESXI9xtPTZdYXhYQY0zfuWalUC
 ZplUgAWlHx82qJari7ZmIfeVtpAoujTvkccRe+/RtPv5vO+UsvP7kqPSCYMGqhHS
 7z5gK3Nw+PNMWrzZVZ6Rw5nLeExx9PJGgiEkitEjn7mRJELXV9eWnTt9D0eVwaec
 WO7OSQmrJLmMFER4ZhkDNJkXZFvlYUCygnwJQmH70LflRqUEA00O6wX4J32O3NIg
 fIDWKMGGANFU5atL+RHqfQgUYq0GY1UsIvZxJnwRwv1QssmJoQq9fpT6VYqiQMik
 2JAeWyMqTGI4vRNmVJKTR/13SpRUYrvS3wHN53kCaBBhE5Y/vFksgOGgXZBG/TPk
 odpegeJOTa5xuS0YcKIK6yL/xHENct1Y1BtVjczrXKJz0E90n5ZdIR0lEg6Ij3B1
 jZUwKiS2sY09eBaJIQvtD4hIaw5VgqtwinKTyt7MBw/6pCqJpSZtaV0Uvgvjq/Se
 1ifUo4cWwQBccZLgWeWoEalio2fNIyb+J+sm7eu9Xygjl67U2M8oMfAN2JjkM7As
 btLazer4lg==
 =fo3Z
 -----END PGP SIGNATURE-----

Merge tag 'io_uring-5.9-2020-10-02' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:

 - fix for async buffered reads if read-ahead is fully disabled (Hao)

 - double poll match fix

 - ->show_fdinfo() potential ABBA deadlock complaint fix

* tag 'io_uring-5.9-2020-10-02' of git://git.kernel.dk/linux-block:
  io_uring: fix async buffered reads when readahead is disabled
  io_uring: fix potential ABBA deadlock in ->show_fdinfo()
  io_uring: always delete double poll wait entry on match
Merged by Linus Torvalds, 2020-10-02 14:38:10 -07:00
commit 702bfc891d
2 changed files with 23 additions and 6 deletions

fs/io_uring.c

@@ -3049,6 +3049,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 	if (!wake_page_match(wpq, key))
 		return 0;
 
+	req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
 	list_del_init(&wait->entry);
 
 	init_task_work(&req->task_work, io_req_task_submit);
@@ -3106,6 +3107,7 @@ static bool io_rw_should_retry(struct io_kiocb *req)
 	wait->wait.flags = 0;
 	INIT_LIST_HEAD(&wait->wait.entry);
 	kiocb->ki_flags |= IOCB_WAITQ;
+	kiocb->ki_flags &= ~IOCB_NOWAIT;
 	kiocb->ki_waitq = wait;
 
 	io_get_req_task(req);
@@ -4743,6 +4745,8 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
 	if (mask && !(mask & poll->events))
 		return 0;
 
+	list_del_init(&wait->entry);
+
 	if (poll && poll->head) {
 		bool done;
 
@@ -8412,11 +8416,19 @@ static int io_uring_show_cred(int id, void *p, void *data)
 
 static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
 {
+	bool has_lock;
 	int i;
 
-	mutex_lock(&ctx->uring_lock);
+	/*
+	 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
+	 * since fdinfo case grabs it in the opposite direction of normal use
+	 * cases. If we fail to get the lock, we just don't iterate any
+	 * structures that could be going away outside the io_uring mutex.
+	 */
+	has_lock = mutex_trylock(&ctx->uring_lock);
+
 	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
-	for (i = 0; i < ctx->nr_user_files; i++) {
+	for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
 		struct fixed_file_table *table;
 		struct file *f;
 
@@ -8428,13 +8440,13 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
 			seq_printf(m, "%5u: <none>\n", i);
 	}
 	seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
-	for (i = 0; i < ctx->nr_user_bufs; i++) {
+	for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
 		struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
 
 		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
 						(unsigned int) buf->len);
 	}
-	if (!idr_is_empty(&ctx->personality_idr)) {
+	if (has_lock && !idr_is_empty(&ctx->personality_idr)) {
 		seq_printf(m, "Personalities:\n");
 		idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
 	}
@@ -8449,7 +8461,8 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
 					req->task->task_works != NULL);
 	}
 	spin_unlock_irq(&ctx->completion_lock);
-	mutex_unlock(&ctx->uring_lock);
+	if (has_lock)
+		mutex_unlock(&ctx->uring_lock);
 }
 
 static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
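
The comment added to __io_uring_show_fdinfo() above is the heart of the third fix: fdinfo output is rendered under the seq_file lock, which nests against ->uring_lock in the opposite order from normal io_uring paths, so the code now only trylocks the mutex and skips the tables it cannot safely walk. Below is a minimal userspace sketch of that trylock-or-degrade pattern, using pthreads and hypothetical names (report_state, resource_lock); it illustrates the locking idea only and is not kernel code.

	/*
	 * Userspace sketch of the "trylock or degrade gracefully" pattern used
	 * by the ->show_fdinfo() fix. All names here are hypothetical.
	 */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t resource_lock = PTHREAD_MUTEX_INITIALIZER;
	static int nr_items = 3;
	static const char *items[] = { "a", "b", "c" };

	/*
	 * Called from a context that may already hold other locks in the
	 * opposite order; blocking on resource_lock here could deadlock, so
	 * only report the protected table if the lock can be taken for free.
	 */
	static void report_state(FILE *out)
	{
		bool has_lock = pthread_mutex_trylock(&resource_lock) == 0;

		fprintf(out, "items:\t%d\n", nr_items);
		/* Only walk the table if we actually own the lock. */
		for (int i = 0; has_lock && i < nr_items; i++)
			fprintf(out, "%5d: %s\n", i, items[i]);

		if (has_lock)
			pthread_mutex_unlock(&resource_lock);
	}

	int main(void)
	{
		report_state(stdout);
		return 0;
	}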

mm/filemap.c

@@ -2365,7 +2365,11 @@ readpage:
 		}
 
 		if (!PageUptodate(page)) {
-			error = lock_page_killable(page);
+			if (iocb->ki_flags & IOCB_WAITQ)
+				error = lock_page_async(page,
+								iocb->ki_waitq);
+			else
+				error = lock_page_killable(page);
 			if (unlikely(error))
 				goto readpage_error;
 			if (!PageUptodate(page)) {
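
The mm/filemap.c change above is what makes the first fix work when readahead is fully disabled: the buffered read path then reaches the !PageUptodate() branch and previously fell back to lock_page_killable(), sleeping even for IOCB_WAITQ requests and defeating async buffered reads. With the fix, the kiocb's wait queue is used via lock_page_async() so the read is retried from the wakeup instead of blocking. Below is a minimal, single-threaded simulation of that arm-a-callback-and-retry idea; every name in it (struct page_sim, read_async, page_ready) is hypothetical and it is not the kernel implementation.

	/*
	 * Simulation of the async-retry idea behind IOCB_WAITQ: the first
	 * attempt never sleeps; if the data is not ready it registers a
	 * callback and returns -EAGAIN, and the "wakeup" path retries.
	 */
	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct page_sim {
		bool uptodate;
		void (*waiter)(struct page_sim *);	/* armed callback, if any */
	};

	static int read_async(struct page_sim *p, void (*retry)(struct page_sim *))
	{
		if (!p->uptodate) {
			p->waiter = retry;	/* arm a wait entry instead of sleeping */
			return -EAGAIN;
		}
		printf("read completed without blocking\n");
		return 0;
	}

	static void retry_read(struct page_sim *p)
	{
		read_async(p, NULL);		/* second attempt, data is now ready */
	}

	static void page_ready(struct page_sim *p)
	{
		p->uptodate = true;		/* e.g. the read into the page finished */
		if (p->waiter) {
			void (*cb)(struct page_sim *) = p->waiter;

			p->waiter = NULL;	/* always remove the entry on match */
			cb(p);			/* retry from the wakeup, still non-blocking */
		}
	}

	int main(void)
	{
		struct page_sim page = { .uptodate = false, .waiter = NULL };

		if (read_async(&page, retry_read) == -EAGAIN)
			printf("not ready, callback armed\n");
		page_ready(&page);		/* "wakeup" triggers the retry */
		return 0;
	}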