dax fix 4.21
* Clean up unnecessary usage of prepare_to_wait_exclusive()

-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJcJ8sYAAoJEB7SkWpmfYgCdJ8QAJ6uPXu26SOfwew3MITNfNGO
8VXnjDlOq8mi9u51xPeurcT+4h3g1AiEMGl6ugI8Jxx704a4/P80fnAjVTmYvugy
7Ub29tTpqFPaermaT2N/K4zyqZxo/ozo5k1q3EqvNYc2IIBDlHKwKcirQpTqzIJ/
hv8sgLLf/f9J6CtBNSXeGfsV6DKp8bmqXvSGzSsphhbkcW/i1UMCey5rXN5iIT4/
gSdSeLxP6asjzeGm1/sC1G6g3Pi6USVmWe6Cs7dMbPSgkmzpGirkobmx+e34npBQ
gmabFMxaClPCar2vAGorhPbtXu5uZrHCURirVpMvmIj9MJlK/8uX4kbgn6r6N5nS
hZRZlnIvvjfucb66xCyFE/1I2xL7iIdOlcLSyG4f6bGAZTmupFGGOsoyf+BQSeT0
08n4rvmBWQ/thUXAzkR4yUu77zRmQkmwbTjnOXUv4GNocvMoUcLwazh1QeY8W2rW
RnUkk8B3iEgjfpKrjok/6MWd8qokwUVozOKUSVvKc8MEMraPVNzQMDKIl0hWUuE5
kjF+YXv+qozYvLR7IToqx+2TZp6VcZUujV5qof05nPQGHIztkwHIKZg7EimZe8qa
hKZA2X+1XOv2EGYLq5XexxR8rehiqgH7HlaMwuQBYqEnmkTx4tVWHwax2+vzVnVh
UcpyHRN2RFwPWIIUTaeN
=2anQ
-----END PGP SIGNATURE-----

Merge tag 'dax-fix-4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull dax fix from Dan Williams:
 "Clean up unnecessary usage of prepare_to_wait_exclusive(). While I
  feel a bit silly sending a single-commit pull-request there is
  nothing else queued up for dax this cycle. This change has shipped
  in -next for multiple releases"

* tag 'dax-fix-4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  dax: Use non-exclusive wait in wait_entry_unlocked()
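The change hinges on the difference between the two wait flavors:
prepare_to_wait_exclusive() queues the task with WQ_FLAG_EXCLUSIVE, so a
plain wake_up() wakes at most one such waiter and each woken task is
expected to pass the wakeup along; prepare_to_wait() queues a
non-exclusive waiter that every wake_up() on the queue reaches, so no
hand-off is needed. The sketch below contrasts the two. It assumes a
kernel-module context, and the names demo_wq, demo_cond, and both
demo_* functions are hypothetical, not from fs/dax.c.

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static bool demo_cond;

/* Non-exclusive: every wake_up(&demo_wq) reaches this task. */
static void demo_wait_nonexclusive(void)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&demo_wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!demo_cond)
		schedule();
	finish_wait(&demo_wq, &wait);
}

/*
 * Exclusive: the task is queued with WQ_FLAG_EXCLUSIVE, so a plain
 * wake_up(&demo_wq) wakes at most one such waiter. If the woken task
 * bails out without re-waking the queue, the remaining exclusive
 * waiters stay asleep -- the hazard the removed __wake_up() call in
 * wait_entry_unlocked() was compensating for.
 */
static void demo_wait_exclusive(void)
{
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&demo_wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!demo_cond)
		schedule();
	finish_wait(&demo_wq, &wait);
}

With the non-exclusive variant there is nothing to hand off, which is
why the hunk below can drop the waitqueue_active()/__wake_up() pair
entirely.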
commit 2a1a2c1a76

 fs/dax.c | 16 +++++++---------
@@ -246,18 +246,16 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
 	ewait.wait.func = wake_exceptional_entry_func;
 
 	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
-	prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+	/*
+	 * Unlike get_unlocked_entry() there is no guarantee that this
+	 * path ever successfully retrieves an unlocked entry before an
+	 * inode dies. Perform a non-exclusive wait in case this path
+	 * never successfully performs its own wake up.
+	 */
+	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
 	xas_unlock_irq(xas);
 	schedule();
 	finish_wait(wq, &ewait.wait);
-
-	/*
-	 * Entry lock waits are exclusive. Wake up the next waiter since
-	 * we aren't sure we will acquire the entry lock and thus wake
-	 * the next waiter up on unlock.
-	 */
-	if (waitqueue_active(wq))
-		__wake_up(wq, TASK_NORMAL, 1, &ewait.key);
 }
 
 static void put_unlocked_entry(struct xa_state *xas, void *entry)