btrfs: rename extent_buffer::lock_nested to extent_buffer::lock_recursed

Nested locking, in lockdep and everywhere else, refers to a lock
hierarchy within the same lock map.  This is how we indicate that
taking the same type of lock on different objects in a specific order
is ok; for our use case that would be taking the lock on a leaf and
then taking the lock on an adjacent leaf.
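
For illustration, nesting in the lockdep sense means two locks from the
same class taken in a deliberate order, with the inner acquisition
annotated by a subclass.  A minimal sketch using rwsems (the
lock_adjacent_leaves() helper is hypothetical, not btrfs code):

    #include <linux/lockdep.h>
    #include <linux/rwsem.h>

    /*
     * Both semaphores belong to the same lock class, so lockdep would
     * normally complain about taking the second while holding the
     * first.  The _nested variant with a distinct subclass says the
     * ordering is intentional: a leaf, then its adjacent leaf.
     */
    static void lock_adjacent_leaves(struct rw_semaphore *leaf,
                                     struct rw_semaphore *next_leaf)
    {
            down_write(leaf);
            down_write_nested(next_leaf, SINGLE_DEPTH_NESTING);
    }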

What ->lock_nested _actually_ refers to is the case where we already
hold the write lock on an extent buffer and allow a read lock to be
taken on that same extent buffer, which is recursion.  Rename the
field so we don't get confused when we switch to a rwsem and have to
start using the _nested helpers.
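
Recursion, by contrast, looks like this condensed sketch of the
pattern the flag guards, adapted from the btrfs_tree_read_lock() hunk
below (tracing and the retry path omitted):

    read_lock(&eb->lock);
    if (eb->blocking_writers && current->pid == eb->lock_owner) {
            /*
             * The current task already write-holds this extent
             * buffer, so the read lock is recursion, not nesting:
             * record it and return instead of deadlocking against
             * ourselves.
             */
            BUG_ON(eb->lock_recursed);
            eb->lock_recursed = true;
            read_unlock(&eb->lock);
            return;
    }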

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 329ced799b (parent b9ba017fb0)
Josef Bacik, 2020-08-20 11:46:00 -04:00, committed by David Sterba
3 changed files with 14 additions and 14 deletions

fs/btrfs/extent_io.c

@@ -4990,7 +4990,7 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
rwlock_init(&eb->lock);
atomic_set(&eb->blocking_readers, 0);
eb->blocking_writers = 0;
-eb->lock_nested = false;
+eb->lock_recursed = false;
init_waitqueue_head(&eb->write_lock_wq);
init_waitqueue_head(&eb->read_lock_wq);

fs/btrfs/extent_io.h

@@ -102,7 +102,7 @@ struct extent_buffer {
int blocking_writers;
atomic_t blocking_readers;
-bool lock_nested;
+bool lock_recursed;
/* >= 0 if eb belongs to a log tree, -1 otherwise */
short log_index;

fs/btrfs/locking.c

@@ -57,8 +57,8 @@
* performance reasons.
*
*
- * Lock nesting
- * ------------
+ * Lock recursion
+ * --------------
*
* A write operation on a tree might indirectly start a look up on the same
* tree. This can happen when btrfs_cow_block locks the tree and needs to
@@ -201,7 +201,7 @@ void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
* lock, but it won't change to or away from us. If we have the write
* lock, we are the owner and it'll never change.
*/
-if (eb->lock_nested && current->pid == eb->lock_owner)
+if (eb->lock_recursed && current->pid == eb->lock_owner)
return;
btrfs_assert_tree_read_locked(eb);
atomic_inc(&eb->blocking_readers);
@@ -225,7 +225,7 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
* lock, but it won't change to or away from us. If we have the write
* lock, we are the owner and it'll never change.
*/
-if (eb->lock_nested && current->pid == eb->lock_owner)
+if (eb->lock_recursed && current->pid == eb->lock_owner)
return;
if (eb->blocking_writers == 0) {
btrfs_assert_spinning_writers_put(eb);
@@ -263,8 +263,8 @@ again:
* depends on this as it may be called on a partly
* (write-)locked tree.
*/
-BUG_ON(eb->lock_nested);
-eb->lock_nested = true;
+BUG_ON(eb->lock_recursed);
+eb->lock_recursed = true;
read_unlock(&eb->lock);
trace_btrfs_tree_read_lock(eb, start_ns);
return;
@@ -362,11 +362,11 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
/*
* if we're nested, we have the write lock. No new locking
* is needed as long as we are the lock owner.
- * The write unlock will do a barrier for us, and the lock_nested
+ * The write unlock will do a barrier for us, and the lock_recursed
* field only matters to the lock owner.
*/
-if (eb->lock_nested && current->pid == eb->lock_owner) {
-eb->lock_nested = false;
+if (eb->lock_recursed && current->pid == eb->lock_owner) {
+eb->lock_recursed = false;
return;
}
btrfs_assert_tree_read_locked(eb);
@@ -388,11 +388,11 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
/*
* if we're nested, we have the write lock. No new locking
* is needed as long as we are the lock owner.
- * The write unlock will do a barrier for us, and the lock_nested
+ * The write unlock will do a barrier for us, and the lock_recursed
* field only matters to the lock owner.
*/
-if (eb->lock_nested && current->pid == eb->lock_owner) {
-eb->lock_nested = false;
+if (eb->lock_recursed && current->pid == eb->lock_owner) {
+eb->lock_recursed = false;
return;
}
btrfs_assert_tree_read_locked(eb);