btrfs: change test_range_bit to scan the whole range

The semantics of test_range_bit() with filled == 0 now live in their own
helper, test_range_bit_exists(), so test_range_bit() checks the whole
range unconditionally. The detection logic is flipped: it assumes
success by default and bails out on the first exception found.

Signed-off-by: David Sterba <dsterba@suse.com>

Author: David Sterba <dsterba@suse.com>
Date:   2020-08-14 11:35:16 +02:00
Commit: 893fe24399 (parent: 99be1a66e1)

5 changed files with 22 additions and 22 deletions
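
For context, the old 'filled' flag multiplexed two behaviors in one
function. Below is a minimal user-space model of the split, with a
hypothetical flat array (struct seg, range_bit_exists, range_bit_filled
are illustrative names) standing in for the rbtree of struct
extent_state; it sketches the semantics, not the kernel implementation:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical simplified segment; the real states live in an rbtree. */
struct seg {
	uint64_t start;
	uint64_t end;		/* inclusive, as in struct extent_state */
	uint32_t state;		/* bit flags */
};

/* Old filled == 0: true if ANY overlapping segment has the bit.
 * This behavior now lives in test_range_bit_exists(). */
static bool range_bit_exists(const struct seg *segs, size_t n,
			     uint64_t start, uint64_t end, uint32_t bit)
{
	for (size_t i = 0; i < n; i++)
		if (segs[i].end >= start && segs[i].start <= end &&
		    (segs[i].state & bit))
			return true;
	return false;
}

/* Old filled == 1: true only if contiguous segments with the bit cover
 * the WHOLE range. This is what test_range_bit() now does, assuming
 * success by default and breaking out on the first gap or miss. */
static bool range_bit_filled(const struct seg *segs, size_t n,
			     uint64_t start, uint64_t end, uint32_t bit)
{
	for (size_t i = 0; i < n && start <= end; i++) {
		if (segs[i].end < start)
			continue;		/* segment before the range */
		if (segs[i].start > start)
			return false;		/* gap: range not fully covered */
		if ((segs[i].state & bit) == 0)
			return false;		/* covered, but bit missing */
		if (segs[i].end == UINT64_MAX)
			return true;		/* cannot advance past the end */
		start = segs[i].end + 1;	/* continue after this segment */
	}
	return start > end;	/* true iff we walked past the whole range */
}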


@@ -1671,15 +1671,15 @@ bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32
 }
 
 /*
- * Search a range in the state tree for a given mask. If 'filled' == 1, this
- * returns 1 only if every extent in the tree has the bits set. Otherwise, 1
- * is returned if any bit in the range is found set.
+ * Check if the whole range [@start,@end) contains the single @bit set.
  */
-int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   u32 bits, int filled, struct extent_state *cached)
+bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
+		    struct extent_state *cached)
 {
 	struct extent_state *state = NULL;
-	int bitset = 0;
+	bool bitset = true;
 
+	ASSERT(is_power_of_2(bit));
 	spin_lock(&tree->lock);
 	if (cached && extent_state_in_tree(cached) && cached->start <= start &&
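
The new ASSERT(is_power_of_2(bit)) documents that the simplified
interface takes exactly one bit rather than a mask, hence the rename
from 'bits' to 'bit'; is_power_of_2() is the stock helper from
include/linux/log2.h. Since the EXTENT_* flags are single bits, an
or-combined mask would now trip the assertion, e.g.:

	ASSERT(is_power_of_2(EXTENT_DELALLOC));		/* ok: single bit */
	/* ASSERT(is_power_of_2(EXTENT_DELALLOC | EXTENT_DIRTY)); would trip */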
@@ -1688,35 +1688,35 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	else
 		state = tree_search(tree, start);
 	while (state && start <= end) {
-		if (filled && state->start > start) {
-			bitset = 0;
+		if (state->start > start) {
+			bitset = false;
 			break;
 		}
 
 		if (state->start > end)
 			break;
 
-		if (state->state & bits) {
-			bitset = 1;
-			if (!filled)
-				break;
-		} else if (filled) {
-			bitset = 0;
+		if ((state->state & bit) == 0) {
+			bitset = false;
 			break;
 		}
 
 		if (state->end == (u64)-1)
 			break;
 
+		/*
+		 * Last entry (if state->end is (u64)-1 and overflow happens),
+		 * or next entry starts after the range.
+		 */
 		start = state->end + 1;
-		if (start > end)
+		if (start > end || start == 0)
 			break;
 		state = next_state(state);
 	}
 
 	/* We ran out of states and were still inside of our range. */
-	if (filled && !state)
-		bitset = 0;
+	if (!state)
+		bitset = false;
 	spin_unlock(&tree->lock);
 	return bitset;
 }
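
The new start == 0 test handles unsigned wrap-around: if a state ended
at (u64)-1, state->end + 1 would wrap to zero and the scan would
continue from a bogus start position. The state->end == (u64)-1 break
just above already catches that case, so the extra check is a defensive
guard, matching the comment added with it. A standalone illustration in
plain C:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t end = UINT64_MAX;	/* state->end == (u64)-1 */
	uint64_t next = end + 1;	/* unsigned arithmetic wraps to 0 */

	assert(next == 0);		/* hence the extra "start == 0" break */
	return 0;
}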


@@ -131,8 +131,8 @@ u64 count_range_bits(struct extent_io_tree *tree,
 			     struct extent_state **cached_state);
 void free_extent_state(struct extent_state *state);
-int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   u32 bits, int filled, struct extent_state *cached_state);
+bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
+		    struct extent_state *cached_state);
 bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 			     u32 bits, struct extent_changeset *changeset);
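
Caller updates follow mechanically from the new prototypes. A sketch of
the migration (fragments only, arguments illustrative):

	/* Before: one function, 'filled' selects the semantics. */
	ret = test_range_bit(tree, start, end, EXTENT_DELALLOC, 1, cached);  /* whole range */
	ret = test_range_bit(tree, start, end, EXTENT_DELALLOC, 0, NULL);    /* any overlap */

	/* After: one helper per semantics, both returning bool. */
	ret = test_range_bit(tree, start, end, EXTENT_DELALLOC, cached);     /* whole range */
	ret = test_range_bit_exists(tree, start, end, EXTENT_DELALLOC);      /* any overlap */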


@@ -394,7 +394,7 @@ again:
 
 	/* then test to make sure it is all still delalloc */
 	ret = test_range_bit(tree, delalloc_start, delalloc_end,
-			     EXTENT_DELALLOC, 1, cached_state);
+			     EXTENT_DELALLOC, cached_state);
 	if (!ret) {
 		unlock_extent(tree, delalloc_start, delalloc_end,
 			      &cached_state);


@@ -3290,7 +3290,7 @@ bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
 
 	if (btrfs_is_data_reloc_root(inode->root) &&
 	    test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
-			   1, NULL)) {
+			   NULL)) {
 		/* Skip the range without csum for data reloc inode */
 		clear_extent_bits(&inode->io_tree, file_offset, end,
 				  EXTENT_NODATASUM);


@@ -2631,7 +2631,7 @@ static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
 	u32 blocksize = rc->extent_root->fs_info->nodesize;
 
 	if (test_range_bit(&rc->processed_blocks, bytenr,
-			   bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
+			   bytenr + blocksize - 1, EXTENT_DIRTY, NULL))
		return 1;
	return 0;
 }
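
With test_range_bit() now returning bool, wrappers like
tree_block_processed() no longer need the explicit 1/0 branches; a
possible follow-up cleanup (not part of this commit) could return the
result directly:

static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
{
	u32 blocksize = rc->extent_root->fs_info->nodesize;

	/* bool converts implicitly to the int return value */
	return test_range_bit(&rc->processed_blocks, bytenr,
			      bytenr + blocksize - 1, EXTENT_DIRTY, NULL);
}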