btrfs: convert submit_eb_subpage() to take a folio

The old page API is being gradually replaced with the folio API to
improve code readability and avoid repeated conversions between page
and folio. Moreover, use folio_pos() instead of page_offset(), which
is more consistent with folio usage.
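
A minimal before/after sketch of the pattern (illustrative only, simplified
from the diff below):

    /* Old: the caller passes a struct page and the helper converts it. */
    static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
    {
            struct btrfs_fs_info *fs_info = page_to_fs_info(page);
            struct folio *folio = page_folio(page);  /* extra page -> folio lookup */
            u64 page_start = page_offset(page);      /* file offset of the page */
            /* ... submit dirty extent buffers starting within this page ... */
    }

    /* New: the caller passes the folio directly, no conversion needed. */
    static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc)
    {
            struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
            u64 folio_start = folio_pos(folio);      /* file offset of the folio */
            /* ... same logic, now using folio_start ... */
    }

folio_pos() returns the byte offset of the folio within its file; for a
single-page folio it is equivalent to page_offset() on that page, so the
computed ranges are unchanged.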

Signed-off-by: Li Zetao <lizetao1@huawei.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 135873258c
parent 884937793d
Author:    Li Zetao <lizetao1@huawei.com>
Date:      2024-08-29 02:29:00 +08:00
Committer: David Sterba <dsterba@suse.com>


@@ -1735,12 +1735,11 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
  * Return >=0 for the number of submitted extent buffers.
  * Return <0 for fatal error.
  */
-static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
+static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc)
 {
-        struct btrfs_fs_info *fs_info = page_to_fs_info(page);
-        struct folio *folio = page_folio(page);
+        struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
         int submitted = 0;
-        u64 page_start = page_offset(page);
+        u64 folio_start = folio_pos(folio);
         int bit_start = 0;
         int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
@@ -1755,21 +1754,21 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
                  * Take private lock to ensure the subpage won't be detached
                  * in the meantime.
                  */
-                spin_lock(&page->mapping->i_private_lock);
+                spin_lock(&folio->mapping->i_private_lock);
                 if (!folio_test_private(folio)) {
-                        spin_unlock(&page->mapping->i_private_lock);
+                        spin_unlock(&folio->mapping->i_private_lock);
                         break;
                 }
                 spin_lock_irqsave(&subpage->lock, flags);
                 if (!test_bit(bit_start + btrfs_bitmap_nr_dirty * fs_info->sectors_per_page,
                               subpage->bitmaps)) {
                         spin_unlock_irqrestore(&subpage->lock, flags);
-                        spin_unlock(&page->mapping->i_private_lock);
+                        spin_unlock(&folio->mapping->i_private_lock);
                         bit_start++;
                         continue;
                 }
-                start = page_start + bit_start * fs_info->sectorsize;
+                start = folio_start + bit_start * fs_info->sectorsize;
                 bit_start += sectors_per_node;
 
                 /*
@@ -1778,7 +1777,7 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
                  */
                 eb = find_extent_buffer_nolock(fs_info, start);
                 spin_unlock_irqrestore(&subpage->lock, flags);
-                spin_unlock(&page->mapping->i_private_lock);
+                spin_unlock(&folio->mapping->i_private_lock);
 
                 /*
                  * The eb has already reached 0 refs thus find_extent_buffer()
@@ -1829,7 +1828,7 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
                 return 0;
 
         if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
-                return submit_eb_subpage(page, wbc);
+                return submit_eb_subpage(folio, wbc);
 
         spin_lock(&mapping->i_private_lock);
         if (!folio_test_private(folio)) {