btrfs: allow extent buffer helpers to skip cross-page handling

Currently the btrfs extent buffer helpers do all the cross-page
handling, as there is no guarantee that the pages backing an extent
buffer are physically contiguous.

However on systems with enough memory, there is a very high chance that
the page cache for btree_inode is allocated with physically contiguous
pages.

In that case, we can skip all the complex cross-page handling, thus
speeding up the code.

This patch adds a new member, extent_buffer::addr, which is set to a
non-NULL value only if all the extent buffer pages are physically
contiguous.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
Qu Wenruo 2023-11-16 15:49:06 +10:30 committed by David Sterba
parent 3ba2d3648f
commit 397239ed6a
3 changed files with 75 additions and 3 deletions

View File

@ -74,15 +74,26 @@ static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
static void csum_tree_block(struct extent_buffer *buf, u8 *result)
{
struct btrfs_fs_info *fs_info = buf->fs_info;
const int num_pages = num_extent_pages(buf);
const int first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
int num_pages;
u32 first_page_part;
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
char *kaddr;
int i;
shash->tfm = fs_info->csum_shash;
crypto_shash_init(shash);
kaddr = page_address(buf->pages[0]) + offset_in_page(buf->start);
if (buf->addr) {
/* Pages are contiguous, handle them as a big one. */
kaddr = buf->addr;
first_page_part = fs_info->nodesize;
num_pages = 1;
} else {
kaddr = page_address(buf->pages[0]);
first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
num_pages = num_extent_pages(buf);
}
crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
first_page_part - BTRFS_CSUM_SIZE);

View File

@ -3489,6 +3489,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
struct address_space *mapping = fs_info->btree_inode->i_mapping;
struct btrfs_subpage *prealloc = NULL;
u64 lockdep_owner = owner_root;
bool page_contig = true;
int uptodate = 1;
int ret;
@ -3575,6 +3576,14 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len));
eb->pages[i] = p;
/*
* Check if the current page is physically contiguous with previous eb
* page.
*/
if (i && eb->pages[i - 1] + 1 != p)
page_contig = false;
if (!btrfs_page_test_uptodate(fs_info, p, eb->start, eb->len))
uptodate = 0;
@ -3588,6 +3597,9 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
}
if (uptodate)
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
/* All pages are physically contiguous, can skip cross page handling. */
if (page_contig)
eb->addr = page_address(eb->pages[0]) + offset_in_page(eb->start);
again:
ret = radix_tree_preload(GFP_NOFS);
if (ret) {
@ -4036,6 +4048,11 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
return;
}
if (eb->addr) {
memcpy(dstv, eb->addr + start, len);
return;
}
offset = get_eb_offset_in_page(eb, start);
while (len > 0) {
@ -4067,6 +4084,12 @@ int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
if (eb->addr) {
if (copy_to_user_nofault(dstv, eb->addr + start, len))
ret = -EFAULT;
return ret;
}
offset = get_eb_offset_in_page(eb, start);
while (len > 0) {
@ -4102,6 +4125,9 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
if (check_eb_range(eb, start, len))
return -EINVAL;
if (eb->addr)
return memcmp(ptrv, eb->addr + start, len);
offset = get_eb_offset_in_page(eb, start);
while (len > 0) {
@ -4169,6 +4195,14 @@ static void __write_extent_buffer(const struct extent_buffer *eb,
if (check_eb_range(eb, start, len))
return;
if (eb->addr) {
if (use_memmove)
memmove(eb->addr + start, srcv, len);
else
memcpy(eb->addr + start, srcv, len);
return;
}
offset = get_eb_offset_in_page(eb, start);
while (len > 0) {
@ -4201,6 +4235,11 @@ static void memset_extent_buffer(const struct extent_buffer *eb, int c,
{
unsigned long cur = start;
if (eb->addr) {
memset(eb->addr + start, c, len);
return;
}
while (cur < start + len) {
unsigned long index = get_eb_page_index(cur);
unsigned int offset = get_eb_offset_in_page(eb, cur);
@ -4428,6 +4467,16 @@ void memcpy_extent_buffer(const struct extent_buffer *dst,
check_eb_range(dst, src_offset, len))
return;
if (dst->addr) {
const bool use_memmove = areas_overlap(src_offset, dst_offset, len);
if (use_memmove)
memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
else
memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
return;
}
while (cur_off < len) {
unsigned long cur_src = cur_off + src_offset;
unsigned long pg_index = get_eb_page_index(cur_src);
@ -4460,6 +4509,11 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
return;
}
if (dst->addr) {
memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
return;
}
while (len > 0) {
unsigned long src_i;
size_t cur;

View File

@ -78,6 +78,13 @@ struct extent_buffer {
unsigned long len;
unsigned long bflags;
struct btrfs_fs_info *fs_info;
/*
* The address where the eb can be accessed without any cross-page handling.
* This can be NULL if not possible.
*/
void *addr;
spinlock_t refs_lock;
atomic_t refs;
int read_mirror;