Btrfs: send, apply asynchronous page cache readahead to enhance page read
By analyzing perf output on btrfs send, we found that it spends a large amount of CPU time in page_cache_sync_readahead. This overhead can be reduced by switching to the asynchronous variant. The overall performance gain on HDD and SSD was 9 and 15 percent, respectively, when simply sending a large file.

Signed-off-by: Kuanling Huang <peterh@synology.com>
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit eef16ba269
parent 785884fc31
@@ -4720,16 +4720,27 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
 	/* initial readahead */
 	memset(&sctx->ra, 0, sizeof(struct file_ra_state));
 	file_ra_state_init(&sctx->ra, inode->i_mapping);
-	page_cache_sync_readahead(inode->i_mapping, &sctx->ra, NULL, index,
-		       last_index - index + 1);
 
 	while (index <= last_index) {
 		unsigned cur_len = min_t(unsigned, len,
 					 PAGE_SIZE - pg_offset);
-		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
+
+		page = find_lock_page(inode->i_mapping, index);
 		if (!page) {
-			ret = -ENOMEM;
-			break;
+			page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
+				NULL, index, last_index + 1 - index);
+
+			page = find_or_create_page(inode->i_mapping, index,
+					GFP_KERNEL);
+			if (!page) {
+				ret = -ENOMEM;
+				break;
+			}
+		}
+
+		if (PageReadahead(page)) {
+			page_cache_async_readahead(inode->i_mapping, &sctx->ra,
+				NULL, page, index, last_index + 1 - index);
 		}
 
 		if (!PageUptodate(page)) {
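For readers skimming the hunk, the resulting page-fetch loop reads as below. This is an annotated sketch, not the literal commit text: the comments are added here for explanation, and the tail of the loop (waiting for PageUptodate and copying the data) is elided since this patch does not change it.

	while (index <= last_index) {
		/* Fast path: the page is already cached; take it locked. */
		page = find_lock_page(inode->i_mapping, index);
		if (!page) {
			/* Cache miss: start synchronous readahead over the
			 * remaining range, then allocate the page ourselves. */
			page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
				NULL, index, last_index + 1 - index);
			page = find_or_create_page(inode->i_mapping, index,
					GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		/* The readahead marker flags the window boundary; kick off
		 * the next window without blocking, so the I/O overlaps the
		 * copy work. This overlap is what the commit message credits
		 * for the 9 and 15 percent gains. */
		if (PageReadahead(page)) {
			page_cache_async_readahead(inode->i_mapping, &sctx->ra,
				NULL, page, index, last_index + 1 - index);
		}

		/* ... wait for PageUptodate and copy into the send buffer,
		 * as in the unchanged remainder of fill_read_buf(). */
	}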