btrfs: compression: convert page allocation to folio interfaces

Currently we have two wrappers to allocate and free a page for
compression usage:

- btrfs_alloc_compr_page()
- btrfs_free_compr_page()

The allocator would try to grab a page from the pool, and only allocate
a new page if the pool is empty.

The reclaimer would check if the pool is full; if it is not, it would put the
page back into the pool instead of freeing it.
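
For reference, below is a minimal sketch of that pool behaviour, written
against the folio interfaces this patch converts to.  The names
(example_compr_pool, compr_folio_alloc(), compr_folio_free()) and the fixed
threshold are illustrative only; the real pool is the compr_pool in
fs/btrfs/compression.c, whose threshold is set up at initialization and which
is additionally trimmed by a shrinker (see btrfs_compr_pool_scan() in the
hunks below).

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

struct example_compr_pool {
	spinlock_t lock;
	struct list_head list;
	int count;
	int thresh;
};

static struct example_compr_pool pool = {
	.lock = __SPIN_LOCK_UNLOCKED(pool.lock),
	.list = LIST_HEAD_INIT(pool.list),
	.thresh = 16,	/* illustrative cap, not the real sizing */
};

/* Grab a cached folio from the pool, falling back to the allocator. */
static struct folio *compr_folio_alloc(void)
{
	struct folio *folio = NULL;

	spin_lock(&pool.lock);
	if (pool.count > 0) {
		folio = list_first_entry(&pool.list, struct folio, lru);
		list_del_init(&folio->lru);
		pool.count--;
	}
	spin_unlock(&pool.lock);

	if (folio)
		return folio;

	/* Pool empty: allocate a fresh order-0 folio. */
	return folio_alloc(GFP_NOFS, 0);
}

/* Put a folio back into the pool, or free it once the pool is full. */
static void compr_folio_free(struct folio *folio)
{
	bool do_free = false;

	spin_lock(&pool.lock);
	if (pool.count > pool.thresh) {
		do_free = true;
	} else {
		list_add(&folio->lru, &pool.list);
		pool.count++;
	}
	spin_unlock(&pool.lock);

	if (do_free)
		folio_put(folio);
}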

This patch converts both helpers to the folio interfaces, allowing further
conversion of the compression path to folios.
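
Because many call sites in the compression path still track struct page, the
conversion at this stage bridges at the boundaries: allocation sites unwrap
the returned folio with folio_page(folio, 0) and freeing sites wrap the page
back with page_folio(page), exactly as the call-site hunks below do.  A sketch
of that pattern follows; the two example_* functions are made up for
illustration and are not part of the patch.

/* Illustrative caller that still stores struct page pointers. */
static int example_fill_pages(struct page **pages, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		struct folio *folio = btrfs_alloc_compr_folio();

		if (!folio)
			return -ENOMEM;
		/* The pool hands out order-0 folios, so page 0 is the folio. */
		pages[i] = folio_page(folio, 0);
	}
	return 0;
}

/* Matching release path: wrap each page back into its folio to free it. */
static void example_free_pages(struct page **pages, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		if (!pages[i])
			continue;
		btrfs_free_compr_folio(page_folio(pages[i]));
		pages[i] = NULL;
	}
}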

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 98fe01af7e
parent 6de3595473
Author:    Qu Wenruo <wqu@suse.com>
Date:      2024-01-29 20:16:07 +10:30
Committer: David Sterba <dsterba@suse.com>

6 changed files with 24 additions and 24 deletions

fs/btrfs/compression.c

@@ -161,7 +161,7 @@ static int compression_decompress(int type, struct list_head *ws,
 static void btrfs_free_compressed_pages(struct compressed_bio *cb)
 {
 	for (unsigned int i = 0; i < cb->nr_pages; i++)
-		btrfs_free_compr_page(cb->compressed_pages[i]);
+		btrfs_free_compr_folio(page_folio(cb->compressed_pages[i]));
 	kfree(cb->compressed_pages);
 }
 
@@ -223,25 +223,25 @@ static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_co
 /*
  * Common wrappers for page allocation from compression wrappers
  */
-struct page *btrfs_alloc_compr_page(void)
+struct folio *btrfs_alloc_compr_folio(void)
 {
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 
 	spin_lock(&compr_pool.lock);
 	if (compr_pool.count > 0) {
-		page = list_first_entry(&compr_pool.list, struct page, lru);
-		list_del_init(&page->lru);
+		folio = list_first_entry(&compr_pool.list, struct folio, lru);
+		list_del_init(&folio->lru);
 		compr_pool.count--;
 	}
 	spin_unlock(&compr_pool.lock);
 
-	if (page)
-		return page;
+	if (folio)
+		return folio;
 
-	return alloc_page(GFP_NOFS);
+	return folio_alloc(GFP_NOFS, 0);
 }
 
-void btrfs_free_compr_page(struct page *page)
+void btrfs_free_compr_folio(struct folio *folio)
 {
 	bool do_free = false;
 
@@ -249,7 +249,7 @@ void btrfs_free_compr_page(struct page *page)
 	if (compr_pool.count > compr_pool.thresh) {
 		do_free = true;
 	} else {
-		list_add(&page->lru, &compr_pool.list);
+		list_add(&folio->lru, &compr_pool.list);
 		compr_pool.count++;
 	}
 	spin_unlock(&compr_pool.lock);
@@ -257,8 +257,8 @@ void btrfs_free_compr_page(struct page *page)
 	if (!do_free)
 		return;
 
-	ASSERT(page_ref_count(page) == 1);
-	put_page(page);
+	ASSERT(folio_ref_count(folio) == 1);
+	folio_put(folio);
 }
 
 static void end_bbio_comprssed_read(struct btrfs_bio *bbio)

fs/btrfs/compression.h

@@ -104,8 +104,8 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio);
 
 unsigned int btrfs_compress_str2level(unsigned int type, const char *str);
 
-struct page *btrfs_alloc_compr_page(void);
-void btrfs_free_compr_page(struct page *page);
+struct folio *btrfs_alloc_compr_folio(void);
+void btrfs_free_compr_folio(struct folio *folio);
 
 enum btrfs_compression_type {
 	BTRFS_COMPRESS_NONE = 0,

fs/btrfs/inode.c

@@ -1047,7 +1047,7 @@ free_pages:
 	if (pages) {
 		for (i = 0; i < nr_pages; i++) {
 			WARN_ON(pages[i]->mapping);
-			btrfs_free_compr_page(pages[i]);
+			btrfs_free_compr_folio(page_folio(pages[i]));
 		}
 		kfree(pages);
 	}
@@ -1062,7 +1062,7 @@ static void free_async_extent_pages(struct async_extent *async_extent)
 
 	for (i = 0; i < async_extent->nr_pages; i++) {
 		WARN_ON(async_extent->pages[i]->mapping);
-		btrfs_free_compr_page(async_extent->pages[i]);
+		btrfs_free_compr_folio(page_folio(async_extent->pages[i]));
 	}
 	kfree(async_extent->pages);
 	async_extent->nr_pages = 0;

fs/btrfs/lzo.c

@@ -152,7 +152,7 @@ static int copy_compressed_data_to_page(char *compressed_data,
 	cur_page = out_pages[*cur_out / PAGE_SIZE];
 	/* Allocate a new page */
 	if (!cur_page) {
-		cur_page = btrfs_alloc_compr_page();
+		cur_page = folio_page(btrfs_alloc_compr_folio(), 0);
 		if (!cur_page)
 			return -ENOMEM;
 		out_pages[*cur_out / PAGE_SIZE] = cur_page;
@@ -178,7 +178,7 @@ static int copy_compressed_data_to_page(char *compressed_data,
 		cur_page = out_pages[*cur_out / PAGE_SIZE];
 		/* Allocate a new page */
 		if (!cur_page) {
-			cur_page = btrfs_alloc_compr_page();
+			cur_page = folio_page(btrfs_alloc_compr_folio(), 0);
 			if (!cur_page)
 				return -ENOMEM;
 			out_pages[*cur_out / PAGE_SIZE] = cur_page;

fs/btrfs/zlib.c

@@ -121,7 +121,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 	workspace->strm.total_in = 0;
 	workspace->strm.total_out = 0;
 
-	out_page = btrfs_alloc_compr_page();
+	out_page = folio_page(btrfs_alloc_compr_folio(), 0);
 	if (out_page == NULL) {
 		ret = -ENOMEM;
 		goto out;
@@ -206,7 +206,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 				ret = -E2BIG;
 				goto out;
 			}
-			out_page = btrfs_alloc_compr_page();
+			out_page = folio_page(btrfs_alloc_compr_folio(), 0);
 			if (out_page == NULL) {
 				ret = -ENOMEM;
 				goto out;
@@ -242,7 +242,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 				ret = -E2BIG;
 				goto out;
 			}
-			out_page = btrfs_alloc_compr_page();
+			out_page = folio_page(btrfs_alloc_compr_folio(), 0);
 			if (out_page == NULL) {
 				ret = -ENOMEM;
 				goto out;

fs/btrfs/zstd.c

@@ -414,7 +414,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 	workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
 
 	/* Allocate and map in the output buffer */
-	out_page = btrfs_alloc_compr_page();
+	out_page = folio_page(btrfs_alloc_compr_folio(), 0);
 	if (out_page == NULL) {
 		ret = -ENOMEM;
 		goto out;
@@ -459,7 +459,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 				ret = -E2BIG;
 				goto out;
 			}
-			out_page = btrfs_alloc_compr_page();
+			out_page = folio_page(btrfs_alloc_compr_folio(), 0);
 			if (out_page == NULL) {
 				ret = -ENOMEM;
 				goto out;
@@ -519,7 +519,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 				ret = -E2BIG;
 				goto out;
 			}
-			out_page = btrfs_alloc_compr_page();
+			out_page = folio_page(btrfs_alloc_compr_folio(), 0);
 			if (out_page == NULL) {
 				ret = -ENOMEM;
 				goto out;