mm/writeback: Add __folio_end_writeback()

test_clear_page_writeback() is actually an mm-internal function, although
it's named as if it's a pagecache function.  Move it to mm/internal.h,
rename it to __folio_end_writeback() and change the return type to bool.

The conversion from page to folio is mostly about accounting the number
of pages being written back, although it does eliminate a couple of
calls to compound_head().
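
The accounting change is easiest to see in isolation.  Roughly (a
simplified sketch, omitting the i_pages tag clearing and bdi accounting
that the mm/page-writeback.c hunk below handles), the old code adjusted
each counter by one page, while the folio version scales everything by
folio_nr_pages():

	bool __folio_end_writeback(struct folio *folio)
	{
		long nr = folio_nr_pages(folio);	/* 1 for order-0, more for large folios */
		bool ret = folio_test_clear_writeback(folio);

		if (ret) {
			/* previously dec_lruvec_page_state(page, NR_WRITEBACK), i.e. -1 */
			lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
			zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
			node_stat_mod_folio(folio, NR_WRITTEN, nr);
		}
		return ret;
	}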

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Matthew Wilcox (Oracle) 2021-01-15 23:34:16 -05:00
parent cc24df4cd1
commit 269ccca389
4 changed files with 17 additions and 16 deletions

include/linux/page-flags.h

@@ -657,7 +657,6 @@ static __always_inline void SetPageUptodate(struct page *page)
 CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
-int test_clear_page_writeback(struct page *page);
 int __test_set_page_writeback(struct page *page, bool keep_write);
 #define test_set_page_writeback(page)			\

mm/filemap.c

@@ -1590,7 +1590,7 @@ void folio_end_writeback(struct folio *folio)
 	 * reused before the folio_wake().
 	 */
 	folio_get(folio);
-	if (!test_clear_page_writeback(&folio->page))
+	if (!__folio_end_writeback(folio))
 		BUG();
 	smp_mb__after_atomic();

mm/internal.h

@@ -43,6 +43,7 @@ static inline void *folio_raw_mapping(struct folio *folio)
 vm_fault_t do_swap_page(struct vm_fault *vmf);
 void folio_rotate_reclaimable(struct folio *folio);
+bool __folio_end_writeback(struct folio *folio);
 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 		unsigned long floor, unsigned long ceiling);

mm/page-writeback.c

@@ -583,7 +583,7 @@ static void wb_domain_writeout_add(struct wb_domain *dom,
 /*
  * Increment @wb's writeout completion count and the global writeout
- * completion count. Called from test_clear_page_writeback().
+ * completion count. Called from __folio_end_writeback().
  */
 static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr)
 {
@@ -2766,27 +2766,28 @@ static void wb_inode_writeback_end(struct bdi_writeback *wb)
 	queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
 }
-int test_clear_page_writeback(struct page *page)
+bool __folio_end_writeback(struct folio *folio)
 {
-	struct address_space *mapping = page_mapping(page);
-	int ret;
+	long nr = folio_nr_pages(folio);
+	struct address_space *mapping = folio_mapping(folio);
+	bool ret;
-	lock_page_memcg(page);
+	folio_memcg_lock(folio);
 	if (mapping && mapping_use_writeback_tags(mapping)) {
 		struct inode *inode = mapping->host;
 		struct backing_dev_info *bdi = inode_to_bdi(inode);
 		unsigned long flags;
 		xa_lock_irqsave(&mapping->i_pages, flags);
-		ret = TestClearPageWriteback(page);
+		ret = folio_test_clear_writeback(folio);
 		if (ret) {
-			__xa_clear_mark(&mapping->i_pages, page_index(page),
+			__xa_clear_mark(&mapping->i_pages, folio_index(folio),
						PAGECACHE_TAG_WRITEBACK);
 			if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
 				struct bdi_writeback *wb = inode_to_wb(inode);
-				dec_wb_stat(wb, WB_WRITEBACK);
-				__wb_writeout_add(wb, 1);
+				wb_stat_mod(wb, WB_WRITEBACK, -nr);
+				__wb_writeout_add(wb, nr);
 				if (!mapping_tagged(mapping,
							PAGECACHE_TAG_WRITEBACK))
 					wb_inode_writeback_end(wb);
@@ -2799,14 +2800,14 @@ int test_clear_page_writeback(struct page *page)
 		xa_unlock_irqrestore(&mapping->i_pages, flags);
 	} else {
-		ret = TestClearPageWriteback(page);
+		ret = folio_test_clear_writeback(folio);
 	}
 	if (ret) {
-		dec_lruvec_page_state(page, NR_WRITEBACK);
-		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
-		inc_node_page_state(page, NR_WRITTEN);
+		lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
+		zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
+		node_stat_mod_folio(folio, NR_WRITTEN, nr);
 	}
-	unlock_page_memcg(page);
+	folio_memcg_unlock(folio);
 	return ret;
 }
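
For callers nothing changes beyond the mm/filemap.c hunk above: filesystems
keep calling the public folio_end_writeback() when their writeback I/O
completes, and only that function reaches the now mm-internal
__folio_end_writeback().  A hypothetical completion handler, purely for
illustration:

	static void example_writeback_end_io(struct folio *folio)
	{
		/* per-filesystem completion bookkeeping would go here */
		folio_end_writeback(folio);	/* clears the writeback flag and wakes waiters */
	}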