writeback: factor writeback_get_batch() out of write_cache_pages()

This simple helper will be the basis of the writeback iterator.  To make
this work, we need to remember the current index and end positions in
writeback_control.
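
In loop form, the conversion leaves write_cache_pages() looking roughly like this (a condensed sketch of the resulting loop shape, not the verbatim kernel code):

	folio_batch_init(&wbc->fbatch);
	while (wbc->index <= end) {
		/* releases the previous batch, then refills wbc->fbatch */
		writeback_get_batch(mapping, wbc);
		if (wbc->fbatch.nr == 0)
			break;
		for (i = 0; i < wbc->fbatch.nr; i++) {
			/* lock, validate and write back wbc->fbatch.folios[i] */
		}
	}
	folio_batch_release(&wbc->fbatch);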

[hch@lst.de: heavily rebased, add helpers to get the tag and end index, don't keep the end index in struct writeback_control]
Link: https://lkml.kernel.org/r/20240215063649.2164017-9-hch@lst.de
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Acked-by: Dave Chinner <dchinner@redhat.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 751e0d559c
parent b1793929b7
Author: Matthew Wilcox (Oracle)
Date:   2024-02-15 07:36:43 +01:00
Committer: Andrew Morton

 2 files changed, 44 insertions(+), 22 deletions(-)

--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -11,6 +11,7 @@
 #include <linux/flex_proportions.h>
 #include <linux/backing-dev-defs.h>
 #include <linux/blk_types.h>
+#include <linux/pagevec.h>
 
 struct bio;
 
@@ -40,6 +41,7 @@ enum writeback_sync_modes {
  * in a manner such that unspecified fields are set to zero.
  */
 struct writeback_control {
+	/* public fields that can be set and/or consumed by the caller: */
 	long nr_to_write;		/* Write this many pages, and decrement
 					   this for each page written */
 	long pages_skipped;		/* Pages which were not written */
@@ -77,6 +79,10 @@ struct writeback_control {
 	 */
 	struct swap_iocb **swap_plug;
 
+	/* internal fields used by the ->writepages implementation: */
+	struct folio_batch fbatch;
+	pgoff_t index;
+
 #ifdef CONFIG_CGROUP_WRITEBACK
 	struct bdi_writeback *wb;	/* wb this writeback is issued under */
 	struct inode *inode;		/* inode being written out */

--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2392,6 +2392,29 @@ static bool folio_prepare_writeback(struct address_space *mapping,
 	return true;
 }
 
+static xa_mark_t wbc_to_tag(struct writeback_control *wbc)
+{
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+		return PAGECACHE_TAG_TOWRITE;
+	return PAGECACHE_TAG_DIRTY;
+}
+
+static pgoff_t wbc_end(struct writeback_control *wbc)
+{
+	if (wbc->range_cyclic)
+		return -1;
+	return wbc->range_end >> PAGE_SHIFT;
+}
+
+static void writeback_get_batch(struct address_space *mapping,
+		struct writeback_control *wbc)
+{
+	folio_batch_release(&wbc->fbatch);
+	cond_resched();
+	filemap_get_folios_tag(mapping, &wbc->index, wbc_end(wbc),
+			wbc_to_tag(wbc), &wbc->fbatch);
+}
+
 /**
  * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
  * @mapping: address space structure to write
@@ -2429,38 +2452,32 @@ int write_cache_pages(struct address_space *mapping,
 {
 	int ret = 0;
 	int error;
-	struct folio_batch fbatch;
 	struct folio *folio;
-	int nr_folios;
-	pgoff_t index;
 	pgoff_t end;		/* Inclusive */
-	xa_mark_t tag;
 
-	folio_batch_init(&fbatch);
 	if (wbc->range_cyclic) {
-		index = mapping->writeback_index; /* prev offset */
+		wbc->index = mapping->writeback_index; /* prev offset */
 		end = -1;
 	} else {
-		index = wbc->range_start >> PAGE_SHIFT;
+		wbc->index = wbc->range_start >> PAGE_SHIFT;
 		end = wbc->range_end >> PAGE_SHIFT;
 	}
-	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
-		tag_pages_for_writeback(mapping, index, end);
-		tag = PAGECACHE_TAG_TOWRITE;
-	} else {
-		tag = PAGECACHE_TAG_DIRTY;
-	}
-	while (index <= end) {
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+		tag_pages_for_writeback(mapping, wbc->index, end);
+
+	folio_batch_init(&wbc->fbatch);
+
+	while (wbc->index <= end) {
 		int i;
 
-		nr_folios = filemap_get_folios_tag(mapping, &index, end,
-				tag, &fbatch);
-		if (nr_folios == 0)
+		writeback_get_batch(mapping, wbc);
+
+		if (wbc->fbatch.nr == 0)
 			break;
 
-		for (i = 0; i < nr_folios; i++) {
-			folio = fbatch.folios[i];
+		for (i = 0; i < wbc->fbatch.nr; i++) {
+			folio = wbc->fbatch.folios[i];
 
 			folio_lock(folio);
 			if (!folio_prepare_writeback(mapping, wbc, folio)) {
 				folio_unlock(folio);
@@ -2498,8 +2515,6 @@ int write_cache_pages(struct address_space *mapping,
 				goto done;
 			}
 		}
-		folio_batch_release(&fbatch);
-		cond_resched();
 	}
 
 	/*
@@ -2512,6 +2527,7 @@ int write_cache_pages(struct address_space *mapping,
 	 * of the file if we are called again, which can only happen due to
 	 * -ENOMEM from the file system.
 	 */
+	folio_batch_release(&wbc->fbatch);
 	if (wbc->range_cyclic)
 		mapping->writeback_index = 0;
 	return ret;
@@ -2519,7 +2535,7 @@ int write_cache_pages(struct address_space *mapping,
 done:
 	if (wbc->range_cyclic)
 		mapping->writeback_index = folio->index + folio_nr_pages(folio);
-	folio_batch_release(&fbatch);
+	folio_batch_release(&wbc->fbatch);
 	return error;
 }
 EXPORT_SYMBOL(write_cache_pages);