mm: Convert find_lock_entries() to use a folio_batch

find_lock_entries() already only returned the head page of folios, so
convert it to return a folio_batch instead of a pagevec.  That cascades
through converting truncate_inode_pages_range() to
delete_from_page_cache_batch() and page_cache_delete_batch().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
This commit is contained in:
Matthew Wilcox (Oracle) 2021-12-07 14:15:07 -05:00
parent 0e499ed3d7
commit 51dcbdac28
6 changed files with 69 additions and 80 deletions

View File

@@ -28,6 +28,8 @@
#include <linux/fscrypt.h> #include <linux/fscrypt.h>
#include <linux/fsverity.h> #include <linux/fsverity.h>
struct pagevec;
#ifdef CONFIG_F2FS_CHECK_FS #ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition) BUG_ON(condition) #define f2fs_bug_on(sbi, condition) BUG_ON(condition)
#else #else

View File

@@ -16,7 +16,7 @@
#include <linux/hardirq.h> /* for in_interrupt() */ #include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h> #include <linux/hugetlb_inline.h>
struct pagevec; struct folio_batch;
static inline bool mapping_empty(struct address_space *mapping) static inline bool mapping_empty(struct address_space *mapping)
{ {
@@ -936,7 +936,7 @@ static inline void __delete_from_page_cache(struct page *page, void *shadow)
} }
void replace_page_cache_page(struct page *old, struct page *new); void replace_page_cache_page(struct page *old, struct page *new);
void delete_from_page_cache_batch(struct address_space *mapping, void delete_from_page_cache_batch(struct address_space *mapping,
struct pagevec *pvec); struct folio_batch *fbatch);
int try_to_release_page(struct page *page, gfp_t gfp); int try_to_release_page(struct page *page, gfp_t gfp);
bool filemap_release_folio(struct folio *folio, gfp_t gfp); bool filemap_release_folio(struct folio *folio, gfp_t gfp);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end, loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,

View File

@@ -270,30 +270,29 @@ void filemap_remove_folio(struct folio *folio)
} }
/* /*
* page_cache_delete_batch - delete several pages from page cache * page_cache_delete_batch - delete several folios from page cache
* @mapping: the mapping to which pages belong * @mapping: the mapping to which folios belong
* @pvec: pagevec with pages to delete * @fbatch: batch of folios to delete
* *
* The function walks over mapping->i_pages and removes pages passed in @pvec * The function walks over mapping->i_pages and removes folios passed in
* from the mapping. The function expects @pvec to be sorted by page index * @fbatch from the mapping. The function expects @fbatch to be sorted
* and is optimised for it to be dense. * by page index and is optimised for it to be dense.
* It tolerates holes in @pvec (mapping entries at those indices are not * It tolerates holes in @fbatch (mapping entries at those indices are not
* modified). The function expects only THP head pages to be present in the * modified).
* @pvec.
* *
* The function expects the i_pages lock to be held. * The function expects the i_pages lock to be held.
*/ */
static void page_cache_delete_batch(struct address_space *mapping, static void page_cache_delete_batch(struct address_space *mapping,
struct pagevec *pvec) struct folio_batch *fbatch)
{ {
XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index); XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
int total_pages = 0; int total_pages = 0;
int i = 0; int i = 0;
struct folio *folio; struct folio *folio;
mapping_set_update(&xas, mapping); mapping_set_update(&xas, mapping);
xas_for_each(&xas, folio, ULONG_MAX) { xas_for_each(&xas, folio, ULONG_MAX) {
if (i >= pagevec_count(pvec)) if (i >= folio_batch_count(fbatch))
break; break;
/* A swap/dax/shadow entry got inserted? Skip it. */ /* A swap/dax/shadow entry got inserted? Skip it. */
@@ -306,9 +305,9 @@ static void page_cache_delete_batch(struct address_space *mapping,
* means our page has been removed, which shouldn't be * means our page has been removed, which shouldn't be
* possible because we're holding the PageLock. * possible because we're holding the PageLock.
*/ */
if (&folio->page != pvec->pages[i]) { if (folio != fbatch->folios[i]) {
VM_BUG_ON_FOLIO(folio->index > VM_BUG_ON_FOLIO(folio->index >
pvec->pages[i]->index, folio); fbatch->folios[i]->index, folio);
continue; continue;
} }
@@ -316,12 +315,11 @@ static void page_cache_delete_batch(struct address_space *mapping,
if (folio->index == xas.xa_index) if (folio->index == xas.xa_index)
folio->mapping = NULL; folio->mapping = NULL;
/* Leave page->index set: truncation lookup relies on it */ /* Leave folio->index set: truncation lookup relies on it */
/* /*
* Move to the next page in the vector if this is a regular * Move to the next folio in the batch if this is a regular
* page or the index is of the last sub-page of this compound * folio or the index is of the last sub-page of this folio.
* page.
*/ */
if (folio->index + folio_nr_pages(folio) - 1 == xas.xa_index) if (folio->index + folio_nr_pages(folio) - 1 == xas.xa_index)
i++; i++;
@@ -332,29 +330,29 @@ static void page_cache_delete_batch(struct address_space *mapping,
} }
void delete_from_page_cache_batch(struct address_space *mapping, void delete_from_page_cache_batch(struct address_space *mapping,
struct pagevec *pvec) struct folio_batch *fbatch)
{ {
int i; int i;
if (!pagevec_count(pvec)) if (!folio_batch_count(fbatch))
return; return;
spin_lock(&mapping->host->i_lock); spin_lock(&mapping->host->i_lock);
xa_lock_irq(&mapping->i_pages); xa_lock_irq(&mapping->i_pages);
for (i = 0; i < pagevec_count(pvec); i++) { for (i = 0; i < folio_batch_count(fbatch); i++) {
struct folio *folio = page_folio(pvec->pages[i]); struct folio *folio = fbatch->folios[i];
trace_mm_filemap_delete_from_page_cache(folio); trace_mm_filemap_delete_from_page_cache(folio);
filemap_unaccount_folio(mapping, folio); filemap_unaccount_folio(mapping, folio);
} }
page_cache_delete_batch(mapping, pvec); page_cache_delete_batch(mapping, fbatch);
xa_unlock_irq(&mapping->i_pages); xa_unlock_irq(&mapping->i_pages);
if (mapping_shrinkable(mapping)) if (mapping_shrinkable(mapping))
inode_add_lru(mapping->host); inode_add_lru(mapping->host);
spin_unlock(&mapping->host->i_lock); spin_unlock(&mapping->host->i_lock);
for (i = 0; i < pagevec_count(pvec); i++) for (i = 0; i < folio_batch_count(fbatch); i++)
filemap_free_folio(mapping, page_folio(pvec->pages[i])); filemap_free_folio(mapping, fbatch->folios[i]);
} }
int filemap_check_errors(struct address_space *mapping) int filemap_check_errors(struct address_space *mapping)
@@ -2052,8 +2050,8 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
* @mapping: The address_space to search. * @mapping: The address_space to search.
* @start: The starting page cache index. * @start: The starting page cache index.
* @end: The final page index (inclusive). * @end: The final page index (inclusive).
* @pvec: Where the resulting entries are placed. * @fbatch: Where the resulting entries are placed.
* @indices: The cache indices of the entries in @pvec. * @indices: The cache indices of the entries in @fbatch.
* *
* find_lock_entries() will return a batch of entries from @mapping. * find_lock_entries() will return a batch of entries from @mapping.
* Swap, shadow and DAX entries are included. Folios are returned * Swap, shadow and DAX entries are included. Folios are returned
@@ -2068,7 +2066,7 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
* Return: The number of entries which were found. * Return: The number of entries which were found.
*/ */
unsigned find_lock_entries(struct address_space *mapping, pgoff_t start, unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
pgoff_t end, struct pagevec *pvec, pgoff_t *indices) pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
{ {
XA_STATE(xas, &mapping->i_pages, start); XA_STATE(xas, &mapping->i_pages, start);
struct folio *folio; struct folio *folio;
@@ -2088,8 +2086,8 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index), VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
folio); folio);
} }
indices[pvec->nr] = xas.xa_index; indices[fbatch->nr] = xas.xa_index;
if (!pagevec_add(pvec, &folio->page)) if (!folio_batch_add(fbatch, folio))
break; break;
goto next; goto next;
unlock: unlock:
@@ -2106,7 +2104,7 @@ next:
} }
rcu_read_unlock(); rcu_read_unlock();
return pagevec_count(pvec); return folio_batch_count(fbatch);
} }
/** /**

View File

@@ -93,7 +93,7 @@ static inline void force_page_cache_readahead(struct address_space *mapping,
} }
unsigned find_lock_entries(struct address_space *mapping, pgoff_t start, unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
pgoff_t end, struct pagevec *pvec, pgoff_t *indices); pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start, unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices); pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio); void filemap_free_folio(struct address_space *mapping, struct folio *folio);

View File

@@ -919,7 +919,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
pgoff_t end = (lend + 1) >> PAGE_SHIFT; pgoff_t end = (lend + 1) >> PAGE_SHIFT;
unsigned int partial_start = lstart & (PAGE_SIZE - 1); unsigned int partial_start = lstart & (PAGE_SIZE - 1);
unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1); unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
struct pagevec pvec;
struct folio_batch fbatch; struct folio_batch fbatch;
pgoff_t indices[PAGEVEC_SIZE]; pgoff_t indices[PAGEVEC_SIZE];
long nr_swaps_freed = 0; long nr_swaps_freed = 0;
@@ -932,12 +931,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
if (info->fallocend > start && info->fallocend <= end && !unfalloc) if (info->fallocend > start && info->fallocend <= end && !unfalloc)
info->fallocend = start; info->fallocend = start;
pagevec_init(&pvec); folio_batch_init(&fbatch);
index = start; index = start;
while (index < end && find_lock_entries(mapping, index, end - 1, while (index < end && find_lock_entries(mapping, index, end - 1,
&pvec, indices)) { &fbatch, indices)) {
for (i = 0; i < pagevec_count(&pvec); i++) { for (i = 0; i < folio_batch_count(&fbatch); i++) {
struct folio *folio = (struct folio *)pvec.pages[i]; struct folio *folio = fbatch.folios[i];
index = indices[i]; index = indices[i];
@@ -954,8 +953,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
truncate_inode_folio(mapping, folio); truncate_inode_folio(mapping, folio);
folio_unlock(folio); folio_unlock(folio);
} }
pagevec_remove_exceptionals(&pvec); folio_batch_remove_exceptionals(&fbatch);
pagevec_release(&pvec); folio_batch_release(&fbatch);
cond_resched(); cond_resched();
index++; index++;
} }
@@ -988,7 +987,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
if (start >= end) if (start >= end)
return; return;
folio_batch_init(&fbatch);
index = start; index = start;
while (index < end) { while (index < end) {
cond_resched(); cond_resched();

View File

@@ -56,11 +56,11 @@ static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
/* /*
* Unconditionally remove exceptional entries. Usually called from truncate * Unconditionally remove exceptional entries. Usually called from truncate
* path. Note that the pagevec may be altered by this function by removing * path. Note that the folio_batch may be altered by this function by removing
* exceptional entries similar to what pagevec_remove_exceptionals does. * exceptional entries similar to what pagevec_remove_exceptionals does.
*/ */
static void truncate_exceptional_pvec_entries(struct address_space *mapping, static void truncate_folio_batch_exceptionals(struct address_space *mapping,
struct pagevec *pvec, pgoff_t *indices) struct folio_batch *fbatch, pgoff_t *indices)
{ {
int i, j; int i, j;
bool dax; bool dax;
@@ -69,11 +69,11 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
if (shmem_mapping(mapping)) if (shmem_mapping(mapping))
return; return;
for (j = 0; j < pagevec_count(pvec); j++) for (j = 0; j < folio_batch_count(fbatch); j++)
if (xa_is_value(pvec->pages[j])) if (xa_is_value(fbatch->folios[j]))
break; break;
if (j == pagevec_count(pvec)) if (j == folio_batch_count(fbatch))
return; return;
dax = dax_mapping(mapping); dax = dax_mapping(mapping);
@@ -82,12 +82,12 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
xa_lock_irq(&mapping->i_pages); xa_lock_irq(&mapping->i_pages);
} }
for (i = j; i < pagevec_count(pvec); i++) { for (i = j; i < folio_batch_count(fbatch); i++) {
struct page *page = pvec->pages[i]; struct folio *folio = fbatch->folios[i];
pgoff_t index = indices[i]; pgoff_t index = indices[i];
if (!xa_is_value(page)) { if (!xa_is_value(folio)) {
pvec->pages[j++] = page; fbatch->folios[j++] = folio;
continue; continue;
} }
@@ -96,7 +96,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
continue; continue;
} }
__clear_shadow_entry(mapping, index, page); __clear_shadow_entry(mapping, index, folio);
} }
if (!dax) { if (!dax) {
@@ -105,14 +105,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
inode_add_lru(mapping->host); inode_add_lru(mapping->host);
spin_unlock(&mapping->host->i_lock); spin_unlock(&mapping->host->i_lock);
} }
pvec->nr = j; fbatch->nr = j;
}
static void truncate_folio_batch_exceptionals(struct address_space *mapping,
struct folio_batch *fbatch, pgoff_t *indices)
{
truncate_exceptional_pvec_entries(mapping, (struct pagevec *)fbatch,
indices);
} }
/* /*
@@ -303,7 +296,6 @@ void truncate_inode_pages_range(struct address_space *mapping,
pgoff_t end; /* exclusive */ pgoff_t end; /* exclusive */
unsigned int partial_start; /* inclusive */ unsigned int partial_start; /* inclusive */
unsigned int partial_end; /* exclusive */ unsigned int partial_end; /* exclusive */
struct pagevec pvec;
struct folio_batch fbatch; struct folio_batch fbatch;
pgoff_t indices[PAGEVEC_SIZE]; pgoff_t indices[PAGEVEC_SIZE];
pgoff_t index; pgoff_t index;
@@ -333,18 +325,18 @@ void truncate_inode_pages_range(struct address_space *mapping,
else else
end = (lend + 1) >> PAGE_SHIFT; end = (lend + 1) >> PAGE_SHIFT;
pagevec_init(&pvec); folio_batch_init(&fbatch);
index = start; index = start;
while (index < end && find_lock_entries(mapping, index, end - 1, while (index < end && find_lock_entries(mapping, index, end - 1,
&pvec, indices)) { &fbatch, indices)) {
index = indices[pagevec_count(&pvec) - 1] + 1; index = indices[folio_batch_count(&fbatch) - 1] + 1;
truncate_exceptional_pvec_entries(mapping, &pvec, indices); truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
for (i = 0; i < pagevec_count(&pvec); i++) for (i = 0; i < folio_batch_count(&fbatch); i++)
truncate_cleanup_folio(page_folio(pvec.pages[i])); truncate_cleanup_folio(fbatch.folios[i]);
delete_from_page_cache_batch(mapping, &pvec); delete_from_page_cache_batch(mapping, &fbatch);
for (i = 0; i < pagevec_count(&pvec); i++) for (i = 0; i < folio_batch_count(&fbatch); i++)
unlock_page(pvec.pages[i]); folio_unlock(fbatch.folios[i]);
pagevec_release(&pvec); folio_batch_release(&fbatch);
cond_resched(); cond_resched();
} }
@@ -387,7 +379,6 @@ void truncate_inode_pages_range(struct address_space *mapping,
if (start >= end) if (start >= end)
goto out; goto out;
folio_batch_init(&fbatch);
index = start; index = start;
for ( ; ; ) { for ( ; ; ) {
cond_resched(); cond_resched();
@@ -489,16 +480,16 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end, unsigned long *nr_pagevec) pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
{ {
pgoff_t indices[PAGEVEC_SIZE]; pgoff_t indices[PAGEVEC_SIZE];
struct pagevec pvec; struct folio_batch fbatch;
pgoff_t index = start; pgoff_t index = start;
unsigned long ret; unsigned long ret;
unsigned long count = 0; unsigned long count = 0;
int i; int i;
pagevec_init(&pvec); folio_batch_init(&fbatch);
while (find_lock_entries(mapping, index, end, &pvec, indices)) { while (find_lock_entries(mapping, index, end, &fbatch, indices)) {
for (i = 0; i < pagevec_count(&pvec); i++) { for (i = 0; i < folio_batch_count(&fbatch); i++) {
struct page *page = pvec.pages[i]; struct page *page = &fbatch.folios[i]->page;
/* We rely upon deletion not changing page->index */ /* We rely upon deletion not changing page->index */
index = indices[i]; index = indices[i];
@@ -525,8 +516,8 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
} }
count += ret; count += ret;
} }
pagevec_remove_exceptionals(&pvec); folio_batch_remove_exceptionals(&fbatch);
pagevec_release(&pvec); folio_batch_release(&fbatch);
cond_resched(); cond_resched();
index++; index++;
} }