mm: handle large folios in free_unref_folios()
Call folio_undo_large_rmappable() if needed.  free_unref_page_prepare()
destroys the ability to call folio_order(), so stash the order in
folio->private for the benefit of the second loop.

Link: https://lkml.kernel.org/r/20240227174254.710559-10-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 31b2ff82ae
parent f1ee018bae
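The interesting constraint is the one the commit message names: the first
loop in the diff below is the last point where folio_order() is meaningful,
because free_unref_page_prepare() tears down the compound state it reads,
yet the second loop still needs the order.  So the order is parked in the
otherwise-unused folio->private slot.  A minimal userspace C sketch of that
stash-and-recover pattern -- fake_folio and prepare() are hypothetical
stand-ins, not kernel API:

#include <stdio.h>

/* Hypothetical stand-in for struct folio: "order" dies in pass one,
 * "private" is an otherwise-unused pointer-sized slot. */
struct fake_folio {
	unsigned int order;
	void *private;
};

/* Stand-in for free_unref_page_prepare(): after this, reading
 * f->order (i.e. calling folio_order()) would return garbage. */
static int prepare(struct fake_folio *f)
{
	f->order = 0xdead;
	return 1;	/* 0 would mean "skip this folio" */
}

int main(void)
{
	struct fake_folio batch[] = { {0, NULL}, {2, NULL}, {9, NULL} };
	int n = sizeof(batch) / sizeof(batch[0]);

	/* Pass one: capture the order while it is still valid, then
	 * park it in the private slot, as the patch does with
	 * folio->private = (void *)(unsigned long)order. */
	for (int i = 0; i < n; i++) {
		unsigned int order = batch[i].order;

		if (!prepare(&batch[i]))
			continue;
		batch[i].private = (void *)(unsigned long)order;
	}

	/* Pass two: recover the order from the stash and clear it. */
	for (int i = 0; i < n; i++) {
		unsigned int order = (unsigned long)batch[i].private;

		batch[i].private = NULL;
		printf("folio %d: order %u\n", i, order);
	}
	return 0;
}

The pattern relies on folio->private having no other user once the folio is
on its way to being freed, which is why the second loop clears it again
before committing the free.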
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2516,7 +2516,7 @@ void free_unref_page(struct page *page, unsigned int order)
 }
 
 /*
- * Free a batch of 0-order pages
+ * Free a batch of folios
  */
 void free_unref_folios(struct folio_batch *folios)
 {
@@ -2529,19 +2529,25 @@ void free_unref_folios(struct folio_batch *folios)
 	for (i = 0, j = 0; i < folios->nr; i++) {
 		struct folio *folio = folios->folios[i];
 		unsigned long pfn = folio_pfn(folio);
-		if (!free_unref_page_prepare(&folio->page, pfn, 0))
+		unsigned int order = folio_order(folio);
+
+		if (order > 0 && folio_test_large_rmappable(folio))
+			folio_undo_large_rmappable(folio);
+		if (!free_unref_page_prepare(&folio->page, pfn, order))
 			continue;
 
 		/*
-		 * Free isolated folios directly to the allocator, see
-		 * comment in free_unref_page.
+		 * Free isolated folios and orders not handled on the PCP
+		 * directly to the allocator, see comment in free_unref_page.
 		 */
 		migratetype = get_pcppage_migratetype(&folio->page);
-		if (unlikely(is_migrate_isolate(migratetype))) {
+		if (!pcp_allowed_order(order) ||
+		    is_migrate_isolate(migratetype)) {
 			free_one_page(folio_zone(folio), &folio->page, pfn,
-					0, migratetype, FPI_NONE);
+					order, migratetype, FPI_NONE);
 			continue;
 		}
+		folio->private = (void *)(unsigned long)order;
 		if (j != i)
 			folios->folios[j] = folio;
 		j++;
@@ -2551,7 +2557,9 @@ void free_unref_folios(struct folio_batch *folios)
 	for (i = 0; i < folios->nr; i++) {
 		struct folio *folio = folios->folios[i];
 		struct zone *zone = folio_zone(folio);
+		unsigned int order = (unsigned long)folio->private;
 
+		folio->private = NULL;
 		migratetype = get_pcppage_migratetype(&folio->page);
 
 		/* Different zone requires a different pcp lock */
@@ -2570,7 +2578,7 @@ void free_unref_folios(struct folio_batch *folios)
 			if (unlikely(!pcp)) {
 				pcp_trylock_finish(UP_flags);
 				free_one_page(zone, &folio->page,
-						folio_pfn(folio), 0,
+						folio_pfn(folio), order,
 						migratetype, FPI_NONE);
 				locked_zone = NULL;
 				continue;
@@ -2586,7 +2594,8 @@ void free_unref_folios(struct folio_batch *folios)
 			migratetype = MIGRATE_MOVABLE;
 
 		trace_mm_page_free_batched(&folio->page);
-		free_unref_page_commit(zone, pcp, &folio->page, migratetype, 0);
+		free_unref_page_commit(zone, pcp, &folio->page, migratetype,
+				order);
 	}
 
 	if (pcp) {
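One piece of context for the new !pcp_allowed_order(order) check: per-cpu
page (PCP) lists cache only a few orders, so any other order must bypass
them and go straight to the buddy allocator via free_one_page().  The
helper is not part of this diff; as an assumption about the kernel around
this series, pcp_allowed_order() amounts to roughly:

#include <stdbool.h>

#define PAGE_ALLOC_COSTLY_ORDER	3	/* kernel constant */
#define HPAGE_PMD_ORDER		9	/* 2MB THP with 4KB pages on x86-64 */

/* Paraphrase (an assumption, not shown in this diff) of what
 * pcp_allowed_order() checks: PCP lists hold the cheap orders,
 * plus the PMD-sized THP order. */
static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
	if (order == HPAGE_PMD_ORDER)
		return true;
	return false;
}

Under that paraphrase, order-0 to order-3 and PMD-sized folios in the batch
take the batched PCP path, while the in-between orders are still freed one
at a time through free_one_page().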