mm: move page->deferred_list to folio->_deferred_list

Remove the entire block of definitions for the second tail page, and add
the deferred list to the struct folio.  This actually moves _deferred_list
to a different offset in struct folio because I don't see a need to
include the padding.

This lets us use list_for_each_entry_safe() in deferred_split_scan()
and avoid a number of calls to compound_head().

Link: https://lkml.kernel.org/r/20230111142915.1001531-25-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Matthew Wilcox (Oracle) 2023-01-11 14:29:10 +00:00 committed by Andrew Morton
parent a8d55327cc
commit 4375a553f4
3 changed files with 27 additions and 28 deletions

View File

@@ -295,11 +295,10 @@ static inline bool thp_migration_supported(void)
/*
 * page_deferred_list - return the deferred-split list head for a THP.
 * @page: head page of a transparent huge page (order >= 2).
 *
 * The deferred list now lives in struct folio (->_deferred_list) rather
 * than in the second tail page's page[2].deferred_list, so the old
 * tail-page indexing is gone.  The cast is valid because struct folio
 * overlays the head struct page of the compound page.
 *
 * Returns: pointer to the folio's _deferred_list list head.
 */
static inline struct list_head *page_deferred_list(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	/* _deferred_list occupies the second tail page; order must be >= 2 */
	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
	return &folio->_deferred_list;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */

View File

@@ -141,12 +141,6 @@ struct page {
struct { /* Tail pages of compound page */
unsigned long compound_head; /* Bit zero is set */
};
struct { /* Second tail page of transparent huge page */
unsigned long _compound_pad_1; /* compound_head */
unsigned long _compound_pad_2;
/* For both global and memcg */
struct list_head deferred_list;
};
struct { /* Second tail page of hugetlb page */
unsigned long _hugetlb_pad_1; /* compound_head */
void *hugetlb_subpool;
@@ -302,6 +296,7 @@ static inline struct page *encoded_page_ptr(struct encoded_page *page)
* @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h.
* @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
* @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
* @_deferred_list: Folios to be split under memory pressure.
*
* A folio is a physically, virtually and logically contiguous set
* of bytes. It is a power-of-two in size, and it is aligned to that
@@ -366,6 +361,13 @@ struct folio {
void *_hugetlb_cgroup;
void *_hugetlb_cgroup_rsvd;
void *_hugetlb_hwpoison;
/* private: the union with struct page is transitional */
};
struct {
unsigned long _flags_2a;
unsigned long _head_2a;
/* public: */
struct list_head _deferred_list;
/* private: the union with struct page is transitional */
};
struct page __page_2;

View File

@@ -2756,9 +2756,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
/* Prevent deferred_split_scan() touching ->_refcount */
spin_lock(&ds_queue->split_queue_lock);
if (folio_ref_freeze(folio, 1 + extra_pins)) {
if (!list_empty(page_deferred_list(&folio->page))) {
if (!list_empty(&folio->_deferred_list)) {
ds_queue->split_queue_len--;
list_del(page_deferred_list(&folio->page));
list_del(&folio->_deferred_list);
}
spin_unlock(&ds_queue->split_queue_lock);
if (mapping) {
@@ -2873,8 +2873,8 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
struct pglist_data *pgdata = NODE_DATA(sc->nid);
struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
unsigned long flags;
LIST_HEAD(list), *pos, *next;
struct page *page;
LIST_HEAD(list);
struct folio *folio, *next;
int split = 0;
#ifdef CONFIG_MEMCG
@@ -2884,14 +2884,13 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
/* Take pin on all head pages to avoid freeing them under us */
list_for_each_safe(pos, next, &ds_queue->split_queue) {
page = list_entry((void *)pos, struct page, deferred_list);
page = compound_head(page);
if (get_page_unless_zero(page)) {
list_move(page_deferred_list(page), &list);
list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
_deferred_list) {
if (folio_try_get(folio)) {
list_move(&folio->_deferred_list, &list);
} else {
/* We lost race with put_compound_page() */
list_del_init(page_deferred_list(page));
/* We lost race with folio_put() */
list_del_init(&folio->_deferred_list);
ds_queue->split_queue_len--;
}
if (!--sc->nr_to_scan)
@@ -2899,16 +2898,15 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
}
spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
list_for_each_safe(pos, next, &list) {
page = list_entry((void *)pos, struct page, deferred_list);
if (!trylock_page(page))
list_for_each_entry_safe(folio, next, &list, _deferred_list) {
if (!folio_trylock(folio))
goto next;
/* split_huge_page() removes page from list on success */
if (!split_huge_page(page))
if (!split_folio(folio))
split++;
unlock_page(page);
folio_unlock(folio);
next:
put_page(page);
folio_put(folio);
}
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);