mm/swap: stop using page->private on tail pages for THP_SWAP
Patch series "mm/swap: stop using page->private on tail pages for
THP_SWAP + cleanups".

This series stops using page->private on tail pages for THP_SWAP,
replaces folio->private by folio->swap for swapcache folios, and starts
using "new_folio" for tail pages that we are splitting to remove the
usage of page->private for swapcache handling completely.

This patch (of 4):

Let's stop using page->private on tail pages, making it possible to
just unconditionally reuse that field in the tail pages of large
folios.

The remaining usage of the private field for THP_SWAP is in the THP
splitting code (mm/huge_memory.c), that we'll handle separately later.

Update the THP_SWAP documentation and sanity checks in mm_types.h and
__split_huge_page_tail().

[david@redhat.com: stop using page->private on tail pages for THP_SWAP]
  Link: https://lkml.kernel.org/r/6f0a82a3-6948-20d9-580b-be1dbf415701@redhat.com
Link: https://lkml.kernel.org/r/20230821160849.531668-1-david@redhat.com
Link: https://lkml.kernel.org/r/20230821160849.531668-2-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com> [arm64]
Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit cfeed8ffe5
parent bad5a3a42a
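The caller-side conversion that repeats throughout this diff is
mechanical. As a quick orientation, a minimal before/after sketch of
the pattern (illustrative only; both helpers appear in the hunks
below, and "page" may be any page of a folio, head or tail):

	/* Before: every page, tail pages included, carried the swap
	 * entry value in page->private, and callers read it directly. */
	swp_entry_t entry = { .val = page_private(page) };

	/* After: only the folio stores the entry; page_swap_entry()
	 * recomputes a subpage's value from the folio's entry plus the
	 * subpage's index within the folio. */
	swp_entry_t entry = page_swap_entry(page);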
--- a/arch/arm64/mm/mteswap.c
+++ b/arch/arm64/mm/mteswap.c
@@ -33,8 +33,9 @@ int mte_save_tags(struct page *page)
 
 	mte_save_page_tags(page_address(page), tag_storage);
 
-	/* page_private contains the swap entry.val set in do_swap_page */
-	ret = xa_store(&mte_pages, page_private(page), tag_storage, GFP_KERNEL);
+	/* lookup the swap entry.val from the page */
+	ret = xa_store(&mte_pages, page_swap_entry(page).val, tag_storage,
+		       GFP_KERNEL);
 	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
 		mte_free_tag_storage(tag_storage);
 		return xa_err(ret);
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -322,11 +322,8 @@ struct folio {
 			atomic_t _pincount;
 #ifdef CONFIG_64BIT
 			unsigned int _folio_nr_pages;
-			/* 4 byte gap here */
-	/* private: the union with struct page is transitional */
-			/* Fix THP_SWAP to not use tail->private */
-			unsigned long _private_1;
 #endif
+	/* private: the union with struct page is transitional */
 		};
 		struct page __page_1;
 	};
@@ -347,9 +344,6 @@ struct folio {
 			/* public: */
 			struct list_head _deferred_list;
 	/* private: the union with struct page is transitional */
-			unsigned long _avail_2a;
-			/* Fix THP_SWAP to not use tail->private */
-			unsigned long _private_2a;
 		};
 		struct page __page_2;
 	};
@@ -374,9 +368,6 @@ FOLIO_MATCH(memcg_data, memcg_data);
 			offsetof(struct page, pg) + sizeof(struct page))
FOLIO_MATCH(flags, _flags_1);
FOLIO_MATCH(compound_head, _head_1);
-#ifdef CONFIG_64BIT
-FOLIO_MATCH(private, _private_1);
-#endif
 #undef FOLIO_MATCH
 #define FOLIO_MATCH(pg, fl)					\
 	static_assert(offsetof(struct folio, fl) ==		\
@@ -385,7 +376,6 @@ FOLIO_MATCH(flags, _flags_2);
 FOLIO_MATCH(compound_head, _head_2);
 FOLIO_MATCH(flags, _flags_2a);
 FOLIO_MATCH(compound_head, _head_2a);
-FOLIO_MATCH(private, _private_2a);
 #undef FOLIO_MATCH
 
 /**
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -339,6 +339,15 @@ static inline swp_entry_t folio_swap_entry(struct folio *folio)
 	return entry;
 }
 
+static inline swp_entry_t page_swap_entry(struct page *page)
+{
+	struct folio *folio = page_folio(page);
+	swp_entry_t entry = folio_swap_entry(folio);
+
+	entry.val += folio_page_idx(folio, page);
+	return entry;
+}
+
 static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)
 {
 	folio->private = (void *)entry.val;
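The new helper works because the swap slots backing a large folio are
allocated contiguously, so a subpage's entry is always the folio's
entry plus the subpage's index. A hypothetical worked example (the
offset 4096 and index 100 are made up for illustration):

	/* A 512-page (2 MiB on x86-64) THP whose folio swap entry
	 * points at offset 4096 of the swap device: subpage i maps to
	 * slot 4096 + i, so nothing is stored in the tail pages. */
	swp_entry_t e = page_swap_entry(folio_page(folio, 100));
	/* e.val == folio_swap_entry(folio).val + 100, offset 4196 */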
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2446,18 +2446,15 @@ static void __split_huge_page_tail(struct page *head, int tail,
 	page_tail->index = head->index + tail;
 
 	/*
-	 * page->private should not be set in tail pages with the exception
-	 * of swap cache pages that store the swp_entry_t in tail pages.
-	 * Fix up and warn once if private is unexpectedly set.
-	 *
-	 * What of 32-bit systems, on which folio->_pincount overlays
-	 * head[1].private? No problem: THP_SWAP is not enabled on 32-bit, and
-	 * pincount must be 0 for folio_ref_freeze() to have succeeded.
+	 * page->private should not be set in tail pages. Fix up and warn once
+	 * if private is unexpectedly set.
 	 */
-	if (!folio_test_swapcache(page_folio(head))) {
-		VM_WARN_ON_ONCE_PAGE(page_tail->private != 0, page_tail);
+	if (unlikely(page_tail->private)) {
+		VM_WARN_ON_ONCE_PAGE(true, page_tail);
 		page_tail->private = 0;
 	}
+	if (PageSwapCache(head))
+		set_page_private(page_tail, (unsigned long)head->private + tail);
 
 	/* Page flags must be visible before we make the page non-compound. */
 	smp_wmb();
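Note the ordering in the new code: the private fixup is now
unconditional and runs first, and the swapcache branch then installs a
real swap entry into the tail, since after the split each tail becomes
an independent folio that must carry its own entry in private. This
set_page_private() call is the "remaining usage" in mm/huge_memory.c
that, per the cover letter above, a later patch in the series removes.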
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3879,7 +3879,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	 * changed.
 	 */
 	if (unlikely(!folio_test_swapcache(folio) ||
-		     page_private(page) != entry.val))
+		     page_swap_entry(page).val != entry.val))
 		goto out_page;
 
 	/*
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1647,7 +1647,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 */
 			dec_mm_counter(mm, mm_counter(&folio->page));
 		} else if (folio_test_anon(folio)) {
-			swp_entry_t entry = { .val = page_private(subpage) };
+			swp_entry_t entry = page_swap_entry(subpage);
 			pte_t swp_pte;
 			/*
 			 * Store the swap location in the pte.
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -100,6 +100,7 @@ int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
 
 	folio_ref_add(folio, nr);
 	folio_set_swapcache(folio);
+	folio_set_swap_entry(folio, entry);
 
 	do {
 		xas_lock_irq(&xas);
@@ -113,7 +114,6 @@ int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
 			if (shadowp)
 				*shadowp = old;
 		}
-		set_page_private(folio_page(folio, i), entry.val + i);
 		xas_store(&xas, folio);
 		xas_next(&xas);
 	}
@@ -154,9 +154,10 @@ void __delete_from_swap_cache(struct folio *folio,
 	for (i = 0; i < nr; i++) {
 		void *entry = xas_store(&xas, shadow);
 		VM_BUG_ON_PAGE(entry != folio, entry);
-		set_page_private(folio_page(folio, i), 0);
 		xas_next(&xas);
 	}
+	entry.val = 0;
+	folio_set_swap_entry(folio, entry);
 	folio_clear_swapcache(folio);
 	address_space->nrpages -= nr;
 	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3369,7 +3369,7 @@ struct swap_info_struct *swp_swap_info(swp_entry_t entry)
 
 struct swap_info_struct *page_swap_info(struct page *page)
 {
-	swp_entry_t entry = { .val = page_private(page) };
+	swp_entry_t entry = page_swap_entry(page);
 	return swp_swap_info(entry);
 }
 
@@ -3384,7 +3384,7 @@ EXPORT_SYMBOL_GPL(swapcache_mapping);
 
 pgoff_t __page_file_index(struct page *page)
 {
-	swp_entry_t swap = { .val = page_private(page) };
+	swp_entry_t swap = page_swap_entry(page);
 	return swp_offset(swap);
 }
 EXPORT_SYMBOL_GPL(__page_file_index);