mirror of https://github.com/torvalds/linux.git
synced 2024-12-27 13:22:23 +00:00
mm/huge_memory: page_remove_rmap() -> folio_remove_rmap_pmd()
Let's convert zap_huge_pmd() and set_pmd_migration_entry(). While at it,
perform some more folio conversion.

Link: https://lkml.kernel.org/r/20231220224504.646757-26-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent 5cc9695f06
commit a8e61d584e
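For orientation before the diff: a minimal, illustrative sketch of the call-pattern change this commit applies when tearing down a PMD-mapped THP. It is not code from mm/huge_memory.c and the helper name drop_pmd_mapping_sketch() is invented; page_folio(), page_remove_rmap() and folio_remove_rmap_pmd() are the interfaces the hunks below actually touch.

/*
 * Illustrative sketch only: how the PMD-level rmap of a THP head page
 * used to be dropped (page API) versus how it is dropped after this
 * conversion (folio API).
 */
static void drop_pmd_mapping_sketch(struct page *page,
				    struct vm_area_struct *vma)
{
	struct folio *folio = page_folio(page);	/* head page -> its folio */

	/* Old call: page-based; the "true" argument meant "compound (PMD) mapping". */
	/* page_remove_rmap(page, vma, true); */

	/* New call: folio-based; the PMD level is explicit in the function name. */
	folio_remove_rmap_pmd(folio, page, vma);
}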
@@ -1898,7 +1898,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
 		if (pmd_present(orig_pmd)) {
 			page = pmd_page(orig_pmd);
-			page_remove_rmap(page, vma, true);
+			folio_remove_rmap_pmd(page_folio(page), page, vma);
 			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
 			VM_BUG_ON_PAGE(!PageHead(page), page);
 		} else if (thp_migration_supported()) {
@@ -2433,12 +2433,13 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 			page = pfn_swap_entry_to_page(entry);
 		} else {
 			page = pmd_page(old_pmd);
-			if (!PageDirty(page) && pmd_dirty(old_pmd))
-				set_page_dirty(page);
-			if (!PageReferenced(page) && pmd_young(old_pmd))
-				SetPageReferenced(page);
-			page_remove_rmap(page, vma, true);
-			put_page(page);
+			folio = page_folio(page);
+			if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
+				folio_set_dirty(folio);
+			if (!folio_test_referenced(folio) && pmd_young(old_pmd))
+				folio_set_referenced(folio);
+			folio_remove_rmap_pmd(folio, page, vma);
+			folio_put(folio);
 		}
 		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
 		return;
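The hunk above also swaps the page-flag helpers for their folio counterparts before the mapping is dropped. A small illustrative sketch of that pattern follows; the helper name transfer_pmd_bits_sketch() is invented, while the folio calls are the ones used in the hunk.

/*
 * Illustrative sketch only: carry dirty/young information from a cleared
 * PMD over to the folio, mirroring the folio API usage in the hunk above.
 */
static void transfer_pmd_bits_sketch(struct folio *folio, pmd_t old_pmd)
{
	if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
		folio_set_dirty(folio);
	if (!folio_test_referenced(folio) && pmd_young(old_pmd))
		folio_set_referenced(folio);
}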
@@ -2593,7 +2594,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	pte_unmap(pte - 1);
 
 	if (!pmd_migration)
-		page_remove_rmap(page, vma, true);
+		folio_remove_rmap_pmd(folio, page, vma);
 	if (freeze)
 		put_page(page);
 }
@@ -3536,6 +3537,7 @@ late_initcall(split_huge_pages_debugfs);
 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 		struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	struct vm_area_struct *vma = pvmw->vma;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address = pvmw->address;
@@ -3551,14 +3553,14 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
 
 	/* See page_try_share_anon_rmap(): invalidate PMD first. */
-	anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
+	anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
 	if (anon_exclusive && page_try_share_anon_rmap(page)) {
 		set_pmd_at(mm, address, pvmw->pmd, pmdval);
 		return -EBUSY;
 	}
 
 	if (pmd_dirty(pmdval))
-		set_page_dirty(page);
+		folio_set_dirty(folio);
 	if (pmd_write(pmdval))
 		entry = make_writable_migration_entry(page_to_pfn(page));
 	else if (anon_exclusive)
@@ -3575,8 +3577,8 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	if (pmd_uffd_wp(pmdval))
 		pmdswp = pmd_swp_mkuffd_wp(pmdswp);
 	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
-	page_remove_rmap(page, vma, true);
-	put_page(page);
+	folio_remove_rmap_pmd(folio, page, vma);
+	folio_put(folio);
 	trace_set_migration_pmd(address, pmd_val(pmdswp));
 
 	return 0;