mm: factor out common parts between MADV_COLD and MADV_PAGEOUT

There are many common parts between MADV_COLD and MADV_PAGEOUT.
Factor them out to avoid code duplication.  (A short userspace sketch
exercising both advices is included before the diff below.)

Link: http://lkml.kernel.org/r/20190726023435.214162-6-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Daniel Colascione <dancol@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: James E.J. Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Joel Fernandes (Google) <joel@joelfernandes.org>
Cc: kbuild test robot <lkp@intel.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Oleksandr Natalenko <oleksandr@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Sonny Rao <sonnyrao@google.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Tim Murray <timmurray@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit d616d51265 (parent 1a4e58cce8)
Author:    Minchan Kim <minchan@kernel.org>  2019-09-25 16:49:19 -07:00
Committer: Linus Torvalds
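
For context, here is a minimal userspace sketch that exercises the two advices whose kernel paths this patch unifies. It is illustrative only and not part of the patch; it assumes a Linux 5.4+ kernel, and the fallback #defines are assumptions matching the asm-generic uapi values in case the libc headers predate these advices.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_COLD
#define MADV_COLD	20	/* assumed uapi value: deactivate pages */
#endif
#ifndef MADV_PAGEOUT
#define MADV_PAGEOUT	21	/* assumed uapi value: reclaim pages */
#endif

int main(void)
{
	size_t len = 16 * sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(buf, 1, len);	/* fault the pages in */

	/* MADV_COLD: hint that the range is cold; pages are only deactivated. */
	if (madvise(buf, len, MADV_COLD))
		perror("madvise(MADV_COLD)");

	/* MADV_PAGEOUT: ask the kernel to reclaim the range right away. */
	if (madvise(buf, len, MADV_PAGEOUT))
		perror("madvise(MADV_PAGEOUT)");

	munmap(buf, len);
	return 0;
}

Both calls end up in the same walker, madvise_cold_or_pageout_pte_range(), differing only in the pageout flag carried by struct madvise_walk_private, as the diff below shows.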

mm/madvise.c

@@ -32,6 +32,11 @@
 #include "internal.h"
 
+struct madvise_walk_private {
+	struct mmu_gather *tlb;
+	bool pageout;
+};
+
 /*
  * Any behaviour which results in changes to the vma->vm_flags needs to
  * take mmap_sem for writing. Others, which simply traverse vmas, need
@@ -292,15 +297,22 @@ static long madvise_willneed(struct vm_area_struct *vma,
 	return 0;
 }
 
-static int madvise_cold_pte_range(pmd_t *pmd, unsigned long addr,
-				unsigned long end, struct mm_walk *walk)
+static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
+				unsigned long addr, unsigned long end,
+				struct mm_walk *walk)
 {
-	struct mmu_gather *tlb = walk->private;
+	struct madvise_walk_private *private = walk->private;
+	struct mmu_gather *tlb = private->tlb;
+	bool pageout = private->pageout;
 	struct mm_struct *mm = tlb->mm;
 	struct vm_area_struct *vma = walk->vma;
 	pte_t *orig_pte, *pte, ptent;
 	spinlock_t *ptl;
-	struct page *page;
+	struct page *page = NULL;
+	LIST_HEAD(page_list);
+
+	if (fatal_signal_pending(current))
+		return -EINTR;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	if (pmd_trans_huge(*pmd)) {
@@ -348,10 +360,17 @@ static int madvise_cold_pte_range(pmd_t *pmd, unsigned long addr,
 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
 	}
 
+		ClearPageReferenced(page);
 		test_and_clear_page_young(page);
-		deactivate_page(page);
+		if (pageout) {
+			if (!isolate_lru_page(page))
+				list_add(&page->lru, &page_list);
+		} else
+			deactivate_page(page);
 huge_unlock:
 		spin_unlock(ptl);
+		if (pageout)
+			reclaim_pages(&page_list);
 		return 0;
 	}
 
@@ -419,27 +438,39 @@ regular_page:
 		 * As a side effect, it makes confuse idle-page tracking
 		 * because they will miss recent referenced history.
 		 */
+		ClearPageReferenced(page);
 		test_and_clear_page_young(page);
-		deactivate_page(page);
+		if (pageout) {
+			if (!isolate_lru_page(page))
+				list_add(&page->lru, &page_list);
+		} else
+			deactivate_page(page);
 	}
 
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(orig_pte, ptl);
+	if (pageout)
+		reclaim_pages(&page_list);
 	cond_resched();
 	return 0;
 }
 
 static const struct mm_walk_ops cold_walk_ops = {
-	.pmd_entry = madvise_cold_pte_range,
+	.pmd_entry = madvise_cold_or_pageout_pte_range,
 };
 
 static void madvise_cold_page_range(struct mmu_gather *tlb,
 			     struct vm_area_struct *vma,
 			     unsigned long addr, unsigned long end)
 {
+	struct madvise_walk_private walk_private = {
+		.pageout = false,
+		.tlb = tlb,
+	};
+
 	tlb_start_vma(tlb, vma);
-	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, NULL);
+	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
 	tlb_end_vma(tlb, vma);
 }
@@ -462,150 +493,17 @@ static long madvise_cold(struct vm_area_struct *vma,
 	return 0;
 }
 
-static int madvise_pageout_pte_range(pmd_t *pmd, unsigned long addr,
-				unsigned long end, struct mm_walk *walk)
-{
-	struct mmu_gather *tlb = walk->private;
-	struct mm_struct *mm = tlb->mm;
-	struct vm_area_struct *vma = walk->vma;
-	pte_t *orig_pte, *pte, ptent;
-	spinlock_t *ptl;
-	LIST_HEAD(page_list);
-	struct page *page;
-
-	if (fatal_signal_pending(current))
-		return -EINTR;
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (pmd_trans_huge(*pmd)) {
-		pmd_t orig_pmd;
-		unsigned long next = pmd_addr_end(addr, end);
-
-		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
-		ptl = pmd_trans_huge_lock(pmd, vma);
-		if (!ptl)
-			return 0;
-
-		orig_pmd = *pmd;
-		if (is_huge_zero_pmd(orig_pmd))
-			goto huge_unlock;
-
-		if (unlikely(!pmd_present(orig_pmd))) {
-			VM_BUG_ON(thp_migration_supported() &&
-					!is_pmd_migration_entry(orig_pmd));
-			goto huge_unlock;
-		}
-
-		page = pmd_page(orig_pmd);
-		if (next - addr != HPAGE_PMD_SIZE) {
-			int err;
-
-			if (page_mapcount(page) != 1)
-				goto huge_unlock;
-			get_page(page);
-			spin_unlock(ptl);
-			lock_page(page);
-			err = split_huge_page(page);
-			unlock_page(page);
-			put_page(page);
-			if (!err)
-				goto regular_page;
-			return 0;
-		}
-
-		if (pmd_young(orig_pmd)) {
-			pmdp_invalidate(vma, addr, pmd);
-			orig_pmd = pmd_mkold(orig_pmd);
-
-			set_pmd_at(mm, addr, pmd, orig_pmd);
-			tlb_remove_tlb_entry(tlb, pmd, addr);
-		}
-
-		ClearPageReferenced(page);
-		test_and_clear_page_young(page);
-		if (!isolate_lru_page(page))
-			list_add(&page->lru, &page_list);
-huge_unlock:
-		spin_unlock(ptl);
-		reclaim_pages(&page_list);
-		return 0;
-	}
-
-	if (pmd_trans_unstable(pmd))
-		return 0;
-regular_page:
-#endif
-	tlb_change_page_size(tlb, PAGE_SIZE);
-	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	flush_tlb_batched_pending(mm);
-	arch_enter_lazy_mmu_mode();
-	for (; addr < end; pte++, addr += PAGE_SIZE) {
-		ptent = *pte;
-		if (!pte_present(ptent))
-			continue;
-
-		page = vm_normal_page(vma, addr, ptent);
-		if (!page)
-			continue;
-
-		/*
-		 * creating a THP page is expensive so split it only if we
-		 * are sure it's worth. Split it if we are only owner.
-		 */
-		if (PageTransCompound(page)) {
-			if (page_mapcount(page) != 1)
-				break;
-			get_page(page);
-			if (!trylock_page(page)) {
-				put_page(page);
-				break;
-			}
-			pte_unmap_unlock(orig_pte, ptl);
-			if (split_huge_page(page)) {
-				unlock_page(page);
-				put_page(page);
-				pte_offset_map_lock(mm, pmd, addr, &ptl);
-				break;
-			}
-			unlock_page(page);
-			put_page(page);
-			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-			pte--;
-			addr -= PAGE_SIZE;
-			continue;
-		}
-
-		VM_BUG_ON_PAGE(PageTransCompound(page), page);
-
-		if (pte_young(ptent)) {
-			ptent = ptep_get_and_clear_full(mm, addr, pte,
-							tlb->fullmm);
-			ptent = pte_mkold(ptent);
-			set_pte_at(mm, addr, pte, ptent);
-			tlb_remove_tlb_entry(tlb, pte, addr);
-		}
-		ClearPageReferenced(page);
-		test_and_clear_page_young(page);
-		if (!isolate_lru_page(page))
-			list_add(&page->lru, &page_list);
-	}
-
-	arch_leave_lazy_mmu_mode();
-	pte_unmap_unlock(orig_pte, ptl);
-	reclaim_pages(&page_list);
-	cond_resched();
-	return 0;
-}
-
 static void madvise_pageout_page_range(struct mmu_gather *tlb,
 			     struct vm_area_struct *vma,
 			     unsigned long addr, unsigned long end)
 {
+	struct madvise_walk_private walk_private = {
+		.pageout = true,
+		.tlb = tlb,
+	};
+
 	tlb_start_vma(tlb, vma);
-	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, NULL);
+	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
 	tlb_end_vma(tlb, vma);
 }