thp: fix page_referenced to modify mapcount/vm_flags only if page is found
When vmscan.c calls page_referenced(), if an anon page was created before a process forked, rmap will search for it in both of the processes, even though one of them might have since broken COW.

If the child process mlocks the vma the COWed page belongs to, page_referenced() running on the page mapped by the parent would lead to *vm_flags getting VM_LOCKED set erroneously (leading to the references on the parent page being ignored and the parent page being evicted too early).

*mapcount would also be decremented by page_referenced_one() even if the page wasn't found by page_check_address().

This also lets pmdp_clear_flush_young_notify() go ahead on a pmd_trans_splitting() pmd. We hold the page_table_lock, so __split_huge_page_map() must wait for pmdp_clear_flush_young_notify() to complete before it can modify the pmd. The pmd is also still mapped in userland, so the young bit may materialize through a tlb miss before split_huge_page_map() runs.

This provides more accurate page_referenced() behavior during split_huge_page().

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reported-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 78f9bbb5a6
commit 2da28bfd96

mm/rmap.c | 54
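For a condensed picture of the ordering change, here is a minimal userspace sketch (not kernel code; struct toy_vma, check_address() and the two functions below are hypothetical stand-ins for the kernel structures). It contrasts the old ordering, which updated *vm_flags/*mapcount before confirming the page is mapped in the vma being scanned, with the fixed ordering that verifies the mapping first.

/* toy_page_referenced.c -- compile with: cc -Wall toy_page_referenced.c */
#include <stdbool.h>
#include <stdio.h>

#define VM_LOCKED 0x1UL

struct toy_vma {
	unsigned long vm_flags;
	bool maps_page;	/* does this vma still map the page being scanned? */
};

/* Stand-in for page_check_address()/page_check_address_pmd(): fails when
 * the vma no longer maps the page (e.g. the child broke COW after fork). */
static bool check_address(const struct toy_vma *vma)
{
	return vma->maps_page;
}

/* Old ordering: VM_LOCKED is propagated and *mapcount touched before the
 * mapping is verified, so a false positive from rmap still mutates them. */
static int referenced_one_old(const struct toy_vma *vma,
			      unsigned int *mapcount, unsigned long *vm_flags)
{
	int referenced = 0;

	if (vma->vm_flags & VM_LOCKED) {
		*mapcount = 0;		/* break early from loop */
		*vm_flags |= VM_LOCKED;	/* leaks even if page isn't mapped here */
		return 0;
	}
	if (check_address(vma))
		referenced = 1;
	(*mapcount)--;			/* decremented even when not found */
	return referenced;
}

/* New ordering: find the page in this vma first; only then touch
 * *mapcount / *vm_flags, mirroring the commit's change. */
static int referenced_one_new(const struct toy_vma *vma,
			      unsigned int *mapcount, unsigned long *vm_flags)
{
	if (!check_address(vma))
		return 0;		/* rmap false positive: touch nothing */
	if (vma->vm_flags & VM_LOCKED) {
		*mapcount = 0;		/* break early from loop */
		*vm_flags |= VM_LOCKED;
		return 0;
	}
	(*mapcount)--;
	return 1;
}

int main(void)
{
	/* Child mlocked its vma but already broke COW, so the parent's page
	 * is not mapped there anymore. */
	struct toy_vma child = { .vm_flags = VM_LOCKED, .maps_page = false };
	unsigned int mapcount;
	unsigned long vm_flags;

	mapcount = 1; vm_flags = 0;
	referenced_one_old(&child, &mapcount, &vm_flags);
	printf("old: VM_LOCKED leaked=%d mapcount=%u\n",
	       !!(vm_flags & VM_LOCKED), mapcount);

	mapcount = 1; vm_flags = 0;
	referenced_one_new(&child, &mapcount, &vm_flags);
	printf("new: VM_LOCKED leaked=%d mapcount=%u\n",
	       !!(vm_flags & VM_LOCKED), mapcount);
	return 0;
}

With the page no longer mapped in the mlocked child, the old ordering still sets VM_LOCKED and zeroes the mapcount, which is what makes vmscan ignore the parent's references; the new ordering leaves both untouched.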
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -497,41 +497,51 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 	struct mm_struct *mm = vma->vm_mm;
 	int referenced = 0;
 
-	/*
-	 * Don't want to elevate referenced for mlocked page that gets this far,
-	 * in order that it progresses to try_to_unmap and is moved to the
-	 * unevictable list.
-	 */
-	if (vma->vm_flags & VM_LOCKED) {
-		*mapcount = 0;	/* break early from loop */
-		*vm_flags |= VM_LOCKED;
-		goto out;
-	}
-
-	/* Pretend the page is referenced if the task has the
-	   swap token and is in the middle of a page fault. */
-	if (mm != current->mm && has_swap_token(mm) &&
-			rwsem_is_locked(&mm->mmap_sem))
-		referenced++;
-
 	if (unlikely(PageTransHuge(page))) {
 		pmd_t *pmd;
 
 		spin_lock(&mm->page_table_lock);
+		/*
+		 * rmap might return false positives; we must filter
+		 * these out using page_check_address_pmd().
+		 */
 		pmd = page_check_address_pmd(page, mm, address,
 					     PAGE_CHECK_ADDRESS_PMD_FLAG);
-		if (pmd && !pmd_trans_splitting(*pmd) &&
-		    pmdp_clear_flush_young_notify(vma, address, pmd))
+		if (!pmd) {
+			spin_unlock(&mm->page_table_lock);
+			goto out;
+		}
+
+		if (vma->vm_flags & VM_LOCKED) {
+			spin_unlock(&mm->page_table_lock);
+			*mapcount = 0;	/* break early from loop */
+			*vm_flags |= VM_LOCKED;
+			goto out;
+		}
+
+		/* go ahead even if the pmd is pmd_trans_splitting() */
+		if (pmdp_clear_flush_young_notify(vma, address, pmd))
 			referenced++;
 		spin_unlock(&mm->page_table_lock);
 	} else {
 		pte_t *pte;
 		spinlock_t *ptl;
 
+		/*
+		 * rmap might return false positives; we must filter
+		 * these out using page_check_address().
+		 */
 		pte = page_check_address(page, mm, address, &ptl, 0);
 		if (!pte)
 			goto out;
 
+		if (vma->vm_flags & VM_LOCKED) {
+			pte_unmap_unlock(pte, ptl);
+			*mapcount = 0;	/* break early from loop */
+			*vm_flags |= VM_LOCKED;
+			goto out;
+		}
+
 		if (ptep_clear_flush_young_notify(vma, address, pte)) {
 			/*
 			 * Don't treat a reference through a sequentially read
@@ -546,6 +556,12 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 		pte_unmap_unlock(pte, ptl);
 	}
 
+	/* Pretend the page is referenced if the task has the
+	   swap token and is in the middle of a page fault. */
+	if (mm != current->mm && has_swap_token(mm) &&
+			rwsem_is_locked(&mm->mmap_sem))
+		referenced++;
+
 	(*mapcount)--;
 
 	if (referenced)