/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in asm-generic/pgtable.h
 *
 * Copyright (C) 2010 Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none. Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */
void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache(). This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif
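
/*
 * pmdp_set_access_flags(): huge-pmd counterpart of ptep_set_access_flags().
 * Installs the more-permissive entry and flushes the TLB for the huge-page
 * range if the pmd actually changed; returns whether it changed.  Only
 * meaningful with CONFIG_TRANSPARENT_HUGEPAGE.
 */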
#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
	BUG();
	return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
#endif
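
/*
 * ptep_clear_flush_young(): test and clear the young/accessed bit of a pte,
 * flushing the TLB entry for the page if the bit was set.  Returns the old
 * young state.
 */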
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif
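
/*
 * pmdp_clear_flush_young(): as above, but for a transparent huge pmd; the
 * flush covers the whole huge-page range.
 */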
#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#else
	BUG();
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif
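
/*
 * ptep_clear_flush(): clear a pte and, if the old pte was accessible, flush
 * its TLB entry.  pte_accessible() includes protnone/NUMA ptes while a TLB
 * flush is still pending for the mm, to avoid racing with migration.
 * Returns the old pte.
 */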
#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = (vma)->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif
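
/*
 * pmdp_clear_flush(): clear a transparent huge pmd and flush the TLB for the
 * huge-page range.  Returns the old pmd.
 */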
#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(!pmd_trans_huge(*pmdp));
	pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
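
/*
 * pmdp_splitting_flush(): mark a huge pmd as being split; the TLB flush only
 * serializes against fast GUP walkers.
 */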
#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd = pmd_mksplitting(*pmdp);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	/* tlb flush only to serialize against gup-fast */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
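
/*
 * pgtable_trans_huge_deposit(): stash a preallocated page table page against
 * a huge pmd (in FIFO order) so it can be reused when the huge pmd is later
 * split.  The caller must hold the pmd lock.
 */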
#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
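
/*
 * pgtable_trans_huge_withdraw(): take back a page table page previously
 * stashed with pgtable_trans_huge_deposit().  The caller must hold the pmd
 * lock and must have deposited at least one page table beforehand.
 */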
#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* no "address" argument so destroys page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	if (list_empty(&pgtable->lru))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = list_entry(pgtable->lru.next,
						    struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
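
/*
 * pmdp_invalidate(): make a huge pmd not-present and flush the TLB for the
 * range, so no CPU keeps using the old translation while the caller rewrites
 * the entry.
 */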
#ifndef __HAVE_ARCH_PMDP_INVALIDATE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
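
/*
 * pmdp_collapse_flush(): clear a pmd that points to a page table (not a huge
 * page) and flush the TLB for the range; used when collapsing ptes into a
 * transparent huge page.  Returns the old pmd.
 */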
#ifndef pmdp_collapse_flush
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif