mm: Rename arch pte_mkwrite()'s to pte_mkwrite_novma()
The x86 Shadow stack feature includes a new type of memory called shadow stack. This shadow stack memory has some unusual properties, which requires some core mm changes to function properly.

One of these unusual properties is that shadow stack memory is writable, but only in limited ways. These limits are applied via a specific PTE bit combination. Nevertheless, the memory is writable, and core mm code will need to apply the writable permissions in the typical paths that call pte_mkwrite(). The goal is to make pte_mkwrite() take a VMA, so that the x86 implementation of it can know whether to create regular writable or shadow stack mappings.

But there are a couple of challenges to this. Modifying the signatures of each arch pte_mkwrite() implementation would be error prone because some are generated with macros and would need to be re-implemented. Also, some pte_mkwrite() callers operate on kernel memory without a VMA.

So this can be done in a three step process. First pte_mkwrite() can be renamed to pte_mkwrite_novma() in each arch, with a generic pte_mkwrite() added that just calls pte_mkwrite_novma(). Next callers without a VMA can be moved to pte_mkwrite_novma(). And lastly, pte_mkwrite() and all callers can be changed to take/pass a VMA.

Start the process by renaming pte_mkwrite() to pte_mkwrite_novma() and adding the pte_mkwrite() wrapper in linux/pgtable.h. Apply the same pattern for pmd_mkwrite(). Since not all archs have a pmd_mkwrite_novma(), create a new arch config HAS_HUGE_PAGE that can be used to tell if pmd_mkwrite() should be defined. Otherwise in the !HAS_HUGE_PAGE cases the compiler would not be able to find pmd_mkwrite_novma().

No functional change.

Suggested-by: Linus Torvalds <torvalds@linuxfoundation.org>
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: David Hildenbrand <david@redhat.com>
Link: https://lore.kernel.org/lkml/CAHk-=wiZjSu7c9sFYZb3q04108stgHff2wfbokGCCgW7riz+8Q@mail.gmail.com/
Link: https://lore.kernel.org/all/20230613001108.3040476-2-rick.p.edgecombe%40intel.com
commit 2f0584f3f4
parent 06c2afb862
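For quick reference, the heart of step one is the generic fallback this commit adds to linux/pgtable.h (it appears in full as the final hunk of the diff below). A condensed sketch of that pattern:

/*
 * Condensed from the linux/pgtable.h hunk at the end of this diff:
 * if an architecture has not defined pte_mkwrite() itself, the generic
 * version simply forwards to the renamed pte_mkwrite_novma(), so all
 * existing callers keep building unchanged while the rename lands.
 */
#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_mkwrite_novma(pte);
}
#endif

#if defined(CONFIG_ARCH_WANT_PMD_MKWRITE) && !defined(pmd_mkwrite)
static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_mkwrite_novma(pmd);
}
#endif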
@@ -48,6 +48,9 @@ PTE Page Table Helpers
 +---------------------------+--------------------------------------------------+
 | pte_mkwrite               | Creates a writable PTE                           |
 +---------------------------+--------------------------------------------------+
+| pte_mkwrite_novma         | Creates a writable PTE, of the conventional type |
+|                           | of writable.                                     |
++---------------------------+--------------------------------------------------+
 | pte_wrprotect             | Creates a write protected PTE                    |
 +---------------------------+--------------------------------------------------+
 | pte_mkspecial             | Creates a special PTE                            |
@@ -120,6 +123,9 @@ PMD Page Table Helpers
 +---------------------------+--------------------------------------------------+
 | pmd_mkwrite               | Creates a writable PMD                           |
 +---------------------------+--------------------------------------------------+
+| pmd_mkwrite_novma         | Creates a writable PMD, of the conventional type |
+|                           | of writable.                                     |
++---------------------------+--------------------------------------------------+
 | pmd_wrprotect             | Creates a write protected PMD                    |
 +---------------------------+--------------------------------------------------+
 | pmd_mkspecial             | Creates a special PMD                            |
@@ -939,6 +939,14 @@ config HAVE_ARCH_HUGE_VMALLOC
 config ARCH_WANT_HUGE_PMD_SHARE
 	bool
 
+# Archs that want to use pmd_mkwrite on kernel memory need it defined even
+# if there are no userspace memory management features that use it
+config ARCH_WANT_KERNEL_PMD_MKWRITE
+	bool
+
+config ARCH_WANT_PMD_MKWRITE
+	def_bool TRANSPARENT_HUGEPAGE || ARCH_WANT_KERNEL_PMD_MKWRITE
+
 config HAVE_ARCH_SOFT_DIRTY
 	bool
 
@@ -256,7 +256,7 @@ extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;
 extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; return pte; }
 extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
 extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
-extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return pte; }
+extern inline pte_t pte_mkwrite_novma(pte_t pte){ pte_val(pte) &= ~_PAGE_FOW; return pte; }
 extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; }
 extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; }
 
@@ -21,7 +21,7 @@ static inline pmd_t pte_pmd(pte_t pte)
 }
 
 #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
-#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkwrite_novma(pmd) pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
 #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
@@ -87,7 +87,7 @@
 
 PTE_BIT_FUNC(mknotpresent, &= ~(_PAGE_PRESENT));
 PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
-PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE));
+PTE_BIT_FUNC(mkwrite_novma, |= (_PAGE_WRITE));
 PTE_BIT_FUNC(mkclean, &= ~(_PAGE_DIRTY));
 PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY));
 PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED));
@@ -202,7 +202,7 @@ static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
 
 PMD_BIT_FUNC(wrprotect, |= L_PMD_SECT_RDONLY);
 PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
-PMD_BIT_FUNC(mkwrite, &= ~L_PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkwrite_novma, &= ~L_PMD_SECT_RDONLY);
 PMD_BIT_FUNC(mkdirty, |= L_PMD_SECT_DIRTY);
 PMD_BIT_FUNC(mkclean, &= ~L_PMD_SECT_DIRTY);
 PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
@@ -227,7 +227,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
 	return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
 }
@@ -180,7 +180,7 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
 	return pmd;
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
 	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
@@ -487,7 +487,7 @@ static inline int pmd_trans_huge(pmd_t pmd)
 #define pmd_cont(pmd) pte_cont(pmd_pte(pmd))
 #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
 #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
-#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkwrite_novma(pmd) pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
 #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
 #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
@@ -176,7 +176,7 @@ static inline pte_t pte_mkold(pte_t pte)
 	return pte;
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	pte_val(pte) |= _PAGE_WRITE;
 	if (pte_val(pte) & _PAGE_MODIFIED)
@@ -300,7 +300,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
 }
 
 /* pte_mkwrite - mark page as writable */
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	pte_val(pte) |= _PAGE_WRITE;
 	return pte;
@@ -268,7 +268,7 @@ ia64_phys_addr_valid (unsigned long addr)
  * access rights:
  */
 #define pte_wrprotect(pte) (__pte(pte_val(pte) & ~_PAGE_AR_RW))
-#define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_AR_RW))
+#define pte_mkwrite_novma(pte) (__pte(pte_val(pte) | _PAGE_AR_RW))
 #define pte_mkold(pte) (__pte(pte_val(pte) & ~_PAGE_A))
 #define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_A))
 #define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D))
@@ -390,7 +390,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
 	return pte;
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	pte_val(pte) |= _PAGE_WRITE;
 	if (pte_val(pte) & _PAGE_MODIFIED)
@@ -490,7 +490,7 @@ static inline int pmd_write(pmd_t pmd)
 	return !!(pmd_val(pmd) & _PAGE_WRITE);
 }
 
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
+static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
 {
 	pmd_val(pmd) |= _PAGE_WRITE;
 	if (pmd_val(pmd) & _PAGE_MODIFIED)
@@ -210,7 +210,7 @@ static inline pte_t pte_mkold(pte_t pte)
 	return pte;
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	pte_val(pte) |= CF_PAGE_WRITABLE;
 	return pte;
@@ -155,7 +155,7 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;
 static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_RONLY; return pte; }
 static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_RONLY; return pte; }
+static inline pte_t pte_mkwrite_novma(pte_t pte){ pte_val(pte) &= ~_PAGE_RONLY; return pte; }
 static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mknocache(pte_t pte)
@@ -143,7 +143,7 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & SUN3_PAGE_ACCESS
 static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_WRITEABLE; return pte; }
 static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_MODIFIED; return pte; }
 static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= SUN3_PAGE_WRITEABLE; return pte; }
+static inline pte_t pte_mkwrite_novma(pte_t pte){ pte_val(pte) |= SUN3_PAGE_WRITEABLE; return pte; }
 static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= SUN3_PAGE_MODIFIED; return pte; }
 static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= SUN3_PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mknocache(pte_t pte) { pte_val(pte) |= SUN3_PAGE_NOCACHE; return pte; }
@@ -266,7 +266,7 @@ static inline pte_t pte_mkread(pte_t pte) \
 	{ pte_val(pte) |= _PAGE_USER; return pte; }
 static inline pte_t pte_mkexec(pte_t pte) \
 	{ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) \
+static inline pte_t pte_mkwrite_novma(pte_t pte) \
 	{ pte_val(pte) |= _PAGE_RW; return pte; }
 static inline pte_t pte_mkdirty(pte_t pte) \
 	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
@@ -309,7 +309,7 @@ static inline pte_t pte_mkold(pte_t pte)
 	return pte;
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	pte.pte_low |= _PAGE_WRITE;
 	if (pte.pte_low & _PAGE_MODIFIED) {
@@ -364,7 +364,7 @@ static inline pte_t pte_mkold(pte_t pte)
 	return pte;
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	pte_val(pte) |= _PAGE_WRITE;
 	if (pte_val(pte) & _PAGE_MODIFIED)
@@ -627,7 +627,7 @@ static inline pmd_t pmd_wrprotect(pmd_t pmd)
 	return pmd;
 }
 
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
+static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
 {
 	pmd_val(pmd) |= _PAGE_WRITE;
 	if (pmd_val(pmd) & _PAGE_MODIFIED)
@@ -129,7 +129,7 @@ static inline pte_t pte_mkold(pte_t pte)
 	return pte;
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	pte_val(pte) |= _PAGE_WRITE;
 	return pte;
@@ -250,7 +250,7 @@ static inline pte_t pte_mkold(pte_t pte)
 	return pte;
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	pte_val(pte) |= _PAGE_WRITE;
 	return pte;
@@ -331,7 +331,7 @@ static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; retu
 static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_WRITE; return pte; }
 static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
+static inline pte_t pte_mkwrite_novma(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; }
 
 /*
@@ -498,7 +498,7 @@ static inline pte_t pte_mkpte(pte_t pte)
 	return pte;
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	return __pte(pte_val(pte) | _PAGE_RW);
 }
@@ -600,7 +600,7 @@ static inline pte_t pte_mkexec(pte_t pte)
 	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_EXEC));
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	/*
 	 * write implies read, hence set both
@@ -1071,7 +1071,7 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd)
 #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkwrite_novma(pmd) pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
 
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 #define pmd_soft_dirty(pmd) pte_soft_dirty(pmd_pte(pmd))
@@ -170,8 +170,8 @@ void unmap_kernel_page(unsigned long va);
 #define pte_clear(mm, addr, ptep) \
 	do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)
 
-#ifndef pte_mkwrite
-static inline pte_t pte_mkwrite(pte_t pte)
+#ifndef pte_mkwrite_novma
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	return __pte(pte_val(pte) | _PAGE_RW);
 }
@@ -101,12 +101,12 @@ static inline int pte_write(pte_t pte)
 
 #define pte_write pte_write
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	return __pte(pte_val(pte) & ~_PAGE_RO);
 }
 
-#define pte_mkwrite pte_mkwrite
+#define pte_mkwrite_novma pte_mkwrite_novma
 
 static inline bool pte_user(pte_t pte)
 {
@@ -85,7 +85,7 @@
 #ifndef __ASSEMBLY__
 /* pte_clear moved to later in this file */
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	return __pte(pte_val(pte) | _PAGE_RW);
 }
@@ -378,7 +378,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
 
 /* static inline pte_t pte_mkread(pte_t pte) */
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	return __pte(pte_val(pte) | _PAGE_WRITE);
 }
@@ -664,9 +664,9 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
 	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
 }
 
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
+static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
 {
-	return pte_pmd(pte_mkwrite(pmd_pte(pmd)));
+	return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)));
 }
 
 static inline pmd_t pmd_wrprotect(pmd_t pmd)
@@ -128,6 +128,7 @@ config S390
 	select ARCH_WANT_DEFAULT_BPF_JIT
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select ARCH_WANT_OPTIMIZE_VMEMMAP
+	select ARCH_WANT_KERNEL_PMD_MKWRITE
 	select BUILDTIME_TABLE_SORT
 	select CLONE_BACKWARDS2
 	select DMA_OPS if PCI
@@ -104,7 +104,7 @@ static inline int huge_pte_dirty(pte_t pte)
 
 static inline pte_t huge_pte_mkwrite(pte_t pte)
 {
-	return pte_mkwrite(pte);
+	return pte_mkwrite_novma(pte);
 }
 
 static inline pte_t huge_pte_mkdirty(pte_t pte)
@@ -1002,7 +1002,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
 	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE));
 	if (pte_val(pte) & _PAGE_DIRTY)
@@ -1485,7 +1485,7 @@ static inline pmd_t pmd_wrprotect(pmd_t pmd)
 	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
 }
 
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
+static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
 {
 	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
 	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
@@ -359,11 +359,11 @@ static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
  * kernel permissions), we attempt to couple them a bit more sanely here.
  */
 PTE_BIT_FUNC(high, wrprotect, &= ~(_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE));
-PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
+PTE_BIT_FUNC(high, mkwrite_novma, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
 PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
 #else
 PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW);
-PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW);
+PTE_BIT_FUNC(low, mkwrite_novma, |= _PAGE_RW);
 PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE);
 #endif
 
@@ -241,7 +241,7 @@ static inline pte_t pte_mkold(pte_t pte)
 	return __pte(pte_val(pte) & ~SRMMU_REF);
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	return __pte(pte_val(pte) | SRMMU_WRITE);
 }
@@ -517,7 +517,7 @@ static inline pte_t pte_mkclean(pte_t pte)
 	return __pte(val);
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	unsigned long val = pte_val(pte), mask;
 
@@ -772,11 +772,11 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
 	return __pmd(pte_val(pte));
 }
 
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
+static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
 {
 	pte_t pte = __pte(pmd_val(pmd));
 
-	pte = pte_mkwrite(pte);
+	pte = pte_mkwrite_novma(pte);
 
 	return __pmd(pte_val(pte));
 }
@@ -207,7 +207,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
 	return(pte);
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	if (unlikely(pte_get_bits(pte, _PAGE_RW)))
 		return pte;
@@ -353,7 +353,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
 	return pte_set_flags(pte, _PAGE_ACCESSED);
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	return pte_set_flags(pte, _PAGE_RW);
 }
@@ -454,7 +454,7 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
 	return pmd_set_flags(pmd, _PAGE_ACCESSED);
 }
 
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
+static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
 {
 	return pmd_set_flags(pmd, _PAGE_RW);
 }
@@ -262,7 +262,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
 	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkyoung(pte_t pte)
 	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 	{ pte_val(pte) |= _PAGE_WRITABLE; return pte; }
 
 #define pgprot_noncached(prot) \
@@ -22,7 +22,7 @@ static inline unsigned long huge_pte_dirty(pte_t pte)
 
 static inline pte_t huge_pte_mkwrite(pte_t pte)
 {
-	return pte_mkwrite(pte);
+	return pte_mkwrite_novma(pte);
 }
 
 #ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
@@ -515,6 +515,20 @@ extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
 			    pud_t *pudp);
 #endif
 
+#ifndef pte_mkwrite
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	return pte_mkwrite_novma(pte);
+}
+#endif
+
+#if defined(CONFIG_ARCH_WANT_PMD_MKWRITE) && !defined(pmd_mkwrite)
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+	return pmd_mkwrite_novma(pmd);
+}
+#endif
+
 #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
 struct mm_struct;
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)