arm64: Support execute-only permissions with Enhanced PAN
Enhanced Privileged Access Never (EPAN) allows Privileged Access Never
to be used with execute-only mappings.

The absence of such support was one of the reasons for commit 24cecc3774
("arm64: Revert support for execute-only user mappings"), so execute-only
user mappings can now be revisited and re-enabled.
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210312173811.58284-2-vladimir.murzin@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
commit 18107f8a2d (parent 1e28eed176)
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1058,6 +1058,9 @@ config SYS_SUPPORTS_HUGETLBFS
 config ARCH_HAS_CACHE_LINE_SIZE
 	def_bool y
 
+config ARCH_HAS_FILTER_PGPROT
+	def_bool y
+
 config ARCH_ENABLE_SPLIT_PMD_PTLOCK
 	def_bool y if PGTABLE_LEVELS > 2
 
@@ -1681,6 +1684,20 @@ config ARM64_MTE
 
 endmenu
 
+menu "ARMv8.7 architectural features"
+
+config ARM64_EPAN
+	bool "Enable support for Enhanced Privileged Access Never (EPAN)"
+	default y
+	depends on ARM64_PAN
+	help
+	  Enhanced Privileged Access Never (EPAN) allows Privileged
+	  Access Never to be used with Execute-only mappings.
+
+	  The feature is detected at runtime, and will remain disabled
+	  if the cpu does not implement the feature.
+endmenu
+
 config ARM64_SVE
 	bool "ARM Scalable Vector Extension support"
 	default y
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -66,7 +66,8 @@
 #define ARM64_WORKAROUND_1508412	58
 #define ARM64_HAS_LDAPR			59
 #define ARM64_KVM_PROTECTED_MODE	60
+#define ARM64_HAS_EPAN			61
 
-#define ARM64_NCAPS			61
+#define ARM64_NCAPS			62
 
 #endif /* __ASM_CPUCAPS_H */
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -87,12 +87,13 @@ extern bool arm64_use_ng_mappings;
 #define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
+#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
 
 #define __P000  PAGE_NONE
 #define __P001  PAGE_READONLY
 #define __P010  PAGE_READONLY
 #define __P011  PAGE_READONLY
-#define __P100  PAGE_READONLY_EXEC
+#define __P100  PAGE_EXECONLY
 #define __P101  PAGE_READONLY_EXEC
 #define __P110  PAGE_READONLY_EXEC
 #define __P111  PAGE_READONLY_EXEC
@@ -101,7 +102,7 @@ extern bool arm64_use_ng_mappings;
 #define __S001  PAGE_READONLY
 #define __S010  PAGE_SHARED
 #define __S011  PAGE_SHARED
-#define __S100  PAGE_READONLY_EXEC
+#define __S100  PAGE_EXECONLY
 #define __S101  PAGE_READONLY_EXEC
 #define __S110  PAGE_SHARED_EXEC
 #define __S111  PAGE_SHARED_EXEC
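For context: mm indexes the __P/__S tables with the low four vm_flags bits,
so a private PROT_EXEC-only mapping selects __P100, which after this patch
resolves to PAGE_EXECONLY. A host-side sketch of the index computation (the
VM_* values mirror include/linux/mm.h; the program itself is illustrative
only, not kernel code):

#include <stdio.h>

#define VM_READ		0x1UL
#define VM_WRITE	0x2UL
#define VM_EXEC		0x4UL
#define VM_SHARED	0x8UL

int main(void)
{
	/* mmap(..., PROT_EXEC, MAP_PRIVATE, ...): of the four bits used
	 * for indexing, only VM_EXEC is set */
	unsigned long vm_flags = VM_EXEC;
	unsigned long idx = vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED);

	printf("protection_map index = %lu (__P100 -> PAGE_EXECONLY)\n", idx);
	return 0;
}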
|
@ -113,11 +113,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
|
||||
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
|
||||
|
||||
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
|
||||
/*
|
||||
* Execute-only user mappings do not have the PTE_USER bit set. All valid
|
||||
* kernel mappings have the PTE_UXN bit set.
|
||||
*/
|
||||
#define pte_valid_not_user(pte) \
|
||||
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
|
||||
#define pte_valid_user(pte) \
|
||||
((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
|
||||
|
||||
((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
|
||||
/*
|
||||
* Could the pte be present in the TLB? We must check mm_tlb_flush_pending
|
||||
* so that we don't erroneously return false for pages that have been
|
||||
@@ -130,12 +131,14 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
 
 /*
- * p??_access_permitted() is true for valid user mappings (subject to the
- * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
- * set.
+ * p??_access_permitted() is true for valid user mappings (PTE_USER
+ * bit set, subject to the write permission check). For execute-only
+ * mappings, like PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits
+ * not set) must return false. PROT_NONE mappings do not have the
+ * PTE_VALID bit set.
  */
 #define pte_access_permitted(pte, write) \
-	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
+	(((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
 #define pmd_access_permitted(pmd, write) \
 	(pte_access_permitted(pmd_pte(pmd), (write)))
 #define pud_access_permitted(pud, write) \
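Both predicates are pure bit tests, so their effect can be checked outside
the kernel. A standalone sketch, assuming the arm64 bit positions from
pgtable-hwdef.h (PTE_VALID bit 0, PTE_USER bit 6, PTE_UXN bit 54); note how
the execute-only PTE is neither classed as a kernel mapping nor granted
access:

#include <stdint.h>
#include <stdio.h>

#define PTE_VALID	(1ULL << 0)
#define PTE_USER	(1ULL << 6)	/* AP[1] */
#define PTE_UXN		(1ULL << 54)	/* user execute-never */

/* mirrors the new pte_valid_not_user() */
static int valid_not_user(uint64_t pte)
{
	return (pte & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN);
}

/* mirrors the new pte_access_permitted(), ignoring the write check */
static int access_permitted(uint64_t pte)
{
	return (pte & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER);
}

int main(void)
{
	uint64_t kernel_pte   = PTE_VALID | PTE_UXN;		/* kernel: UXN set */
	uint64_t user_rw_pte  = PTE_VALID | PTE_USER | PTE_UXN;	/* user data page */
	uint64_t execonly_pte = PTE_VALID;			/* neither USER nor UXN */

	printf("kernel:    not_user=%d permitted=%d\n",	/* 1 0 */
	       valid_not_user(kernel_pte), access_permitted(kernel_pte));
	printf("user rw:   not_user=%d permitted=%d\n",	/* 0 1 */
	       valid_not_user(user_rw_pte), access_permitted(user_rw_pte));
	printf("exec-only: not_user=%d permitted=%d\n",	/* 0 0 */
	       valid_not_user(execonly_pte), access_permitted(execonly_pte));
	return 0;
}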
@@ -995,6 +998,18 @@ static inline bool arch_wants_old_prefaulted_pte(void)
 }
 #define arch_wants_old_prefaulted_pte	arch_wants_old_prefaulted_pte
 
+static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
+{
+	if (cpus_have_const_cap(ARM64_HAS_EPAN))
+		return prot;
+
+	if (pgprot_val(prot) != pgprot_val(PAGE_EXECONLY))
+		return prot;
+
+	return PAGE_READONLY_EXEC;
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
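arch_filter_pgprot() is the arch hook behind the ARCH_HAS_FILTER_PGPROT
symbol added in the Kconfig hunk above: the generic vm_get_page_prot()
passes every computed protection through it, which is how PAGE_EXECONLY
quietly falls back to PAGE_READONLY_EXEC on CPUs without EPAN. Roughly, the
generic helper of this era looked like the following (shown for context,
not part of this patch):

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]) |
			pgprot_val(arch_vm_get_page_prot(vm_flags)));

	/* with ARCH_HAS_FILTER_PGPROT, the arch gets the final say */
	return arch_filter_pgprot(ret);
}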
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -597,6 +597,7 @@
 	(SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
 
 /* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_EPAN		(BIT(57))
 #define SCTLR_EL1_ATA0		(BIT(42))
 
 #define SCTLR_EL1_TCF0_SHIFT	38
@@ -637,7 +638,7 @@
 	SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \
 	SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \
 	SCTLR_ELx_ATA | SCTLR_EL1_ATA0 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | \
-	SCTLR_EL1_RES1)
+	SCTLR_EL1_EPAN | SCTLR_EL1_RES1)
 
 /* MAIR_ELx memory attributes (used by Linux) */
 #define MAIR_ATTR_DEVICE_nGnRnE	UL(0x00)
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1821,6 +1821,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.cpu_enable = cpu_enable_pan,
 	},
 #endif /* CONFIG_ARM64_PAN */
+#ifdef CONFIG_ARM64_EPAN
+	{
+		.desc = "Enhanced Privileged Access Never",
+		.capability = ARM64_HAS_EPAN,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64MMFR1_EL1,
+		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
+		.sign = FTR_UNSIGNED,
+		.min_field_value = 3,
+	},
+#endif /* CONFIG_ARM64_EPAN */
 #ifdef CONFIG_ARM64_LSE_ATOMICS
 	{
 		.desc = "LSE atomic instructions",
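The table entry reuses the generic has_cpuid_feature() matcher; no
EPAN-specific detection code is needed. In effect it performs something
like the following (a simplified sketch of the generic helper; the name
cpu_has_epan() is hypothetical, introduced here only for illustration).
ID_AA64MMFR1_EL1.PAN encodes 0 for no PAN, 1 for FEAT_PAN, 2 for FEAT_PAN2
and 3 for FEAT_PAN3 (EPAN):

static bool cpu_has_epan(void)
{
	u64 mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	unsigned int pan = cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_PAN_SHIFT);

	return pan >= 3;	/* matches .min_field_value = 3 above */
}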
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -527,7 +527,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
 	const struct fault_info *inf;
 	struct mm_struct *mm = current->mm;
 	vm_fault_t fault;
-	unsigned long vm_flags = VM_ACCESS_FLAGS;
+	unsigned long vm_flags;
 	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
 	unsigned long addr = untagged_addr(far);
 
@@ -544,12 +544,28 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
 	if (user_mode(regs))
 		mm_flags |= FAULT_FLAG_USER;
 
+	/*
+	 * vm_flags tells us what bits we must have in vma->vm_flags
+	 * for the fault to be benign, __do_page_fault() would check
+	 * vma->vm_flags & vm_flags and returns an error if the
+	 * intersection is empty
+	 */
 	if (is_el0_instruction_abort(esr)) {
+		/* It was exec fault */
 		vm_flags = VM_EXEC;
 		mm_flags |= FAULT_FLAG_INSTRUCTION;
 	} else if (is_write_abort(esr)) {
+		/* It was write fault */
 		vm_flags = VM_WRITE;
 		mm_flags |= FAULT_FLAG_WRITE;
+	} else {
+		/* It was read fault */
+		vm_flags = VM_READ;
+		/* Write implies read */
+		vm_flags |= VM_WRITE;
+		/* If EPAN is absent then exec implies read */
+		if (!cpus_have_const_cap(ARM64_HAS_EPAN))
+			vm_flags |= VM_EXEC;
 	}
 
 	if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
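The new comment refers to __do_page_fault(), which consumes vm_flags: with
EPAN present, a data read from an execute-only VMA arrives with
vm_flags = VM_READ | VM_WRITE, the intersection with the VMA's
VM_EXEC-only flags is empty, and the fault is reported as bad access. A
slightly simplified sketch of that consumer (same file, unchanged by this
patch, reproduced from memory for context):

static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
				  unsigned int mm_flags, unsigned long vm_flags,
				  struct pt_regs *regs)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (unlikely(!vma))
		return VM_FAULT_BADMAP;

	if (unlikely(vma->vm_start > addr)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			return VM_FAULT_BADMAP;
		if (expand_stack(vma, addr))
			return VM_FAULT_BADMAP;
	}

	/* the "intersection is empty" check from the comment above */
	if (!(vma->vm_flags & vm_flags))
		return VM_FAULT_BADACCESS;

	return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, regs);
}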
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -93,6 +93,12 @@ static void unmap_region(struct mm_struct *mm,
  * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
  *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
  *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
+ *
+ * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
+ * MAP_PRIVATE (with Enhanced PAN supported):
+ *								r: (no) no
+ *								w: (no) no
+ *								x: (yes) yes
  */
 pgprot_t protection_map[16] __ro_after_init = {
 	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
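The net effect is observable from userspace on EPAN-capable hardware: a
PROT_EXEC-only mapping can be executed but any data read faults. A minimal
demonstration sketch (illustrative only; on pre-EPAN CPUs the final read
succeeds, because arch_filter_pgprot() silently upgrades the mapping to
read+exec):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void on_segv(int sig)
{
	(void)sig;
	write(1, "read faulted: execute-only enforced\n", 36);
	_exit(0);
}

int main(void)
{
	signal(SIGSEGV, on_segv);

	/* stage the code through a writable mapping first */
	unsigned char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	memcpy(p, "\xc0\x03\x5f\xd6", 4);		/* arm64 "ret" */
	__builtin___clear_cache((char *)p, (char *)p + 4);

	if (mprotect(p, 4096, PROT_EXEC))		/* execute-only */
		return 1;

	((void (*)(void))p)();				/* executing works */

	unsigned char c = *(volatile unsigned char *)p;	/* read should fault */
	printf("read succeeded (0x%02x): EPAN not active\n", c);
	return 0;
}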