2nd set of arm64 updates for 4.16:
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull more arm64 updates from Catalin Marinas:
 "As I mentioned in the last pull request, there's a second batch of
  security updates for arm64 with mitigations for Spectre/v1 and an
  improved one for Spectre/v2 (via a newly defined firmware interface
  API).

  Spectre v1 mitigation:
   - back-end version of array_index_mask_nospec()
   - masking of the syscall number to restrict speculation through the
     syscall table
   - masking of __user pointers prior to dereference in uaccess routines

  Spectre v2 mitigation update:
   - using the new firmware SMC calling convention specification update
   - removing the current PSCI GET_VERSION firmware call mitigation as
     vendors are deploying new SMCCC-capable firmware
   - additional branch predictor hardening for synchronous exceptions
     and interrupts while in user mode

  Meltdown v3 mitigation update:
   - Cavium Thunder X is unaffected but a hardware erratum gets in the
     way. The kernel now starts with the page tables mapped as global
     and switches to non-global if kpti needs to be enabled.

  Other:
   - Theoretical trylock bug fixed"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (38 commits)
  arm64: Kill PSCI_GET_VERSION as a variant-2 workaround
  arm64: Add ARM_SMCCC_ARCH_WORKAROUND_1 BP hardening support
  arm/arm64: smccc: Implement SMCCC v1.1 inline primitive
  arm/arm64: smccc: Make function identifiers an unsigned quantity
  firmware/psci: Expose SMCCC version through psci_ops
  firmware/psci: Expose PSCI conduit
  arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling
  arm64: KVM: Report SMCCC_ARCH_WORKAROUND_1 BP hardening support
  arm/arm64: KVM: Turn kvm_psci_version into a static inline
  arm/arm64: KVM: Advertise SMCCC v1.1
  arm/arm64: KVM: Implement PSCI 1.0 support
  arm/arm64: KVM: Add smccc accessors to PSCI code
  arm/arm64: KVM: Add PSCI_VERSION helper
  arm/arm64: KVM: Consolidate the PSCI include files
  arm64: KVM: Increment PC after handling an SMC trap
  arm: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls
  arm64: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls
  arm64: entry: Apply BP hardening for suspicious interrupts from EL0
  arm64: entry: Apply BP hardening for high-priority synchronous exceptions
  arm64: futex: Mask __user pointers prior to dereference
  ...
commit c013632192
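All of the Spectre v1 changes in this series follow one pattern: rather than trusting a bounds check that the CPU may speculate past, they compute a data-dependent mask that is all ones only when the index (or pointer) is in range and AND it into the value before any dependent load. The C sketch below is a minimal illustration of that idea, roughly the generic fallback the kernel provides when no architecture-specific helper exists; it is not the arm64 code added here, and the function names are made up for the example.

```c
#include <stddef.h>

/*
 * Sketch of the array_index_mask_nospec() idea: evaluate to ~0UL when
 * 0 <= idx < sz and to 0 otherwise, using pure data flow so there is
 * no branch for the CPU to mispredict. Relies on arithmetic right
 * shift of a negative value, as the kernel's generic helper does.
 */
static inline unsigned long index_mask_nospec(unsigned long idx,
					      unsigned long sz)
{
	return ~(long)(idx | (sz - 1 - idx)) >> (sizeof(long) * 8 - 1);
}

unsigned int read_element_nospec(const unsigned int *array, size_t nr,
				 unsigned long idx)
{
	if (idx >= nr)
		return 0;

	/*
	 * The bounds check above may be speculated past; clamping the
	 * index with the mask keeps even a mispredicted load inside
	 * the array (a clamped out-of-range index becomes 0).
	 */
	return array[idx & index_mask_nospec(idx, nr)];
}
```

On arm64 after this series the same clamp is built from a CMP/SBC pair followed by the new CSDB barrier (see the assembler.h and barrier.h hunks below), and __user pointers get the analogous BICS/CSEL treatment in __uaccess_mask_ptr().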
@@ -306,4 +306,11 @@ static inline void kvm_fpsimd_flush_cpu_state(void) {}

static inline void kvm_arm_vhe_guest_enter(void) {}
static inline void kvm_arm_vhe_guest_exit(void) {}

static inline bool kvm_arm_harden_branch_predictor(void)
{
	/* No way to detect it yet, pretend it is not there. */
	return false;
}

#endif /* __ARM_KVM_HOST_H__ */
@@ -1,27 +0,0 @@
/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM_KVM_PSCI_H__
#define __ARM_KVM_PSCI_H__

#define KVM_ARM_PSCI_0_1	1
#define KVM_ARM_PSCI_0_2	2

int kvm_psci_version(struct kvm_vcpu *vcpu);
int kvm_psci_call(struct kvm_vcpu *vcpu);

#endif /* __ARM_KVM_PSCI_H__ */
@@ -21,7 +21,7 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>
#include <kvm/arm_psci.h>
#include <trace/events/kvm.h>

#include "trace.h"
@@ -36,9 +36,9 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
		      kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	ret = kvm_psci_call(vcpu);
	ret = kvm_hvc_call_handler(vcpu);
	if (ret < 0) {
		kvm_inject_undefined(vcpu);
		vcpu_set_reg(vcpu, 0, ~0UL);
		return 1;
	}

@@ -47,7 +47,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)

static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
	 * Trap exception, not a Secure Monitor Call exception [...]"
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address...
	 */
	vcpu_set_reg(vcpu, 0, ~0UL);
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 1;
}
@@ -115,6 +115,24 @@
	hint	#16
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
 * of bounds.
 */
	.macro	mask_nospec64, idx, limit, tmp
	sub	\tmp, \idx, \limit
	bic	\tmp, \tmp, \idx
	and	\idx, \idx, \tmp, asr #63
	csdb
	.endm

/*
 * NOP sequence
 */
@ -514,7 +532,7 @@ alternative_endif
|
||||
* phys: physical address, preserved
|
||||
* ttbr: returns the TTBR value
|
||||
*/
|
||||
.macro phys_to_ttbr, phys, ttbr
|
||||
.macro phys_to_ttbr, ttbr, phys
|
||||
#ifdef CONFIG_ARM64_PA_BITS_52
|
||||
orr \ttbr, \phys, \phys, lsr #46
|
||||
and \ttbr, \ttbr, #TTBR_BADDR_MASK_52
|
||||
@ -523,6 +541,29 @@ alternative_endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro phys_to_pte, pte, phys
|
||||
#ifdef CONFIG_ARM64_PA_BITS_52
|
||||
/*
|
||||
* We assume \phys is 64K aligned and this is guaranteed by only
|
||||
* supporting this configuration with 64K pages.
|
||||
*/
|
||||
orr \pte, \phys, \phys, lsr #36
|
||||
and \pte, \pte, #PTE_ADDR_MASK
|
||||
#else
|
||||
mov \pte, \phys
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro pte_to_phys, phys, pte
|
||||
#ifdef CONFIG_ARM64_PA_BITS_52
|
||||
ubfiz \phys, \pte, #(48 - 16 - 12), #16
|
||||
bfxil \phys, \pte, #16, #32
|
||||
lsl \phys, \phys, #16
|
||||
#else
|
||||
and \phys, \pte, #PTE_ADDR_MASK
|
||||
#endif
|
||||
.endm
|
||||
|
||||
/**
|
||||
* Errata workaround prior to disable MMU. Insert an ISB immediately prior
|
||||
* to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
|
||||
|
@@ -32,6 +32,7 @@
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

#define psb_csync()	asm volatile("hint #17" : : : "memory")
#define csdb()		asm volatile("hint #20" : : : "memory")

#define mb()		dsb(sy)
#define rmb()		dsb(ld)
@@ -40,6 +41,27 @@
#define dma_rmb()	dmb(oshld)
#define dma_wmb()	dmb(oshst)

/*
 * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz
 * and 0 otherwise.
 */
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long idx,
						    unsigned long sz)
{
	unsigned long mask;

	asm volatile(
	"	cmp	%1, %2\n"
	"	sbc	%0, xzr, xzr\n"
	: "=r" (mask)
	: "r" (idx), "Ir" (sz)
	: "cc");

	csdb();
	return mask;
}

#define __smp_mb()	dmb(ish)
#define __smp_rmb()	dmb(ishld)
#define __smp_wmb()	dmb(ishst)
@@ -48,9 +48,10 @@ do { \
} while (0)

static inline int
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
{
	int oldval = 0, ret, tmp;
	u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);

	pagefault_disable();

@@ -88,15 +89,17 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
}

static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
			      u32 oldval, u32 newval)
{
	int ret = 0;
	u32 val, tmp;
	u32 __user *uaddr;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
	if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32)))
		return -EFAULT;

	uaddr = __uaccess_mask_ptr(_uaddr);
	uaccess_enable();
	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
"	prfm	pstl1strm, %2\n"
@ -123,16 +123,8 @@
|
||||
/*
|
||||
* Initial memory map attributes.
|
||||
*/
|
||||
#define _SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
|
||||
#define _SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
|
||||
|
||||
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
|
||||
#define SWAPPER_PTE_FLAGS (_SWAPPER_PTE_FLAGS | PTE_NG)
|
||||
#define SWAPPER_PMD_FLAGS (_SWAPPER_PMD_FLAGS | PMD_SECT_NG)
|
||||
#else
|
||||
#define SWAPPER_PTE_FLAGS _SWAPPER_PTE_FLAGS
|
||||
#define SWAPPER_PMD_FLAGS _SWAPPER_PMD_FLAGS
|
||||
#endif
|
||||
#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
|
||||
#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
|
||||
|
||||
#if ARM64_SWAPPER_USES_SECTION_MAPS
|
||||
#define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
|
||||
|
@ -415,4 +415,10 @@ static inline void kvm_arm_vhe_guest_exit(void)
|
||||
{
|
||||
local_daif_restore(DAIF_PROCCTX_NOIRQ);
|
||||
}
|
||||
|
||||
static inline bool kvm_arm_harden_branch_predictor(void)
|
||||
{
|
||||
return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
|
||||
}
|
||||
|
||||
#endif /* __ARM64_KVM_HOST_H__ */
|
||||
|
@ -1,27 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2012,2013 - ARM Ltd
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef __ARM64_KVM_PSCI_H__
|
||||
#define __ARM64_KVM_PSCI_H__
|
||||
|
||||
#define KVM_ARM_PSCI_0_1 1
|
||||
#define KVM_ARM_PSCI_0_2 2
|
||||
|
||||
int kvm_psci_version(struct kvm_vcpu *vcpu);
|
||||
int kvm_psci_call(struct kvm_vcpu *vcpu);
|
||||
|
||||
#endif /* __ARM64_KVM_PSCI_H__ */
|
@ -37,13 +37,11 @@
|
||||
#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
|
||||
#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
|
||||
|
||||
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
|
||||
#define PROT_DEFAULT (_PROT_DEFAULT | PTE_NG)
|
||||
#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_SECT_NG)
|
||||
#else
|
||||
#define PROT_DEFAULT _PROT_DEFAULT
|
||||
#define PROT_SECT_DEFAULT _PROT_SECT_DEFAULT
|
||||
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
|
||||
#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
|
||||
#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
|
||||
|
||||
#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
|
||||
#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
|
||||
|
||||
#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
|
||||
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
|
||||
@ -55,22 +53,22 @@
|
||||
#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
|
||||
#define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
|
||||
|
||||
#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
|
||||
#define _HYP_PAGE_DEFAULT (_PAGE_DEFAULT & ~PTE_NG)
|
||||
#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
|
||||
#define _HYP_PAGE_DEFAULT _PAGE_DEFAULT
|
||||
|
||||
#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
|
||||
#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
|
||||
#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
|
||||
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
|
||||
#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
|
||||
#define PAGE_KERNEL __pgprot(PROT_NORMAL)
|
||||
#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
|
||||
#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
|
||||
#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN)
|
||||
#define PAGE_KERNEL_EXEC_CONT __pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)
|
||||
|
||||
#define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
|
||||
#define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
|
||||
#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
|
||||
#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
|
||||
|
||||
#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
|
||||
#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
|
||||
#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
|
||||
#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
|
||||
|
||||
#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
|
||||
#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
|
||||
|
@ -21,6 +21,9 @@
|
||||
|
||||
#define TASK_SIZE_64 (UL(1) << VA_BITS)
|
||||
|
||||
#define KERNEL_DS UL(-1)
|
||||
#define USER_DS (TASK_SIZE_64 - 1)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
/*
|
||||
|
@ -87,8 +87,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
|
||||
" cbnz %w1, 1f\n"
|
||||
" add %w1, %w0, %3\n"
|
||||
" casa %w0, %w1, %2\n"
|
||||
" and %w1, %w1, #0xffff\n"
|
||||
" eor %w1, %w1, %w0, lsr #16\n"
|
||||
" sub %w1, %w1, %3\n"
|
||||
" eor %w1, %w1, %w0\n"
|
||||
"1:")
|
||||
: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
|
||||
: "I" (1 << TICKET_SHIFT)
|
||||
|
@ -35,16 +35,20 @@
|
||||
#include <asm/compiler.h>
|
||||
#include <asm/extable.h>
|
||||
|
||||
#define KERNEL_DS (-1UL)
|
||||
#define get_ds() (KERNEL_DS)
|
||||
|
||||
#define USER_DS TASK_SIZE_64
|
||||
#define get_fs() (current_thread_info()->addr_limit)
|
||||
|
||||
static inline void set_fs(mm_segment_t fs)
|
||||
{
|
||||
current_thread_info()->addr_limit = fs;
|
||||
|
||||
/*
|
||||
* Prevent a mispredicted conditional call to set_fs from forwarding
|
||||
* the wrong address limit to access_ok under speculation.
|
||||
*/
|
||||
dsb(nsh);
|
||||
isb();
|
||||
|
||||
/* On user-mode return, check fs is correct */
|
||||
set_thread_flag(TIF_FSCHECK);
|
||||
|
||||
@ -66,22 +70,32 @@ static inline void set_fs(mm_segment_t fs)
|
||||
* Returns 1 if the range is valid, 0 otherwise.
|
||||
*
|
||||
* This is equivalent to the following test:
|
||||
* (u65)addr + (u65)size <= current->addr_limit
|
||||
*
|
||||
* This needs 65-bit arithmetic.
|
||||
* (u65)addr + (u65)size <= (u65)current->addr_limit + 1
|
||||
*/
|
||||
#define __range_ok(addr, size) \
|
||||
({ \
|
||||
unsigned long __addr = (unsigned long)(addr); \
|
||||
unsigned long flag, roksum; \
|
||||
__chk_user_ptr(addr); \
|
||||
asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
|
||||
: "=&r" (flag), "=&r" (roksum) \
|
||||
: "1" (__addr), "Ir" (size), \
|
||||
"r" (current_thread_info()->addr_limit) \
|
||||
: "cc"); \
|
||||
flag; \
|
||||
})
|
||||
static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
|
||||
{
|
||||
unsigned long limit = current_thread_info()->addr_limit;
|
||||
|
||||
__chk_user_ptr(addr);
|
||||
asm volatile(
|
||||
// A + B <= C + 1 for all A,B,C, in four easy steps:
|
||||
// 1: X = A + B; X' = X % 2^64
|
||||
" adds %0, %0, %2\n"
|
||||
// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
|
||||
" csel %1, xzr, %1, hi\n"
|
||||
// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
|
||||
// to compensate for the carry flag being set in step 4. For
|
||||
// X > 2^64, X' merely has to remain nonzero, which it does.
|
||||
" csinv %0, %0, xzr, cc\n"
|
||||
// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
|
||||
// comes from the carry in being clear. Otherwise, we are
|
||||
// testing X' - C == 0, subject to the previous adjustments.
|
||||
" sbcs xzr, %0, %1\n"
|
||||
" cset %0, ls\n"
|
||||
: "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
/*
|
||||
* When dealing with data aborts, watchpoints, or instruction traps we may end
|
||||
@ -90,7 +104,7 @@ static inline void set_fs(mm_segment_t fs)
|
||||
*/
|
||||
#define untagged_addr(addr) sign_extend64(addr, 55)
|
||||
|
||||
#define access_ok(type, addr, size) __range_ok(addr, size)
|
||||
#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size)
|
||||
#define user_addr_max get_fs
|
||||
|
||||
#define _ASM_EXTABLE(from, to) \
|
||||
@@ -220,6 +234,26 @@ static inline void uaccess_enable_not_uao(void)
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}

/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %1, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (current_thread_info()->addr_limit)
	: "cc");

	csdb();
	return safe_ptr;
}

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
@ -272,28 +306,33 @@ do { \
|
||||
(x) = (__force __typeof__(*(ptr)))__gu_val; \
|
||||
} while (0)
|
||||
|
||||
#define __get_user(x, ptr) \
|
||||
#define __get_user_check(x, ptr, err) \
|
||||
({ \
|
||||
int __gu_err = 0; \
|
||||
__get_user_err((x), (ptr), __gu_err); \
|
||||
__gu_err; \
|
||||
__typeof__(*(ptr)) __user *__p = (ptr); \
|
||||
might_fault(); \
|
||||
if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \
|
||||
__p = uaccess_mask_ptr(__p); \
|
||||
__get_user_err((x), __p, (err)); \
|
||||
} else { \
|
||||
(x) = 0; (err) = -EFAULT; \
|
||||
} \
|
||||
})
|
||||
|
||||
#define __get_user_error(x, ptr, err) \
|
||||
({ \
|
||||
__get_user_err((x), (ptr), (err)); \
|
||||
__get_user_check((x), (ptr), (err)); \
|
||||
(void)0; \
|
||||
})
|
||||
|
||||
#define get_user(x, ptr) \
|
||||
#define __get_user(x, ptr) \
|
||||
({ \
|
||||
__typeof__(*(ptr)) __user *__p = (ptr); \
|
||||
might_fault(); \
|
||||
access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
|
||||
__get_user((x), __p) : \
|
||||
((x) = 0, -EFAULT); \
|
||||
int __gu_err = 0; \
|
||||
__get_user_check((x), (ptr), __gu_err); \
|
||||
__gu_err; \
|
||||
})
|
||||
|
||||
#define get_user __get_user
|
||||
|
||||
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
|
||||
asm volatile( \
|
||||
"1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
|
||||
@ -336,43 +375,63 @@ do { \
|
||||
uaccess_disable_not_uao(); \
|
||||
} while (0)
|
||||
|
||||
#define __put_user(x, ptr) \
|
||||
#define __put_user_check(x, ptr, err) \
|
||||
({ \
|
||||
int __pu_err = 0; \
|
||||
__put_user_err((x), (ptr), __pu_err); \
|
||||
__pu_err; \
|
||||
__typeof__(*(ptr)) __user *__p = (ptr); \
|
||||
might_fault(); \
|
||||
if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \
|
||||
__p = uaccess_mask_ptr(__p); \
|
||||
__put_user_err((x), __p, (err)); \
|
||||
} else { \
|
||||
(err) = -EFAULT; \
|
||||
} \
|
||||
})
|
||||
|
||||
#define __put_user_error(x, ptr, err) \
|
||||
({ \
|
||||
__put_user_err((x), (ptr), (err)); \
|
||||
__put_user_check((x), (ptr), (err)); \
|
||||
(void)0; \
|
||||
})
|
||||
|
||||
#define put_user(x, ptr) \
|
||||
#define __put_user(x, ptr) \
|
||||
({ \
|
||||
__typeof__(*(ptr)) __user *__p = (ptr); \
|
||||
might_fault(); \
|
||||
access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
|
||||
__put_user((x), __p) : \
|
||||
-EFAULT; \
|
||||
int __pu_err = 0; \
|
||||
__put_user_check((x), (ptr), __pu_err); \
|
||||
__pu_err; \
|
||||
})
|
||||
|
||||
#define put_user __put_user
|
||||
|
||||
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
|
||||
#define raw_copy_from_user __arch_copy_from_user
|
||||
#define raw_copy_from_user(to, from, n) \
|
||||
({ \
|
||||
__arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \
|
||||
})
|
||||
|
||||
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
|
||||
#define raw_copy_to_user __arch_copy_to_user
|
||||
extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
|
||||
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
|
||||
#define raw_copy_to_user(to, from, n) \
|
||||
({ \
|
||||
__arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \
|
||||
})
|
||||
|
||||
extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
|
||||
#define raw_copy_in_user(to, from, n) \
|
||||
({ \
|
||||
__arch_copy_in_user(__uaccess_mask_ptr(to), \
|
||||
__uaccess_mask_ptr(from), (n)); \
|
||||
})
|
||||
|
||||
#define INLINE_COPY_TO_USER
|
||||
#define INLINE_COPY_FROM_USER
|
||||
|
||||
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
|
||||
extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
|
||||
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
|
||||
{
|
||||
if (access_ok(VERIFY_WRITE, to, n))
|
||||
n = __clear_user(to, n);
|
||||
n = __arch_clear_user(__uaccess_mask_ptr(to), n);
|
||||
return n;
|
||||
}
|
||||
#define clear_user __clear_user
|
||||
|
||||
extern long strncpy_from_user(char *dest, const char __user *src, long count);
|
||||
|
||||
@ -386,7 +445,7 @@ extern unsigned long __must_check __copy_user_flushcache(void *to, const void __
|
||||
static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
|
||||
{
|
||||
kasan_check_write(dst, size);
|
||||
return __copy_user_flushcache(dst, src, size);
|
||||
return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -37,8 +37,8 @@ EXPORT_SYMBOL(clear_page);
|
||||
/* user mem (segment) */
|
||||
EXPORT_SYMBOL(__arch_copy_from_user);
|
||||
EXPORT_SYMBOL(__arch_copy_to_user);
|
||||
EXPORT_SYMBOL(__clear_user);
|
||||
EXPORT_SYMBOL(raw_copy_in_user);
|
||||
EXPORT_SYMBOL(__arch_clear_user);
|
||||
EXPORT_SYMBOL(__arch_copy_in_user);
|
||||
|
||||
/* physical memory */
|
||||
EXPORT_SYMBOL(memstart_addr);
|
||||
|
@ -17,6 +17,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/arm-smccc.h>
|
||||
|
||||
.macro ventry target
|
||||
.rept 31
|
||||
@ -53,30 +54,6 @@ ENTRY(__bp_harden_hyp_vecs_start)
|
||||
vectors __kvm_hyp_vector
|
||||
.endr
|
||||
ENTRY(__bp_harden_hyp_vecs_end)
|
||||
ENTRY(__psci_hyp_bp_inval_start)
|
||||
sub sp, sp, #(8 * 18)
|
||||
stp x16, x17, [sp, #(16 * 0)]
|
||||
stp x14, x15, [sp, #(16 * 1)]
|
||||
stp x12, x13, [sp, #(16 * 2)]
|
||||
stp x10, x11, [sp, #(16 * 3)]
|
||||
stp x8, x9, [sp, #(16 * 4)]
|
||||
stp x6, x7, [sp, #(16 * 5)]
|
||||
stp x4, x5, [sp, #(16 * 6)]
|
||||
stp x2, x3, [sp, #(16 * 7)]
|
||||
stp x0, x1, [sp, #(16 * 8)]
|
||||
mov x0, #0x84000000
|
||||
smc #0
|
||||
ldp x16, x17, [sp, #(16 * 0)]
|
||||
ldp x14, x15, [sp, #(16 * 1)]
|
||||
ldp x12, x13, [sp, #(16 * 2)]
|
||||
ldp x10, x11, [sp, #(16 * 3)]
|
||||
ldp x8, x9, [sp, #(16 * 4)]
|
||||
ldp x6, x7, [sp, #(16 * 5)]
|
||||
ldp x4, x5, [sp, #(16 * 6)]
|
||||
ldp x2, x3, [sp, #(16 * 7)]
|
||||
ldp x0, x1, [sp, #(16 * 8)]
|
||||
add sp, sp, #(8 * 18)
|
||||
ENTRY(__psci_hyp_bp_inval_end)
|
||||
|
||||
ENTRY(__qcom_hyp_sanitize_link_stack_start)
|
||||
stp x29, x30, [sp, #-16]!
|
||||
@ -85,3 +62,22 @@ ENTRY(__qcom_hyp_sanitize_link_stack_start)
|
||||
.endr
|
||||
ldp x29, x30, [sp], #16
|
||||
ENTRY(__qcom_hyp_sanitize_link_stack_end)
|
||||
|
||||
.macro smccc_workaround_1 inst
|
||||
sub sp, sp, #(8 * 4)
|
||||
stp x2, x3, [sp, #(8 * 0)]
|
||||
stp x0, x1, [sp, #(8 * 2)]
|
||||
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
|
||||
\inst #0
|
||||
ldp x2, x3, [sp, #(8 * 0)]
|
||||
ldp x0, x1, [sp, #(8 * 2)]
|
||||
add sp, sp, #(8 * 4)
|
||||
.endm
|
||||
|
||||
ENTRY(__smccc_workaround_1_smc_start)
|
||||
smccc_workaround_1 smc
|
||||
ENTRY(__smccc_workaround_1_smc_end)
|
||||
|
||||
ENTRY(__smccc_workaround_1_hvc_start)
|
||||
smccc_workaround_1 hvc
|
||||
ENTRY(__smccc_workaround_1_hvc_end)
|
||||
|
@ -16,7 +16,7 @@
|
||||
#include <asm/virt.h>
|
||||
|
||||
.text
|
||||
.pushsection .idmap.text, "ax"
|
||||
.pushsection .idmap.text, "awx"
|
||||
|
||||
/*
|
||||
* __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
|
||||
|
@ -67,9 +67,12 @@ static int cpu_enable_trap_ctr_access(void *__unused)
|
||||
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
|
||||
|
||||
#ifdef CONFIG_KVM
|
||||
extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
|
||||
extern char __qcom_hyp_sanitize_link_stack_start[];
|
||||
extern char __qcom_hyp_sanitize_link_stack_end[];
|
||||
extern char __smccc_workaround_1_smc_start[];
|
||||
extern char __smccc_workaround_1_smc_end[];
|
||||
extern char __smccc_workaround_1_hvc_start[];
|
||||
extern char __smccc_workaround_1_hvc_end[];
|
||||
|
||||
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
|
||||
const char *hyp_vecs_end)
|
||||
@ -112,10 +115,12 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
|
||||
spin_unlock(&bp_lock);
|
||||
}
|
||||
#else
|
||||
#define __psci_hyp_bp_inval_start NULL
|
||||
#define __psci_hyp_bp_inval_end NULL
|
||||
#define __qcom_hyp_sanitize_link_stack_start NULL
|
||||
#define __qcom_hyp_sanitize_link_stack_end NULL
|
||||
#define __smccc_workaround_1_smc_start NULL
|
||||
#define __smccc_workaround_1_smc_end NULL
|
||||
#define __smccc_workaround_1_hvc_start NULL
|
||||
#define __smccc_workaround_1_hvc_end NULL
|
||||
|
||||
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
|
||||
const char *hyp_vecs_start,
|
||||
@@ -142,17 +147,59 @@ static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static int enable_psci_bp_hardening(void *data)
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static int enable_smccc_arch_workaround_1(void *data)
{
	const struct arm64_cpu_capabilities *entry = data;
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;

	if (psci_ops.get_version)
		install_bp_hardening_cb(entry,
				       (bp_hardening_cb_t)psci_ops.get_version,
				       __psci_hyp_bp_inval_start,
				       __psci_hyp_bp_inval_end);
	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return 0;

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return 0;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if (res.a0)
			return 0;
		cb = call_hvc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_hvc_start;
		smccc_end = __smccc_workaround_1_hvc_end;
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if (res.a0)
			return 0;
		cb = call_smc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_smc_start;
		smccc_end = __smccc_workaround_1_smc_end;
		break;

	default:
		return 0;
	}

	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);

	return 0;
}
@ -333,22 +380,22 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
|
||||
{
|
||||
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
|
||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
|
||||
.enable = enable_psci_bp_hardening,
|
||||
.enable = enable_smccc_arch_workaround_1,
|
||||
},
|
||||
{
|
||||
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
|
||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
|
||||
.enable = enable_psci_bp_hardening,
|
||||
.enable = enable_smccc_arch_workaround_1,
|
||||
},
|
||||
{
|
||||
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
|
||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
|
||||
.enable = enable_psci_bp_hardening,
|
||||
.enable = enable_smccc_arch_workaround_1,
|
||||
},
|
||||
{
|
||||
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
|
||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
|
||||
.enable = enable_psci_bp_hardening,
|
||||
.enable = enable_smccc_arch_workaround_1,
|
||||
},
|
||||
{
|
||||
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
|
||||
@ -362,12 +409,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
|
||||
{
|
||||
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
|
||||
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
|
||||
.enable = enable_psci_bp_hardening,
|
||||
.enable = enable_smccc_arch_workaround_1,
|
||||
},
|
||||
{
|
||||
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
|
||||
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
|
||||
.enable = enable_psci_bp_hardening,
|
||||
.enable = enable_smccc_arch_workaround_1,
|
||||
},
|
||||
#endif
|
||||
{
|
||||
|
@ -856,12 +856,23 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
|
||||
static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
|
||||
int __unused)
|
||||
{
|
||||
char const *str = "command line option";
|
||||
u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
|
||||
|
||||
/* Forced on command line? */
|
||||
/*
|
||||
* For reasons that aren't entirely clear, enabling KPTI on Cavium
|
||||
* ThunderX leads to apparent I-cache corruption of kernel text, which
|
||||
* ends as well as you might imagine. Don't even try.
|
||||
*/
|
||||
if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
|
||||
str = "ARM64_WORKAROUND_CAVIUM_27456";
|
||||
__kpti_forced = -1;
|
||||
}
|
||||
|
||||
/* Forced? */
|
||||
if (__kpti_forced) {
|
||||
pr_info_once("kernel page table isolation forced %s by command line option\n",
|
||||
__kpti_forced > 0 ? "ON" : "OFF");
|
||||
pr_info_once("kernel page table isolation forced %s by %s\n",
|
||||
__kpti_forced > 0 ? "ON" : "OFF", str);
|
||||
return __kpti_forced > 0;
|
||||
}
|
||||
|
||||
@@ -881,6 +892,30 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
				ID_AA64PFR0_CSV3_SHIFT);
}

static int kpti_install_ng_mappings(void *__unused)
{
	typedef void (kpti_remap_fn)(int, int, phys_addr_t);
	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
	kpti_remap_fn *remap_fn;

	static bool kpti_applied = false;
	int cpu = smp_processor_id();

	if (kpti_applied)
		return 0;

	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);

	cpu_install_idmap();
	remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
	cpu_uninstall_idmap();

	if (!cpu)
		kpti_applied = true;

	return 0;
}

static int __init parse_kpti(char *str)
{
	bool enabled;
@ -1004,6 +1039,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
|
||||
.capability = ARM64_UNMAP_KERNEL_AT_EL0,
|
||||
.def_scope = SCOPE_SYSTEM,
|
||||
.matches = unmap_kernel_at_el0,
|
||||
.enable = kpti_install_ng_mappings,
|
||||
},
|
||||
#endif
|
||||
{
|
||||
|
@ -167,10 +167,10 @@ alternative_else_nop_endif
|
||||
.else
|
||||
add x21, sp, #S_FRAME_SIZE
|
||||
get_thread_info tsk
|
||||
/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
|
||||
/* Save the task's original addr_limit and set USER_DS */
|
||||
ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
|
||||
str x20, [sp, #S_ORIG_ADDR_LIMIT]
|
||||
mov x20, #TASK_SIZE_64
|
||||
mov x20, #USER_DS
|
||||
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
|
||||
/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
|
||||
.endif /* \el == 0 */
|
||||
@ -382,6 +382,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
|
||||
* x7 is reserved for the system call number in 32-bit mode.
|
||||
*/
|
||||
wsc_nr .req w25 // number of system calls
|
||||
xsc_nr .req x25 // number of system calls (zero-extended)
|
||||
wscno .req w26 // syscall number
|
||||
xscno .req x26 // syscall number (zero-extended)
|
||||
stbl .req x27 // syscall table pointer
|
||||
@ -770,7 +771,10 @@ el0_sp_pc:
|
||||
* Stack or PC alignment exception handling
|
||||
*/
|
||||
mrs x26, far_el1
|
||||
enable_daif
|
||||
enable_da_f
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
bl trace_hardirqs_off
|
||||
#endif
|
||||
ct_user_exit
|
||||
mov x0, x26
|
||||
mov x1, x25
|
||||
@ -828,6 +832,11 @@ el0_irq_naked:
|
||||
#endif
|
||||
|
||||
ct_user_exit
|
||||
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
|
||||
tbz x22, #55, 1f
|
||||
bl do_el0_irq_bp_hardening
|
||||
1:
|
||||
#endif
|
||||
irq_handler
|
||||
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
@ -939,6 +948,7 @@ el0_svc_naked: // compat entry point
|
||||
b.ne __sys_trace
|
||||
cmp wscno, wsc_nr // check upper syscall limit
|
||||
b.hs ni_sys
|
||||
mask_nospec64 xscno, xsc_nr, x19 // enforce bounds for syscall number
|
||||
ldr x16, [stbl, xscno, lsl #3] // address in the syscall table
|
||||
blr x16 // call sys_* routine
|
||||
b ret_fast_syscall
|
||||
@ -1017,16 +1027,9 @@ alternative_else_nop_endif
|
||||
orr \tmp, \tmp, #USER_ASID_FLAG
|
||||
msr ttbr1_el1, \tmp
|
||||
/*
|
||||
* We avoid running the post_ttbr_update_workaround here because the
|
||||
* user and kernel ASIDs don't have conflicting mappings, so any
|
||||
* "blessing" as described in:
|
||||
*
|
||||
* http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
|
||||
*
|
||||
* will not hurt correctness. Whilst this may partially defeat the
|
||||
* point of using split ASIDs in the first place, it avoids
|
||||
* the hit of invalidating the entire I-cache on every return to
|
||||
* userspace.
|
||||
* We avoid running the post_ttbr_update_workaround here because
|
||||
* it's only needed by Cavium ThunderX, which requires KPTI to be
|
||||
* disabled.
|
||||
*/
|
||||
.endm
|
||||
|
||||
|
@ -147,26 +147,6 @@ preserve_boot_args:
|
||||
b __inval_dcache_area // tail call
|
||||
ENDPROC(preserve_boot_args)
|
||||
|
||||
/*
|
||||
* Macro to arrange a physical address in a page table entry, taking care of
|
||||
* 52-bit addresses.
|
||||
*
|
||||
* Preserves: phys
|
||||
* Returns: pte
|
||||
*/
|
||||
.macro phys_to_pte, phys, pte
|
||||
#ifdef CONFIG_ARM64_PA_BITS_52
|
||||
/*
|
||||
* We assume \phys is 64K aligned and this is guaranteed by only
|
||||
* supporting this configuration with 64K pages.
|
||||
*/
|
||||
orr \pte, \phys, \phys, lsr #36
|
||||
and \pte, \pte, #PTE_ADDR_MASK
|
||||
#else
|
||||
mov \pte, \phys
|
||||
#endif
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Macro to create a table entry to the next page.
|
||||
*
|
||||
@ -181,7 +161,7 @@ ENDPROC(preserve_boot_args)
|
||||
*/
|
||||
.macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
|
||||
add \tmp1, \tbl, #PAGE_SIZE
|
||||
phys_to_pte \tmp1, \tmp2
|
||||
phys_to_pte \tmp2, \tmp1
|
||||
orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type
|
||||
lsr \tmp1, \virt, #\shift
|
||||
sub \ptrs, \ptrs, #1
|
||||
@ -207,7 +187,7 @@ ENDPROC(preserve_boot_args)
|
||||
* Returns: rtbl
|
||||
*/
|
||||
.macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
|
||||
.Lpe\@: phys_to_pte \rtbl, \tmp1
|
||||
.Lpe\@: phys_to_pte \tmp1, \rtbl
|
||||
orr \tmp1, \tmp1, \flags // tmp1 = table entry
|
||||
str \tmp1, [\tbl, \index, lsl #3]
|
||||
add \rtbl, \rtbl, \inc // rtbl = pa next level
|
||||
@ -475,7 +455,7 @@ ENDPROC(__primary_switched)
|
||||
* end early head section, begin head code that is also used for
|
||||
* hotplug and needs to have the same protections as the text region
|
||||
*/
|
||||
.section ".idmap.text","ax"
|
||||
.section ".idmap.text","awx"
|
||||
|
||||
ENTRY(kimage_vaddr)
|
||||
.quad _text - TEXT_OFFSET
|
||||
@ -776,8 +756,8 @@ ENTRY(__enable_mmu)
|
||||
update_early_cpu_boot_status 0, x1, x2
|
||||
adrp x1, idmap_pg_dir
|
||||
adrp x2, swapper_pg_dir
|
||||
phys_to_ttbr x1, x3
|
||||
phys_to_ttbr x2, x4
|
||||
phys_to_ttbr x3, x1
|
||||
phys_to_ttbr x4, x2
|
||||
msr ttbr0_el1, x3 // load TTBR0
|
||||
msr ttbr1_el1, x4 // load TTBR1
|
||||
isb
|
||||
|
@ -34,12 +34,12 @@
|
||||
* each stage of the walk.
|
||||
*/
|
||||
.macro break_before_make_ttbr_switch zero_page, page_table, tmp
|
||||
phys_to_ttbr \zero_page, \tmp
|
||||
phys_to_ttbr \tmp, \zero_page
|
||||
msr ttbr1_el1, \tmp
|
||||
isb
|
||||
tlbi vmalle1
|
||||
dsb nsh
|
||||
phys_to_ttbr \page_table, \tmp
|
||||
phys_to_ttbr \tmp, \page_table
|
||||
msr ttbr1_el1, \tmp
|
||||
isb
|
||||
.endm
|
||||
|
@ -96,7 +96,7 @@ ENTRY(__cpu_suspend_enter)
|
||||
ret
|
||||
ENDPROC(__cpu_suspend_enter)
|
||||
|
||||
.pushsection ".idmap.text", "ax"
|
||||
.pushsection ".idmap.text", "awx"
|
||||
ENTRY(cpu_resume)
|
||||
bl el2_setup // if in EL2 drop to EL1 cleanly
|
||||
bl __cpu_setup
|
||||
|
@ -22,13 +22,14 @@
|
||||
#include <linux/kvm.h>
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
#include <kvm/arm_psci.h>
|
||||
|
||||
#include <asm/esr.h>
|
||||
#include <asm/exception.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_coproc.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
#include <asm/kvm_psci.h>
|
||||
#include <asm/debug-monitors.h>
|
||||
#include <asm/traps.h>
|
||||
|
||||
@ -51,7 +52,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
kvm_vcpu_hvc_get_imm(vcpu));
|
||||
vcpu->stat.hvc_exit_stat++;
|
||||
|
||||
ret = kvm_psci_call(vcpu);
|
||||
ret = kvm_hvc_call_handler(vcpu);
|
||||
if (ret < 0) {
|
||||
vcpu_set_reg(vcpu, 0, ~0UL);
|
||||
return 1;
|
||||
@ -62,7 +63,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
|
||||
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
{
|
||||
/*
|
||||
* "If an SMC instruction executed at Non-secure EL1 is
|
||||
* trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
|
||||
* Trap exception, not a Secure Monitor Call exception [...]"
|
||||
*
|
||||
* We need to advance the PC after the trap, as it would
|
||||
* otherwise return to the same address...
|
||||
*/
|
||||
vcpu_set_reg(vcpu, 0, ~0UL);
|
||||
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -63,7 +63,7 @@ __do_hyp_init:
|
||||
cmp x0, #HVC_STUB_HCALL_NR
|
||||
b.lo __kvm_handle_stub_hvc
|
||||
|
||||
phys_to_ttbr x0, x4
|
||||
phys_to_ttbr x4, x0
|
||||
msr ttbr0_el2, x4
|
||||
|
||||
mrs x4, tcr_el1
|
||||
|
@ -15,6 +15,7 @@
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/linkage.h>
|
||||
|
||||
#include <asm/alternative.h>
|
||||
@ -64,10 +65,11 @@ alternative_endif
|
||||
lsr x0, x1, #ESR_ELx_EC_SHIFT
|
||||
|
||||
cmp x0, #ESR_ELx_EC_HVC64
|
||||
ccmp x0, #ESR_ELx_EC_HVC32, #4, ne
|
||||
b.ne el1_trap
|
||||
|
||||
mrs x1, vttbr_el2 // If vttbr is valid, the 64bit guest
|
||||
cbnz x1, el1_trap // called HVC
|
||||
mrs x1, vttbr_el2 // If vttbr is valid, the guest
|
||||
cbnz x1, el1_hvc_guest // called HVC
|
||||
|
||||
/* Here, we're pretty sure the host called HVC. */
|
||||
ldp x0, x1, [sp], #16
|
||||
@ -100,6 +102,20 @@ alternative_endif
|
||||
|
||||
eret
|
||||
|
||||
el1_hvc_guest:
|
||||
/*
|
||||
* Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
|
||||
* The workaround has already been applied on the host,
|
||||
* so let's quickly get back to the guest. We don't bother
|
||||
* restoring x1, as it can be clobbered anyway.
|
||||
*/
|
||||
ldr x1, [sp] // Guest's x0
|
||||
eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
|
||||
cbnz w1, el1_trap
|
||||
mov x0, x1
|
||||
add sp, sp, #16
|
||||
eret
|
||||
|
||||
el1_trap:
|
||||
/*
|
||||
* x0: ESR_EC
|
||||
|
@ -19,6 +19,8 @@
|
||||
#include <linux/jump_label.h>
|
||||
#include <uapi/linux/psci.h>
|
||||
|
||||
#include <kvm/arm_psci.h>
|
||||
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_hyp.h>
|
||||
@ -348,18 +350,6 @@ again:
|
||||
if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
|
||||
goto again;
|
||||
|
||||
if (exit_code == ARM_EXCEPTION_TRAP &&
|
||||
(kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 ||
|
||||
kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32) &&
|
||||
vcpu_get_reg(vcpu, 0) == PSCI_0_2_FN_PSCI_VERSION) {
|
||||
u64 val = PSCI_RET_NOT_SUPPORTED;
|
||||
if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
|
||||
val = 2;
|
||||
|
||||
vcpu_set_reg(vcpu, 0, val);
|
||||
goto again;
|
||||
}
|
||||
|
||||
if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
|
||||
exit_code == ARM_EXCEPTION_TRAP) {
|
||||
bool valid;
|
||||
|
@ -21,7 +21,7 @@
|
||||
|
||||
.text
|
||||
|
||||
/* Prototype: int __clear_user(void *addr, size_t sz)
|
||||
/* Prototype: int __arch_clear_user(void *addr, size_t sz)
|
||||
* Purpose : clear some user memory
|
||||
* Params : addr - user memory address to clear
|
||||
* : sz - number of bytes to clear
|
||||
@ -29,7 +29,7 @@
|
||||
*
|
||||
* Alignment fixed up by hardware.
|
||||
*/
|
||||
ENTRY(__clear_user)
|
||||
ENTRY(__arch_clear_user)
|
||||
uaccess_enable_not_uao x2, x3, x4
|
||||
mov x2, x1 // save the size for fixup return
|
||||
subs x1, x1, #8
|
||||
@ -52,7 +52,7 @@ uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
|
||||
5: mov x0, #0
|
||||
uaccess_disable_not_uao x2, x3
|
||||
ret
|
||||
ENDPROC(__clear_user)
|
||||
ENDPROC(__arch_clear_user)
|
||||
|
||||
.section .fixup,"ax"
|
||||
.align 2
|
||||
|
@ -64,14 +64,15 @@
|
||||
.endm
|
||||
|
||||
end .req x5
|
||||
ENTRY(raw_copy_in_user)
|
||||
|
||||
ENTRY(__arch_copy_in_user)
|
||||
uaccess_enable_not_uao x3, x4, x5
|
||||
add end, x0, x2
|
||||
#include "copy_template.S"
|
||||
uaccess_disable_not_uao x3, x4
|
||||
mov x0, #0
|
||||
ret
|
||||
ENDPROC(raw_copy_in_user)
|
||||
ENDPROC(__arch_copy_in_user)
|
||||
|
||||
.section .fixup,"ax"
|
||||
.align 2
|
||||
|
@ -240,7 +240,7 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs,
|
||||
if (fsc_type == ESR_ELx_FSC_PERM)
|
||||
return true;
|
||||
|
||||
if (addr < USER_DS && system_uses_ttbr0_pan())
|
||||
if (addr < TASK_SIZE && system_uses_ttbr0_pan())
|
||||
return fsc_type == ESR_ELx_FSC_FAULT &&
|
||||
(regs->pstate & PSR_PAN_BIT);
|
||||
|
||||
@ -414,7 +414,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
|
||||
mm_flags |= FAULT_FLAG_WRITE;
|
||||
}
|
||||
|
||||
if (addr < USER_DS && is_permission_fault(esr, regs, addr)) {
|
||||
if (addr < TASK_SIZE && is_permission_fault(esr, regs, addr)) {
|
||||
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
|
||||
if (regs->orig_addr_limit == KERNEL_DS)
|
||||
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
|
||||
@ -707,6 +707,12 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
|
||||
arm64_notify_die("", regs, &info, esr);
|
||||
}
|
||||
|
||||
asmlinkage void __exception do_el0_irq_bp_hardening(void)
|
||||
{
|
||||
/* PC has already been checked in entry.S */
|
||||
arm64_apply_bp_hardening();
|
||||
}
|
||||
|
||||
asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
|
||||
unsigned int esr,
|
||||
struct pt_regs *regs)
|
||||
@ -731,6 +737,12 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
|
||||
struct siginfo info;
|
||||
struct task_struct *tsk = current;
|
||||
|
||||
if (user_mode(regs)) {
|
||||
if (instruction_pointer(regs) > TASK_SIZE)
|
||||
arm64_apply_bp_hardening();
|
||||
local_irq_enable();
|
||||
}
|
||||
|
||||
if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
|
||||
pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
|
||||
tsk->comm, task_pid_nr(tsk),
|
||||
@ -790,6 +802,9 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
|
||||
if (interrupts_enabled(regs))
|
||||
trace_hardirqs_off();
|
||||
|
||||
if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
|
||||
arm64_apply_bp_hardening();
|
||||
|
||||
if (!inf->fn(addr, esr, regs)) {
|
||||
rv = 1;
|
||||
} else {
|
||||
|
@ -118,6 +118,10 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
|
||||
if ((old | new) & PTE_CONT)
|
||||
return false;
|
||||
|
||||
/* Transitioning from Global to Non-Global is safe */
|
||||
if (((old ^ new) == PTE_NG) && (new & PTE_NG))
|
||||
return true;
|
||||
|
||||
return ((old ^ new) & ~mask) == 0;
|
||||
}
|
||||
|
||||
|
@ -90,7 +90,7 @@ ENDPROC(cpu_do_suspend)
|
||||
*
|
||||
* x0: Address of context pointer
|
||||
*/
|
||||
.pushsection ".idmap.text", "ax"
|
||||
.pushsection ".idmap.text", "awx"
|
||||
ENTRY(cpu_do_resume)
|
||||
ldp x2, x3, [x0]
|
||||
ldp x4, x5, [x0, #16]
|
||||
@ -153,7 +153,7 @@ ENDPROC(cpu_do_resume)
|
||||
ENTRY(cpu_do_switch_mm)
|
||||
mrs x2, ttbr1_el1
|
||||
mmid x1, x1 // get mm->context.id
|
||||
phys_to_ttbr x0, x3
|
||||
phys_to_ttbr x3, x0
|
||||
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
|
||||
bfi x3, x1, #48, #16 // set the ASID field in TTBR0
|
||||
#endif
|
||||
@ -165,7 +165,18 @@ ENTRY(cpu_do_switch_mm)
|
||||
b post_ttbr_update_workaround // Back to C code...
|
||||
ENDPROC(cpu_do_switch_mm)
|
||||
|
||||
.pushsection ".idmap.text", "ax"
|
||||
.pushsection ".idmap.text", "awx"
|
||||
|
||||
.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
|
||||
adrp \tmp1, empty_zero_page
|
||||
phys_to_ttbr \tmp2, \tmp1
|
||||
msr ttbr1_el1, \tmp2
|
||||
isb
|
||||
tlbi vmalle1
|
||||
dsb nsh
|
||||
isb
|
||||
.endm
|
||||
|
||||
/*
|
||||
* void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
|
||||
*
|
||||
@ -175,16 +186,9 @@ ENDPROC(cpu_do_switch_mm)
|
||||
ENTRY(idmap_cpu_replace_ttbr1)
|
||||
save_and_disable_daif flags=x2
|
||||
|
||||
adrp x1, empty_zero_page
|
||||
phys_to_ttbr x1, x3
|
||||
msr ttbr1_el1, x3
|
||||
isb
|
||||
__idmap_cpu_set_reserved_ttbr1 x1, x3
|
||||
|
||||
tlbi vmalle1
|
||||
dsb nsh
|
||||
isb
|
||||
|
||||
phys_to_ttbr x0, x3
|
||||
phys_to_ttbr x3, x0
|
||||
msr ttbr1_el1, x3
|
||||
isb
|
||||
|
||||
@ -194,13 +198,197 @@ ENTRY(idmap_cpu_replace_ttbr1)
|
||||
ENDPROC(idmap_cpu_replace_ttbr1)
|
||||
.popsection
|
||||
|
||||
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
|
||||
.pushsection ".idmap.text", "awx"
|
||||
|
||||
.macro __idmap_kpti_get_pgtable_ent, type
|
||||
dc cvac, cur_\()\type\()p // Ensure any existing dirty
|
||||
dmb sy // lines are written back before
|
||||
ldr \type, [cur_\()\type\()p] // loading the entry
|
||||
tbz \type, #0, next_\()\type // Skip invalid entries
|
||||
.endm
|
||||
|
||||
.macro __idmap_kpti_put_pgtable_ent_ng, type
|
||||
orr \type, \type, #PTE_NG // Same bit for blocks and pages
|
||||
str \type, [cur_\()\type\()p] // Update the entry and ensure it
|
||||
dc civac, cur_\()\type\()p // is visible to all CPUs.
|
||||
.endm
|
||||
|
||||
/*
|
||||
* void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper)
|
||||
*
|
||||
* Called exactly once from stop_machine context by each CPU found during boot.
|
||||
*/
|
||||
__idmap_kpti_flag:
|
||||
.long 1
|
||||
ENTRY(idmap_kpti_install_ng_mappings)
|
||||
cpu .req w0
|
||||
num_cpus .req w1
|
||||
swapper_pa .req x2
|
||||
swapper_ttb .req x3
|
||||
flag_ptr .req x4
|
||||
cur_pgdp .req x5
|
||||
end_pgdp .req x6
|
||||
pgd .req x7
|
||||
cur_pudp .req x8
|
||||
end_pudp .req x9
|
||||
pud .req x10
|
||||
cur_pmdp .req x11
|
||||
end_pmdp .req x12
|
||||
pmd .req x13
|
||||
cur_ptep .req x14
|
||||
end_ptep .req x15
|
||||
pte .req x16
|
||||
|
||||
mrs swapper_ttb, ttbr1_el1
|
||||
adr flag_ptr, __idmap_kpti_flag
|
||||
|
||||
cbnz cpu, __idmap_kpti_secondary
|
||||
|
||||
/* We're the boot CPU. Wait for the others to catch up */
|
||||
sevl
|
||||
1: wfe
|
||||
ldaxr w18, [flag_ptr]
|
||||
eor w18, w18, num_cpus
|
||||
cbnz w18, 1b
|
||||
|
||||
/* We need to walk swapper, so turn off the MMU. */
|
||||
pre_disable_mmu_workaround
|
||||
mrs x18, sctlr_el1
|
||||
bic x18, x18, #SCTLR_ELx_M
|
||||
msr sctlr_el1, x18
|
||||
isb
|
||||
|
||||
/* Everybody is enjoying the idmap, so we can rewrite swapper. */
|
||||
/* PGD */
|
||||
mov cur_pgdp, swapper_pa
|
||||
add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
|
||||
do_pgd: __idmap_kpti_get_pgtable_ent pgd
|
||||
tbnz pgd, #1, walk_puds
|
||||
__idmap_kpti_put_pgtable_ent_ng pgd
|
||||
next_pgd:
|
||||
add cur_pgdp, cur_pgdp, #8
|
||||
cmp cur_pgdp, end_pgdp
|
||||
b.ne do_pgd
|
||||
|
||||
/* Publish the updated tables and nuke all the TLBs */
|
||||
dsb sy
|
||||
tlbi vmalle1is
|
||||
dsb ish
|
||||
isb
|
||||
|
||||
/* We're done: fire up the MMU again */
|
||||
mrs x18, sctlr_el1
|
||||
orr x18, x18, #SCTLR_ELx_M
|
||||
msr sctlr_el1, x18
|
||||
isb
|
||||
|
||||
/* Set the flag to zero to indicate that we're all done */
|
||||
str wzr, [flag_ptr]
|
||||
ret
|
||||
|
||||
/* PUD */
|
||||
walk_puds:
|
||||
.if CONFIG_PGTABLE_LEVELS > 3
|
||||
pte_to_phys cur_pudp, pgd
|
||||
add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
|
||||
do_pud: __idmap_kpti_get_pgtable_ent pud
|
||||
tbnz pud, #1, walk_pmds
|
||||
__idmap_kpti_put_pgtable_ent_ng pud
|
||||
next_pud:
|
||||
add cur_pudp, cur_pudp, 8
|
||||
cmp cur_pudp, end_pudp
|
||||
b.ne do_pud
|
||||
b next_pgd
|
||||
.else /* CONFIG_PGTABLE_LEVELS <= 3 */
|
||||
mov pud, pgd
|
||||
b walk_pmds
|
||||
next_pud:
|
||||
b next_pgd
|
||||
.endif
|
||||
|
||||
/* PMD */
|
||||
walk_pmds:
|
||||
.if CONFIG_PGTABLE_LEVELS > 2
|
||||
pte_to_phys cur_pmdp, pud
|
||||
add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
|
||||
do_pmd: __idmap_kpti_get_pgtable_ent pmd
|
||||
tbnz pmd, #1, walk_ptes
|
||||
__idmap_kpti_put_pgtable_ent_ng pmd
|
||||
next_pmd:
|
||||
add cur_pmdp, cur_pmdp, #8
|
||||
cmp cur_pmdp, end_pmdp
|
||||
b.ne do_pmd
|
||||
b next_pud
|
||||
.else /* CONFIG_PGTABLE_LEVELS <= 2 */
|
||||
mov pmd, pud
|
||||
b walk_ptes
|
||||
next_pmd:
|
||||
b next_pud
|
||||
.endif
|
||||
|
||||
/* PTE */
|
||||
walk_ptes:
|
||||
pte_to_phys cur_ptep, pmd
|
||||
add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
|
||||
do_pte: __idmap_kpti_get_pgtable_ent pte
|
||||
__idmap_kpti_put_pgtable_ent_ng pte
|
||||
next_pte:
|
||||
add cur_ptep, cur_ptep, #8
|
||||
cmp cur_ptep, end_ptep
|
||||
b.ne do_pte
|
||||
b next_pmd
|
||||
|
||||
/* Secondary CPUs end up here */
|
||||
__idmap_kpti_secondary:
|
||||
/* Uninstall swapper before surgery begins */
|
||||
__idmap_cpu_set_reserved_ttbr1 x18, x17
|
||||
|
||||
/* Increment the flag to let the boot CPU we're ready */
|
||||
1: ldxr w18, [flag_ptr]
|
||||
add w18, w18, #1
|
||||
stxr w17, w18, [flag_ptr]
|
||||
cbnz w17, 1b
|
||||
|
||||
/* Wait for the boot CPU to finish messing around with swapper */
|
||||
sevl
|
||||
1: wfe
|
||||
ldxr w18, [flag_ptr]
|
||||
cbnz w18, 1b
|
||||
|
||||
/* All done, act like nothing happened */
|
||||
msr ttbr1_el1, swapper_ttb
|
||||
isb
|
||||
ret
|
||||
|
||||
.unreq cpu
|
||||
.unreq num_cpus
|
||||
.unreq swapper_pa
|
||||
.unreq swapper_ttb
|
||||
.unreq flag_ptr
|
||||
.unreq cur_pgdp
|
||||
.unreq end_pgdp
|
||||
.unreq pgd
|
||||
.unreq cur_pudp
|
||||
.unreq end_pudp
|
||||
.unreq pud
|
||||
.unreq cur_pmdp
|
||||
.unreq end_pmdp
|
||||
.unreq pmd
|
||||
.unreq cur_ptep
|
||||
.unreq end_ptep
|
||||
.unreq pte
|
||||
ENDPROC(idmap_kpti_install_ng_mappings)
|
||||
.popsection
|
||||
#endif
|
||||
|
||||
/*
|
||||
* __cpu_setup
|
||||
*
|
||||
* Initialise the processor for turning the MMU on. Return in x0 the
|
||||
* value of the SCTLR_EL1 register.
|
||||
*/
|
||||
.pushsection ".idmap.text", "ax"
|
||||
.pushsection ".idmap.text", "awx"
|
||||
ENTRY(__cpu_setup)
|
||||
tlbi vmalle1 // Invalidate local TLB
|
||||
dsb nsh
|
||||
|
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -59,7 +59,10 @@ bool psci_tos_resident_on(int cpu)
 	return cpu == resident_cpu;
 }
 
-struct psci_operations psci_ops;
+struct psci_operations psci_ops = {
+	.conduit = PSCI_CONDUIT_NONE,
+	.smccc_version = SMCCC_VERSION_1_0,
+};
 
 typedef unsigned long (psci_fn)(unsigned long, unsigned long,
 			unsigned long, unsigned long);
@@ -210,6 +213,22 @@ static unsigned long psci_migrate_info_up_cpu(void)
 			      0, 0, 0);
 }
 
+static void set_conduit(enum psci_conduit conduit)
+{
+	switch (conduit) {
+	case PSCI_CONDUIT_HVC:
+		invoke_psci_fn = __invoke_psci_fn_hvc;
+		break;
+	case PSCI_CONDUIT_SMC:
+		invoke_psci_fn = __invoke_psci_fn_smc;
+		break;
+	default:
+		WARN(1, "Unexpected PSCI conduit %d\n", conduit);
+	}
+
+	psci_ops.conduit = conduit;
+}
+
 static int get_set_conduit_method(struct device_node *np)
 {
 	const char *method;
@@ -222,9 +241,9 @@ static int get_set_conduit_method(struct device_node *np)
 	}
 
 	if (!strcmp("hvc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_hvc;
+		set_conduit(PSCI_CONDUIT_HVC);
 	} else if (!strcmp("smc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_smc;
+		set_conduit(PSCI_CONDUIT_SMC);
 	} else {
 		pr_warn("invalid \"method\" property: %s\n", method);
 		return -EINVAL;
@@ -493,6 +512,31 @@ static void __init psci_init_migrate(void)
 	pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
 }
 
+static void __init psci_init_smccc(void)
+{
+	u32 ver = ARM_SMCCC_VERSION_1_0;
+	int feature;
+
+	feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
+
+	if (feature != PSCI_RET_NOT_SUPPORTED) {
+		u32 ret;
+		ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
+		if (ret == ARM_SMCCC_VERSION_1_1) {
+			psci_ops.smccc_version = SMCCC_VERSION_1_1;
+			ver = ret;
+		}
+	}
+
+	/*
+	 * Conveniently, the SMCCC and PSCI versions are encoded the
+	 * same way. No, this isn't accidental.
+	 */
+	pr_info("SMC Calling Convention v%d.%d\n",
+		PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
+
+}
+
 static void __init psci_0_2_set_functions(void)
 {
 	pr_info("Using standard PSCI v0.2 function IDs\n");
@@ -541,6 +585,7 @@ static int __init psci_probe(void)
 	psci_init_migrate();
 
 	if (PSCI_VERSION_MAJOR(ver) >= 1) {
+		psci_init_smccc();
 		psci_init_cpu_suspend();
 		psci_init_system_suspend();
 	}
@@ -654,9 +699,9 @@ int __init psci_acpi_init(void)
 	pr_info("probing for conduit method from ACPI.\n");
 
 	if (acpi_psci_use_hvc())
-		invoke_psci_fn = __invoke_psci_fn_hvc;
+		set_conduit(PSCI_CONDUIT_HVC);
 	else
-		invoke_psci_fn = __invoke_psci_fn_smc;
+		set_conduit(PSCI_CONDUIT_SMC);
 
 	return psci_probe();
 }

include/kvm/arm_psci.h | 51 (new file)
--- /dev/null
+++ b/include/kvm/arm_psci.h
@@ -0,0 +1,51 @@
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __KVM_ARM_PSCI_H__
#define __KVM_ARM_PSCI_H__

#include <linux/kvm_host.h>
#include <uapi/linux/psci.h>

#define KVM_ARM_PSCI_0_1	PSCI_VERSION(0, 1)
#define KVM_ARM_PSCI_0_2	PSCI_VERSION(0, 2)
#define KVM_ARM_PSCI_1_0	PSCI_VERSION(1, 0)

#define KVM_ARM_PSCI_LATEST	KVM_ARM_PSCI_1_0

/*
 * We need the KVM pointer independently from the vcpu as we can call
 * this from HYP, and need to apply kern_hyp_va on it...
 */
static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
{
	/*
	 * Our PSCI implementation stays the same across versions from
	 * v0.2 onward, only adding the few mandatory functions (such
	 * as FEATURES with 1.0) that are required by newer
	 * revisions. It is thus safe to return the latest.
	 */
	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
		return KVM_ARM_PSCI_LATEST;

	return KVM_ARM_PSCI_0_1;
}


int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);

#endif /* __KVM_ARM_PSCI_H__ */

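The header comment notes that kvm_psci_version() may be called from HYP, where the struct kvm pointer has to be translated with kern_hyp_va() first. A minimal sketch of such a caller, written for illustration only (the helper name is invented and it is not part of the patch):

#include <asm/kvm_mmu.h>	/* kern_hyp_va() */
#include <kvm/arm_psci.h>

/* Illustrative only: check from HYP whether this vCPU was opted in to
 * the latest (PSCI 1.0) interface. */
static bool example_hyp_vcpu_has_psci_1_0(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);

	return kvm_psci_version(vcpu, kvm) == KVM_ARM_PSCI_LATEST;
}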
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -14,14 +14,16 @@
 #ifndef __LINUX_ARM_SMCCC_H
 #define __LINUX_ARM_SMCCC_H
 
+#include <uapi/linux/const.h>
+
 /*
  * This file provides common defines for ARM SMC Calling Convention as
  * specified in
  * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
  */
 
-#define ARM_SMCCC_STD_CALL		0
-#define ARM_SMCCC_FAST_CALL		1
+#define ARM_SMCCC_STD_CALL	        _AC(0,U)
+#define ARM_SMCCC_FAST_CALL	        _AC(1,U)
 #define ARM_SMCCC_TYPE_SHIFT		31
 
 #define ARM_SMCCC_SMC_32		0
@@ -60,6 +62,24 @@
 #define ARM_SMCCC_QUIRK_NONE		0
 #define ARM_SMCCC_QUIRK_QCOM_A6		1 /* Save/restore register a6 */
 
+#define ARM_SMCCC_VERSION_1_0		0x10000
+#define ARM_SMCCC_VERSION_1_1		0x10001
+
+#define ARM_SMCCC_VERSION_FUNC_ID					\
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
+			   ARM_SMCCC_SMC_32,				\
+			   0, 0)
+
+#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID					\
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
+			   ARM_SMCCC_SMC_32,				\
+			   0, 1)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_1					\
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
+			   ARM_SMCCC_SMC_32,				\
+			   0, 0x8000)
+
 #ifndef __ASSEMBLY__
 
 #include <linux/linkage.h>
@@ -130,5 +150,146 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
 
 #define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
 
+/* SMCCC v1.1 implementation madness follows */
+#ifdef CONFIG_ARM64
+
+#define SMCCC_SMC_INST	"smc	#0"
+#define SMCCC_HVC_INST	"hvc	#0"
+
+#elif defined(CONFIG_ARM)
+#include <asm/opcodes-sec.h>
+#include <asm/opcodes-virt.h>
+
+#define SMCCC_SMC_INST	__SMC(0)
+#define SMCCC_HVC_INST	__HVC(0)
+
+#endif
+
+#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
+
+#define __count_args(...)						\
+	___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
+
+#define __constraint_write_0						\
+	"+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3)
+#define __constraint_write_1						\
+	"+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3)
+#define __constraint_write_2						\
+	"+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3)
+#define __constraint_write_3						\
+	"+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3)
+#define __constraint_write_4	__constraint_write_3
+#define __constraint_write_5	__constraint_write_4
+#define __constraint_write_6	__constraint_write_5
+#define __constraint_write_7	__constraint_write_6
+
+#define __constraint_read_0
+#define __constraint_read_1
+#define __constraint_read_2
+#define __constraint_read_3
+#define __constraint_read_4	"r" (r4)
+#define __constraint_read_5	__constraint_read_4, "r" (r5)
+#define __constraint_read_6	__constraint_read_5, "r" (r6)
+#define __constraint_read_7	__constraint_read_6, "r" (r7)
+
+#define __declare_arg_0(a0, res)					\
+	struct arm_smccc_res   *___res = res;				\
+	register u32           r0 asm("r0") = a0;			\
+	register unsigned long r1 asm("r1");				\
+	register unsigned long r2 asm("r2");				\
+	register unsigned long r3 asm("r3")
+
+#define __declare_arg_1(a0, a1, res)					\
+	struct arm_smccc_res   *___res = res;				\
+	register u32           r0 asm("r0") = a0;			\
+	register typeof(a1)    r1 asm("r1") = a1;			\
+	register unsigned long r2 asm("r2");				\
+	register unsigned long r3 asm("r3")
+
+#define __declare_arg_2(a0, a1, a2, res)				\
+	struct arm_smccc_res   *___res = res;				\
+	register u32           r0 asm("r0") = a0;			\
+	register typeof(a1)    r1 asm("r1") = a1;			\
+	register typeof(a2)    r2 asm("r2") = a2;			\
+	register unsigned long r3 asm("r3")
+
+#define __declare_arg_3(a0, a1, a2, a3, res)				\
+	struct arm_smccc_res   *___res = res;				\
+	register u32           r0 asm("r0") = a0;			\
+	register typeof(a1)    r1 asm("r1") = a1;			\
+	register typeof(a2)    r2 asm("r2") = a2;			\
+	register typeof(a3)    r3 asm("r3") = a3
+
+#define __declare_arg_4(a0, a1, a2, a3, a4, res)			\
+	__declare_arg_3(a0, a1, a2, a3, res);				\
+	register typeof(a4) r4 asm("r4") = a4
+
+#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res)			\
+	__declare_arg_4(a0, a1, a2, a3, a4, res);			\
+	register typeof(a5) r5 asm("r5") = a5
+
+#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res)		\
+	__declare_arg_5(a0, a1, a2, a3, a4, a5, res);			\
+	register typeof(a6) r6 asm("r6") = a6
+
+#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res)		\
+	__declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res);		\
+	register typeof(a7) r7 asm("r7") = a7
+
+#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
+#define __declare_args(count, ...)  ___declare_args(count, __VA_ARGS__)
+
+#define ___constraints(count)						\
+	: __constraint_write_ ## count					\
+	: __constraint_read_ ## count					\
+	: "memory"
+#define __constraints(count)	___constraints(count)
+
+/*
+ * We have an output list that is not necessarily used, and GCC feels
+ * entitled to optimise the whole sequence away. "volatile" is what
+ * makes it stick.
+ */
+#define __arm_smccc_1_1(inst, ...)					\
+	do {								\
+		__declare_args(__count_args(__VA_ARGS__), __VA_ARGS__);\
+		asm volatile(inst "\n"					\
+			     __constraints(__count_args(__VA_ARGS__)));	\
+		if (___res)						\
+			*___res = (typeof(*___res)){r0, r1, r2, r3};	\
+	} while (0)
+
+/*
+ * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make SMC calls following SMC Calling Convention v1.1.
+ * The content of the supplied parameters is copied to registers 0 to 7 prior
+ * to the SMC instruction. The return values are updated with the content
+ * from registers 0 to 3 on return from the SMC instruction if not NULL.
+ */
+#define arm_smccc_1_1_smc(...)	__arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__)
+
+/*
+ * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make HVC calls following SMC Calling Convention v1.1.
+ * The content of the supplied parameters is copied to registers 0 to 7 prior
+ * to the HVC instruction. The return values are updated with the content
+ * from registers 0 to 3 on return from the HVC instruction if not NULL.
+ */
+#define arm_smccc_1_1_hvc(...)	__arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
+
 #endif /*__ASSEMBLY__*/
 #endif /*__LINUX_ARM_SMCCC_H*/

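These variadic wrappers let C code issue SMCCC v1.1 calls without hand-written inline assembly. A hedged usage sketch, assuming SMC is the correct conduit on the target system; the helper function below is illustrative and not part of the patch:

#include <linux/arm-smccc.h>
#include <linux/errno.h>

/* Illustrative only: ask firmware which SMCCC version it implements. */
static int example_probe_smccc_version(void)
{
	struct arm_smccc_res res;

	/* x0 = SMCCC_VERSION function ID; the result comes back in res.a0. */
	arm_smccc_1_1_smc(ARM_SMCCC_VERSION_FUNC_ID, &res);
	if (res.a0 == ARM_SMCCC_VERSION_1_1)
		return 0;	/* v1.1 features may be used */

	return -ENXIO;
}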
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -25,6 +25,17 @@ bool psci_tos_resident_on(int cpu);
 int psci_cpu_init_idle(unsigned int cpu);
 int psci_cpu_suspend_enter(unsigned long index);
 
+enum psci_conduit {
+	PSCI_CONDUIT_NONE,
+	PSCI_CONDUIT_SMC,
+	PSCI_CONDUIT_HVC,
+};
+
+enum smccc_version {
+	SMCCC_VERSION_1_0,
+	SMCCC_VERSION_1_1,
+};
+
 struct psci_operations {
 	u32 (*get_version)(void);
 	int (*cpu_suspend)(u32 state, unsigned long entry_point);
@@ -34,6 +45,8 @@ struct psci_operations {
 	int (*affinity_info)(unsigned long target_affinity,
 			unsigned long lowest_affinity_level);
 	int (*migrate_info_type)(void);
+	enum psci_conduit conduit;
+	enum smccc_version smccc_version;
 };
 
 extern struct psci_operations psci_ops;

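With the conduit and SMCCC version now exposed through psci_ops, callers can decide both whether SMCCC v1.1 calls are safe to make and which instruction to issue them with. A rough sketch of a consumer, modelled on the way the arm64 branch-predictor hardening code probes ARCH_WORKAROUND_1; the helper name is invented here and the snippet is not part of the patch:

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/psci.h>

/* Illustrative helper: query SMCCC_ARCH_FEATURES for a workaround ID. */
static int example_arch_feature_supported(u32 workaround_id)
{
	struct arm_smccc_res res;

	if (psci_ops.smccc_version < SMCCC_VERSION_1_1)
		return -EOPNOTSUPP;	/* v1.0 firmware: no ARCH_FEATURES */

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  workaround_id, &res);
		break;
	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  workaround_id, &res);
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Firmware returns a negative value when the ID is not implemented. */
	return (int)res.a0 < 0 ? -EOPNOTSUPP : 0;
}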
--- a/include/uapi/linux/psci.h
+++ b/include/uapi/linux/psci.h
@@ -88,6 +88,9 @@
 	(((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
 #define PSCI_VERSION_MINOR(ver)			\
 	((ver) & PSCI_VERSION_MINOR_MASK)
+#define PSCI_VERSION(maj, min)						\
+	((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \
+	 ((min) & PSCI_VERSION_MINOR_MASK))
 
 /* PSCI features decoding (>=1.0) */
 #define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT	1

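PSCI_VERSION() packs the major number into bits [31:16] and the minor number into bits [15:0], which is the same layout SMCCC uses for its own version numbers. A small standalone check of the arithmetic, illustrative only; the shift and mask values are restated here so the snippet compiles outside the kernel:

#include <stdio.h>

/* Restated to mirror the uapi encoding used in the hunk above. */
#define PSCI_VERSION_MAJOR_SHIFT	16
#define PSCI_VERSION_MAJOR_MASK		0xffff0000
#define PSCI_VERSION_MINOR_MASK		0x0000ffff
#define PSCI_VERSION(maj, min)						\
	((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \
	 ((min) & PSCI_VERSION_MINOR_MASK))

int main(void)
{
	/* 0x1, 0x2 and 0x10000 respectively, so numeric ordering works. */
	printf("0.1=%#x 0.2=%#x 1.0=%#x\n",
	       PSCI_VERSION(0, 1), PSCI_VERSION(0, 2), PSCI_VERSION(1, 0));
	/* SMCCC v1.1 uses the same scheme: 0x10001. */
	printf("1.1=%#x\n", PSCI_VERSION(1, 1));
	return 0;
}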
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -31,6 +31,7 @@
 #include <linux/irqbypass.h>
 #include <trace/events/kvm.h>
 #include <kvm/arm_pmu.h>
+#include <kvm/arm_psci.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -46,7 +47,6 @@
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
-#include <asm/kvm_psci.h>
 #include <asm/sections.h>
 
 #ifdef REQUIRES_VIRT

--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -15,16 +15,16 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/preempt.h>
 #include <linux/kvm_host.h>
 #include <linux/wait.h>
 
 #include <asm/cputype.h>
 #include <asm/kvm_emulate.h>
-#include <asm/kvm_psci.h>
 #include <asm/kvm_host.h>
 
-#include <uapi/linux/psci.h>
+#include <kvm/arm_psci.h>
 
 /*
  * This is an implementation of the Power State Coordination Interface
@@ -33,6 +33,38 @@
 
 #define AFFINITY_MASK(level)	~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
 
+static u32 smccc_get_function(struct kvm_vcpu *vcpu)
+{
+	return vcpu_get_reg(vcpu, 0);
+}
+
+static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
+{
+	return vcpu_get_reg(vcpu, 1);
+}
+
+static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
+{
+	return vcpu_get_reg(vcpu, 2);
+}
+
+static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
+{
+	return vcpu_get_reg(vcpu, 3);
+}
+
+static void smccc_set_retval(struct kvm_vcpu *vcpu,
+			     unsigned long a0,
+			     unsigned long a1,
+			     unsigned long a2,
+			     unsigned long a3)
+{
+	vcpu_set_reg(vcpu, 0, a0);
+	vcpu_set_reg(vcpu, 1, a1);
+	vcpu_set_reg(vcpu, 2, a2);
+	vcpu_set_reg(vcpu, 3, a3);
+}
+
 static unsigned long psci_affinity_mask(unsigned long affinity_level)
 {
 	if (affinity_level <= 3)
@@ -78,7 +110,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	unsigned long context_id;
 	phys_addr_t target_pc;
 
-	cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
+	cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
 	if (vcpu_mode_is_32bit(source_vcpu))
 		cpu_id &= ~((u32) 0);
 
@@ -91,14 +123,14 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	if (!vcpu)
 		return PSCI_RET_INVALID_PARAMS;
 	if (!vcpu->arch.power_off) {
-		if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
+		if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
 			return PSCI_RET_ALREADY_ON;
 		else
 			return PSCI_RET_INVALID_PARAMS;
 	}
 
-	target_pc = vcpu_get_reg(source_vcpu, 2);
-	context_id = vcpu_get_reg(source_vcpu, 3);
+	target_pc = smccc_get_arg2(source_vcpu);
+	context_id = smccc_get_arg3(source_vcpu);
 
 	kvm_reset_vcpu(vcpu);
 
@@ -117,7 +149,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
 	 * the general purpose registers are undefined upon CPU_ON.
 	 */
-	vcpu_set_reg(vcpu, 0, context_id);
+	smccc_set_retval(vcpu, context_id, 0, 0, 0);
 	vcpu->arch.power_off = false;
 	smp_mb();		/* Make sure the above is visible */
 
@@ -137,8 +169,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu *tmp;
 
-	target_affinity = vcpu_get_reg(vcpu, 1);
-	lowest_affinity_level = vcpu_get_reg(vcpu, 2);
+	target_affinity = smccc_get_arg1(vcpu);
+	lowest_affinity_level = smccc_get_arg2(vcpu);
 
 	/* Determine target affinity mask */
 	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@@ -200,18 +232,10 @@ static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
 	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
 }
 
-int kvm_psci_version(struct kvm_vcpu *vcpu)
-{
-	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
-		return KVM_ARM_PSCI_0_2;
-
-	return KVM_ARM_PSCI_0_1;
-}
-
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
-	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
+	u32 psci_fn = smccc_get_function(vcpu);
 	unsigned long val;
 	int ret = 1;
 
@@ -221,7 +245,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 		 * Bits[31:16] = Major Version = 0
 		 * Bits[15:0] = Minor Version = 2
 		 */
-		val = 2;
+		val = KVM_ARM_PSCI_0_2;
 		break;
 	case PSCI_0_2_FN_CPU_SUSPEND:
 	case PSCI_0_2_FN64_CPU_SUSPEND:
@@ -278,14 +302,56 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	vcpu_set_reg(vcpu, 0, val);
+	smccc_set_retval(vcpu, val, 0, 0, 0);
 	return ret;
 }
 
+static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
+{
+	u32 psci_fn = smccc_get_function(vcpu);
+	u32 feature;
+	unsigned long val;
+	int ret = 1;
+
+	switch(psci_fn) {
+	case PSCI_0_2_FN_PSCI_VERSION:
+		val = KVM_ARM_PSCI_1_0;
+		break;
+	case PSCI_1_0_FN_PSCI_FEATURES:
+		feature = smccc_get_arg1(vcpu);
+		switch(feature) {
+		case PSCI_0_2_FN_PSCI_VERSION:
+		case PSCI_0_2_FN_CPU_SUSPEND:
+		case PSCI_0_2_FN64_CPU_SUSPEND:
+		case PSCI_0_2_FN_CPU_OFF:
+		case PSCI_0_2_FN_CPU_ON:
+		case PSCI_0_2_FN64_CPU_ON:
+		case PSCI_0_2_FN_AFFINITY_INFO:
+		case PSCI_0_2_FN64_AFFINITY_INFO:
+		case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+		case PSCI_0_2_FN_SYSTEM_OFF:
+		case PSCI_0_2_FN_SYSTEM_RESET:
+		case PSCI_1_0_FN_PSCI_FEATURES:
+		case ARM_SMCCC_VERSION_FUNC_ID:
+			val = 0;
+			break;
+		default:
+			val = PSCI_RET_NOT_SUPPORTED;
+			break;
+		}
+		break;
+	default:
+		return kvm_psci_0_2_call(vcpu);
+	}
+
+	smccc_set_retval(vcpu, val, 0, 0, 0);
+	return ret;
+}
+
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
-	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
+	u32 psci_fn = smccc_get_function(vcpu);
 	unsigned long val;
 
 	switch (psci_fn) {
@@ -303,7 +369,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	vcpu_set_reg(vcpu, 0, val);
+	smccc_set_retval(vcpu, val, 0, 0, 0);
 	return 1;
 }
 
@@ -321,9 +387,11 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
  * Errors:
  * -EINVAL: Unrecognized PSCI function
  */
-int kvm_psci_call(struct kvm_vcpu *vcpu)
+static int kvm_psci_call(struct kvm_vcpu *vcpu)
 {
-	switch (kvm_psci_version(vcpu)) {
+	switch (kvm_psci_version(vcpu, vcpu->kvm)) {
+	case KVM_ARM_PSCI_1_0:
+		return kvm_psci_1_0_call(vcpu);
 	case KVM_ARM_PSCI_0_2:
 		return kvm_psci_0_2_call(vcpu);
 	case KVM_ARM_PSCI_0_1:
@@ -332,3 +400,30 @@ int kvm_psci_call(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	};
 }
+
+int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
+{
+	u32 func_id = smccc_get_function(vcpu);
+	u32 val = PSCI_RET_NOT_SUPPORTED;
+	u32 feature;
+
+	switch (func_id) {
+	case ARM_SMCCC_VERSION_FUNC_ID:
+		val = ARM_SMCCC_VERSION_1_1;
+		break;
+	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
+		feature = smccc_get_arg1(vcpu);
+		switch(feature) {
+		case ARM_SMCCC_ARCH_WORKAROUND_1:
+			if (kvm_arm_harden_branch_predictor())
+				val = 0;
+			break;
+		}
+		break;
+	default:
+		return kvm_psci_call(vcpu);
+	}
+
+	smccc_set_retval(vcpu, val, 0, 0, 0);
+	return 1;
+}
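The PSCI dispatch above depends on which revision the vCPU was opted in to: kvm_psci_version() only returns PSCI 1.0 (and thus reaches kvm_psci_1_0_call) when the vCPU was created with the KVM_ARM_VCPU_PSCI_0_2 feature bit. A VMM-side sketch of setting that bit through the standard KVM_ARM_VCPU_INIT ioctl; this is illustrative only and not part of the patch:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative only: enable PSCI 0.2+ (and therefore the PSCI 1.0 and
 * SMCCC 1.1 handling above) for a vCPU from a userspace VMM. */
static int example_vcpu_init_with_psci(int vm_fd, int vcpu_fd)
{
	struct kvm_vcpu_init init;

	/* Ask KVM for the preferred CPU target, then add the feature bit. */
	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return -1;

	init.features[0] |= 1U << KVM_ARM_VCPU_PSCI_0_2;

	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}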