Merge branches 'for-next/doc', 'for-next/sve', 'for-next/sysreg', 'for-next/gettimeofday', 'for-next/stacktrace', 'for-next/atomics', 'for-next/el1-exceptions', 'for-next/a510-erratum-2658417', 'for-next/defconfig', 'for-next/tpidr2_el0' and 'for-next/ftrace', remote-tracking branch 'arm64/for-next/perf' into for-next/core

* arm64/for-next/perf:
  arm64: asm/perf_regs.h: Avoid C++-style comment in UAPI header
  arm64/sve: Add Perf extensions documentation
  perf: arm64: Add SVE vector granule register to user regs
  MAINTAINERS: add maintainers for Alibaba's T-Head PMU driver
  drivers/perf: add DDR Sub-System Driveway PMU driver for Yitian 710 SoC
  docs: perf: Add description for Alibaba's T-Head PMU driver

* for-next/doc:
  : Documentation/arm64 updates
  arm64/sve: Document our actual ABI for clearing registers on syscall

* for-next/sve:
  : SVE updates
  arm64/sysreg: Add hwcap for SVE EBF16

* for-next/sysreg: (35 commits)
  : arm64 system registers generation (more conversions)
  arm64/sysreg: Fix a few missed conversions
  arm64/sysreg: Convert ID_AA64AFRn_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64DFR1_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64DFR0_EL1 to automatic generation
  arm64/sysreg: Use feature numbering for PMU and SPE revisions
  arm64/sysreg: Add _EL1 into ID_AA64DFR0_EL1 definition names
  arm64/sysreg: Align field names in ID_AA64DFR0_EL1 with architecture
  arm64/sysreg: Add definition for ALLINT
  arm64/sysreg: Convert SCXTNUM_EL1 to automatic generation
  arm64/sysreg: Convert TPIDR_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64PFR1_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64PFR0_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64MMFR2_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64MMFR1_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64MMFR0_EL1 to automatic generation
  arm64/sysreg: Convert HCRX_EL2 to automatic generation
  arm64/sysreg: Standardise naming of ID_AA64PFR1_EL1 SME enumeration
  arm64/sysreg: Standardise naming of ID_AA64PFR1_EL1 BTI enumeration
  arm64/sysreg: Standardise naming of ID_AA64PFR1_EL1 fractional version fields
  arm64/sysreg: Standardise naming for MTE feature enumeration
  ...

* for-next/gettimeofday:
  : Use self-synchronising counter access in gettimeofday() (if FEAT_ECV)
  arm64: vdso: use SYS_CNTVCTSS_EL0 for gettimeofday
  arm64: alternative: patch alternatives in the vDSO
  arm64: module: move find_section to header

* for-next/stacktrace:
  : arm64 stacktrace cleanups and improvements
  arm64: stacktrace: track hyp stacks in unwinder's address space
  arm64: stacktrace: track all stack boundaries explicitly
  arm64: stacktrace: remove stack type from fp translator
  arm64: stacktrace: rework stack boundary discovery
  arm64: stacktrace: add stackinfo_on_stack() helper
  arm64: stacktrace: move SDEI stack helpers to stacktrace code
  arm64: stacktrace: rename unwind_next_common() -> unwind_next_frame_record()
  arm64: stacktrace: simplify unwind_next_common()
  arm64: stacktrace: fix kerneldoc comments

* for-next/atomics:
  : arm64 atomics improvements
  arm64: atomic: always inline the assembly
  arm64: atomics: remove LL/SC trampolines

* for-next/el1-exceptions:
  : Improve the reporting of EL1 exceptions
  arm64: rework BTI exception handling
  arm64: rework FPAC exception handling
  arm64: consistently pass ESR_ELx to die()
  arm64: die(): pass 'err' as long
  arm64: report EL1 UNDEFs better

* for-next/a510-erratum-2658417:
  : Cortex-A510: 2658417: remove BF16 support due to incorrect result
  arm64: errata: remove BF16 HWCAP due to incorrect result on Cortex-A510
  arm64: cpufeature: Expose get_arm64_ftr_reg() outside cpufeature.c
  arm64: cpufeature: Force HWCAP to be based on the sysreg visible to user-space

* for-next/defconfig:
  : arm64 defconfig updates
  arm64: defconfig: Add Coresight as module
  arm64: Enable docker support in defconfig
  arm64: defconfig: Enable memory hotplug and hotremove config
  arm64: configs: Enable all PMUs provided by Arm

* for-next/tpidr2_el0:
  : arm64 ptrace() support for TPIDR2_EL0
  kselftest/arm64: Add coverage of TPIDR2_EL0 ptrace interface
  arm64/ptrace: Support access to TPIDR2_EL0
  arm64/ptrace: Document extension of NT_ARM_TLS to cover TPIDR2_EL0
  kselftest/arm64: Add test coverage for NT_ARM_TLS

* for-next/ftrace:
  : arm64 ftrace updates/fixes
  arm64: ftrace: fix module PLTs with mcount
  arm64: module: Remove unused plt_entry_is_initialized()
  arm64: module: Make plt_equals_entry() static

Committed by: Catalin Marinas, 2022-09-30 09:17:57 +01:00
72 changed files with 1651 additions and 929 deletions


@ -272,6 +272,9 @@ HWCAP2_WFXT
HWCAP2_EBF16
Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0010.
HWCAP2_SVE_EBF16
Functionality implied by ID_AA64ZFR0_EL1.BF16 == 0b0010.
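
Userspace probes this bit with getauxval(); a minimal sketch, assuming
headers that may predate the new bit (the fallback #define mirrors the
uapi value added later in this series):

#include <stdio.h>
#include <sys/auxv.h>		/* getauxval(), AT_HWCAP2 */

#ifndef HWCAP2_SVE_EBF16
#define HWCAP2_SVE_EBF16	(1UL << 33)	/* from the uapi hunk below */
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("SVE EBF16: %s\n",
	       (hwcap2 & HWCAP2_SVE_EBF16) ? "supported" : "not supported");
	return 0;
}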
4. Unused AT_HWCAP bits
-----------------------


@ -110,6 +110,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2441009 | ARM64_ERRATUM_2441009 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2658417 | ARM64_ERRATUM_2658417 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 |


@ -331,6 +331,9 @@ The regset data starts with struct user_za_header, containing:
been read if a PTRACE_GETREGSET of NT_ARM_ZA were executed for each thread
when the coredump was generated.
* The NT_ARM_TLS note will be extended to two registers, the second register
will contain TPIDR2_EL0 on systems that support SME and will be read as
zero with writes ignored otherwise.
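
A debugger fetches both registers with a single PTRACE_GETREGSET call;
a minimal sketch of the tracer side, assuming the tracee is already
ptrace-stopped:

#include <stdint.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>		/* struct iovec */
#include <linux/elf.h>		/* NT_ARM_TLS */

/* Returns 0 on success; tls[0] is TPIDR_EL0, tls[1] is TPIDR2_EL0. */
static long read_tls_regs(pid_t pid, uint64_t tls[2])
{
	struct iovec iov = {
		.iov_base = tls,
		.iov_len  = 2 * sizeof(uint64_t),
	};

	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_TLS, &iov);
}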
9. System runtime configuration
--------------------------------


@ -111,7 +111,7 @@ the SVE instruction set architecture.
* On syscall, V0..V31 are preserved (as without SVE). Thus, bits [127:0] of
Z0..Z31 are preserved. All other bits of Z0..Z31, and all of P0..P15 and FFR
become unspecified on return from a syscall.
become zero on return from a syscall.
* The SVE registers are not used to pass arguments to or receive results from
any syscall.


@ -733,6 +733,19 @@ config ARM64_ERRATUM_2077057
If unsure, say Y.
config ARM64_ERRATUM_2658417
bool "Cortex-A510: 2658417: remove BF16 support due to incorrect result"
default y
help
This option adds the workaround for ARM Cortex-A510 erratum 2658417.
Affected Cortex-A510 (r0p0 to r1p1) may produce the wrong result for
BFMMLA or VMMLA instructions in rare circumstances when a pair of
A510 CPUs are using shared neon hardware. As the sharing is not
discoverable by the kernel, hide the BF16 HWCAP to indicate that
user-space should not be using these instructions.
If unsure, say Y.
config ARM64_ERRATUM_2119858
bool "Cortex-A710/X2: 2119858: workaround TRBE overwriting trace data in FILL mode"
default y


@ -18,6 +18,7 @@ CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
@ -102,6 +103,8 @@ CONFIG_ARM_SCMI_CPUFREQ=y
CONFIG_ARM_TEGRA186_CPUFREQ=y
CONFIG_QORIQ_CPUFREQ=y
CONFIG_ACPI=y
CONFIG_ACPI_HOTPLUG_MEMORY=y
CONFIG_ACPI_HMAT=y
CONFIG_ACPI_APEI=y
CONFIG_ACPI_APEI_GHES=y
CONFIG_ACPI_APEI_PCIEAER=y
@ -126,6 +129,8 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_MEMORY_FAILURE=y
CONFIG_TRANSPARENT_HUGEPAGE=y
@ -139,12 +144,16 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IPV6=m
CONFIG_NETFILTER=y
CONFIG_BRIDGE_NETFILTER=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NETFILTER_XT_MARK=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_IPVS=m
CONFIG_IP_VS=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
@ -1229,8 +1238,14 @@ CONFIG_PHY_UNIPHIER_USB3=y
CONFIG_PHY_TEGRA_XUSB=y
CONFIG_PHY_AM654_SERDES=m
CONFIG_PHY_J721E_WIZ=m
CONFIG_ARM_CCI_PMU=m
CONFIG_ARM_CCN=m
CONFIG_ARM_CMN=m
CONFIG_ARM_SMMU_V3_PMU=m
CONFIG_ARM_DSU_PMU=m
CONFIG_FSL_IMX8_DDR_PMU=m
CONFIG_ARM_SPE_PMU=m
CONFIG_ARM_DMC620_PMU=m
CONFIG_QCOM_L2_PMU=y
CONFIG_QCOM_L3_PMU=y
CONFIG_HISI_PMU=y
@ -1325,4 +1340,12 @@ CONFIG_DEBUG_FS=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set
CONFIG_CORESIGHT=m
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=m
CONFIG_CORESIGHT_CATU=m
CONFIG_CORESIGHT_SINK_TPIU=m
CONFIG_CORESIGHT_SINK_ETBV10=m
CONFIG_CORESIGHT_STM=m
CONFIG_CORESIGHT_CPU_DEBUG=m
CONFIG_CORESIGHT_CTI=m
CONFIG_MEMTEST=y


@ -384,8 +384,8 @@ alternative_cb_end
.macro tcr_compute_pa_size, tcr, pos, tmp0, tmp1
mrs \tmp0, ID_AA64MMFR0_EL1
// Narrow PARange to fit the PS field in TCR_ELx
ubfx \tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
mov \tmp1, #ID_AA64MMFR0_PARANGE_MAX
ubfx \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
cmp \tmp0, \tmp1
csel \tmp0, \tmp1, \tmp0, hi
bfi \tcr, \tmp0, \pos, #3
@ -512,7 +512,7 @@ alternative_endif
*/
.macro reset_pmuserenr_el0, tmpreg
mrs \tmpreg, id_aa64dfr0_el1
sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
cmp \tmpreg, #1 // Skip if no PMU present
b.lt 9000f
msr pmuserenr_el0, xzr // Disable PMU access from EL0
@ -524,7 +524,7 @@ alternative_endif
*/
.macro reset_amuserenr_el0, tmpreg
mrs \tmpreg, id_aa64pfr0_el1 // Check ID_AA64PFR0_EL1
ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4
ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
cbz \tmpreg, .Lskip_\@ // Skip if no AMU present
msr_s SYS_AMUSERENR_EL0, xzr // Disable AMU access from EL0
.Lskip_\@:
@ -612,7 +612,7 @@ alternative_endif
.macro offset_ttbr1, ttbr, tmp
#ifdef CONFIG_ARM64_VA_BITS_52
mrs_s \tmp, SYS_ID_AA64MMFR2_EL1
and \tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
and \tmp, \tmp, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
cbnz \tmp, .Lskipoffs_\@
orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
.Lskipoffs_\@ :


@ -12,19 +12,6 @@
#include <linux/stringify.h>
#ifdef CONFIG_ARM64_LSE_ATOMICS
#define __LL_SC_FALLBACK(asm_ops) \
" b 3f\n" \
" .subsection 1\n" \
"3:\n" \
asm_ops "\n" \
" b 4f\n" \
" .previous\n" \
"4:\n"
#else
#define __LL_SC_FALLBACK(asm_ops) asm_ops
#endif
#ifndef CONFIG_CC_HAS_K_CONSTRAINT
#define K
#endif
@ -36,38 +23,36 @@ asm_ops "\n" \
*/
#define ATOMIC_OP(op, asm_op, constraint) \
static inline void \
static __always_inline void \
__ll_sc_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
\
asm volatile("// atomic_" #op "\n" \
__LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ldxr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" stxr %w1, %w0, %2\n" \
" cbnz %w1, 1b\n") \
" cbnz %w1, 1b\n" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i)); \
}
#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
static inline int \
static __always_inline int \
__ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
\
asm volatile("// atomic_" #op "_return" #name "\n" \
__LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ld" #acq "xr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" st" #rel "xr %w1, %w0, %2\n" \
" cbnz %w1, 1b\n" \
" " #mb ) \
" " #mb \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i) \
: cl); \
@ -76,20 +61,19 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
static inline int \
static __always_inline int \
__ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int val, result; \
\
asm volatile("// atomic_fetch_" #op #name "\n" \
__LL_SC_FALLBACK( \
" prfm pstl1strm, %3\n" \
"1: ld" #acq "xr %w0, %3\n" \
" " #asm_op " %w1, %w0, %w4\n" \
" st" #rel "xr %w2, %w1, %3\n" \
" cbnz %w2, 1b\n" \
" " #mb ) \
" " #mb \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i) \
: cl); \
@ -135,38 +119,36 @@ ATOMIC_OPS(andnot, bic, )
#undef ATOMIC_OP
#define ATOMIC64_OP(op, asm_op, constraint) \
static inline void \
static __always_inline void \
__ll_sc_atomic64_##op(s64 i, atomic64_t *v) \
{ \
s64 result; \
unsigned long tmp; \
\
asm volatile("// atomic64_" #op "\n" \
__LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ldxr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \
" stxr %w1, %0, %2\n" \
" cbnz %w1, 1b") \
" cbnz %w1, 1b" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i)); \
}
#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
static inline long \
static __always_inline long \
__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
{ \
s64 result; \
unsigned long tmp; \
\
asm volatile("// atomic64_" #op "_return" #name "\n" \
__LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ld" #acq "xr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \
" st" #rel "xr %w1, %0, %2\n" \
" cbnz %w1, 1b\n" \
" " #mb ) \
" " #mb \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i) \
: cl); \
@ -175,20 +157,19 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
}
#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
static inline long \
static __always_inline long \
__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
{ \
s64 result, val; \
unsigned long tmp; \
\
asm volatile("// atomic64_fetch_" #op #name "\n" \
__LL_SC_FALLBACK( \
" prfm pstl1strm, %3\n" \
"1: ld" #acq "xr %0, %3\n" \
" " #asm_op " %1, %0, %4\n" \
" st" #rel "xr %w2, %1, %3\n" \
" cbnz %w2, 1b\n" \
" " #mb ) \
" " #mb \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i) \
: cl); \
@ -233,14 +214,13 @@ ATOMIC64_OPS(andnot, bic, )
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
static inline s64
static __always_inline s64
__ll_sc_atomic64_dec_if_positive(atomic64_t *v)
{
s64 result;
unsigned long tmp;
asm volatile("// atomic64_dec_if_positive\n"
__LL_SC_FALLBACK(
" prfm pstl1strm, %2\n"
"1: ldxr %0, %2\n"
" subs %0, %0, #1\n"
@ -248,7 +228,7 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n"
" dmb ish\n"
"2:")
"2:"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
:
: "cc", "memory");
@ -257,7 +237,7 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
}
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \
static inline u##sz \
static __always_inline u##sz \
__ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
unsigned long old, \
u##sz new) \
@ -274,7 +254,6 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
old = (u##sz)old; \
\
asm volatile( \
__LL_SC_FALLBACK( \
" prfm pstl1strm, %[v]\n" \
"1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
" eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
@ -282,7 +261,7 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
" st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \
" cbnz %w[tmp], 1b\n" \
" " #mb "\n" \
"2:") \
"2:" \
: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
[v] "+Q" (*(u##sz *)ptr) \
: [old] __stringify(constraint) "r" (old), [new] "r" (new) \
@ -316,7 +295,7 @@ __CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L)
#undef __CMPXCHG_CASE
#define __CMPXCHG_DBL(name, mb, rel, cl) \
static inline long \
static __always_inline long \
__ll_sc__cmpxchg_double##name(unsigned long old1, \
unsigned long old2, \
unsigned long new1, \
@ -326,7 +305,6 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \
unsigned long tmp, ret; \
\
asm volatile("// __cmpxchg_double" #name "\n" \
__LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ldxp %0, %1, %2\n" \
" eor %0, %0, %3\n" \
@ -336,7 +314,7 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \
" st" #rel "xp %w0, %5, %6, %2\n" \
" cbnz %w0, 1b\n" \
" " #mb "\n" \
"2:") \
"2:" \
: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
: "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
: cl); \


@ -11,7 +11,8 @@
#define __ASM_ATOMIC_LSE_H
#define ATOMIC_OP(op, asm_op) \
static inline void __lse_atomic_##op(int i, atomic_t *v) \
static __always_inline void \
__lse_atomic_##op(int i, atomic_t *v) \
{ \
asm volatile( \
__LSE_PREAMBLE \
@ -25,7 +26,7 @@ ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)
static inline void __lse_atomic_sub(int i, atomic_t *v)
static __always_inline void __lse_atomic_sub(int i, atomic_t *v)
{
__lse_atomic_add(-i, v);
}
@ -33,7 +34,8 @@ static inline void __lse_atomic_sub(int i, atomic_t *v)
#undef ATOMIC_OP
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \
static __always_inline int \
__lse_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
int old; \
\
@ -63,7 +65,8 @@ ATOMIC_FETCH_OPS(add, ldadd)
#undef ATOMIC_FETCH_OPS
#define ATOMIC_FETCH_OP_SUB(name) \
static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \
static __always_inline int \
__lse_atomic_fetch_sub##name(int i, atomic_t *v) \
{ \
return __lse_atomic_fetch_add##name(-i, v); \
}
@ -76,12 +79,14 @@ ATOMIC_FETCH_OP_SUB( )
#undef ATOMIC_FETCH_OP_SUB
#define ATOMIC_OP_ADD_SUB_RETURN(name) \
static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \
static __always_inline int \
__lse_atomic_add_return##name(int i, atomic_t *v) \
{ \
return __lse_atomic_fetch_add##name(i, v) + i; \
} \
\
static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
static __always_inline int \
__lse_atomic_sub_return##name(int i, atomic_t *v) \
{ \
return __lse_atomic_fetch_sub(i, v) - i; \
}
@ -93,13 +98,14 @@ ATOMIC_OP_ADD_SUB_RETURN( )
#undef ATOMIC_OP_ADD_SUB_RETURN
static inline void __lse_atomic_and(int i, atomic_t *v)
static __always_inline void __lse_atomic_and(int i, atomic_t *v)
{
return __lse_atomic_andnot(~i, v);
}
#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \
static __always_inline int \
__lse_atomic_fetch_and##name(int i, atomic_t *v) \
{ \
return __lse_atomic_fetch_andnot##name(~i, v); \
}
@ -112,7 +118,8 @@ ATOMIC_FETCH_OP_AND( , al, "memory")
#undef ATOMIC_FETCH_OP_AND
#define ATOMIC64_OP(op, asm_op) \
static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \
static __always_inline void \
__lse_atomic64_##op(s64 i, atomic64_t *v) \
{ \
asm volatile( \
__LSE_PREAMBLE \
@ -126,7 +133,7 @@ ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)
static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
static __always_inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
__lse_atomic64_add(-i, v);
}
@ -134,7 +141,8 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
#undef ATOMIC64_OP
#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
static __always_inline long \
__lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
{ \
s64 old; \
\
@ -164,7 +172,8 @@ ATOMIC64_FETCH_OPS(add, ldadd)
#undef ATOMIC64_FETCH_OPS
#define ATOMIC64_FETCH_OP_SUB(name) \
static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
static __always_inline long \
__lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
{ \
return __lse_atomic64_fetch_add##name(-i, v); \
}
@ -177,12 +186,14 @@ ATOMIC64_FETCH_OP_SUB( )
#undef ATOMIC64_FETCH_OP_SUB
#define ATOMIC64_OP_ADD_SUB_RETURN(name) \
static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
static __always_inline long \
__lse_atomic64_add_return##name(s64 i, atomic64_t *v) \
{ \
return __lse_atomic64_fetch_add##name(i, v) + i; \
} \
\
static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
static __always_inline long \
__lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
{ \
return __lse_atomic64_fetch_sub##name(i, v) - i; \
}
@ -194,13 +205,14 @@ ATOMIC64_OP_ADD_SUB_RETURN( )
#undef ATOMIC64_OP_ADD_SUB_RETURN
static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
static __always_inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
return __lse_atomic64_andnot(~i, v);
}
#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
static __always_inline long \
__lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
{ \
return __lse_atomic64_fetch_andnot##name(~i, v); \
}
@ -212,7 +224,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory")
#undef ATOMIC64_FETCH_OP_AND
static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
static __always_inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long tmp;


@ -45,10 +45,6 @@ static inline unsigned int arch_slab_minalign(void)
#define arch_slab_minalign() arch_slab_minalign()
#endif
#define CTR_CACHE_MINLINE_MASK \
(0xf << CTR_EL0_DMINLINE_SHIFT | \
CTR_EL0_IMINLINE_MASK << CTR_EL0_IMINLINE_SHIFT)
#define CTR_L1IP(ctr) SYS_FIELD_GET(CTR_EL0, L1Ip, ctr)
#define ICACHEF_ALIASING 0
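
The new definition leans on the generated CTR_EL0 field macros; a short
sketch of the pattern, using kernel-internal names for illustration:

/* Assumes <asm/cputype.h> and <asm/sysreg.h>; SYS_FIELD_GET(reg, field,
 * val) expands to FIELD_GET(reg##_##field##_MASK, val). */
static unsigned int example_l1ip(void)
{
	u64 ctr = read_cpuid_cachetype();	/* reads CTR_EL0 */

	return SYS_FIELD_GET(CTR_EL0, L1Ip, ctr);	/* == CTR_L1IP(ctr) */
}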


@ -553,7 +553,7 @@ cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
u64 mask = GENMASK_ULL(field + 3, field);
/* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
if (val == ID_AA64DFR0_PMUVER_IMP_DEF)
if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
val = 0;
if (val > cap) {
@ -597,43 +597,43 @@ static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGEND_SHIFT) == 0x1 ||
cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT) == 0x1;
}
static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT);
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT);
return val == ID_AA64PFR0_ELx_32BIT_64BIT;
return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
}
static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT);
return val == ID_AA64PFR0_ELx_32BIT_64BIT;
return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
}
static inline bool id_aa64pfr0_sve(u64 pfr0)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SVE_SHIFT);
return val > 0;
}
static inline bool id_aa64pfr1_sme(u64 pfr1)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_SME_SHIFT);
u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_SME_SHIFT);
return val > 0;
}
static inline bool id_aa64pfr1_mte(u64 pfr1)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT);
u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT);
return val >= ID_AA64PFR1_MTE;
return val >= ID_AA64PFR1_EL1_MTE_MTE2;
}
void __init setup_cpu_features(void);
@ -659,7 +659,7 @@ static inline bool supports_csv2p3(int scope)
pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
ID_AA64PFR0_CSV2_SHIFT);
ID_AA64PFR0_EL1_CSV2_SHIFT);
return csv2_val == 3;
}
@ -694,10 +694,10 @@ static inline bool system_supports_4kb_granule(void)
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
val = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_TGRAN4_SHIFT);
ID_AA64MMFR0_EL1_TGRAN4_SHIFT);
return (val >= ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN) &&
(val <= ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX);
return (val >= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN) &&
(val <= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX);
}
static inline bool system_supports_64kb_granule(void)
@ -707,10 +707,10 @@ static inline bool system_supports_64kb_granule(void)
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
val = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_TGRAN64_SHIFT);
ID_AA64MMFR0_EL1_TGRAN64_SHIFT);
return (val >= ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN) &&
(val <= ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX);
return (val >= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN) &&
(val <= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX);
}
static inline bool system_supports_16kb_granule(void)
@ -720,10 +720,10 @@ static inline bool system_supports_16kb_granule(void)
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
val = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_TGRAN16_SHIFT);
ID_AA64MMFR0_EL1_TGRAN16_SHIFT);
return (val >= ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN) &&
(val <= ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX);
return (val >= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN) &&
(val <= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX);
}
static inline bool system_supports_mixed_endian_el0(void)
@ -738,7 +738,7 @@ static inline bool system_supports_mixed_endian(void)
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
val = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_BIGENDEL_SHIFT);
ID_AA64MMFR0_EL1_BIGEND_SHIFT);
return val == 0x1;
}
@ -840,13 +840,13 @@ extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
{
switch (parange) {
case ID_AA64MMFR0_PARANGE_32: return 32;
case ID_AA64MMFR0_PARANGE_36: return 36;
case ID_AA64MMFR0_PARANGE_40: return 40;
case ID_AA64MMFR0_PARANGE_42: return 42;
case ID_AA64MMFR0_PARANGE_44: return 44;
case ID_AA64MMFR0_PARANGE_48: return 48;
case ID_AA64MMFR0_PARANGE_52: return 52;
case ID_AA64MMFR0_EL1_PARANGE_32: return 32;
case ID_AA64MMFR0_EL1_PARANGE_36: return 36;
case ID_AA64MMFR0_EL1_PARANGE_40: return 40;
case ID_AA64MMFR0_EL1_PARANGE_42: return 42;
case ID_AA64MMFR0_EL1_PARANGE_44: return 44;
case ID_AA64MMFR0_EL1_PARANGE_48: return 48;
case ID_AA64MMFR0_EL1_PARANGE_52: return 52;
/*
* A future PE could use a value unknown to the kernel.
* However, by the "D10.1.4 Principles of the ID scheme
@ -868,14 +868,14 @@ static inline bool cpu_has_hw_af(void)
mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
return cpuid_feature_extract_unsigned_field(mmfr1,
ID_AA64MMFR1_HADBS_SHIFT);
ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
}
static inline bool cpu_has_pan(void)
{
u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
return cpuid_feature_extract_unsigned_field(mmfr1,
ID_AA64MMFR1_PAN_SHIFT);
ID_AA64MMFR1_EL1_PAN_SHIFT);
}
#ifdef CONFIG_ARM64_AMU_EXTN
@ -896,8 +896,8 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
int vmid_bits;
vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1,
ID_AA64MMFR1_VMIDBITS_SHIFT);
if (vmid_bits == ID_AA64MMFR1_VMIDBITS_16)
ID_AA64MMFR1_EL1_VMIDBits_SHIFT);
if (vmid_bits == ID_AA64MMFR1_EL1_VMIDBits_16)
return 16;
/*
@ -907,6 +907,8 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
return 8;
}
struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);
extern struct arm64_ftr_override id_aa64mmfr1_override;
extern struct arm64_ftr_override id_aa64pfr0_override;
extern struct arm64_ftr_override id_aa64pfr1_override;


@ -40,7 +40,7 @@
.macro __init_el2_debug
mrs x1, id_aa64dfr0_el1
sbfx x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
sbfx x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
cmp x0, #1
b.lt .Lskip_pmu_\@ // Skip if no PMU present
mrs x0, pmcr_el0 // Disable debug access traps
@ -49,7 +49,7 @@
csel x2, xzr, x0, lt // all PMU counters from EL1
/* Statistical profiling */
ubfx x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
ubfx x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
cbz x0, .Lskip_spe_\@ // Skip if SPE not present
mrs_s x0, SYS_PMBIDR_EL1 // If SPE available at EL2,
@ -65,7 +65,7 @@
.Lskip_spe_\@:
/* Trace buffer */
ubfx x0, x1, #ID_AA64DFR0_TRBE_SHIFT, #4
ubfx x0, x1, #ID_AA64DFR0_EL1_TraceBuffer_SHIFT, #4
cbz x0, .Lskip_trace_\@ // Skip if TraceBuffer is not present
mrs_s x0, SYS_TRBIDR_EL1
@ -83,7 +83,7 @@
/* LORegions */
.macro __init_el2_lor
mrs x1, id_aa64mmfr1_el1
ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
ubfx x0, x1, #ID_AA64MMFR1_EL1_LO_SHIFT, 4
cbz x0, .Lskip_lor_\@
msr_s SYS_LORC_EL1, xzr
.Lskip_lor_\@:
@ -97,7 +97,7 @@
/* GICv3 system register access */
.macro __init_el2_gicv3
mrs x0, id_aa64pfr0_el1
ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
ubfx x0, x0, #ID_AA64PFR0_EL1_GIC_SHIFT, #4
cbz x0, .Lskip_gicv3_\@
mrs_s x0, SYS_ICC_SRE_EL2
@ -132,12 +132,12 @@
/* Disable any fine grained traps */
.macro __init_el2_fgt
mrs x1, id_aa64mmfr0_el1
ubfx x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4
ubfx x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
cbz x1, .Lskip_fgt_\@
mov x0, xzr
mrs x1, id_aa64dfr0_el1
ubfx x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
ubfx x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
cmp x1, #3
b.lt .Lset_debug_fgt_\@
/* Disable PMSNEVFR_EL1 read and write traps */
@ -149,7 +149,7 @@
mov x0, xzr
mrs x1, id_aa64pfr1_el1
ubfx x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
ubfx x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
cbz x1, .Lset_fgt_\@
/* Disable nVHE traps of TPIDR2 and SMPRI */
@ -162,7 +162,7 @@
msr_s SYS_HFGITR_EL2, xzr
mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU
ubfx x1, x1, #ID_AA64PFR0_AMU_SHIFT, #4
ubfx x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
cbz x1, .Lskip_fgt_\@
msr_s SYS_HAFGRTR_EL2, xzr


@ -58,8 +58,9 @@ asmlinkage void call_on_irq_stack(struct pt_regs *regs,
asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs);
void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs);
void do_bti(struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs, unsigned long esr);
void do_el0_bti(struct pt_regs *regs);
void do_el1_bti(struct pt_regs *regs, unsigned long esr);
void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
struct pt_regs *regs);
void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs);
@ -72,7 +73,8 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr);
void do_cp15instr(unsigned long esr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr);
void do_el0_fpac(struct pt_regs *regs, unsigned long esr);
void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
void do_serror(struct pt_regs *regs, unsigned long esr);
void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);


@ -142,7 +142,7 @@ static inline int get_num_brps(void)
u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
return 1 +
cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_BRPS_SHIFT);
ID_AA64DFR0_EL1_BRPs_SHIFT);
}
/* Determine number of WRP registers available. */
@ -151,7 +151,7 @@ static inline int get_num_wrps(void)
u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
return 1 +
cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_WRPS_SHIFT);
ID_AA64DFR0_EL1_WRPs_SHIFT);
}
#endif /* __ASM_BREAKPOINT_H */


@ -119,6 +119,7 @@
#define KERNEL_HWCAP_SME_FA64 __khwcap2_feature(SME_FA64)
#define KERNEL_HWCAP_WFXT __khwcap2_feature(WFXT)
#define KERNEL_HWCAP_EBF16 __khwcap2_feature(EBF16)
#define KERNEL_HWCAP_SVE_EBF16 __khwcap2_feature(SVE_EBF16)
/*
* This yields a mask that user programs can use to figure out what


@ -16,9 +16,9 @@
static inline u64 kvm_get_parange(u64 mmfr0)
{
u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_PARANGE_SHIFT);
if (parange > ID_AA64MMFR0_PARANGE_MAX)
parange = ID_AA64MMFR0_PARANGE_MAX;
ID_AA64MMFR0_EL1_PARANGE_SHIFT);
if (parange > ID_AA64MMFR0_EL1_PARANGE_MAX)
parange = ID_AA64MMFR0_EL1_PARANGE_MAX;
return parange;
}


@ -58,11 +58,20 @@ static inline bool is_forbidden_offset_for_adrp(void *place)
}
struct plt_entry get_plt_entry(u64 dst, void *pc);
bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);
static inline bool plt_entry_is_initialized(const struct plt_entry *e)
static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
const char *name)
{
return e->adrp || e->add || e->br;
const Elf_Shdr *s, *se;
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
if (strcmp(name, secstrs + s->sh_name) == 0)
return s;
}
return NULL;
}
#endif /* __ASM_MODULE_H */


@ -410,7 +410,7 @@ long get_tagged_addr_ctrl(struct task_struct *task);
* The top of the current task's task stack
*/
#define current_top_of_stack() ((unsigned long)current->stack + THREAD_SIZE)
#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1, NULL))
#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1))
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */


@ -43,22 +43,5 @@ unsigned long do_sdei_event(struct pt_regs *regs,
unsigned long sdei_arch_get_entry_point(int conduit);
#define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x)
struct stack_info;
bool _on_sdei_stack(unsigned long sp, unsigned long size,
struct stack_info *info);
static inline bool on_sdei_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
if (!IS_ENABLED(CONFIG_VMAP_STACK))
return false;
if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
return false;
if (in_nmi())
return _on_sdei_stack(sp, size, info);
return false;
}
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SDEI_H */


@ -22,39 +22,86 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
static inline bool on_irq_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
static inline struct stack_info stackinfo_get_irq(void)
{
unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
unsigned long high = low + IRQ_STACK_SIZE;
return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
return (struct stack_info) {
.low = low,
.high = high,
};
}
static inline bool on_task_stack(const struct task_struct *tsk,
unsigned long sp, unsigned long size,
struct stack_info *info)
static inline bool on_irq_stack(unsigned long sp, unsigned long size)
{
struct stack_info info = stackinfo_get_irq();
return stackinfo_on_stack(&info, sp, size);
}
static inline struct stack_info stackinfo_get_task(const struct task_struct *tsk)
{
unsigned long low = (unsigned long)task_stack_page(tsk);
unsigned long high = low + THREAD_SIZE;
return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
return (struct stack_info) {
.low = low,
.high = high,
};
}
static inline bool on_task_stack(const struct task_struct *tsk,
unsigned long sp, unsigned long size)
{
struct stack_info info = stackinfo_get_task(tsk);
return stackinfo_on_stack(&info, sp, size);
}
#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
static inline struct stack_info stackinfo_get_overflow(void)
{
unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
unsigned long high = low + OVERFLOW_STACK_SIZE;
return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
return (struct stack_info) {
.low = low,
.high = high,
};
}
#else
static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
struct stack_info *info) { return false; }
#define stackinfo_get_overflow() stackinfo_get_unknown()
#endif
#if defined(CONFIG_ARM_SDE_INTERFACE) && defined(CONFIG_VMAP_STACK)
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
static inline struct stack_info stackinfo_get_sdei_normal(void)
{
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
return (struct stack_info) {
.low = low,
.high = high,
};
}
static inline struct stack_info stackinfo_get_sdei_critical(void)
{
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
return (struct stack_info) {
.low = low,
.high = high,
};
}
#else
#define stackinfo_get_sdei_normal() stackinfo_get_unknown()
#define stackinfo_get_sdei_critical() stackinfo_get_unknown()
#endif
#endif /* __ASM_STACKTRACE_H */


@ -2,13 +2,6 @@
/*
* Common arm64 stack unwinder code.
*
* To implement a new arm64 stack unwinder:
* 1) Include this header
*
* 2) Call into unwind_next_common() from your top level unwind
* function, passing it the validation and translation callbacks
* (though the later can be NULL if no translation is required).
*
* See: arch/arm64/kernel/stacktrace.c for the reference implementation.
*
* Copyright (C) 2012 ARM Ltd.
@ -16,78 +9,60 @@
#ifndef __ASM_STACKTRACE_COMMON_H
#define __ASM_STACKTRACE_COMMON_H
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/kprobes.h>
#include <linux/types.h>
enum stack_type {
STACK_TYPE_UNKNOWN,
STACK_TYPE_TASK,
STACK_TYPE_IRQ,
STACK_TYPE_OVERFLOW,
STACK_TYPE_SDEI_NORMAL,
STACK_TYPE_SDEI_CRITICAL,
STACK_TYPE_HYP,
__NR_STACK_TYPES
};
struct stack_info {
unsigned long low;
unsigned long high;
enum stack_type type;
};
/*
* A snapshot of a frame record or fp/lr register values, along with some
* accounting information necessary for robust unwinding.
/**
* struct unwind_state - state used for robust unwinding.
*
* @fp: The fp value in the frame record (or the real fp)
* @pc: The lr value in the frame record (or the real lr)
*
* @stacks_done: Stacks which have been entirely unwound, for which it is no
* longer valid to unwind to.
*
* @prev_fp: The fp that pointed to this frame record, or a synthetic value
* of 0. This is used to ensure that within a stack, each
* subsequent frame record is at an increasing address.
* @prev_type: The type of stack this frame record was on, or a synthetic
* value of STACK_TYPE_UNKNOWN. This is used to detect a
* transition from one stack to another.
*
* @kr_cur: When KRETPROBES is selected, holds the kretprobe instance
* associated with the most recently encountered replacement lr
* value.
*
* @task: The task being unwound.
*
* @stack: The stack currently being unwound.
* @stacks: An array of stacks which can be unwound.
* @nr_stacks: The number of stacks in @stacks.
*/
struct unwind_state {
unsigned long fp;
unsigned long pc;
DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
unsigned long prev_fp;
enum stack_type prev_type;
#ifdef CONFIG_KRETPROBES
struct llist_node *kr_cur;
#endif
struct task_struct *task;
struct stack_info stack;
struct stack_info *stacks;
int nr_stacks;
};
static inline bool on_stack(unsigned long sp, unsigned long size,
unsigned long low, unsigned long high,
enum stack_type type, struct stack_info *info)
static inline struct stack_info stackinfo_get_unknown(void)
{
if (!low)
return (struct stack_info) {
.low = 0,
.high = 0,
};
}
static inline bool stackinfo_on_stack(const struct stack_info *info,
unsigned long sp, unsigned long size)
{
if (!info->low)
return false;
if (sp < low || sp + size < sp || sp + size > high)
if (sp < info->low || sp + size < sp || sp + size > info->high)
return false;
if (info) {
info->low = low;
info->high = high;
info->type = type;
}
return true;
}
@ -99,99 +74,101 @@ static inline void unwind_init_common(struct unwind_state *state,
state->kr_cur = NULL;
#endif
/*
* Prime the first unwind.
*
* In unwind_next() we'll check that the FP points to a valid stack,
* which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
* treated as a transition to whichever stack that happens to be. The
* prev_fp value won't be used, but we set it to 0 such that it is
* definitely not an accessible stack address.
*/
bitmap_zero(state->stacks_done, __NR_STACK_TYPES);
state->prev_fp = 0;
state->prev_type = STACK_TYPE_UNKNOWN;
state->stack = stackinfo_get_unknown();
}
/*
* stack_trace_translate_fp_fn() - Translates a non-kernel frame pointer to
* a kernel address.
*
* @fp: the frame pointer to be updated to its kernel address.
* @type: the stack type associated with frame pointer @fp
*
* Returns true and success and @fp is updated to the corresponding
* kernel virtual address; otherwise returns false.
*/
typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp,
enum stack_type type);
/*
* on_accessible_stack_fn() - Check whether a stack range is on any
* of the possible stacks.
*
* @tsk: task whose stack is being unwound
* @sp: stack address being checked
* @size: size of the stack range being checked
* @info: stack unwinding context
*/
typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk,
unsigned long sp, unsigned long size,
struct stack_info *info);
static inline int unwind_next_common(struct unwind_state *state,
struct stack_info *info,
on_accessible_stack_fn accessible,
stack_trace_translate_fp_fn translate_fp)
static struct stack_info *unwind_find_next_stack(const struct unwind_state *state,
unsigned long sp,
unsigned long size)
{
unsigned long fp = state->fp, kern_fp = fp;
struct task_struct *tsk = state->task;
for (int i = 0; i < state->nr_stacks; i++) {
struct stack_info *info = &state->stacks[i];
if (stackinfo_on_stack(info, sp, size))
return info;
}
return NULL;
}
/**
* unwind_consume_stack() - Check if an object is on an accessible stack,
* updating stack boundaries so that future unwind steps cannot consume this
* object again.
*
* @state: the current unwind state.
* @sp: the base address of the object.
* @size: the size of the object.
*
* Return: 0 upon success, an error code otherwise.
*/
static inline int unwind_consume_stack(struct unwind_state *state,
unsigned long sp,
unsigned long size)
{
struct stack_info *next;
if (stackinfo_on_stack(&state->stack, sp, size))
goto found;
next = unwind_find_next_stack(state, sp, size);
if (!next)
return -EINVAL;
/*
* Stack transitions are strictly one-way, and once we've
* transitioned from one stack to another, it's never valid to
* unwind back to the old stack.
*
* Remove the current stack from the list of stacks so that it cannot
* be found on a subsequent transition.
*
* Note that stacks can nest in several valid orders, e.g.
*
* TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
* TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
* HYP -> OVERFLOW
*
* ... so we do not check the specific order of stack
* transitions.
*/
state->stack = *next;
*next = stackinfo_get_unknown();
found:
/*
* Future unwind steps can only consume stack above this frame record.
* Update the current stack to start immediately above it.
*/
state->stack.low = sp + size;
return 0;
}
/**
* unwind_next_frame_record() - Unwind to the next frame record.
*
* @state: the current unwind state.
*
* Return: 0 upon success, an error code otherwise.
*/
static inline int
unwind_next_frame_record(struct unwind_state *state)
{
unsigned long fp = state->fp;
int err;
if (fp & 0x7)
return -EINVAL;
if (!accessible(tsk, fp, 16, info))
return -EINVAL;
if (test_bit(info->type, state->stacks_done))
return -EINVAL;
err = unwind_consume_stack(state, fp, 16);
if (err)
return err;
/*
* If fp is not from the current address space perform the necessary
* translation before dereferencing it to get the next fp.
* Record this frame record's values.
*/
if (translate_fp && !translate_fp(&kern_fp, info->type))
return -EINVAL;
/*
* As stacks grow downward, any valid record on the same stack must be
* at a strictly higher address than the prior record.
*
* Stacks can nest in several valid orders, e.g.
*
* TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
* TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
* HYP -> OVERFLOW
*
* ... but the nesting itself is strict. Once we transition from one
* stack to another, it's never valid to unwind back to that first
* stack.
*/
if (info->type == state->prev_type) {
if (fp <= state->prev_fp)
return -EINVAL;
} else {
__set_bit(state->prev_type, state->stacks_done);
}
/*
* Record this frame record's values and location. The prev_fp and
* prev_type are only meaningful to the next unwind_next() invocation.
*/
state->fp = READ_ONCE(*(unsigned long *)(kern_fp));
state->pc = READ_ONCE(*(unsigned long *)(kern_fp + 8));
state->prev_fp = fp;
state->prev_type = info->type;
state->fp = READ_ONCE(*(unsigned long *)(fp));
state->pc = READ_ONCE(*(unsigned long *)(fp + 8));
return 0;
}
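
With the state now carrying its own stack list, a minimal unwinder
reduces to a loop over unwind_next_frame_record(); a hedged sketch (the
consume_pc callback is illustrative, and the caller is assumed to have
run unwind_init_common() and populated state->stacks; see
arch/arm64/kernel/stacktrace.c for the reference implementation):

static void example_unwind(struct unwind_state *state,
			   bool (*consume_pc)(unsigned long pc))
{
	/* Walk frame records until the callback stops us or we step
	 * off every known stack. */
	while (consume_pc(state->pc)) {
		if (unwind_next_frame_record(state))
			break;
	}
}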


@ -20,8 +20,8 @@
#include <asm/stacktrace/common.h>
/*
* kvm_nvhe_unwind_init - Start an unwind from the given nVHE HYP fp and pc
/**
* kvm_nvhe_unwind_init() - Start an unwind from the given nVHE HYP fp and pc
*
* @state : unwind_state to initialize
* @fp : frame pointer at which to start the unwinding.


@ -190,19 +190,6 @@
#define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1)
#define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2)
#define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0)
#define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1)
#define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0)
#define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1)
#define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4)
#define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5)
#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0)
#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2)
#define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1)
#define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5)
#define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6)
@ -436,19 +423,11 @@
#define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6)
#define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
#define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 0, 4)
#define SYS_SCXTNUM_EL1 sys_reg(3, 0, 13, 0, 7)
#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0)
#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
#define SMIDR_EL1_IMPLEMENTER_SHIFT 24
#define SMIDR_EL1_SMPS_SHIFT 15
#define SMIDR_EL1_AFFINITY_SHIFT 0
#define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0)
#define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1)
@ -537,7 +516,6 @@
#define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5)
#define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6)
#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
#define SYS_HCRX_EL2 sys_reg(3, 4, 1, 2, 2)
#define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4)
#define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5)
#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
@ -690,164 +668,30 @@
#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8))
/* id_aa64pfr0 */
#define ID_AA64PFR0_CSV3_SHIFT 60
#define ID_AA64PFR0_CSV2_SHIFT 56
#define ID_AA64PFR0_DIT_SHIFT 48
#define ID_AA64PFR0_AMU_SHIFT 44
#define ID_AA64PFR0_MPAM_SHIFT 40
#define ID_AA64PFR0_SEL2_SHIFT 36
#define ID_AA64PFR0_SVE_SHIFT 32
#define ID_AA64PFR0_RAS_SHIFT 28
#define ID_AA64PFR0_GIC_SHIFT 24
#define ID_AA64PFR0_ASIMD_SHIFT 20
#define ID_AA64PFR0_FP_SHIFT 16
#define ID_AA64PFR0_EL3_SHIFT 12
#define ID_AA64PFR0_EL2_SHIFT 8
#define ID_AA64PFR0_EL1_SHIFT 4
#define ID_AA64PFR0_EL0_SHIFT 0
#define ID_AA64PFR0_AMU 0x1
#define ID_AA64PFR0_SVE 0x1
#define ID_AA64PFR0_RAS_V1 0x1
#define ID_AA64PFR0_RAS_V1P1 0x2
#define ID_AA64PFR0_FP_NI 0xf
#define ID_AA64PFR0_FP_SUPPORTED 0x0
#define ID_AA64PFR0_ASIMD_NI 0xf
#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0
#define ID_AA64PFR0_ELx_64BIT_ONLY 0x1
#define ID_AA64PFR0_ELx_32BIT_64BIT 0x2
/* id_aa64pfr1 */
#define ID_AA64PFR1_SME_SHIFT 24
#define ID_AA64PFR1_MPAMFRAC_SHIFT 16
#define ID_AA64PFR1_RASFRAC_SHIFT 12
#define ID_AA64PFR1_MTE_SHIFT 8
#define ID_AA64PFR1_SSBS_SHIFT 4
#define ID_AA64PFR1_BT_SHIFT 0
#define ID_AA64PFR1_SSBS_PSTATE_NI 0
#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1
#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
#define ID_AA64PFR1_BT_BTI 0x1
#define ID_AA64PFR1_SME 1
#define ID_AA64PFR1_MTE_NI 0x0
#define ID_AA64PFR1_MTE_EL0 0x1
#define ID_AA64PFR1_MTE 0x2
#define ID_AA64PFR1_MTE_ASYMM 0x3
#define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1
#define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2
/* id_aa64mmfr0 */
#define ID_AA64MMFR0_ECV_SHIFT 60
#define ID_AA64MMFR0_FGT_SHIFT 56
#define ID_AA64MMFR0_EXS_SHIFT 44
#define ID_AA64MMFR0_TGRAN4_2_SHIFT 40
#define ID_AA64MMFR0_TGRAN64_2_SHIFT 36
#define ID_AA64MMFR0_TGRAN16_2_SHIFT 32
#define ID_AA64MMFR0_TGRAN4_SHIFT 28
#define ID_AA64MMFR0_TGRAN64_SHIFT 24
#define ID_AA64MMFR0_TGRAN16_SHIFT 20
#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16
#define ID_AA64MMFR0_SNSMEM_SHIFT 12
#define ID_AA64MMFR0_BIGENDEL_SHIFT 8
#define ID_AA64MMFR0_ASID_SHIFT 4
#define ID_AA64MMFR0_PARANGE_SHIFT 0
#define ID_AA64MMFR0_ASID_8 0x0
#define ID_AA64MMFR0_ASID_16 0x2
#define ID_AA64MMFR0_TGRAN4_NI 0xf
#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_TGRAN64_NI 0xf
#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_TGRAN16_NI 0x0
#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN 0x1
#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX 0xf
#define ID_AA64MMFR0_PARANGE_32 0x0
#define ID_AA64MMFR0_PARANGE_36 0x1
#define ID_AA64MMFR0_PARANGE_40 0x2
#define ID_AA64MMFR0_PARANGE_42 0x3
#define ID_AA64MMFR0_PARANGE_44 0x4
#define ID_AA64MMFR0_PARANGE_48 0x5
#define ID_AA64MMFR0_PARANGE_52 0x6
#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN 0x1
#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX 0xf
#define ARM64_MIN_PARANGE_BITS 32
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT 0x0
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE 0x1
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN 0x2
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7
#ifdef CONFIG_ARM64_PA_BITS_52
#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52
#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_52
#else
#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_48
#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48
#endif
/* id_aa64mmfr1 */
#define ID_AA64MMFR1_ECBHB_SHIFT 60
#define ID_AA64MMFR1_TIDCP1_SHIFT 52
#define ID_AA64MMFR1_HCX_SHIFT 40
#define ID_AA64MMFR1_AFP_SHIFT 44
#define ID_AA64MMFR1_ETS_SHIFT 36
#define ID_AA64MMFR1_TWED_SHIFT 32
#define ID_AA64MMFR1_XNX_SHIFT 28
#define ID_AA64MMFR1_SPECSEI_SHIFT 24
#define ID_AA64MMFR1_PAN_SHIFT 20
#define ID_AA64MMFR1_LOR_SHIFT 16
#define ID_AA64MMFR1_HPD_SHIFT 12
#define ID_AA64MMFR1_VHE_SHIFT 8
#define ID_AA64MMFR1_VMIDBITS_SHIFT 4
#define ID_AA64MMFR1_HADBS_SHIFT 0
#define ID_AA64MMFR1_VMIDBITS_8 0
#define ID_AA64MMFR1_VMIDBITS_16 2
#define ID_AA64MMFR1_TIDCP1_NI 0
#define ID_AA64MMFR1_TIDCP1_IMP 1
/* id_aa64mmfr2 */
#define ID_AA64MMFR2_E0PD_SHIFT 60
#define ID_AA64MMFR2_EVT_SHIFT 56
#define ID_AA64MMFR2_BBM_SHIFT 52
#define ID_AA64MMFR2_TTL_SHIFT 48
#define ID_AA64MMFR2_FWB_SHIFT 40
#define ID_AA64MMFR2_IDS_SHIFT 36
#define ID_AA64MMFR2_AT_SHIFT 32
#define ID_AA64MMFR2_ST_SHIFT 28
#define ID_AA64MMFR2_NV_SHIFT 24
#define ID_AA64MMFR2_CCIDX_SHIFT 20
#define ID_AA64MMFR2_LVA_SHIFT 16
#define ID_AA64MMFR2_IESB_SHIFT 12
#define ID_AA64MMFR2_LSM_SHIFT 8
#define ID_AA64MMFR2_UAO_SHIFT 4
#define ID_AA64MMFR2_CNP_SHIFT 0
/* id_aa64dfr0 */
#define ID_AA64DFR0_MTPMU_SHIFT 48
#define ID_AA64DFR0_TRBE_SHIFT 44
#define ID_AA64DFR0_TRACE_FILT_SHIFT 40
#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36
#define ID_AA64DFR0_PMSVER_SHIFT 32
#define ID_AA64DFR0_CTX_CMPS_SHIFT 28
#define ID_AA64DFR0_WRPS_SHIFT 20
#define ID_AA64DFR0_BRPS_SHIFT 12
#define ID_AA64DFR0_PMUVER_SHIFT 8
#define ID_AA64DFR0_TRACEVER_SHIFT 4
#define ID_AA64DFR0_DEBUGVER_SHIFT 0
#define ID_AA64DFR0_PMUVER_8_0 0x1
#define ID_AA64DFR0_PMUVER_8_1 0x4
#define ID_AA64DFR0_PMUVER_8_4 0x5
#define ID_AA64DFR0_PMUVER_8_5 0x6
#define ID_AA64DFR0_PMUVER_8_7 0x7
#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf
#define ID_AA64DFR0_PMSVER_8_2 0x1
#define ID_AA64DFR0_PMSVER_8_3 0x2
#define ID_DFR0_PERFMON_SHIFT 24
#define ID_DFR0_PERFMON_8_0 0x3
@ -955,20 +799,20 @@
#define ID_PFR1_PROGMOD_SHIFT 0
#if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX
#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN4_2_SHIFT
#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX
#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT
#elif defined(CONFIG_ARM64_16K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX
#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN16_2_SHIFT
#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN16_SHIFT
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX
#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT
#elif defined(CONFIG_ARM64_64K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX
#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN64_2_SHIFT
#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN64_SHIFT
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX
#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT
#endif
#define MVFR2_FPMISC_SHIFT 4
@ -1028,9 +872,6 @@
#define TRFCR_ELx_ExTRE BIT(1)
#define TRFCR_ELx_E0TRE BIT(0)
/* HCRX_EL2 definitions */
#define HCRX_EL2_SMPME_MASK (1 << 5)
/* GIC Hypervisor interface registers */
/* ICH_MISR_EL2 bit definitions */
#define ICH_MISR_EOI (1 << 0)


@ -18,7 +18,7 @@
struct pt_regs;
void die(const char *msg, struct pt_regs *regs, int err);
void die(const char *msg, struct pt_regs *regs, long err);
struct siginfo;
void arm64_notify_die(const char *str, struct pt_regs *regs,


@ -26,6 +26,9 @@
(void *)(vdso_offset_##name - VDSO_LBASE + (unsigned long)(base)); \
})
extern char vdso_start[], vdso_end[];
extern char vdso32_start[], vdso32_end[];
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_H */


@ -7,8 +7,10 @@
#ifndef __ASSEMBLY__
#include <asm/alternative.h>
#include <asm/barrier.h>
#include <asm/unistd.h>
#include <asm/sysreg.h>
#define VDSO_HAS_CLOCK_GETRES 1
@ -78,11 +80,20 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
return 0;
/*
* This isb() is required to prevent that the counter value
* If FEAT_ECV is available, use the self-synchronizing counter.
* Otherwise the isb is required to prevent that the counter value
* is speculated.
*/
isb();
asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
*/
asm volatile(
ALTERNATIVE("isb\n"
"mrs %0, cntvct_el0",
"nop\n"
__mrs_s("%0", SYS_CNTVCTSS_EL0),
ARM64_HAS_ECV)
: "=r" (res)
:
: "memory");
arch_counter_enforce_ordering(res);
return res;


@ -92,5 +92,6 @@
#define HWCAP2_SME_FA64 (1 << 30)
#define HWCAP2_WFXT (1UL << 31)
#define HWCAP2_EBF16 (1UL << 32)
#define HWCAP2_SVE_EBF16 (1UL << 33)
#endif /* _UAPI__ASM_HWCAP_H */


@ -10,11 +10,14 @@
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elf.h>
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/module.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <linux/stop_machine.h>
#define __ALT_PTR(a, f) ((void *)&(a)->f + (a)->f)
@ -192,6 +195,30 @@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_modu
}
}
void apply_alternatives_vdso(void)
{
struct alt_region region;
const struct elf64_hdr *hdr;
const struct elf64_shdr *shdr;
const struct elf64_shdr *alt;
DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);
bitmap_fill(all_capabilities, ARM64_NPATCHABLE);
hdr = (struct elf64_hdr *)vdso_start;
shdr = (void *)hdr + hdr->e_shoff;
alt = find_section(hdr, shdr, ".altinstructions");
if (!alt)
return;
region = (struct alt_region){
.begin = (void *)hdr + alt->sh_offset,
.end = (void *)hdr + alt->sh_offset + alt->sh_size,
};
__apply_alternatives(&region, false, &all_capabilities[0]);
}
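The find_section() helper used here is the one moved out of module.c into a header (its old module.c body is removed further down); as a header helper it is essentially the following, the inline qualifier being the only assumption:

static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
					   const Elf_Shdr *sechdrs,
					   const char *name)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(name, secstrs + s->sh_name) == 0)
			return s;
	}

	return NULL;
}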
/*
* We might be patching the stop_machine state machine, so implement a
* really simple polling protocol here.
@ -225,6 +252,7 @@ static int __apply_alternatives_multi_stop(void *unused)
void __init apply_alternatives_all(void)
{
apply_alternatives_vdso();
/* better not try code patching on a live SMP system */
stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}


@ -121,6 +121,22 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
static DEFINE_RAW_SPINLOCK(reg_user_mask_modification);
static void __maybe_unused
cpu_clear_bf16_from_user_emulation(const struct arm64_cpu_capabilities *__unused)
{
struct arm64_ftr_reg *regp;
regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);
if (!regp)
return;
raw_spin_lock(&reg_user_mask_modification);
if (regp->user_mask & ID_AA64ISAR1_EL1_BF16_MASK)
regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
raw_spin_unlock(&reg_user_mask_modification);
}
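Clearing the BF16 field from the register's user_mask hides the feature from MRS emulation and, via has_user_cpuid_feature() below, from the hwcap path, so affected parts stop advertising the BF16/EBF16 hwcaps. Userspace observes this through the usual auxiliary-vector check; a sketch using the HWCAP2_EBF16 bit from the UAPI header shown earlier:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_EBF16
#define HWCAP2_EBF16	(1UL << 32)	/* matches the UAPI definition */
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("EBF16: %s\n", (hwcap2 & HWCAP2_EBF16) ? "present" : "absent");
	return 0;
}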
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
.matches = is_affected_midr_range, \
.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
@ -691,6 +707,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2658417
{
.desc = "ARM erratum 2658417",
.capability = ARM64_WORKAROUND_2658417,
/* Cortex-A510 r0p0 - r1p1 */
ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
.cpu_enable = cpu_clear_bf16_from_user_emulation,
},
#endif
{
}


@ -243,35 +243,35 @@ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_AMU_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_MPAM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SEL2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_DIT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AMU_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_MPAM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SEL2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SVE_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_RAS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_GIC_SHIFT, 4, 0),
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, ID_AA64PFR0_EL1_AdvSIMD_NI),
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_FP_SHIFT, 4, ID_AA64PFR0_EL1_FP_NI),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL1_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL0_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SME_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SME_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAM_frac_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_RAS_frac_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_SHIFT, 4, ID_AA64PFR1_EL1_MTE_NI),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, ID_AA64PFR1_EL1_SSBS_NI),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_BT_SHIFT, 4, 0),
ARM64_FTR_END,
};
@ -316,9 +316,9 @@ static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_FGT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_EXS_SHIFT, 4, 0),
/*
* Page size not being supported at Stage-2 is not fatal. You
* just give up KVM if PAGE_SIZE isn't supported there. Go fix
@ -334,9 +334,9 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
* fields are inconsistent across vCPUs, then it isn't worth
* trying to bring KVM up.
*/
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_2_SHIFT, 4, 1),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_2_SHIFT, 4, 1),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_2_SHIFT, 4, 1),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT, 4, 1),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT, 4, 1),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT, 4, 1),
/*
* We already refuse to boot CPUs that don't support our configured
* page size, so we can only detect mismatches for a page size other
@ -344,55 +344,55 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
* exist in the wild so, even though we don't like it, we'll have to go
* along with it and treat them as non-strict.
*/
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN4_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN4_NI),
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN64_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN64_NI),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN16_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN16_NI),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT, 4, 0),
/* Linux shouldn't care about secure memory */
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_SNSMEM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGEND_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT, 4, 0),
/*
* Differing PARange is fine as long as all peripherals and memory are mapped
* within the minimum PARange of all CPUs
*/
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_PARANGE_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TIDCP1_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_SPECSEI_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TIDCP1_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ETS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TWED_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_XNX_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1_SpecSEI_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_PAN_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_LO_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HPDS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VH_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VMIDBits_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HAFDBS_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EVT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_BBM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_TTL_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IDS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_ST_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_NV_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CCIDX_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_E0PD_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_EVT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_BBM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_TTL_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_FWB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IDS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_AT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_ST_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_NV_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CCIDX_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_VARange_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IESB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_LSM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_UAO_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CnP_SHIFT, 4, 0),
ARM64_FTR_END,
};
@ -434,17 +434,17 @@ static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DOUBLELOCK_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_DoubleLock_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_PMSVer_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_CTX_CMPs_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_WRPs_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_BRPs_SHIFT, 4, 0),
/*
* We can instantiate multiple PMU instances with different levels
* of support.
*/
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_EL1_PMUVer_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_EL1_DebugVer_SHIFT, 4, 0x6),
ARM64_FTR_END,
};
@ -750,7 +750,7 @@ static struct arm64_ftr_reg *get_arm64_ftr_reg_nowarn(u32 sys_id)
* returns - Upon success, matching ftr_reg entry for id.
* - NULL on failure but with an WARN_ON().
*/
static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
struct arm64_ftr_reg *reg;
@ -1401,17 +1401,40 @@ feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
return val >= entry->min_field_value;
}
static u64
read_scoped_sysreg(const struct arm64_cpu_capabilities *entry, int scope)
{
WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
if (scope == SCOPE_SYSTEM)
return read_sanitised_ftr_reg(entry->sys_reg);
else
return __read_sysreg_by_encoding(entry->sys_reg);
}
static bool
has_user_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
{
int mask;
struct arm64_ftr_reg *regp;
u64 val = read_scoped_sysreg(entry, scope);
regp = get_arm64_ftr_reg(entry->sys_reg);
if (!regp)
return false;
mask = cpuid_feature_extract_unsigned_field_width(regp->user_mask,
entry->field_pos,
entry->field_width);
if (!mask)
return false;
return feature_matches(val, entry);
}
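The mask check is what ties the ELF hwcaps to the per-register user_mask: cpuid_feature_extract_unsigned_field_width() boils down to a shift-and-mask. Conceptually (a sketch of the semantics, not the kernel helper itself):

static inline unsigned long extract_unsigned_field(unsigned long reg,
						   unsigned int shift,
						   unsigned int width)
{
	return (reg >> shift) & ((1UL << width) - 1);
}

If the field in user_mask is zero, as after cpu_clear_bf16_from_user_emulation() above, the capability can never match for userspace, whatever the hardware reports.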
static bool
has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
{
u64 val;
WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
if (scope == SCOPE_SYSTEM)
val = read_sanitised_ftr_reg(entry->sys_reg);
else
val = __read_sysreg_by_encoding(entry->sys_reg);
u64 val = read_scoped_sysreg(entry, scope);
return feature_matches(val, entry);
}
@ -1492,7 +1515,7 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus
u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
return cpuid_feature_extract_signed_field(pfr0,
ID_AA64PFR0_FP_SHIFT) < 0;
ID_AA64PFR0_EL1_FP_SHIFT) < 0;
}
static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
@ -1571,7 +1594,7 @@ bool kaslr_requires_kpti(void)
if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
if (cpuid_feature_extract_unsigned_field(mmfr2,
ID_AA64MMFR2_E0PD_SHIFT))
ID_AA64MMFR2_EL1_E0PD_SHIFT))
return false;
}
@ -2093,7 +2116,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
.matches = has_useable_gicv3_cpuif,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.field_pos = ID_AA64PFR0_GIC_SHIFT,
.field_pos = ID_AA64PFR0_EL1_GIC_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
.min_field_value = 1,
@ -2104,7 +2127,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64MMFR0_EL1,
.field_pos = ID_AA64MMFR0_ECV_SHIFT,
.field_pos = ID_AA64MMFR0_EL1_ECV_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
.min_field_value = 1,
@ -2116,7 +2139,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64MMFR1_EL1,
.field_pos = ID_AA64MMFR1_PAN_SHIFT,
.field_pos = ID_AA64MMFR1_EL1_PAN_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
.min_field_value = 1,
@ -2130,7 +2153,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64MMFR1_EL1,
.field_pos = ID_AA64MMFR1_PAN_SHIFT,
.field_pos = ID_AA64MMFR1_EL1_PAN_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
.min_field_value = 3,
@ -2168,9 +2191,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_32bit_el0,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR0_EL0_SHIFT,
.field_pos = ID_AA64PFR0_EL1_EL0_SHIFT,
.field_width = 4,
.min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
.min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT,
},
#ifdef CONFIG_KVM
{
@ -2180,9 +2203,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR0_EL1_SHIFT,
.field_pos = ID_AA64PFR0_EL1_EL1_SHIFT,
.field_width = 4,
.min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
.min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT,
},
{
.desc = "Protected KVM",
@ -2201,7 +2224,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
* more details.
*/
.sys_reg = SYS_ID_AA64PFR0_EL1,
.field_pos = ID_AA64PFR0_CSV3_SHIFT,
.field_pos = ID_AA64PFR0_EL1_CSV3_SHIFT,
.field_width = 4,
.min_field_value = 1,
.matches = unmap_kernel_at_el0,
@ -2244,9 +2267,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_SVE,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR0_SVE_SHIFT,
.field_pos = ID_AA64PFR0_EL1_SVE_SHIFT,
.field_width = 4,
.min_field_value = ID_AA64PFR0_SVE,
.min_field_value = ID_AA64PFR0_EL1_SVE_IMP,
.matches = has_cpuid_feature,
.cpu_enable = sve_kernel_enable,
},
@ -2259,9 +2282,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR0_RAS_SHIFT,
.field_pos = ID_AA64PFR0_EL1_RAS_SHIFT,
.field_width = 4,
.min_field_value = ID_AA64PFR0_RAS_V1,
.min_field_value = ID_AA64PFR0_EL1_RAS_IMP,
.cpu_enable = cpu_clear_disr,
},
#endif /* CONFIG_ARM64_RAS_EXTN */
@ -2278,9 +2301,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_amu,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR0_AMU_SHIFT,
.field_pos = ID_AA64PFR0_EL1_AMU_SHIFT,
.field_width = 4,
.min_field_value = ID_AA64PFR0_AMU,
.min_field_value = ID_AA64PFR0_EL1_AMU_IMP,
.cpu_enable = cpu_amu_enable,
},
#endif /* CONFIG_ARM64_AMU_EXTN */
@ -2303,7 +2326,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_STAGE2_FWB,
.sys_reg = SYS_ID_AA64MMFR2_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64MMFR2_FWB_SHIFT,
.field_pos = ID_AA64MMFR2_EL1_FWB_SHIFT,
.field_width = 4,
.min_field_value = 1,
.matches = has_cpuid_feature,
@ -2314,7 +2337,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_ARMv8_4_TTL,
.sys_reg = SYS_ID_AA64MMFR2_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64MMFR2_TTL_SHIFT,
.field_pos = ID_AA64MMFR2_EL1_TTL_SHIFT,
.field_width = 4,
.min_field_value = 1,
.matches = has_cpuid_feature,
@ -2344,7 +2367,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HW_DBM,
.sys_reg = SYS_ID_AA64MMFR1_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64MMFR1_HADBS_SHIFT,
.field_pos = ID_AA64MMFR1_EL1_HAFDBS_SHIFT,
.field_width = 4,
.min_field_value = 2,
.matches = has_hw_dbm,
@ -2367,10 +2390,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR1_EL1,
.field_pos = ID_AA64PFR1_SSBS_SHIFT,
.field_pos = ID_AA64PFR1_EL1_SSBS_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
.min_field_value = ID_AA64PFR1_EL1_SSBS_IMP,
},
#ifdef CONFIG_ARM64_CNP
{
@ -2380,7 +2403,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_useable_cnp,
.sys_reg = SYS_ID_AA64MMFR2_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64MMFR2_CNP_SHIFT,
.field_pos = ID_AA64MMFR2_EL1_CnP_SHIFT,
.field_width = 4,
.min_field_value = 1,
.cpu_enable = cpu_enable_cnp,
@ -2485,7 +2508,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
.matches = can_use_gic_priorities,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.field_pos = ID_AA64PFR0_GIC_SHIFT,
.field_pos = ID_AA64PFR0_EL1_GIC_SHIFT,
.field_width = 4,
.sign = FTR_UNSIGNED,
.min_field_value = 1,
@ -2499,7 +2522,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sys_reg = SYS_ID_AA64MMFR2_EL1,
.sign = FTR_UNSIGNED,
.field_width = 4,
.field_pos = ID_AA64MMFR2_E0PD_SHIFT,
.field_pos = ID_AA64MMFR2_EL1_E0PD_SHIFT,
.matches = has_cpuid_feature,
.min_field_value = 1,
.cpu_enable = cpu_enable_e0pd,
@ -2528,9 +2551,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
.cpu_enable = bti_enable,
.sys_reg = SYS_ID_AA64PFR1_EL1,
.field_pos = ID_AA64PFR1_BT_SHIFT,
.field_pos = ID_AA64PFR1_EL1_BT_SHIFT,
.field_width = 4,
.min_field_value = ID_AA64PFR1_BT_BTI,
.min_field_value = ID_AA64PFR1_EL1_BT_IMP,
.sign = FTR_UNSIGNED,
},
#endif
@ -2541,9 +2564,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR1_EL1,
.field_pos = ID_AA64PFR1_MTE_SHIFT,
.field_pos = ID_AA64PFR1_EL1_MTE_SHIFT,
.field_width = 4,
.min_field_value = ID_AA64PFR1_MTE,
.min_field_value = ID_AA64PFR1_EL1_MTE_MTE2,
.sign = FTR_UNSIGNED,
.cpu_enable = cpu_enable_mte,
},
@ -2553,9 +2576,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR1_EL1,
.field_pos = ID_AA64PFR1_MTE_SHIFT,
.field_pos = ID_AA64PFR1_EL1_MTE_SHIFT,
.field_width = 4,
.min_field_value = ID_AA64PFR1_MTE_ASYMM,
.min_field_value = ID_AA64PFR1_EL1_MTE_MTE3,
.sign = FTR_UNSIGNED,
},
#endif /* CONFIG_ARM64_MTE */
@ -2577,9 +2600,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_SME,
.sys_reg = SYS_ID_AA64PFR1_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR1_SME_SHIFT,
.field_pos = ID_AA64PFR1_EL1_SME_SHIFT,
.field_width = 4,
.min_field_value = ID_AA64PFR1_SME,
.min_field_value = ID_AA64PFR1_EL1_SME_IMP,
.matches = has_cpuid_feature,
.cpu_enable = sme_kernel_enable,
},
@ -2614,9 +2637,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.sys_reg = SYS_ID_AA64MMFR1_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64MMFR1_TIDCP1_SHIFT,
.field_pos = ID_AA64MMFR1_EL1_TIDCP1_SHIFT,
.field_width = 4,
.min_field_value = ID_AA64MMFR1_TIDCP1_IMP,
.min_field_value = ID_AA64MMFR1_EL1_TIDCP1_IMP,
.matches = has_cpuid_feature,
.cpu_enable = cpu_trap_el0_impdef,
},
@ -2624,7 +2647,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
};
#define HWCAP_CPUID_MATCH(reg, field, width, s, min_value) \
.matches = has_cpuid_feature, \
.matches = has_user_cpuid_feature, \
.sys_reg = reg, \
.field_pos = field, \
.field_width = width, \
@ -2708,11 +2731,11 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
@ -2725,38 +2748,39 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_EBF16),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM),
HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_EL1_SVE_IMP, CAP_HWCAP, KERNEL_HWCAP_SVE),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BitPerm_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BitPerm_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BF16_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BF16_EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SHA3_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SHA3_IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SM4_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SM4_IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_I8MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F32MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F64MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
#endif
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SSBS_SSBS2, CAP_HWCAP, KERNEL_HWCAP_SSBS),
#ifdef CONFIG_ARM64_BTI
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI),
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_BT_IMP, CAP_HWCAP, KERNEL_HWCAP_BTI),
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
#endif
#ifdef CONFIG_ARM64_MTE
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE_ASYMM, CAP_HWCAP, KERNEL_HWCAP_MTE3),
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_MTE2, CAP_HWCAP, KERNEL_HWCAP_MTE),
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_MTE3, CAP_HWCAP, KERNEL_HWCAP_MTE3),
#endif /* CONFIG_ARM64_MTE */
HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
#ifdef CONFIG_ARM64_SME
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SME, CAP_HWCAP, KERNEL_HWCAP_SME),
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SME_IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_FA64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F64F64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
@ -3102,7 +3126,7 @@ static void verify_hyp_capabilities(void)
/* Verify IPA range */
parange = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_PARANGE_SHIFT);
ID_AA64MMFR0_EL1_PARANGE_SHIFT);
ipa_max = id_aa64mmfr0_parange_to_phys_shift(parange);
if (ipa_max < get_kvm_ipa_limit()) {
pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id());


@ -115,6 +115,7 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_SME_FA64] = "smefa64",
[KERNEL_HWCAP_WFXT] = "wfxt",
[KERNEL_HWCAP_EBF16] = "ebf16",
[KERNEL_HWCAP_SVE_EBF16] = "sveebf16",
};
#ifdef CONFIG_COMPAT


@ -28,7 +28,7 @@
u8 debug_monitors_arch(void)
{
return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1),
ID_AA64DFR0_DEBUGVER_SHIFT);
ID_AA64DFR0_EL1_DebugVer_SHIFT);
}
/*


@ -379,11 +379,20 @@ static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
exit_to_kernel_mode(regs);
}
static void noinstr el1_undef(struct pt_regs *regs)
static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_undefinstr(regs);
do_undefinstr(regs, esr);
local_daif_mask();
exit_to_kernel_mode(regs);
}
static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_el1_bti(regs, esr);
local_daif_mask();
exit_to_kernel_mode(regs);
}
@ -402,7 +411,7 @@ static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_ptrauth_fault(regs, esr);
do_el1_fpac(regs, esr);
local_daif_mask();
exit_to_kernel_mode(regs);
}
@ -425,7 +434,10 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
break;
case ESR_ELx_EC_SYS64:
case ESR_ELx_EC_UNKNOWN:
el1_undef(regs);
el1_undef(regs, esr);
break;
case ESR_ELx_EC_BTI:
el1_bti(regs, esr);
break;
case ESR_ELx_EC_BREAKPT_CUR:
case ESR_ELx_EC_SOFTSTP_CUR:
@ -582,11 +594,11 @@ static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
exit_to_user_mode(regs);
}
static void noinstr el0_undef(struct pt_regs *regs)
static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX);
do_undefinstr(regs);
do_undefinstr(regs, esr);
exit_to_user_mode(regs);
}
@ -594,7 +606,7 @@ static void noinstr el0_bti(struct pt_regs *regs)
{
enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX);
do_bti(regs);
do_el0_bti(regs);
exit_to_user_mode(regs);
}
@ -629,7 +641,7 @@ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX);
do_ptrauth_fault(regs, esr);
do_el0_fpac(regs, esr);
exit_to_user_mode(regs);
}
@ -670,7 +682,7 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
el0_pc(regs, esr);
break;
case ESR_ELx_EC_UNKNOWN:
el0_undef(regs);
el0_undef(regs, esr);
break;
case ESR_ELx_EC_BTI:
el0_bti(regs);
@ -788,7 +800,7 @@ asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
case ESR_ELx_EC_CP14_MR:
case ESR_ELx_EC_CP14_LS:
case ESR_ELx_EC_CP14_64:
el0_undef(regs);
el0_undef(regs, esr);
break;
case ESR_ELx_EC_CP15_32:
case ESR_ELx_EC_CP15_64:


@ -217,11 +217,26 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long pc = rec->ip;
u32 old = 0, new;
new = aarch64_insn_gen_nop();
/*
* When using mcount, callsites in modules may have been initialized to
* call an arbitrary module PLT (which redirects to the _mcount stub)
* rather than the ftrace PLT we'll use at runtime (which redirects to
* the ftrace trampoline). We can ignore the old PLT when initializing
* the callsite.
*
* Note: 'mod' is only set at module load time.
*/
if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
return aarch64_insn_patch_text_nosync((void *)pc, new);
}
if (!ftrace_find_callable_addr(rec, mod, &addr))
return -EINVAL;
old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
new = aarch64_insn_gen_nop();
return ftrace_modify_code(pc, old, new, true);
}
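The PLT case above patches the NOP in with aarch64_insn_patch_text_nosync() rather than ftrace_modify_code(), because the latter validates the old instruction and the callsite may still hold an arbitrary module PLT branch. For reference, the instruction written is the fixed A64 NOP encoding (a sketch; aarch64_insn_gen_nop() is the authoritative source):

#include <stdint.h>

/* A64 "nop" is an alias of HINT #0 and has a fixed 32-bit encoding. */
static inline uint32_t a64_nop(void)
{
	return 0xd503201f;
}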


@ -99,7 +99,7 @@ SYM_CODE_START(primary_entry)
*/
#if VA_BITS > 48
mrs_s x0, SYS_ID_AA64MMFR2_EL1
tst x0, #0xf << ID_AA64MMFR2_LVA_SHIFT
tst x0, #0xf << ID_AA64MMFR2_EL1_VARange_SHIFT
mov x0, #VA_BITS
mov x25, #VA_BITS_MIN
csel x25, x25, x0, eq
@ -656,10 +656,10 @@ SYM_FUNC_END(__secondary_too_slow)
*/
SYM_FUNC_START(__enable_mmu)
mrs x3, ID_AA64MMFR0_EL1
ubfx x3, x3, #ID_AA64MMFR0_TGRAN_SHIFT, 4
cmp x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
ubfx x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
cmp x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN
b.lt __no_granule_support
cmp x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
cmp x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX
b.gt __no_granule_support
phys_to_ttbr x2, x2
msr ttbr0_el1, x2 // load TTBR0
@ -677,7 +677,7 @@ SYM_FUNC_START(__cpu_secondary_check52bitva)
b.ne 2f
mrs_s x0, SYS_ID_AA64MMFR2_EL1
and x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
and x0, x0, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
cbnz x0, 2f
update_early_cpu_boot_status \


@ -98,7 +98,7 @@ SYM_CODE_START_LOCAL(elx_sync)
SYM_CODE_END(elx_sync)
SYM_CODE_START_LOCAL(__finalise_el2)
check_override id_aa64pfr0 ID_AA64PFR0_SVE_SHIFT .Linit_sve .Lskip_sve
check_override id_aa64pfr0 ID_AA64PFR0_EL1_SVE_SHIFT .Linit_sve .Lskip_sve
.Linit_sve: /* SVE register access */
mrs x0, cptr_el2 // Disable SVE traps
@ -109,7 +109,7 @@ SYM_CODE_START_LOCAL(__finalise_el2)
msr_s SYS_ZCR_EL2, x1 // length for EL1.
.Lskip_sve:
check_override id_aa64pfr1 ID_AA64PFR1_SME_SHIFT .Linit_sme .Lskip_sme
check_override id_aa64pfr1 ID_AA64PFR1_EL1_SME_SHIFT .Linit_sme .Lskip_sme
.Linit_sme: /* SME register access and priority mapping */
mrs x0, cptr_el2 // Disable SME traps
@ -142,7 +142,7 @@ SYM_CODE_START_LOCAL(__finalise_el2)
msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal
mrs x1, id_aa64mmfr1_el1 // HCRX_EL2 present?
ubfx x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4
ubfx x1, x1, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
cbz x1, .Lskip_sme
mrs_s x1, SYS_HCRX_EL2
@ -157,7 +157,7 @@ SYM_CODE_START_LOCAL(__finalise_el2)
tbnz x1, #0, 1f
// Needs to be VHE capable, obviously
check_override id_aa64mmfr1 ID_AA64MMFR1_VHE_SHIFT 2f 1f
check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 2f 1f
1: mov_q x0, HVC_STUB_ERR
eret


@ -50,7 +50,7 @@ static const struct ftr_set_desc mmfr1 __initconst = {
.name = "id_aa64mmfr1",
.override = &id_aa64mmfr1_override,
.fields = {
FIELD("vh", ID_AA64MMFR1_VHE_SHIFT, mmfr1_vh_filter),
FIELD("vh", ID_AA64MMFR1_EL1_VH_SHIFT, mmfr1_vh_filter),
{}
},
};
@ -74,7 +74,7 @@ static const struct ftr_set_desc pfr0 __initconst = {
.name = "id_aa64pfr0",
.override = &id_aa64pfr0_override,
.fields = {
FIELD("sve", ID_AA64PFR0_SVE_SHIFT, pfr0_sve_filter),
FIELD("sve", ID_AA64PFR0_EL1_SVE_SHIFT, pfr0_sve_filter),
{}
},
};
@ -98,9 +98,9 @@ static const struct ftr_set_desc pfr1 __initconst = {
.name = "id_aa64pfr1",
.override = &id_aa64pfr1_override,
.fields = {
FIELD("bt", ID_AA64PFR1_BT_SHIFT, NULL ),
FIELD("mte", ID_AA64PFR1_MTE_SHIFT, NULL),
FIELD("sme", ID_AA64PFR1_SME_SHIFT, pfr1_sme_filter),
FIELD("bt", ID_AA64PFR1_EL1_BT_SHIFT, NULL ),
FIELD("mte", ID_AA64PFR1_EL1_MTE_SHIFT, NULL),
FIELD("sme", ID_AA64PFR1_EL1_SME_SHIFT, pfr1_sme_filter),
{}
},
};


@ -37,7 +37,8 @@ struct plt_entry get_plt_entry(u64 dst, void *pc)
return plt;
}
bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b)
static bool plt_entries_equal(const struct plt_entry *a,
const struct plt_entry *b)
{
u64 p, q;


@ -476,21 +476,6 @@ overflow:
return -ENOEXEC;
}
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
const char *name)
{
const Elf_Shdr *s, *se;
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
if (strcmp(name, secstrs + s->sh_name) == 0)
return s;
}
return NULL;
}
static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
{
*plt = get_plt_entry(addr, plt);


@ -390,7 +390,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = {
*/
static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
{
return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5);
return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5);
}
static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
@ -1145,8 +1145,8 @@ static void __armv8pmu_probe_pmu(void *info)
dfr0 = read_sysreg(id_aa64dfr0_el1);
pmuver = cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_PMUVER_SHIFT);
if (pmuver == ID_AA64DFR0_PMUVER_IMP_DEF || pmuver == 0)
ID_AA64DFR0_EL1_PMUVer_SHIFT);
if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF || pmuver == 0)
return;
cpu_pmu->pmuver = pmuver;
@ -1172,7 +1172,7 @@ static void __armv8pmu_probe_pmu(void *info)
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
/* store PMMIR_EL1 register for sysfs */
if (pmuver >= ID_AA64DFR0_PMUVER_8_4 && (pmceid_raw[1] & BIT(31)))
if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 && (pmceid_raw[1] & BIT(31)))
cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
else
cpu_pmu->reg_pmmir = 0;


@ -168,7 +168,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
/* If the CPU has CSV2 set, we're safe */
pfr0 = read_cpuid(ID_AA64PFR0_EL1);
if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_CSV2_SHIFT))
return SPECTRE_UNAFFECTED;
/* Alternatively, we have a list of unaffected CPUs */
@ -945,7 +945,7 @@ static bool supports_ecbhb(int scope)
mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
return cpuid_feature_extract_unsigned_field(mmfr1,
ID_AA64MMFR1_ECBHB_SHIFT);
ID_AA64MMFR1_EL1_ECBHB_SHIFT);
}
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,


@ -121,7 +121,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
on_irq_stack(addr, sizeof(unsigned long), NULL);
on_irq_stack(addr, sizeof(unsigned long));
}
/**
@ -666,10 +666,18 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
static int tls_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
int ret;
if (target == current)
tls_preserve_current_state();
return membuf_store(&to, target->thread.uw.tp_value);
ret = membuf_store(&to, target->thread.uw.tp_value);
if (system_supports_tpidr2())
ret = membuf_store(&to, target->thread.tpidr2_el0);
else
ret = membuf_zero(&to, sizeof(u64));
return ret;
}
static int tls_set(struct task_struct *target, const struct user_regset *regset,
@ -677,13 +685,20 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
unsigned long tls = target->thread.uw.tp_value;
unsigned long tls[2];
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
tls[0] = target->thread.uw.tp_value;
if (system_supports_sme())
tls[1] = target->thread.tpidr2_el0;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count);
if (ret)
return ret;
target->thread.uw.tp_value = tls;
target->thread.uw.tp_value = tls[0];
if (system_supports_sme())
target->thread.tpidr2_el0 = tls[1];
return ret;
}
@ -1392,7 +1407,7 @@ static const struct user_regset aarch64_regsets[] = {
},
[REGSET_TLS] = {
.core_note_type = NT_ARM_TLS,
.n = 1,
.n = 2,
.size = sizeof(void *),
.align = sizeof(void *),
.regset_get = tls_get,
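With .n = 2, the NT_ARM_TLS regset now carries TPIDR_EL0 followed by TPIDR2_EL0 (the second slot reads as zero when SME/TPIDR2 is absent). A debugger-side sketch, assuming the tracee is already attached and stopped:

#include <elf.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static int read_tls_regset(pid_t pid)
{
	uint64_t tls[2] = { 0, 0 };
	struct iovec iov = {
		.iov_base = tls,
		.iov_len  = sizeof(tls),
	};

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_TLS, &iov) != 0)
		return -1;

	printf("TPIDR_EL0:  0x%016" PRIx64 "\n", tls[0]);
	if (iov.iov_len >= 2 * sizeof(uint64_t))	/* kernel updates iov_len */
		printf("TPIDR2_EL0: 0x%016" PRIx64 "\n", tls[1]);
	return 0;
}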


@ -162,38 +162,6 @@ static int init_sdei_scs(void)
return err;
}
static bool on_sdei_normal_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info);
}
static bool on_sdei_critical_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info);
}
bool _on_sdei_stack(unsigned long sp, unsigned long size, struct stack_info *info)
{
if (!IS_ENABLED(CONFIG_VMAP_STACK))
return false;
if (on_sdei_critical_stack(sp, size, info))
return true;
if (on_sdei_normal_stack(sp, size, info))
return true;
return false;
}
unsigned long sdei_arch_get_entry_point(int conduit)
{
/*


@ -67,31 +67,6 @@ static inline void unwind_init_from_task(struct unwind_state *state,
state->pc = thread_saved_pc(task);
}
/*
* We can only safely access per-cpu stacks from current in a non-preemptible
* context.
*/
static bool on_accessible_stack(const struct task_struct *tsk,
unsigned long sp, unsigned long size,
struct stack_info *info)
{
if (info)
info->type = STACK_TYPE_UNKNOWN;
if (on_task_stack(tsk, sp, size, info))
return true;
if (tsk != current || preemptible())
return false;
if (on_irq_stack(sp, size, info))
return true;
if (on_overflow_stack(sp, size, info))
return true;
if (on_sdei_stack(sp, size, info))
return true;
return false;
}
/*
* Unwind from one frame record (A) to the next frame record (B).
*
@ -103,14 +78,13 @@ static int notrace unwind_next(struct unwind_state *state)
{
struct task_struct *tsk = state->task;
unsigned long fp = state->fp;
struct stack_info info;
int err;
/* Final frame; nothing to unwind */
if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
return -ENOENT;
err = unwind_next_common(state, &info, on_accessible_stack, NULL);
err = unwind_next_frame_record(state);
if (err)
return err;
@ -190,11 +164,47 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
barrier();
}
/*
* Per-cpu stacks are only accessible when unwinding the current task in a
* non-preemptible context.
*/
#define STACKINFO_CPU(name) \
({ \
((task == current) && !preemptible()) \
? stackinfo_get_##name() \
: stackinfo_get_unknown(); \
})
/*
* SDEI stacks are only accessible when unwinding the current task in an NMI
* context.
*/
#define STACKINFO_SDEI(name) \
({ \
((task == current) && in_nmi()) \
? stackinfo_get_sdei_##name() \
: stackinfo_get_unknown(); \
})
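Both helpers fall back to stackinfo_get_unknown() when the stack cannot be accessed safely. In the reworked unwinder that is simply an empty range which no address can fall within; a sketch matching the new low/high stack_info layout (assumed here, the real helper lives in the stacktrace headers):

static inline struct stack_info stackinfo_get_unknown(void)
{
	return (struct stack_info) {
		.low = 0,
		.high = 0,
	};
}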
noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task,
struct pt_regs *regs)
{
struct unwind_state state;
struct stack_info stacks[] = {
stackinfo_get_task(task),
STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
STACKINFO_SDEI(normal),
STACKINFO_SDEI(critical),
#endif
};
struct unwind_state state = {
.stacks = stacks,
.nr_stacks = ARRAY_SIZE(stacks),
};
if (regs) {
if (task != current)


@ -180,12 +180,12 @@ static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
#define S_SMP " SMP"
static int __die(const char *str, int err, struct pt_regs *regs)
static int __die(const char *str, long err, struct pt_regs *regs)
{
static int die_counter;
int ret;
pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
pr_emerg("Internal error: %s: %016lx [#%d]" S_PREEMPT S_SMP "\n",
str, err, ++die_counter);
/* trap and error numbers are mostly meaningless on ARM */
@ -206,7 +206,7 @@ static DEFINE_RAW_SPINLOCK(die_lock);
/*
* This function is protected against re-entrancy.
*/
void die(const char *str, struct pt_regs *regs, int err)
void die(const char *str, struct pt_regs *regs, long err)
{
int ret;
unsigned long flags;
@ -485,7 +485,7 @@ void arm64_notify_segfault(unsigned long addr)
force_signal_inject(SIGSEGV, code, addr, 0);
}
void do_undefinstr(struct pt_regs *regs)
void do_undefinstr(struct pt_regs *regs, unsigned long esr)
{
/* check for AArch32 breakpoint instructions */
if (!aarch32_break_handler(regs))
@ -494,28 +494,38 @@ void do_undefinstr(struct pt_regs *regs)
if (call_undef_hook(regs) == 0)
return;
BUG_ON(!user_mode(regs));
if (!user_mode(regs))
die("Oops - Undefined instruction", regs, esr);
force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_undefinstr);
void do_bti(struct pt_regs *regs)
void do_el0_bti(struct pt_regs *regs)
{
BUG_ON(!user_mode(regs));
force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_bti);
void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr)
void do_el1_bti(struct pt_regs *regs, unsigned long esr)
{
die("Oops - BTI", regs, esr);
}
NOKPROBE_SYMBOL(do_el1_bti);
void do_el0_fpac(struct pt_regs *regs, unsigned long esr)
{
/*
* Unexpected FPAC exception or pointer authentication failure in
* the kernel: kill the task before it does any more harm.
*/
BUG_ON(!user_mode(regs));
force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
}
NOKPROBE_SYMBOL(do_ptrauth_fault);
void do_el1_fpac(struct pt_regs *regs, unsigned long esr)
{
/*
* Unexpected FPAC exception in the kernel: kill the task before it
* does any more harm.
*/
die("Oops - FPAC", regs, esr);
}
NOKPROBE_SYMBOL(do_el1_fpac);
#define __user_cache_maint(insn, address, res) \
if (address >= TASK_SIZE_MAX) { \
@ -758,7 +768,7 @@ void do_cp15instr(unsigned long esr, struct pt_regs *regs)
hook_base = cp15_64_hooks;
break;
default:
do_undefinstr(regs);
do_undefinstr(regs, esr);
return;
}
@ -773,7 +783,7 @@ void do_cp15instr(unsigned long esr, struct pt_regs *regs)
* EL0. Fall back to our usual undefined instruction handler
* so that we handle these consistently.
*/
do_undefinstr(regs);
do_undefinstr(regs, esr);
}
NOKPROBE_SYMBOL(do_cp15instr);
#endif
@ -793,7 +803,7 @@ void do_sysinstr(unsigned long esr, struct pt_regs *regs)
* back to our usual undefined instruction handler so that we handle
* these consistently.
*/
do_undefinstr(regs);
do_undefinstr(regs, esr);
}
NOKPROBE_SYMBOL(do_sysinstr);
@ -970,7 +980,7 @@ static int bug_handler(struct pt_regs *regs, unsigned long esr)
{
switch (report_bug(regs->pc, regs)) {
case BUG_TRAP_TYPE_BUG:
die("Oops - BUG", regs, 0);
die("Oops - BUG", regs, esr);
break;
case BUG_TRAP_TYPE_WARN:
@ -1038,7 +1048,7 @@ static int kasan_handler(struct pt_regs *regs, unsigned long esr)
* This is something that might be fixed at some point in the future.
*/
if (!recover)
die("Oops - KASAN", regs, 0);
die("Oops - KASAN", regs, esr);
/* If thread survives, skip over the brk instruction and continue: */
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);


@ -29,9 +29,6 @@
#include <asm/signal32.h>
#include <asm/vdso.h>
extern char vdso_start[], vdso_end[];
extern char vdso32_start[], vdso32_end[];
enum vdso_abi {
VDSO_ABI_AA64,
VDSO_ABI_AA32,


@ -48,6 +48,13 @@ SECTIONS
PROVIDE (_etext = .);
PROVIDE (etext = .);
. = ALIGN(4);
.altinstructions : {
__alt_instructions = .;
*(.altinstructions)
__alt_instructions_end = .;
}
.dynamic : { *(.dynamic) } :text :dynamic
.rela.dyn : ALIGN(8) { *(.rela .rela*) }


@ -295,12 +295,12 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
* If SPE is present on this CPU and is available at current EL,
* we may need to check if the host state needs to be saved.
*/
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) &&
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
!(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE);
/* Check if we have TRBE implemented and available at the host */
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) &&
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
!(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
}


@ -35,9 +35,9 @@
* - Data Independent Timing
*/
#define PVM_ID_AA64PFR0_ALLOW (\
ARM64_FEATURE_MASK(ID_AA64PFR0_FP) | \
ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD) | \
ARM64_FEATURE_MASK(ID_AA64PFR0_DIT) \
ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
)
/*
@ -49,11 +49,11 @@
* Supported by KVM
*/
#define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\
FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), ID_AA64PFR0_ELx_64BIT_ONLY) | \
FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), ID_AA64PFR0_ELx_64BIT_ONLY) | \
FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL2), ID_AA64PFR0_ELx_64BIT_ONLY) | \
FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL3), ID_AA64PFR0_ELx_64BIT_ONLY) | \
FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), ID_AA64PFR0_RAS_V1) \
FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL2), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL3), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), ID_AA64PFR0_EL1_RAS_IMP) \
)
/*
@ -62,8 +62,8 @@
* - Speculative Store Bypassing
*/
#define PVM_ID_AA64PFR1_ALLOW (\
ARM64_FEATURE_MASK(ID_AA64PFR1_BT) | \
ARM64_FEATURE_MASK(ID_AA64PFR1_SSBS) \
ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_BT) | \
ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SSBS) \
)
/*
@ -74,10 +74,10 @@
* - Non-context synchronizing exception entry and exit
*/
#define PVM_ID_AA64MMFR0_ALLOW (\
ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL) | \
ARM64_FEATURE_MASK(ID_AA64MMFR0_SNSMEM) | \
ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL0) | \
ARM64_FEATURE_MASK(ID_AA64MMFR0_EXS) \
ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGEND) | \
ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_SNSMEM) | \
ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL0) | \
ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_EXS) \
)
/*
@ -86,8 +86,8 @@
* - 16-bit ASID
*/
#define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\
FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_PARANGE), ID_AA64MMFR0_PARANGE_40) | \
FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_ASID), ID_AA64MMFR0_ASID_16) \
FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_PARANGE), ID_AA64MMFR0_EL1_PARANGE_40) | \
FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_ASIDBITS), ID_AA64MMFR0_EL1_ASIDBITS_16) \
)
/*
@ -100,12 +100,12 @@
* - Enhanced Translation Synchronization
*/
#define PVM_ID_AA64MMFR1_ALLOW (\
ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS) | \
ARM64_FEATURE_MASK(ID_AA64MMFR1_VMIDBITS) | \
ARM64_FEATURE_MASK(ID_AA64MMFR1_HPD) | \
ARM64_FEATURE_MASK(ID_AA64MMFR1_PAN) | \
ARM64_FEATURE_MASK(ID_AA64MMFR1_SPECSEI) | \
ARM64_FEATURE_MASK(ID_AA64MMFR1_ETS) \
ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS) | \
ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_VMIDBits) | \
ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HPDS) | \
ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_PAN) | \
ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_SpecSEI) | \
ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_ETS) \
)
/*
@ -120,14 +120,14 @@
* - E0PDx mechanism
*/
#define PVM_ID_AA64MMFR2_ALLOW (\
ARM64_FEATURE_MASK(ID_AA64MMFR2_CNP) | \
ARM64_FEATURE_MASK(ID_AA64MMFR2_UAO) | \
ARM64_FEATURE_MASK(ID_AA64MMFR2_IESB) | \
ARM64_FEATURE_MASK(ID_AA64MMFR2_AT) | \
ARM64_FEATURE_MASK(ID_AA64MMFR2_IDS) | \
ARM64_FEATURE_MASK(ID_AA64MMFR2_TTL) | \
ARM64_FEATURE_MASK(ID_AA64MMFR2_BBM) | \
ARM64_FEATURE_MASK(ID_AA64MMFR2_E0PD) \
ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_CnP) | \
ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_UAO) | \
ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IESB) | \
ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_AT) | \
ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IDS) | \
ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_TTL) | \
ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_BBM) | \
ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_E0PD) \
)
/*


@@ -20,35 +20,35 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
u64 cptr_set = 0;
/* Protected KVM does not support AArch32 guests. */
BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0),
PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);
BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);
BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
/*
* Linux guests assume support for floating-point and Advanced SIMD. Do
* not change the trapping behavior for these from the KVM default.
*/
BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP),
BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
PVM_ID_AA64PFR0_ALLOW));
BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD),
BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
PVM_ID_AA64PFR0_ALLOW));
/* Trap RAS unless all current versions are supported */
if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), feature_ids) <
ID_AA64PFR0_RAS_V1P1) {
if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
ID_AA64PFR0_EL1_RAS_V1P1) {
hcr_set |= HCR_TERR | HCR_TEA;
hcr_clear |= HCR_FIEN;
}
/* Trap AMU */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_AMU), feature_ids)) {
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
hcr_clear |= HCR_AMVOFFEN;
cptr_set |= CPTR_EL2_TAM;
}
/* Trap SVE */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_SVE), feature_ids))
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids))
cptr_set |= CPTR_EL2_TZ;
vcpu->arch.hcr_el2 |= hcr_set;
@@ -66,7 +66,7 @@ static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
u64 hcr_clear = 0;
/* Memory Tagging: Trap and Treat as Untagged if not supported. */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), feature_ids)) {
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
hcr_set |= HCR_TID5;
hcr_clear |= HCR_DCT | HCR_ATA;
}
@@ -86,32 +86,32 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
u64 cptr_set = 0;
/* Trap/constrain PMU */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), feature_ids)) {
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
MDCR_EL2_HPMN_MASK;
}
/* Trap Debug */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), feature_ids))
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;
/* Trap OS Double Lock */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DOUBLELOCK), feature_ids))
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
mdcr_set |= MDCR_EL2_TDOSA;
/* Trap SPE */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER), feature_ids)) {
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
mdcr_set |= MDCR_EL2_TPMS;
mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
}
/* Trap Trace Filter */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACE_FILT), feature_ids))
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
mdcr_set |= MDCR_EL2_TTRF;
/* Trap Trace */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACEVER), feature_ids))
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids))
cptr_set |= CPTR_EL2_TTA;
vcpu->arch.mdcr_el2 |= mdcr_set;
@@ -128,7 +128,7 @@ static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
u64 mdcr_set = 0;
/* Trap Debug Communications Channel registers */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_FGT), feature_ids))
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
mdcr_set |= MDCR_EL2_TDCC;
vcpu->arch.mdcr_el2 |= mdcr_set;
@@ -143,7 +143,7 @@ static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
u64 hcr_set = 0;
/* Trap LOR */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_LOR), feature_ids))
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
hcr_set |= HCR_TLOR;
vcpu->arch.hcr_el2 |= hcr_set;


@@ -39,41 +39,32 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
static bool on_overflow_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
static struct stack_info stackinfo_get_overflow(void)
{
unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack);
unsigned long high = low + OVERFLOW_STACK_SIZE;
return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
return (struct stack_info) {
.low = low,
.high = high,
};
}
static bool on_hyp_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
static struct stack_info stackinfo_get_hyp(void)
{
struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
unsigned long high = params->stack_hyp_va;
unsigned long low = high - PAGE_SIZE;
return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
}
static bool on_accessible_stack(const struct task_struct *tsk,
unsigned long sp, unsigned long size,
struct stack_info *info)
{
if (info)
info->type = STACK_TYPE_UNKNOWN;
return (on_overflow_stack(sp, size, info) ||
on_hyp_stack(sp, size, info));
return (struct stack_info) {
.low = low,
.high = high,
};
}
static int unwind_next(struct unwind_state *state)
{
struct stack_info info;
return unwind_next_common(state, &info, on_accessible_stack, NULL);
return unwind_next_frame_record(state);
}
static void notrace unwind(struct unwind_state *state,
@@ -129,7 +120,14 @@ static bool pkvm_save_backtrace_entry(void *arg, unsigned long where)
*/
static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
{
struct unwind_state state;
struct stack_info stacks[] = {
stackinfo_get_overflow(),
stackinfo_get_hyp(),
};
struct unwind_state state = {
.stacks = stacks,
.nr_stacks = ARRAY_SIZE(stacks),
};
int idx = 0;
kvm_nvhe_unwind_init(&state, fp, pc);
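The hunks above and below replace the old on_*_stack()/STACK_TYPE_* callback scheme with plain [low, high) descriptors that the unwinder walks itself. A self-contained toy model of that pattern (struct and helper names mirror the diff; the addresses are invented for illustration):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct stack_info {
	unsigned long low;
	unsigned long high;
};

/* Membership test for one candidate stack, rejecting zero-sized
 * stacks and accesses that wrap or overrun [low, high). */
static bool stackinfo_on_stack(const struct stack_info *info,
			       unsigned long sp, unsigned long size)
{
	if (!info->low)
		return false;
	if (sp < info->low || sp + size < sp || sp + size > info->high)
		return false;
	return true;
}

int main(void)
{
	struct stack_info stacks[] = {
		{ .low = 0x1000, .high = 0x2000 },	/* e.g. overflow stack */
		{ .low = 0x8000, .high = 0x9000 },	/* e.g. hyp stack */
	};
	unsigned long fp = 0x8f10;

	/* The unwinder simply iterates its explicit stack list. */
	for (size_t i = 0; i < sizeof(stacks) / sizeof(stacks[0]); i++)
		if (stackinfo_on_stack(&stacks[i], fp, 16))
			printf("fp %#lx is on stack %zu\n", fp, i);
	return 0;
}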


@@ -92,9 +92,9 @@ static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
/* Spectre and Meltdown mitigation in KVM */
set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2),
set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
(u64)kvm->arch.pfr0_csv2);
set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3),
set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
(u64)kvm->arch.pfr0_csv3);
return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
@@ -106,7 +106,7 @@ static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
u64 allow_mask = PVM_ID_AA64PFR1_ALLOW;
if (!kvm_has_mte(kvm))
allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
return id_aa64pfr1_el1_sys_val & allow_mask;
}
@@ -281,8 +281,8 @@ static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
* No support for AArch32 guests; therefore, pKVM has no sanitized copy
* of AArch32 feature ID registers.
*/
BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_ELx_64BIT_ONLY);
BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
return pvm_access_raz_wi(vcpu, p, r);
}


@@ -61,7 +61,7 @@ struct kvm_pgtable_walk_data {
static bool kvm_phys_is_valid(u64 phys)
{
return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX));
return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX));
}
static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)


@@ -33,12 +33,12 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
pmuver = kvm->arch.arm_pmu->pmuver;
switch (pmuver) {
case ID_AA64DFR0_PMUVER_8_0:
case ID_AA64DFR0_EL1_PMUVer_IMP:
return GENMASK(9, 0);
case ID_AA64DFR0_PMUVER_8_1:
case ID_AA64DFR0_PMUVER_8_4:
case ID_AA64DFR0_PMUVER_8_5:
case ID_AA64DFR0_PMUVER_8_7:
case ID_AA64DFR0_EL1_PMUVer_V3P1:
case ID_AA64DFR0_EL1_PMUVer_V3P4:
case ID_AA64DFR0_EL1_PMUVer_V3P5:
case ID_AA64DFR0_EL1_PMUVer_V3P7:
return GENMASK(15, 0);
default: /* Shouldn't be here, just for sanity */
WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
@@ -774,7 +774,7 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
{
struct arm_pmu_entry *entry;
if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
return;
mutex_lock(&arm_pmus_lock);
@@ -828,7 +828,7 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void)
if (event->pmu) {
pmu = to_arm_pmu(event->pmu);
if (pmu->pmuver == 0 ||
pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
pmu = NULL;
}
@@ -856,7 +856,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
* Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
* as RAZ
*/
if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_4)
if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4)
val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
base = 32;
}


@@ -359,7 +359,7 @@ int kvm_set_ipa_limit(void)
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
parange = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_PARANGE_SHIFT);
ID_AA64MMFR0_EL1_PARANGE_SHIFT);
/*
* IPA size beyond 48 bits could not be supported
* on either 4K or 16K page size. Hence let's cap
@@ -367,20 +367,20 @@
* on the system.
*/
if (PAGE_SIZE != SZ_64K)
parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);
parange = min(parange, (unsigned int)ID_AA64MMFR0_EL1_PARANGE_48);
/*
* Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
* Stage-2. If not, things will stop very quickly.
*/
switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN_2_SHIFT)) {
case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_TGRAN_2_SHIFT)) {
case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE:
kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
return -EINVAL;
case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT:
kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
break;
case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX:
kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
break;
default:


@@ -21,6 +21,54 @@
#include <asm/stacktrace/nvhe.h>
static struct stack_info stackinfo_get_overflow(void)
{
struct kvm_nvhe_stacktrace_info *stacktrace_info
= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
unsigned long high = low + OVERFLOW_STACK_SIZE;
return (struct stack_info) {
.low = low,
.high = high,
};
}
static struct stack_info stackinfo_get_overflow_kern_va(void)
{
unsigned long low = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
unsigned long high = low + OVERFLOW_STACK_SIZE;
return (struct stack_info) {
.low = low,
.high = high,
};
}
static struct stack_info stackinfo_get_hyp(void)
{
struct kvm_nvhe_stacktrace_info *stacktrace_info
= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
unsigned long low = (unsigned long)stacktrace_info->stack_base;
unsigned long high = low + PAGE_SIZE;
return (struct stack_info) {
.low = low,
.high = high,
};
}
static struct stack_info stackinfo_get_hyp_kern_va(void)
{
unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
unsigned long high = low + PAGE_SIZE;
return (struct stack_info) {
.low = low,
.high = high,
};
}
/*
* kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to kernel VAs
*
@@ -34,73 +34,45 @@
* Returns true on success and updates @addr to its corresponding kernel VA;
* otherwise returns false.
*/
static bool kvm_nvhe_stack_kern_va(unsigned long *addr,
enum stack_type type)
static bool kvm_nvhe_stack_kern_va(unsigned long *addr, unsigned long size)
{
struct kvm_nvhe_stacktrace_info *stacktrace_info;
unsigned long hyp_base, kern_base, hyp_offset;
struct stack_info stack_hyp, stack_kern;
stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
stack_hyp = stackinfo_get_hyp();
stack_kern = stackinfo_get_hyp_kern_va();
if (stackinfo_on_stack(&stack_hyp, *addr, size))
goto found;
switch (type) {
case STACK_TYPE_HYP:
kern_base = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
hyp_base = (unsigned long)stacktrace_info->stack_base;
break;
case STACK_TYPE_OVERFLOW:
kern_base = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
hyp_base = (unsigned long)stacktrace_info->overflow_stack_base;
break;
default:
return false;
}
stack_hyp = stackinfo_get_overflow();
stack_kern = stackinfo_get_overflow_kern_va();
if (stackinfo_on_stack(&stack_hyp, *addr, size))
goto found;
hyp_offset = *addr - hyp_base;
*addr = kern_base + hyp_offset;
return false;
found:
*addr = *addr - stack_hyp.low + stack_kern.low;
return true;
}
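The translation at the found: label is offset-preserving rebasing between the hypervisor and kernel aliases of the same stack page. A tiny worked example of the arithmetic (all addresses invented for illustration):

#include <stdio.h>

int main(void)
{
	/* Hypothetical per-CPU stack aliases, for illustration only. */
	unsigned long hyp_low  = 0xffff800010000000UL;	/* stack_hyp.low  */
	unsigned long kern_low = 0xffff0000c3500000UL;	/* stack_kern.low */
	unsigned long addr     = 0xffff800010000f80UL;	/* hyp-space fp   */

	/* Same rebasing as the found: label above: keep the offset
	 * within the stack, swap the base. */
	addr = addr - hyp_low + kern_low;
	printf("kernel alias: %#lx\n", addr);	/* 0xffff0000c3500f80 */
	return 0;
}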
static bool on_overflow_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
/*
* Convert a KVM nVHE HYP frame record address to a kernel VA
*/
static bool kvm_nvhe_stack_kern_record_va(unsigned long *addr)
{
struct kvm_nvhe_stacktrace_info *stacktrace_info
= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
unsigned long high = low + OVERFLOW_STACK_SIZE;
return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}
static bool on_hyp_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
struct kvm_nvhe_stacktrace_info *stacktrace_info
= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
unsigned long low = (unsigned long)stacktrace_info->stack_base;
unsigned long high = low + PAGE_SIZE;
return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
}
static bool on_accessible_stack(const struct task_struct *tsk,
unsigned long sp, unsigned long size,
struct stack_info *info)
{
if (info)
info->type = STACK_TYPE_UNKNOWN;
return (on_overflow_stack(sp, size, info) ||
on_hyp_stack(sp, size, info));
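/* A frame record is two 64-bit words (the saved fp and lr), hence size 16. */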
return kvm_nvhe_stack_kern_va(addr, 16);
}
static int unwind_next(struct unwind_state *state)
{
struct stack_info info;
/*
* The FP is in the hypervisor VA space. Convert it to the kernel VA
* space so it can be unwound by the regular unwind functions.
*/
if (!kvm_nvhe_stack_kern_record_va(&state->fp))
return -EINVAL;
return unwind_next_common(state, &info, on_accessible_stack,
kvm_nvhe_stack_kern_va);
return unwind_next_frame_record(state);
}
static void unwind(struct unwind_state *state,
@@ -158,7 +178,14 @@ static void kvm_nvhe_dump_backtrace_end(void)
static void hyp_dump_backtrace(unsigned long hyp_offset)
{
struct kvm_nvhe_stacktrace_info *stacktrace_info;
struct unwind_state state;
struct stack_info stacks[] = {
stackinfo_get_overflow_kern_va(),
stackinfo_get_hyp_kern_va(),
};
struct unwind_state state = {
.stacks = stacks,
.nr_stacks = ARRAY_SIZE(stacks),
};
stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);


@@ -273,7 +273,7 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
u32 sr = reg_to_encoding(r);
if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
kvm_inject_undefined(vcpu);
return false;
}
@@ -1077,22 +1077,22 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
switch (id) {
case SYS_ID_AA64PFR0_EL1:
if (!vcpu_has_sve(vcpu))
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
if (kvm_vgic_global_state.type == VGIC_V3) {
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
}
break;
case SYS_ID_AA64PFR1_EL1:
if (!kvm_has_mte(vcpu->kvm))
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_SME);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
@@ -1110,14 +1110,14 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
break;
case SYS_ID_AA64DFR0_EL1:
/* Limit debug to ARMv8.0 */
val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6);
val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
/* Limit guests to PMUv3 for ARMv8.4 */
val = cpuid_feature_cap_perfmon_field(val,
ID_AA64DFR0_PMUVER_SHIFT,
kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
ID_AA64DFR0_EL1_PMUVer_SHIFT,
kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_V3P4 : 0);
/* Hide SPE from guests */
val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER);
val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
break;
case SYS_ID_DFR0_EL1:
/* Limit guests to PMUv3 for ARMv8.4 */
@@ -1196,21 +1196,21 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
* it doesn't promise more than what is actually provided (the
* guest could otherwise be covered in ectoplasmic residue).
*/
csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT);
csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
if (csv2 > 1 ||
(csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
return -EINVAL;
/* Same thing for CSV3 */
csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV3_SHIFT);
csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
if (csv3 > 1 ||
(csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
return -EINVAL;
/* We can only differ with CSV[23], and anything else is an error */
val ^= read_id_reg(vcpu, rd, false);
val &= ~((0xFUL << ID_AA64PFR0_CSV2_SHIFT) |
(0xFUL << ID_AA64PFR0_CSV3_SHIFT));
val &= ~((0xFUL << ID_AA64PFR0_EL1_CSV2_SHIFT) |
(0xFUL << ID_AA64PFR0_EL1_CSV3_SHIFT));
if (val)
return -EINVAL;
@@ -1825,11 +1825,11 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
} else {
u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT);
p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
(((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
(((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) |
(((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) |
(((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20)
| (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
return true;
}


@@ -43,17 +43,17 @@ static u32 get_cpu_asid_bits(void)
{
u32 asid;
int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
ID_AA64MMFR0_ASID_SHIFT);
ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
switch (fld) {
default:
pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
smp_processor_id(), fld);
fallthrough;
case ID_AA64MMFR0_ASID_8:
case ID_AA64MMFR0_EL1_ASIDBITS_8:
asid = 8;
break;
case ID_AA64MMFR0_ASID_16:
case ID_AA64MMFR0_EL1_ASIDBITS_16:
asid = 16;
}


@@ -360,7 +360,7 @@ void __init arm64_memblock_init(void)
extern u16 memstart_offset_seed;
u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
int parange = cpuid_feature_extract_unsigned_field(
mmfr0, ID_AA64MMFR0_PARANGE_SHIFT);
mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
s64 range = linear_region_size -
BIT(id_aa64mmfr0_parange_to_phys_shift(parange));


@@ -686,7 +686,7 @@ static bool arm64_early_this_cpu_has_bti(void)
pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
return cpuid_feature_extract_unsigned_field(pfr1,
ID_AA64PFR1_BT_SHIFT);
ID_AA64PFR1_EL1_BT_SHIFT);
}
/*


@@ -434,8 +434,8 @@ SYM_FUNC_START(__cpu_setup)
* (ID_AA64PFR1_EL1[11:8] > 1).
*/
mrs x10, ID_AA64PFR1_EL1
ubfx x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4
cmp x10, #ID_AA64PFR1_MTE
ubfx x10, x10, #ID_AA64PFR1_EL1_MTE_SHIFT, #4
cmp x10, #ID_AA64PFR1_EL1_MTE_MTE2
b.lt 1f
/* Normal Tagged memory type at the corresponding MAIR index */


@@ -68,6 +68,7 @@ WORKAROUND_2038923
WORKAROUND_2064142
WORKAROUND_2077057
WORKAROUND_2457168
WORKAROUND_2658417
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
WORKAROUND_TSB_FLUSH_FAILURE
WORKAROUND_TRBE_WRITE_OUT_OF_RANGE


@@ -46,6 +46,127 @@
# feature that introduces them (e.g., FEAT_LS64_ACCDATA introduces enumeration
# item ACCDATA) though it may be more tasteful to do something else.
Sysreg ID_AA64PFR0_EL1 3 0 0 4 0
Enum 63:60 CSV3
0b0000 NI
0b0001 IMP
EndEnum
Enum 59:56 CSV2
0b0000 NI
0b0001 IMP
0b0010 CSV2_2
0b0011 CSV2_3
EndEnum
Enum 55:52 RME
0b0000 NI
0b0001 IMP
EndEnum
Enum 51:48 DIT
0b0000 NI
0b0001 IMP
EndEnum
Enum 47:44 AMU
0b0000 NI
0b0001 IMP
0b0010 V1P1
EndEnum
Enum 43:40 MPAM
0b0000 0
0b0001 1
EndEnum
Enum 39:36 SEL2
0b0000 NI
0b0001 IMP
EndEnum
Enum 35:32 SVE
0b0000 NI
0b0001 IMP
EndEnum
Enum 31:28 RAS
0b0000 NI
0b0001 IMP
0b0010 V1P1
EndEnum
Enum 27:24 GIC
0b0000 NI
0b0001 IMP
0b0010 V4P1
EndEnum
Enum 23:20 AdvSIMD
0b0000 IMP
0b0001 FP16
0b1111 NI
EndEnum
Enum 19:16 FP
0b0000 IMP
0b0001 FP16
0b1111 NI
EndEnum
Enum 15:12 EL3
0b0000 NI
0b0001 IMP
0b0010 AARCH32
EndEnum
Enum 11:8 EL2
0b0000 NI
0b0001 IMP
0b0010 AARCH32
EndEnum
Enum 7:4 EL1
0b0001 IMP
0b0010 AARCH32
EndEnum
Enum 3:0 EL0
0b0001 IMP
0b0010 AARCH32
EndEnum
EndSysreg
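Each Enum/Field block above is consumed by arch/arm64/tools/gen-sysreg.awk, which emits the shift/mask/value #defines that the C hunks in this merge switch over to. An abridged sketch of the kind of output produced for the CSV3 field (the real generated header contains more helpers than shown here):

/* Abridged sketch of generated output for ID_AA64PFR0_EL1.CSV3. */
#define ID_AA64PFR0_EL1_CSV3_SHIFT	60
#define ID_AA64PFR0_EL1_CSV3_MASK	GENMASK_ULL(63, 60)
#define ID_AA64PFR0_EL1_CSV3_NI		UL(0b0000)
#define ID_AA64PFR0_EL1_CSV3_IMP	UL(0b0001)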
Sysreg ID_AA64PFR1_EL1 3 0 0 4 1
Res0 63:40
Enum 39:36 NMI
0b0000 NI
0b0001 IMP
EndEnum
Enum 35:32 CSV2_frac
0b0000 NI
0b0001 CSV2_1p1
0b0010 CSV2_1p2
EndEnum
Enum 31:28 RNDR_trap
0b0000 NI
0b0001 IMP
EndEnum
Enum 27:24 SME
0b0000 NI
0b0001 IMP
EndEnum
Res0 23:20
Enum 19:16 MPAM_frac
0b0000 MINOR_0
0b0001 MINOR_1
EndEnum
Enum 15:12 RAS_frac
0b0000 NI
0b0001 RASv1p1
EndEnum
Enum 11:8 MTE
0b0000 NI
0b0001 IMP
0b0010 MTE2
0b0011 MTE3
EndEnum
Enum 7:4 SSBS
0b0000 NI
0b0001 IMP
0b0010 SSBS2
EndEnum
Enum 3:0 BT
0b0000 NI
0b0001 IMP
EndEnum
EndSysreg
Sysreg ID_AA64ZFR0_EL1 3 0 0 4 4
Res0 63:60
Enum 59:56 F64MM
@@ -98,7 +219,9 @@ Enum 63 FA64
0b1 IMP
EndEnum
Res0 62:60
Field 59:56 SMEver
Enum 59:56 SMEver
0b0000 IMP
EndEnum
Enum 55:52 I16I64
0b0000 NI
0b1111 IMP
@@ -129,6 +252,89 @@ EndEnum
Res0 31:0
EndSysreg
Sysreg ID_AA64DFR0_EL1 3 0 0 5 0
Enum 63:60 HPMN0
0b0000 UNPREDICTABLE
0b0001 DEF
EndEnum
Res0 59:56
Enum 55:52 BRBE
0b0000 NI
0b0001 IMP
0b0010 BRBE_V1P1
EndEnum
Enum 51:48 MTPMU
0b0000 NI_IMPDEF
0b0001 IMP
0b1111 NI
EndEnum
Enum 47:44 TraceBuffer
0b0000 NI
0b0001 IMP
EndEnum
Enum 43:40 TraceFilt
0b0000 NI
0b0001 IMP
EndEnum
Enum 39:36 DoubleLock
0b0000 IMP
0b1111 NI
EndEnum
Enum 35:32 PMSVer
0b0000 NI
0b0001 IMP
0b0010 V1P1
0b0011 V1P2
0b0100 V1P3
EndEnum
Field 31:28 CTX_CMPs
Res0 27:24
Field 23:20 WRPs
Res0 19:16
Field 15:12 BRPs
Enum 11:8 PMUVer
0b0000 NI
0b0001 IMP
0b0100 V3P1
0b0101 V3P4
0b0110 V3P5
0b0111 V3P7
0b1000 V3P8
0b1111 IMP_DEF
EndEnum
Enum 7:4 TraceVer
0b0000 NI
0b0001 IMP
EndEnum
Enum 3:0 DebugVer
0b0110 IMP
0b0111 VHE
0b1000 V8P2
0b1001 V8P4
0b1010 V8P8
EndEnum
EndSysreg
Sysreg ID_AA64DFR1_EL1 3 0 0 5 1
Res0 63:0
EndSysreg
Sysreg ID_AA64AFR0_EL1 3 0 0 5 4
Res0 63:32
Field 31:28 IMPDEF7
Field 27:24 IMPDEF6
Field 23:20 IMPDEF5
Field 19:16 IMPDEF4
Field 15:12 IMPDEF3
Field 11:8 IMPDEF2
Field 7:4 IMPDEF1
Field 3:0 IMPDEF0
EndSysreg
Sysreg ID_AA64AFR1_EL1 3 0 0 5 5
Res0 63:0
EndSysreg
Sysreg ID_AA64ISAR0_EL1 3 0 0 6 0
Enum 63:60 RNDR
0b0000 NI
@@ -313,6 +519,217 @@ Enum 3:0 WFxT
EndEnum
EndSysreg
Sysreg ID_AA64MMFR0_EL1 3 0 0 7 0
Enum 63:60 ECV
0b0000 NI
0b0001 IMP
0b0010 CNTPOFF
EndEnum
Enum 59:56 FGT
0b0000 NI
0b0001 IMP
EndEnum
Res0 55:48
Enum 47:44 EXS
0b0000 NI
0b0001 IMP
EndEnum
Enum 43:40 TGRAN4_2
0b0000 TGRAN4
0b0001 NI
0b0010 IMP
0b0011 52_BIT
EndEnum
Enum 39:36 TGRAN64_2
0b0000 TGRAN64
0b0001 NI
0b0010 IMP
EndEnum
Enum 35:32 TGRAN16_2
0b0000 TGRAN16
0b0001 NI
0b0010 IMP
0b0011 52_BIT
EndEnum
Enum 31:28 TGRAN4
0b0000 IMP
0b0001 52_BIT
0b1111 NI
EndEnum
Enum 27:24 TGRAN64
0b0000 IMP
0b1111 NI
EndEnum
Enum 23:20 TGRAN16
0b0000 NI
0b0001 IMP
0b0010 52_BIT
EndEnum
Enum 19:16 BIGENDEL0
0b0000 NI
0b0001 IMP
EndEnum
Enum 15:12 SNSMEM
0b0000 NI
0b0001 IMP
EndEnum
Enum 11:8 BIGEND
0b0000 NI
0b0001 IMP
EndEnum
Enum 7:4 ASIDBITS
0b0000 8
0b0010 16
EndEnum
Enum 3:0 PARANGE
0b0000 32
0b0001 36
0b0010 40
0b0011 42
0b0100 44
0b0101 48
0b0110 52
EndEnum
EndSysreg
Sysreg ID_AA64MMFR1_EL1 3 0 0 7 1
Enum 63:60 ECBHB
0b0000 NI
0b0001 IMP
EndEnum
Enum 59:56 CMOW
0b0000 NI
0b0001 IMP
EndEnum
Enum 55:52 TIDCP1
0b0000 NI
0b0001 IMP
EndEnum
Enum 51:48 nTLBPA
0b0000 NI
0b0001 IMP
EndEnum
Enum 47:44 AFP
0b0000 NI
0b0001 IMP
EndEnum
Enum 43:40 HCX
0b0000 NI
0b0001 IMP
EndEnum
Enum 39:36 ETS
0b0000 NI
0b0001 IMP
EndEnum
Enum 35:32 TWED
0b0000 NI
0b0001 IMP
EndEnum
Enum 31:28 XNX
0b0000 NI
0b0001 IMP
EndEnum
Enum 27:24 SpecSEI
0b0000 NI
0b0001 IMP
EndEnum
Enum 23:20 PAN
0b0000 NI
0b0001 IMP
0b0010 PAN2
0b0011 PAN3
EndEnum
Enum 19:16 LO
0b0000 NI
0b0001 IMP
EndEnum
Enum 15:12 HPDS
0b0000 NI
0b0001 IMP
0b0010 HPDS2
EndEnum
Enum 11:8 VH
0b0000 NI
0b0001 IMP
EndEnum
Enum 7:4 VMIDBits
0b0000 8
0b0010 16
EndEnum
Enum 3:0 HAFDBS
0b0000 NI
0b0001 AF
0b0010 DBM
EndEnum
EndSysreg
Sysreg ID_AA64MMFR2_EL1 3 0 0 7 2
Enum 63:60 E0PD
0b0000 NI
0b0001 IMP
EndEnum
Enum 59:56 EVT
0b0000 NI
0b0001 IMP
0b0010 TTLBxS
EndEnum
Enum 55:52 BBM
0b0000 0
0b0001 1
0b0010 2
EndEnum
Enum 51:48 TTL
0b0000 NI
0b0001 IMP
EndEnum
Res0 47:44
Enum 43:40 FWB
0b0000 NI
0b0001 IMP
EndEnum
Enum 39:36 IDS
0b0000 0x0
0b0001 0x18
EndEnum
Enum 35:32 AT
0b0000 NI
0b0001 IMP
EndEnum
Enum 31:28 ST
0b0000 39
0b0001 48_47
EndEnum
Enum 27:24 NV
0b0000 NI
0b0001 IMP
0b0010 NV2
EndEnum
Enum 23:20 CCIDX
0b0000 32
0b0001 64
EndEnum
Enum 19:16 VARange
0b0000 48
0b0001 52
EndEnum
Enum 15:12 IESB
0b0000 NI
0b0001 IMP
EndEnum
Enum 11:8 LSM
0b0000 NI
0b0001 IMP
EndEnum
Enum 7:4 UAO
0b0000 NI
0b0001 IMP
EndEnum
Enum 3:0 CnP
0b0000 NI
0b0001 IMP
EndEnum
EndSysreg
Sysreg SCTLR_EL1 3 0 1 0 0
Field 63 TIDCP
Field 62 SPINMASK
@@ -427,6 +844,12 @@ Sysreg SMCR_EL1 3 0 1 2 6
Fields SMCR_ELx
EndSysreg
Sysreg ALLINT 3 0 4 3 0
Res0 63:14
Field 13 ALLINT
Res0 12:0
EndSysreg
Sysreg FAR_EL1 3 0 6 0 0
Field 63:0 ADDR
EndSysreg
@@ -440,6 +863,14 @@ Sysreg CONTEXTIDR_EL1 3 0 13 0 1
Fields CONTEXTIDR_ELx
EndSysreg
Sysreg TPIDR_EL1 3 0 13 0 4
Field 63:0 ThreadID
EndSysreg
Sysreg SCXTNUM_EL1 3 0 13 0 7
Field 63:0 SoftwareContextNumber
EndSysreg
Sysreg CLIDR_EL1 3 1 0 0 1
Res0 63:47
Field 46:33 Ttypen
@@ -514,6 +945,22 @@ Sysreg ZCR_EL2 3 4 1 2 0
Fields ZCR_ELx
EndSysreg
Sysreg HCRX_EL2 3 4 1 2 2
Res0 63:12
Field 11 MSCEn
Field 10 MCE2
Field 9 CMOW
Field 8 VFNMI
Field 7 VINMI
Field 6 TALLINT
Field 5 SMPME
Field 4 FGTnXS
Field 3 FnXS
Field 2 EnASR
Field 1 EnALS
Field 0 EnAS0
EndSysreg
Sysreg SMPRIMAP_EL2 3 4 1 2 5
Field 63:60 P15
Field 59:56 P14


@@ -23,8 +23,8 @@ efi_status_t check_platform_features(void)
if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
return EFI_SUCCESS;
tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) {
tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_EL1_TGRAN_SHIFT) & 0xf;
if (tg < ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX) {
if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
efi_err("This 64 KB granular kernel is not supported by your CPU\n");
else


@@ -966,7 +966,7 @@ static inline bool cpu_supports_sysreg_trace(void)
{
u64 dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
return ((dfr0 >> ID_AA64DFR0_TRACEVER_SHIFT) & 0xfUL) > 0;
return ((dfr0 >> ID_AA64DFR0_EL1_TraceVer_SHIFT) & 0xfUL) > 0;
}
static bool etm4_init_sysreg_access(struct etmv4_drvdata *drvdata,
@@ -1054,7 +1054,7 @@ static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
u64 trfcr;
drvdata->trfcr = 0;
if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRACE_FILT_SHIFT))
if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT))
return;
/*


@@ -20,7 +20,8 @@
static inline bool is_trbe_available(void)
{
u64 aa64dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
unsigned int trbe = cpuid_feature_extract_unsigned_field(aa64dfr0, ID_AA64DFR0_TRBE_SHIFT);
unsigned int trbe = cpuid_feature_extract_unsigned_field(aa64dfr0,
ID_AA64DFR0_EL1_TraceBuffer_SHIFT);
return trbe >= 0b0001;
}


@@ -150,7 +150,7 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
}
reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);
cd->ttbr = virt_to_phys(mm->pgd);
@@ -425,13 +425,13 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
* addresses larger than what we support.
*/
reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
oas = id_aa64mmfr0_parange_to_phys_shift(fld);
if (smmu->oas < oas)
return false;
/* We can support bigger ASIDs than the CPU, but not smaller */
fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
asid_bits = fld ? 16 : 8;
if (smmu->asid_bits < asid_bits)
return false;


@@ -94,7 +94,7 @@ bool gic_cpuif_has_vsgi(void)
{
unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_GIC_SHIFT);
fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_EL1_GIC_SHIFT);
return fld >= 0x3;
}


@@ -674,9 +674,9 @@ static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
static u64 arm_spe_pmsevfr_res0(u16 pmsver)
{
switch (pmsver) {
case ID_AA64DFR0_PMSVER_8_2:
case ID_AA64DFR0_EL1_PMSVer_IMP:
return SYS_PMSEVFR_EL1_RES0_8_2;
case ID_AA64DFR0_PMSVER_8_3:
case ID_AA64DFR0_EL1_PMSVer_V1P1:
/* Return the highest version we support by default */
default:
return SYS_PMSEVFR_EL1_RES0_8_3;
@@ -958,7 +958,7 @@ static void __arm_spe_pmu_dev_probe(void *info)
struct device *dev = &spe_pmu->pdev->dev;
fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64DFR0_EL1),
ID_AA64DFR0_PMSVER_SHIFT);
ID_AA64DFR0_EL1_PMSVer_SHIFT);
if (!fld) {
dev_err(dev,
"unsupported ID_AA64DFR0_EL1.PMSVer [%d] on CPU %d\n",


@@ -1,2 +1,3 @@
ptrace
syscall-abi
tpidr2


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2021 ARM Limited
TEST_GEN_PROGS := syscall-abi tpidr2
TEST_GEN_PROGS := ptrace syscall-abi tpidr2
include ../../lib.mk


@@ -0,0 +1,241 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2022 ARM Limited.
*/
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <asm/sigcontext.h>
#include <asm/ptrace.h>
#include "../../kselftest.h"
#define EXPECTED_TESTS 7
#define MAX_TPIDRS 2
static bool have_sme(void)
{
return getauxval(AT_HWCAP2) & HWCAP2_SME;
}
static void test_tpidr(pid_t child)
{
uint64_t read_val[MAX_TPIDRS];
uint64_t write_val[MAX_TPIDRS];
struct iovec read_iov, write_iov;
bool test_tpidr2 = false;
int ret, i;
read_iov.iov_base = read_val;
write_iov.iov_base = write_val;
/* Should be able to read a single TPIDR... */
read_iov.iov_len = sizeof(uint64_t);
ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS, &read_iov);
ksft_test_result(ret == 0, "read_tpidr_one\n");
/* ...write a new value... */
write_iov.iov_len = sizeof(uint64_t);
write_val[0] = read_val[0] + 1;
ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_TLS, &write_iov);
ksft_test_result(ret == 0, "write_tpidr_one\n");
/* ...then read it back */
ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS, &read_iov);
ksft_test_result(ret == 0 && write_val[0] == read_val[0],
"verify_tpidr_one\n");
/* If we have TPIDR2 we should be able to read it */
read_iov.iov_len = sizeof(read_val);
ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS, &read_iov);
if (ret == 0) {
/* If we have SME there should be two TPIDRs */
if (read_iov.iov_len >= sizeof(read_val))
test_tpidr2 = true;
if (have_sme() && test_tpidr2) {
ksft_test_result(test_tpidr2, "count_tpidrs\n");
} else {
ksft_test_result(read_iov.iov_len % sizeof(uint64_t) == 0,
"count_tpidrs\n");
}
} else {
ksft_test_result_fail("count_tpidrs\n");
}
if (test_tpidr2) {
/* Try to write new values to all known TPIDRs... */
write_iov.iov_len = sizeof(write_val);
for (i = 0; i < MAX_TPIDRS; i++)
write_val[i] = read_val[i] + 1;
ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_TLS, &write_iov);
ksft_test_result(ret == 0 &&
write_iov.iov_len == sizeof(write_val),
"tpidr2_write\n");
/* ...then read them back */
read_iov.iov_len = sizeof(read_val);
ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS, &read_iov);
if (have_sme()) {
/* Should read back the written value */
ksft_test_result(ret == 0 &&
read_iov.iov_len >= sizeof(read_val) &&
memcmp(read_val, write_val,
sizeof(read_val)) == 0,
"tpidr2_read\n");
} else {
/* TPIDR2 should read as zero */
ksft_test_result(ret == 0 &&
read_iov.iov_len >= sizeof(read_val) &&
read_val[0] == write_val[0] &&
read_val[1] == 0,
"tpidr2_read\n");
}
/* Writing only TPIDR... */
write_iov.iov_len = sizeof(uint64_t);
memcpy(write_val, read_val, sizeof(read_val));
write_val[0] += 1;
ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_TLS, &write_iov);
if (ret == 0) {
/* ...should leave TPIDR2 untouched */
read_iov.iov_len = sizeof(read_val);
ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS,
&read_iov);
ksft_test_result(ret == 0 &&
read_iov.iov_len >= sizeof(read_val) &&
memcmp(read_val, write_val,
sizeof(read_val)) == 0,
"write_tpidr_only\n");
} else {
ksft_test_result_fail("write_tpidr_only\n");
}
} else {
ksft_test_result_skip("tpidr2_write\n");
ksft_test_result_skip("tpidr2_read\n");
ksft_test_result_skip("write_tpidr_only\n");
}
}
static int do_child(void)
{
if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
ksft_exit_fail_msg("PTRACE_TRACEME", strerror(errno));
if (raise(SIGSTOP))
ksft_exit_fail_msg("raise(SIGSTOP)", strerror(errno));
return EXIT_SUCCESS;
}
static int do_parent(pid_t child)
{
int ret = EXIT_FAILURE;
pid_t pid;
int status;
siginfo_t si;
/* Attach to the child */
while (1) {
int sig;
pid = wait(&status);
if (pid == -1) {
perror("wait");
goto error;
}
/*
* This should never happen but it's hard to flag in
* the framework.
*/
if (pid != child)
continue;
if (WIFEXITED(status) || WIFSIGNALED(status))
ksft_exit_fail_msg("Child died unexpectedly\n");
if (!WIFSTOPPED(status))
goto error;
sig = WSTOPSIG(status);
if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) {
if (errno == ESRCH)
goto disappeared;
if (errno == EINVAL) {
sig = 0; /* bust group-stop */
goto cont;
}
ksft_test_result_fail("PTRACE_GETSIGINFO: %s\n",
strerror(errno));
goto error;
}
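/*
 * raise() in the child delivers the signal via tgkill(), so the stop
 * we are waiting for is reported with SI_TKILL from the child's own
 * pid; any other stop (e.g. a group-stop) is passed back via
 * PTRACE_CONT and the loop continues.
 */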
if (sig == SIGSTOP && si.si_code == SI_TKILL &&
si.si_pid == pid)
break;
cont:
if (ptrace(PTRACE_CONT, pid, NULL, sig)) {
if (errno == ESRCH)
goto disappeared;
ksft_test_result_fail("PTRACE_CONT: %s\n",
strerror(errno));
goto error;
}
}
ksft_print_msg("Parent is %d, child is %d\n", getpid(), child);
test_tpidr(child);
ret = EXIT_SUCCESS;
error:
kill(child, SIGKILL);
disappeared:
return ret;
}
int main(void)
{
int ret = EXIT_SUCCESS;
pid_t child;
srandom(getpid());
ksft_print_header();
ksft_set_plan(EXPECTED_TESTS);
child = fork();
if (!child)
return do_child();
if (do_parent(child))
ret = EXIT_FAILURE;
ksft_print_cnts();
return ret;
}