Merge branch 'for-next/mte' into for-next/core

* for-next/mte:
  docs: sysfs-devices-system-cpu: document "asymm" value for mte_tcf_preferred
  arm64/mte: Remove asymmetric mode from the prctl() interface
  kasan: fix a missing header include of static_keys.h
  arm64/mte: Add userspace interface for enabling asymmetric mode
  arm64/mte: Add hwcap for asymmetric mode
  arm64/mte: Add a little bit of documentation for mte_update_sctlr_user()
  arm64/mte: Document ABI for asymmetric mode
  arm64: mte: avoid clearing PSTATE.TCO on entry unless necessary
  kasan: split kasan_*enabled() functions into a separate header
Will Deacon 2022-03-14 19:01:23 +00:00
commit bf587af2ab
15 changed files with 127 additions and 39 deletions


@@ -662,6 +662,7 @@ Description: Preferred MTE tag checking mode
================ ==============================================
"sync"           Prefer synchronous mode
"asymm"          Prefer asymmetric mode
"async"          Prefer asynchronous mode
================ ==============================================
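
For illustration, a privileged process could select the new value like
this (a minimal userspace sketch, not part of the patch; it assumes CPU 0
and a kernel built with ARM64_MTE_ASYMM support):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/devices/system/cpu/cpu0/mte_tcf_preferred";
        int fd = open(path, O_WRONLY);

        /* The store rejects "asymm" with EINVAL on unsupported kernels. */
        if (fd < 0 || write(fd, "asymm", 5) != 5) {
                perror(path);
                return 1;
        }
        close(fd);
        return 0;
}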


@@ -259,6 +259,11 @@ HWCAP2_RPRES
    Functionality implied by ID_AA64ISAR2_EL1.RPRES == 0b0001.

HWCAP2_MTE3
    Functionality implied by ID_AA64PFR1_EL1.MTE == 0b0011, as described
    by Documentation/arm64/memory-tagging-extension.rst.
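
As a usage note (editorial, not part of the patch), userspace can probe
for the new capability through the auxiliary vector; the fallback define
mirrors the asm/hwcap.h hunk below:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_MTE3
#define HWCAP2_MTE3 (1 << 22)   /* value added in asm/hwcap.h below */
#endif

int main(void)
{
        if (getauxval(AT_HWCAP2) & HWCAP2_MTE3)
                puts("mte3: asymmetric mode supported");
        else
                puts("mte3: asymmetric mode not supported");
        return 0;
}
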
4. Unused AT_HWCAP bits
-----------------------


@@ -76,6 +76,9 @@ configurable behaviours:
  with ``.si_code = SEGV_MTEAERR`` and ``.si_addr = 0`` (the faulting
  address is unknown).
- *Asymmetric* - Reads are handled as for synchronous mode while writes
  are handled as for asynchronous mode.

The user can select the above modes, per thread, using the
``prctl(PR_SET_TAGGED_ADDR_CTRL, flags, 0, 0, 0)`` system call where ``flags``
contains any number of the following values in the ``PR_MTE_TCF_MASK``
@@ -140,18 +143,25 @@ tag checking mode as the CPU's preferred tag checking mode.
The preferred tag checking mode for each CPU is controlled by
``/sys/devices/system/cpu/cpu<N>/mte_tcf_preferred``, to which a
-privileged user may write the value ``async`` or ``sync``. The default
-preferred mode for each CPU is ``async``.
+privileged user may write the value ``async``, ``sync`` or ``asymm``. The
+default preferred mode for each CPU is ``async``.

To allow a program to potentially run in the CPU's preferred tag
checking mode, the user program may set multiple tag check fault mode
bits in the ``flags`` argument to the ``prctl(PR_SET_TAGGED_ADDR_CTRL,
-flags, 0, 0, 0)`` system call. If the CPU's preferred tag checking
-mode is in the task's set of provided tag checking modes (this will
-always be the case at present because the kernel only supports two
-tag checking modes, but future kernels may support more modes), that
-mode will be selected. Otherwise, one of the modes in the task's mode
-set will be selected in a currently unspecified manner.
+flags, 0, 0, 0)`` system call. If both synchronous and asynchronous
+modes are requested then asymmetric mode may also be selected by the
+kernel. If the CPU's preferred tag checking mode is in the task's set
+of provided tag checking modes, that mode will be selected. Otherwise,
+the kernel will select one of the modes from the task's mode set using
+the preference order:
+
+1. Asynchronous
+2. Asymmetric
+3. Synchronous
+
+Note that there is no way for userspace to request multiple modes and
+also disable asymmetric mode.
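
To make the new ABI concrete, a thread that can tolerate any checking
mode (and therefore also opts in to asymmetric mode on capable kernels)
might do the following. This is an editorial sketch, not part of the
patch; the fallback defines mirror linux/prctl.h:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_MTE_TCF_SYNC
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
#define PR_MTE_TCF_SYNC         (1UL << 1)
#define PR_MTE_TCF_ASYNC        (1UL << 2)
#endif

int main(void)
{
        /*
         * Request both modes: the kernel resolves this against the CPU's
         * preference and may implicitly select asymmetric mode.
         */
        unsigned long ctrl = PR_TAGGED_ADDR_ENABLE |
                             PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC;

        if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0)) {
                perror("PR_SET_TAGGED_ADDR_CTRL");
                return 1;
        }
        return 0;
}
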
Initial process state
---------------------


@@ -108,6 +108,7 @@
#define KERNEL_HWCAP_ECV __khwcap2_feature(ECV)
#define KERNEL_HWCAP_AFP __khwcap2_feature(AFP)
#define KERNEL_HWCAP_RPRES __khwcap2_feature(RPRES)
#define KERNEL_HWCAP_MTE3 __khwcap2_feature(MTE3)
/*
* This yields a mask that user programs can use to figure out what


@@ -11,7 +11,9 @@
#ifndef __ASSEMBLY__
#include <linux/bitfield.h>
#include <linux/kasan-enabled.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <asm/pgtable-types.h>
@@ -86,6 +88,26 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,
#endif /* CONFIG_ARM64_MTE */
static inline void mte_disable_tco_entry(struct task_struct *task)
{
        if (!system_supports_mte())
                return;

        /*
         * Re-enable tag checking (TCO set on exception entry). This is only
         * necessary if MTE is enabled in either the kernel or the userspace
         * task in synchronous or asymmetric mode (SCTLR_EL1.TCF0 bit 0 is set
         * for both). With MTE disabled in the kernel and disabled or
         * asynchronous in userspace, tag check faults (including in uaccesses)
         * are not reported, therefore there is no need to re-enable checking.
         * This is beneficial on microarchitectures where re-enabling TCO is
         * expensive.
         */
        if (kasan_hw_tags_enabled() ||
            (task->thread.sctlr_user & (1UL << SCTLR_EL1_TCF0_SHIFT)))
                asm volatile(SET_PSTATE_TCO(0));
}
#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */
DECLARE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);


@@ -21,6 +21,7 @@
#define MTE_CTRL_TCF_SYNC (1UL << 16)
#define MTE_CTRL_TCF_ASYNC (1UL << 17)
#define MTE_CTRL_TCF_ASYMM (1UL << 18)
#ifndef __ASSEMBLY__


@@ -78,5 +78,6 @@
#define HWCAP2_ECV (1 << 19)
#define HWCAP2_AFP (1 << 20)
#define HWCAP2_RPRES (1 << 21)
#define HWCAP2_MTE3 (1 << 22)
#endif /* _UAPI__ASM_HWCAP_H */


@@ -2479,6 +2479,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
#endif
#ifdef CONFIG_ARM64_MTE
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE_ASYMM, CAP_HWCAP, KERNEL_HWCAP_MTE3),
#endif /* CONFIG_ARM64_MTE */
HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),


@@ -97,6 +97,7 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_ECV] = "ecv",
[KERNEL_HWCAP_AFP] = "afp",
[KERNEL_HWCAP_RPRES] = "rpres",
[KERNEL_HWCAP_MTE3] = "mte3",
};
#ifdef CONFIG_COMPAT


@@ -6,6 +6,7 @@
*/
#include <linux/context_tracking.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
@@ -56,6 +57,7 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
        __enter_from_kernel_mode(regs);
        mte_check_tfsr_entry();
        mte_disable_tco_entry(current);
}
/*
@@ -103,6 +105,7 @@ static __always_inline void __enter_from_user_mode(void)
        CT_WARN_ON(ct_state() != CONTEXT_USER);
        user_exit_irqoff();
        trace_hardirqs_off_finish();
        mte_disable_tco_entry(current);
}
static __always_inline void enter_from_user_mode(struct pt_regs *regs)


@@ -308,13 +308,6 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
        mov     x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
        msr_s   SYS_ICC_PMR_EL1, x20
alternative_else_nop_endif
#endif
-
-       /* Re-enable tag checking (TCO set on exception entry) */
-#ifdef CONFIG_ARM64_MTE
-alternative_if ARM64_MTE
-       SET_PSTATE_TCO(0)
-alternative_else_nop_endif
-#endif
/*


@@ -186,6 +186,11 @@ void mte_check_tfsr_el1(void)
}
#endif
/*
* This is where we actually resolve the system and process MTE mode
* configuration into an actual value in SCTLR_EL1 that affects
* userspace.
*/
static void mte_update_sctlr_user(struct task_struct *task)
{
/*
@@ -199,9 +204,20 @@ static void mte_update_sctlr_user(struct task_struct *task)
        unsigned long pref, resolved_mte_tcf;

        pref = __this_cpu_read(mte_tcf_preferred);
        /*
         * If there is no overlap between the system preferred and
         * program requested values go with what was requested.
         */
        resolved_mte_tcf = (mte_ctrl & pref) ? pref : mte_ctrl;
        sctlr &= ~SCTLR_EL1_TCF0_MASK;
-       if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
+       /*
+        * Pick an actual setting. The order in which we check for
+        * set bits and map into register values determines our
+        * default order.
+        */
+       if (resolved_mte_tcf & MTE_CTRL_TCF_ASYMM)
+               sctlr |= SCTLR_EL1_TCF0_ASYMM;
+       else if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
                sctlr |= SCTLR_EL1_TCF0_ASYNC;
        else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC)
                sctlr |= SCTLR_EL1_TCF0_SYNC;
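
Editorial aside: the resolution above can be modelled in plain C (a
sketch using the MTE_CTRL_* values from the processor.h hunk above; it
assumes at least one TCF bit is set in mte_ctrl):

#include <stdio.h>

#define MTE_CTRL_TCF_SYNC       (1UL << 16)
#define MTE_CTRL_TCF_ASYNC      (1UL << 17)
#define MTE_CTRL_TCF_ASYMM      (1UL << 18)

/* Mirrors the selection logic in mte_update_sctlr_user(). */
static unsigned long resolve(unsigned long mte_ctrl, unsigned long pref)
{
        unsigned long resolved = (mte_ctrl & pref) ? pref : mte_ctrl;

        if (resolved & MTE_CTRL_TCF_ASYMM)
                return MTE_CTRL_TCF_ASYMM;
        if (resolved & MTE_CTRL_TCF_ASYNC)
                return MTE_CTRL_TCF_ASYNC;
        return MTE_CTRL_TCF_SYNC;
}

int main(void)
{
        /* sync+async requested, so set_mte_ctrl() also set the asymm bit */
        unsigned long ctrl = MTE_CTRL_TCF_SYNC | MTE_CTRL_TCF_ASYNC |
                             MTE_CTRL_TCF_ASYMM;

        printf("%#lx\n", resolve(ctrl, MTE_CTRL_TCF_ASYMM)); /* 0x40000 */
        printf("%#lx\n", resolve(ctrl, MTE_CTRL_TCF_SYNC));  /* 0x10000 */
        return 0;
}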
@@ -253,6 +269,9 @@ void mte_thread_switch(struct task_struct *next)
        mte_update_sctlr_user(next);
        mte_update_gcr_excl(next);

        /* TCO may not have been disabled on exception entry for the current task. */
        mte_disable_tco_entry(next);

        /*
         * Check if an async tag exception occurred at EL1.
         *
@@ -293,6 +312,17 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
        if (arg & PR_MTE_TCF_SYNC)
                mte_ctrl |= MTE_CTRL_TCF_SYNC;

        /*
         * If the system supports it and both sync and async modes are
         * specified then implicitly enable asymmetric mode.
         * Userspace could see a mix of both sync and async anyway due
         * to differing or changing defaults on CPUs.
         */
        if (cpus_have_cap(ARM64_MTE_ASYMM) &&
            (arg & PR_MTE_TCF_ASYNC) &&
            (arg & PR_MTE_TCF_SYNC))
                mte_ctrl |= MTE_CTRL_TCF_ASYMM;

        task->thread.mte_ctrl = mte_ctrl;
        if (task == current) {
                preempt_disable();
@@ -467,6 +497,8 @@ static ssize_t mte_tcf_preferred_show(struct device *dev,
return sysfs_emit(buf, "async\n");
case MTE_CTRL_TCF_SYNC:
return sysfs_emit(buf, "sync\n");
case MTE_CTRL_TCF_ASYMM:
return sysfs_emit(buf, "asymm\n");
default:
return sysfs_emit(buf, "???\n");
}
@@ -482,6 +514,8 @@ static ssize_t mte_tcf_preferred_store(struct device *dev,
                tcf = MTE_CTRL_TCF_ASYNC;
        else if (sysfs_streq(buf, "sync"))
                tcf = MTE_CTRL_TCF_SYNC;
        else if (cpus_have_cap(ARM64_MTE_ASYMM) && sysfs_streq(buf, "asymm"))
                tcf = MTE_CTRL_TCF_ASYMM;
        else
                return -EINVAL;


@@ -635,7 +635,8 @@ long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
                return -EINVAL;

        if (system_supports_mte())
-               valid_mask |= PR_MTE_TCF_MASK | PR_MTE_TAG_MASK;
+               valid_mask |= PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC \
+                       | PR_MTE_TAG_MASK;

        if (arg & ~valid_mask)
                return -EINVAL;


@@ -0,0 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_ENABLED_H
#define _LINUX_KASAN_ENABLED_H

#include <linux/static_key.h>

#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
        return static_branch_likely(&kasan_flag_enabled);
}

static inline bool kasan_hw_tags_enabled(void)
{
        return kasan_enabled();
}

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
        return IS_ENABLED(CONFIG_KASAN);
}

static inline bool kasan_hw_tags_enabled(void)
{
        return false;
}

#endif /* CONFIG_KASAN_HW_TAGS */

#endif /* _LINUX_KASAN_ENABLED_H */
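
The point of the split is visible in the asm/mte.h hunk above: low-level
headers can now test the KASAN mode without pulling in all of
<linux/kasan.h>. A hypothetical in-kernel user (not from this series)
might look like:

#include <linux/kasan-enabled.h>
#include <linux/types.h>

/*
 * Hypothetical helper: with CONFIG_KASAN_HW_TAGS=n, kasan_hw_tags_enabled()
 * is constant false and the body compiles away; with it enabled, the static
 * key keeps the not-enabled-at-runtime path cheap.
 */
static inline void maybe_do_tag_work(void *addr, size_t size)
{
        if (!kasan_hw_tags_enabled())
                return;
        /* MTE tag maintenance for [addr, addr + size) would go here. */
}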


@@ -3,6 +3,7 @@
#define _LINUX_KASAN_H
#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>
@@ -83,33 +84,11 @@ static inline void kasan_disable_current(void) {}
#ifdef CONFIG_KASAN_HW_TAGS
-DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
-
-static __always_inline bool kasan_enabled(void)
-{
-       return static_branch_likely(&kasan_flag_enabled);
-}
-
-static inline bool kasan_hw_tags_enabled(void)
-{
-       return kasan_enabled();
-}
-
void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
void kasan_free_pages(struct page *page, unsigned int order);

#else /* CONFIG_KASAN_HW_TAGS */

-static inline bool kasan_enabled(void)
-{
-       return IS_ENABLED(CONFIG_KASAN);
-}
-
-static inline bool kasan_hw_tags_enabled(void)
-{
-       return false;
-}
-
static __always_inline void kasan_alloc_pages(struct page *page,
                                              unsigned int order, gfp_t flags)
{