arm64 fixes for -rc2

- Fix handling of watchpoints triggered by uaccess routines
- Fix initialisation of gigantic pages for CMA buffers
- Raise minimum clang version for BTI to avoid miscompilation
- Fix data race in SVE vector length configuration code
- Ensure address tags are ignored in kern_addr_valid()
- Dump register state on fatal BTI exception
- kexec_file() cleanup to use struct_size() macro

-----BEGIN PGP SIGNATURE-----

iQFEBAABCgAuFiEEPxTL6PPUbjXGY88ct6xw3ITBYzQFAl7sl1IQHHdpbGxAa2Vy
bmVsLm9yZwAKCRC3rHDchMFjNNESB/0UhaH4JI3UPu1DAESsHyYqh6Jdb8TljMxd
uiAkBT8hJhgq5gUQLBicFpo5AAnEgGuQcG3OMWxWG8PG1MevHemKV1TW38QtbJlA
p6jwMriSXiwe6199ImoIH6tujshKnoSeo33B470N6i1TzEz8lmS9ml5v1epIT7q4
1wEPSX+o1Zi/CQKjrfW1UQW7I9C5G26BXusQH4b2Sz8B7RIzHA/+/LqUjcYVtSph
yjJn1jHhc0GhC7qZUgea4Rz6WZNGlBebGBksIqssvBNhO/St4UmWGmhiUS3YX7rn
cP7DicrrWVwCib2xUr3fwxxPki4rlV+xHIINVINrOwE1BpqRENl5
=Gthp
-----END PGP SIGNATURE-----

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Unfortunately, we still have a number of outstanding issues so there
  will be more fixes to come, but this lot are a good start.

  - Fix handling of watchpoints triggered by uaccess routines

  - Fix initialisation of gigantic pages for CMA buffers

  - Raise minimum clang version for BTI to avoid miscompilation

  - Fix data race in SVE vector length configuration code

  - Ensure address tags are ignored in kern_addr_valid()

  - Dump register state on fatal BTI exception

  - kexec_file() cleanup to use struct_size() macro"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: hw_breakpoint: Don't invoke overflow handler on uaccess watchpoints
  arm64: kexec_file: Use struct_size() in kmalloc()
  arm64: mm: reserve hugetlb CMA after numa_init
  arm64: bti: Require clang >= 10.0.1 for in-kernel BTI support
  arm64: sve: Fix build failure when ARM64_SVE=y and SYSCTL=n
  arm64: pgtable: Clear the GP bit for non-executable kernel pages
  arm64: mm: reset address tag set by kasan sw tagging
  arm64: traps: Dump registers prior to panic() in bad_mode()
  arm64/sve: Eliminate data races on sve_default_vl
  docs/arm64: Fix typo'd #define in sve.rst
  arm64: remove TEXT_OFFSET randomization
commit 84bc1993e2
--- a/Documentation/arm64/sve.rst
+++ b/Documentation/arm64/sve.rst
@@ -186,7 +186,7 @@ prctl(PR_SVE_SET_VL, unsigned long arg)
 
    flags:
 
-	PR_SVE_SET_VL_INHERIT
+	PR_SVE_VL_INHERIT
 
 	    Inherit the current vector length across execve().  Otherwise, the
 	    vector length is reset to the system default at execve().  (See
@@ -247,7 +247,7 @@ prctl(PR_SVE_GET_VL)
 
    The following flag may be OR-ed into the result:
 
-	PR_SVE_SET_VL_INHERIT
+	PR_SVE_VL_INHERIT
 
 	    Vector length will be inherited across execve().
 
@@ -393,7 +393,7 @@ The regset data starts with struct user_sve_header, containing:
 * At every execve() call, the new vector length of the new process is set to
   the system default vector length, unless
 
-  * PR_SVE_SET_VL_INHERIT (or equivalently SVE_PT_VL_INHERIT) is set for the
+  * PR_SVE_VL_INHERIT (or equivalently SVE_PT_VL_INHERIT) is set for the
     calling thread, or
 
 * a deferred vector length change is pending, established via the
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1630,6 +1630,8 @@ config ARM64_BTI_KERNEL
 	depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
 	# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
 	depends on !CC_IS_GCC || GCC_VERSION >= 100100
+	# https://reviews.llvm.org/rGb8ae3fdfa579dbf366b1bb1cbfdbf8c51db7fa55
+	depends on !CC_IS_CLANG || CLANG_VERSION >= 100001
 	depends on !(CC_IS_CLANG && GCOV_KERNEL)
 	depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
 	help
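The new clang guard mirrors the existing GCC one: in-kernel BTI needs a compiler whose branch-protection support is known good, and the linked llvm.org review is the fix that first shipped in clang 10.0.1. A hedged sketch of what the option ultimately affects (exact flags and codegen are compiler-dependent assumptions, not taken from this commit):

	/*
	 * Sketch only: with a sufficiently new compiler and something like
	 * -mbranch-protection=pac-ret+bti, indirect-branch targets are
	 * expected to start with a BTI landing pad, roughly:
	 *
	 *   callee:
	 *           bti  c        // valid target for BLR/BR
	 *           ...function body...
	 *
	 * A clang that misplaces or omits these pads would make BTI-enabled
	 * kernel code fault at runtime, hence the version floor above.
	 */
	int callee(void)
	{
		return 0;
	}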
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -8,21 +8,6 @@ config PID_IN_CONTEXTIDR
 	  instructions during context switch. Say Y here only if you are
 	  planning to use hardware trace tools with this kernel.
 
-config ARM64_RANDOMIZE_TEXT_OFFSET
-	bool "Randomize TEXT_OFFSET at build time"
-	help
-	  Say Y here if you want the image load offset (AKA TEXT_OFFSET)
-	  of the kernel to be randomized at build-time. When selected,
-	  this option will cause TEXT_OFFSET to be randomized upon any
-	  build of the kernel, and the offset will be reflected in the
-	  text_offset field of the resulting Image. This can be used to
-	  fuzz-test bootloaders which respect text_offset.
-
-	  This option is intended for bootloader and/or kernel testing
-	  only. Bootloaders must make no assumptions regarding the value
-	  of TEXT_OFFSET and platforms must not require a specific
-	  value.
-
 config DEBUG_EFI
 	depends on EFI && DEBUG_INFO
 	bool "UEFI debugging"
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -121,13 +121,7 @@ endif
 head-y		:= arch/arm64/kernel/head.o
 
 # The byte offset of the kernel image in RAM from the start of RAM.
-ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
-		 int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
-		 rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
-else
 TEXT_OFFSET := 0x0
-endif
 
 ifeq ($(CONFIG_KASAN_SW_TAGS), y)
 KASAN_SHADOW_SCALE_SHIFT := 4
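The deleted awk one-liner picked a random, page-aligned offset below 2 MiB at build time. A small standalone C sketch of the same arithmetic (PAGE_SHIFT=12 is an assumption here; the Makefile took it from CONFIG_ARM64_PAGE_SHIFT):

	/* Sketch of the removed awk expression: a random, page-aligned
	 * TEXT_OFFSET in [0, 2 MiB). PAGE_SHIFT=12 assumed for the demo. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>

	int main(void)
	{
		const unsigned long page_size = 1UL << 12;
		const unsigned long window = 2 * 1024 * 1024;	/* 2 MiB */
		unsigned long offset;

		srand((unsigned int)time(NULL));
		offset = ((unsigned long)rand() % (window / page_size)) * page_size;
		printf("TEXT_OFFSET := 0x%06lx\n", offset);
		return 0;
	}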
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -416,7 +416,7 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
 	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
 
 #define pgprot_nx(prot) \
-	__pgprot_modify(prot, 0, PTE_PXN)
+	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)
 
 /*
  * Mark the prot value as uncacheable and unbufferable.
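__pgprot_modify(prot, mask, bits) clears mask and then sets bits, so passing PTE_MAYBE_GP as the mask makes pgprot_nx() drop the BTI guarded-page (GP) bit whenever a mapping is made non-executable. A toy model of the before/after behaviour (bit positions are illustrative; the real encodings live in arch/arm64/include/asm/pgtable-hwdef.h):

	#include <stdio.h>

	#define DEMO_PTE_PXN	(1UL << 53)	/* privileged execute-never */
	#define DEMO_PTE_GP	(1UL << 50)	/* BTI guarded page */

	/* Same shape as __pgprot_modify(): clear 'mask', then set 'bits'. */
	#define demo_pgprot_modify(val, mask, bits) (((val) & ~(mask)) | (bits))

	int main(void)
	{
		unsigned long prot = DEMO_PTE_GP;	/* executable, BTI-guarded */

		/* Old behaviour: set PXN but leave GP behind. */
		unsigned long old_nx = demo_pgprot_modify(prot, 0UL, DEMO_PTE_PXN);
		/* Fixed behaviour: a non-executable page must not stay guarded. */
		unsigned long new_nx = demo_pgprot_modify(prot, DEMO_PTE_GP, DEMO_PTE_PXN);

		printf("old keeps GP: %d, new keeps GP: %d\n",
		       !!(old_nx & DEMO_PTE_GP), !!(new_nx & DEMO_PTE_GP));
		return 0;
	}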
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -12,6 +12,7 @@
 #include <linux/bug.h>
 #include <linux/cache.h>
 #include <linux/compat.h>
+#include <linux/compiler.h>
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
@@ -119,10 +120,20 @@ struct fpsimd_last_state_struct {
 static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
 
 /* Default VL for tasks that don't set it explicitly: */
-static int sve_default_vl = -1;
+static int __sve_default_vl = -1;
+
+static int get_sve_default_vl(void)
+{
+	return READ_ONCE(__sve_default_vl);
+}
 
 #ifdef CONFIG_ARM64_SVE
 
+static void set_sve_default_vl(int val)
+{
+	WRITE_ONCE(__sve_default_vl, val);
+}
+
 /* Maximum supported vector length across all CPUs (initially poisoned) */
 int __ro_after_init sve_max_vl = SVE_VL_MIN;
 int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
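Funnelling every access through READ_ONCE()/WRITE_ONCE() accessors makes each read and write of the default VL a single, non-tearable memory operation the compiler cannot re-fetch or split, which is what eliminates the data race between the sysctl writer and concurrent readers. A userspace analogue of the same pattern, using C11 relaxed atomics in place of the kernel macros (an approximation, not the kernel's implementation):

	#include <stdatomic.h>
	#include <stdio.h>

	/* Stand-in for __sve_default_vl; relaxed atomics give the same
	 * "no tearing, no compiler re-fetching" guarantee that
	 * READ_ONCE/WRITE_ONCE provide, with no ordering beyond that. */
	static _Atomic int demo_default_vl = -1;

	static int get_demo_default_vl(void)
	{
		return atomic_load_explicit(&demo_default_vl, memory_order_relaxed);
	}

	static void set_demo_default_vl(int val)
	{
		atomic_store_explicit(&demo_default_vl, val, memory_order_relaxed);
	}

	int main(void)
	{
		set_demo_default_vl(64);		/* e.g. the sysctl write path */
		printf("%d\n", get_demo_default_vl());	/* e.g. fpsimd_flush_thread() */
		return 0;
	}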
@@ -338,13 +349,13 @@ static unsigned int find_supported_vector_length(unsigned int vl)
 	return sve_vl_from_vq(__bit_to_vq(bit));
 }
 
-#ifdef CONFIG_SYSCTL
+#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)
 
 static int sve_proc_do_default_vl(struct ctl_table *table, int write,
 				  void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
-	int vl = sve_default_vl;
+	int vl = get_sve_default_vl();
 	struct ctl_table tmp_table = {
 		.data = &vl,
 		.maxlen = sizeof(vl),
@@ -361,7 +372,7 @@ static int sve_proc_do_default_vl(struct ctl_table *table, int write,
 	if (!sve_vl_valid(vl))
 		return -EINVAL;
 
-	sve_default_vl = find_supported_vector_length(vl);
+	set_sve_default_vl(find_supported_vector_length(vl));
 	return 0;
 }
 
@@ -383,9 +394,9 @@ static int __init sve_sysctl_init(void)
 	return 0;
 }
 
-#else /* ! CONFIG_SYSCTL */
+#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
 static int __init sve_sysctl_init(void) { return 0; }
-#endif /* ! CONFIG_SYSCTL */
+#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
 
 #define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
 	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
@@ -868,7 +879,7 @@ void __init sve_setup(void)
 	 * For the default VL, pick the maximum supported value <= 64.
 	 * VL == 64 is guaranteed not to grow the signal frame.
 	 */
-	sve_default_vl = find_supported_vector_length(64);
+	set_sve_default_vl(find_supported_vector_length(64));
 
 	bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
 		      SVE_VQ_MAX);
@@ -889,7 +900,7 @@ void __init sve_setup(void)
 	pr_info("SVE: maximum available vector length %u bytes per vector\n",
 		sve_max_vl);
 	pr_info("SVE: default vector length %u bytes per vector\n",
-		sve_default_vl);
+		get_sve_default_vl());
 
 	/* KVM decides whether to support mismatched systems. Just warn here: */
 	if (sve_max_virtualisable_vl < sve_max_vl)
@@ -1029,13 +1040,13 @@ void fpsimd_flush_thread(void)
 	 * vector length configured: no kernel task can become a user
 	 * task without an exec and hence a call to this function.
 	 * By the time the first call to this function is made, all
-	 * early hardware probing is complete, so sve_default_vl
+	 * early hardware probing is complete, so __sve_default_vl
 	 * should be valid.
 	 * If a bug causes this to go wrong, we make some noise and
 	 * try to fudge thread.sve_vl to a safe value here.
 	 */
 	vl = current->thread.sve_vl_onexec ?
-		current->thread.sve_vl_onexec : sve_default_vl;
+		current->thread.sve_vl_onexec : get_sve_default_vl();
 
 	if (WARN_ON(!sve_vl_valid(vl)))
 		vl = SVE_VL_MIN;
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -730,6 +730,27 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
 	return 0;
 }
 
+static int watchpoint_report(struct perf_event *wp, unsigned long addr,
+			     struct pt_regs *regs)
+{
+	int step = is_default_overflow_handler(wp);
+	struct arch_hw_breakpoint *info = counter_arch_bp(wp);
+
+	info->trigger = addr;
+
+	/*
+	 * If we triggered a user watchpoint from a uaccess routine, then
+	 * handle the stepping ourselves since userspace really can't help
+	 * us with this.
+	 */
+	if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0)
+		step = 1;
+	else
+		perf_bp_event(wp, regs);
+
+	return step;
+}
+
 static int watchpoint_handler(unsigned long addr, unsigned int esr,
 			      struct pt_regs *regs)
 {
@@ -739,7 +760,6 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
 	u64 val;
 	struct perf_event *wp, **slots;
 	struct debug_info *debug_info;
-	struct arch_hw_breakpoint *info;
 	struct arch_hw_breakpoint_ctrl ctrl;
 
 	slots = this_cpu_ptr(wp_on_reg);
@@ -777,25 +797,13 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
 		if (dist != 0)
 			continue;
 
-		info = counter_arch_bp(wp);
-		info->trigger = addr;
-		perf_bp_event(wp, regs);
-
-		/* Do we need to handle the stepping? */
-		if (is_default_overflow_handler(wp))
-			step = 1;
+		step = watchpoint_report(wp, addr, regs);
 	}
-	if (min_dist > 0 && min_dist != -1) {
-		/* No exact match found. */
-		wp = slots[closest_match];
-		info = counter_arch_bp(wp);
-		info->trigger = addr;
-		perf_bp_event(wp, regs);
 
-		/* Do we need to handle the stepping? */
-		if (is_default_overflow_handler(wp))
-			step = 1;
-	}
+	/* No exact match found? */
+	if (min_dist > 0 && min_dist != -1)
+		step = watchpoint_report(slots[closest_match], addr, regs);
+
 	rcu_read_unlock();
 
 	if (!step)
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -219,8 +219,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
 				MEMBLOCK_NONE, &start, &end, NULL)
 		nr_ranges++;
 
-	cmem = kmalloc(sizeof(struct crash_mem) +
-			sizeof(struct crash_mem_range) * nr_ranges, GFP_KERNEL);
+	cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
 	if (!cmem)
 		return -ENOMEM;
 
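struct_size(p, member, count) from <linux/overflow.h> computes the size of a structure ending in a flexible array plus count trailing elements, saturating rather than wrapping if the multiplication overflows. A rough userspace model of the arithmetic (fields trimmed, and the real macro additionally type-checks the member and saturates on overflow):

	#include <stddef.h>
	#include <stdio.h>

	/* Approximate shape of the allocation in prepare_elf_headers(). */
	struct crash_mem_demo {
		unsigned int max_nr_ranges;
		unsigned int nr_ranges;
		struct { unsigned long long start, end; } ranges[];
	};

	int main(void)
	{
		size_t nr_ranges = 4;

		/* Old open-coded form: header size + n * element size. */
		size_t open_coded = sizeof(struct crash_mem_demo) +
			sizeof(((struct crash_mem_demo *)0)->ranges[0]) * nr_ranges;

		/* What struct_size() computes, minus its overflow saturation:
		 * offset of the flexible array + n * element size. They
		 * coincide here, but struct_size() can never silently wrap. */
		size_t via_offset = offsetof(struct crash_mem_demo, ranges) +
			sizeof(((struct crash_mem_demo *)0)->ranges[0]) * nr_ranges;

		printf("open-coded: %zu, struct_size-style: %zu\n",
		       open_coded, via_offset);
		return 0;
	}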
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -813,6 +813,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 		handler[reason], smp_processor_id(), esr,
 		esr_get_class_string(esr));
 
+	__show_regs(regs);
 	local_daif_mask();
 	panic("bad mode");
 }
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -404,11 +404,6 @@ void __init arm64_memblock_init(void)
 	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
 
 	dma_contiguous_reserve(arm64_dma32_phys_limit);
-
-#ifdef CONFIG_ARM64_4K_PAGES
-	hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
-#endif
-
 }
 
 void __init bootmem_init(void)
@@ -424,6 +419,16 @@ void __init bootmem_init(void)
 	min_low_pfn = min;
 
 	arm64_numa_init();
+
+	/*
+	 * must be done after arm64_numa_init() which calls numa_init() to
+	 * initialize node_online_map that gets used in hugetlb_cma_reserve()
+	 * while allocating required CMA size across online nodes.
+	 */
+#ifdef CONFIG_ARM64_4K_PAGES
+	hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
+#endif
+
 	/*
 	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
 	 * done after the fixed reservations.
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -723,6 +723,7 @@ int kern_addr_valid(unsigned long addr)
 	pmd_t *pmdp, pmd;
 	pte_t *ptep, pte;
 
+	addr = arch_kasan_reset_tag(addr);
 	if ((((long)addr) >> VA_BITS) != -1UL)
 		return 0;
 
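The check above relies on kernel addresses sign-extending to all-ones above VA_BITS; a KASAN software tag stored in bits 63:56 breaks that property, which is why the tag has to be reset first. A quick standalone illustration (VA_BITS=48 and the tag value are assumptions for the demo; like the kernel, it assumes arithmetic right shift of signed values):

	#include <stdio.h>

	#define VA_BITS		48
	#define DEMO_TAG	0xf2UL	/* hypothetical KASAN pointer tag */

	int main(void)
	{
		unsigned long addr = 0xffff000012345678UL;	/* canonical kernel VA */
		unsigned long tagged =
			(addr & ~(0xffUL << 56)) | (DEMO_TAG << 56);

		/* Untagged: top bits sign-extend, check passes (prints 1). */
		printf("untagged: %d\n", (((long)addr) >> VA_BITS) == -1L);
		/* Tagged: bits 63:56 no longer all-ones, check fails (prints 0). */
		printf("tagged:   %d\n", (((long)tagged) >> VA_BITS) == -1L);
		return 0;
	}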