Mirror of https://github.com/torvalds/linux.git (synced 2024-11-10 14:11:52 +00:00)
arm64 updates for 4.19
A bunch of good stuff in here:

 - Wire up support for qspinlock, replacing our trusty ticket lock code

 - Add an IPI to flush_icache_range() to ensure that stale instructions
   fetched into the pipeline are discarded along with the I-cache lines

 - Support for the GCC "stackleak" plugin

 - Support for restartable sequences, plus an arm64 port for the selftest

 - Kexec/kdump support on systems booting with ACPI

 - Rewrite of our syscall entry code in C, which allows us to zero the
   GPRs on entry from userspace

 - Support for chained PMU counters, allowing 64-bit event counters to be
   constructed on current CPUs

 - Ensure scheduler topology information is kept up-to-date with CPU
   hotplug events

 - Re-enable support for huge vmalloc/IO mappings now that the core code
   has the correct hooks to use break-before-make sequences

 - Miscellaneous, non-critical fixes and cleanups

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABCgAGBQJbbV41AAoJELescNyEwWM0WoEIALhrKtsIn6vqFlSs/w6aDuJL
cMWmFxjTaKLmIq2+cJIdFLOJ3CH80Pu9gB+nEv/k+cZdCTfUVKfRf28HTpmYWsht
bb4AhdHMC7yFW752BHk+mzJspeC8h/2Rm8wMuNVplZ3MkPrwo3vsiuJTofLhVL/y
BihlU3+5sfBvCYIsWnuEZIev+/I/s/qm1ASiqIcKSrFRZP6VTt5f9TC75vFI8seW
7yc3odKb0CArexB8yBjiPNziehctQF42doxQyL45hezLfWw4qdgHOSiwyiOMxEz9
Fwwpp8Tx33SKLNJgqoqYznGW9PhYJ7n2Kslv19uchJrEV+mds82vdDNaWRULld4=
=kQn6
-----END PGP SIGNATURE-----

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Will Deacon:
 "A bunch of good stuff in here. Worth noting is that we've pulled in
  the x86/mm branch from -tip so that we can make use of the core
  ioremap changes which allow us to put down huge mappings in the
  vmalloc area without screwing up the TLB. Much of the positive
  diffstat is because of the rseq selftest for arm64.

  Summary:

   - Wire up support for qspinlock, replacing our trusty ticket lock
     code

   - Add an IPI to flush_icache_range() to ensure that stale
     instructions fetched into the pipeline are discarded along with
     the I-cache lines

   - Support for the GCC "stackleak" plugin

   - Support for restartable sequences, plus an arm64 port for the
     selftest

   - Kexec/kdump support on systems booting with ACPI

   - Rewrite of our syscall entry code in C, which allows us to zero
     the GPRs on entry from userspace

   - Support for chained PMU counters, allowing 64-bit event counters
     to be constructed on current CPUs

   - Ensure scheduler topology information is kept up-to-date with CPU
     hotplug events

   - Re-enable support for huge vmalloc/IO mappings now that the core
     code has the correct hooks to use break-before-make sequences

   - Miscellaneous, non-critical fixes and cleanups"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (90 commits)
  arm64: alternative: Use true and false for boolean values
  arm64: kexec: Add comment to explain use of __flush_icache_range()
  arm64: sdei: Mark sdei stack helper functions as static
  arm64, kaslr: export offset in VMCOREINFO ELF notes
  arm64: perf: Add cap_user_time aarch64
  efi/libstub: Only disable stackleak plugin for arm64
  arm64: drop unused kernel_neon_begin_partial() macro
  arm64: kexec: machine_kexec should call __flush_icache_range
  arm64: svc: Ensure hardirq tracing is updated before return
  arm64: mm: Export __sync_icache_dcache() for xen-privcmd
  drivers/perf: arm-ccn: Use devm_ioremap_resource() to map memory
  arm64: Add support for STACKLEAK gcc plugin
  arm64: Add stack information to on_accessible_stack
  drivers/perf: hisi: update the sccl_id/ccl_id when MT is supported
  arm64: fix ACPI dependencies
  rseq/selftests: Add support for arm64
  arm64: acpi: fix alignment fault in accessing ACPI
  efi/arm: map UEFI memory map even w/o runtime services enabled
  efi/arm: preserve early mapping of UEFI memory map longer for BGRT
  drivers: acpi: add dependency of EFI for arm64
  ...
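For context on the flush_icache_range() item above: the series turns arm64's
flush_icache_range() into an inline wrapper that performs the usual by-VA
I-cache maintenance and then IPIs all online CPUs, so that instructions
already fetched into another CPU's pipeline are discarded as well. This is a
condensed sketch of the new helper, trimmed from the
arch/arm64/include/asm/cacheflush.h hunk later in this diff (comments
shortened here):

static inline void flush_icache_range(unsigned long start, unsigned long end)
{
	/* Clean/invalidate the range by VA first. */
	__flush_icache_range(start, end);

	/*
	 * IPI all online CPUs so that they undergo a context
	 * synchronisation event and are forced to refetch the new
	 * instructions.
	 */
#ifdef CONFIG_KGDB
	/*
	 * KGDB performs cache maintenance with interrupts disabled, so the
	 * IPI would deadlock; KGDB already rounds up the secondary CPUs
	 * with IPIs during patching, so return early in that case.
	 */
	if (kgdb_connected && irqs_disabled())
		return;
#endif
	kick_all_cpus_sync();
}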
commit 1202f4fdbc
@@ -2270,6 +2270,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git
S: Maintained
F: arch/arm64/
X: arch/arm64/boot/dts/
F: Documentation/arm64/

AS3645A LED FLASH CONTROLLER DRIVER
@@ -26,13 +26,13 @@
#include <asm/cputype.h>

/* arm64 compatibility macros */
#define COMPAT_PSR_MODE_ABT ABT_MODE
#define COMPAT_PSR_MODE_UND UND_MODE
#define COMPAT_PSR_T_BIT PSR_T_BIT
#define COMPAT_PSR_I_BIT PSR_I_BIT
#define COMPAT_PSR_A_BIT PSR_A_BIT
#define COMPAT_PSR_E_BIT PSR_E_BIT
#define COMPAT_PSR_IT_MASK PSR_IT_MASK
#define PSR_AA32_MODE_ABT ABT_MODE
#define PSR_AA32_MODE_UND UND_MODE
#define PSR_AA32_T_BIT PSR_T_BIT
#define PSR_AA32_I_BIT PSR_I_BIT
#define PSR_AA32_A_BIT PSR_A_BIT
#define PSR_AA32_E_BIT PSR_E_BIT
#define PSR_AA32_IT_MASK PSR_IT_MASK

unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
@@ -233,7 +233,7 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
return ret;
}

static inline u32 armv6pmu_read_counter(struct perf_event *event)
static inline u64 armv6pmu_read_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -251,7 +251,7 @@ static inline u32 armv6pmu_read_counter(struct perf_event *event)
return value;
}

static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
static inline void armv6pmu_write_counter(struct perf_event *event, u64 value)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -411,6 +411,12 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
}
}

static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
struct perf_event *event)
{
clear_bit(event->hw.idx, cpuc->used_mask);
}

static void armv6pmu_disable_event(struct perf_event *event)
{
unsigned long val, mask, evt, flags;
@@ -491,11 +497,11 @@ static void armv6pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = armv6pmu_read_counter;
cpu_pmu->write_counter = armv6pmu_write_counter;
cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
cpu_pmu->start = armv6pmu_start;
cpu_pmu->stop = armv6pmu_stop;
cpu_pmu->map_event = armv6_map_event;
cpu_pmu->num_events = 3;
cpu_pmu->max_period = (1LLU << 32) - 1;
}

static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
@@ -542,11 +548,11 @@ static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = armv6pmu_read_counter;
cpu_pmu->write_counter = armv6pmu_write_counter;
cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
cpu_pmu->start = armv6pmu_start;
cpu_pmu->stop = armv6pmu_stop;
cpu_pmu->map_event = armv6mpcore_map_event;
cpu_pmu->num_events = 3;
cpu_pmu->max_period = (1LLU << 32) - 1;

return 0;
}
@ -743,7 +743,7 @@ static inline void armv7_pmnc_select_counter(int idx)
|
||||
isb();
|
||||
}
|
||||
|
||||
static inline u32 armv7pmu_read_counter(struct perf_event *event)
|
||||
static inline u64 armv7pmu_read_counter(struct perf_event *event)
|
||||
{
|
||||
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
@ -763,7 +763,7 @@ static inline u32 armv7pmu_read_counter(struct perf_event *event)
|
||||
return value;
|
||||
}
|
||||
|
||||
static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
|
||||
static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
|
||||
{
|
||||
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
@ -1058,6 +1058,12 @@ static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,
|
||||
struct perf_event *event)
|
||||
{
|
||||
clear_bit(event->hw.idx, cpuc->used_mask);
|
||||
}
|
||||
|
||||
/*
|
||||
* Add an event filter to a given event. This will only work for PMUv2 PMUs.
|
||||
*/
|
||||
@ -1167,10 +1173,10 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
|
||||
cpu_pmu->read_counter = armv7pmu_read_counter;
|
||||
cpu_pmu->write_counter = armv7pmu_write_counter;
|
||||
cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
|
||||
cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx;
|
||||
cpu_pmu->start = armv7pmu_start;
|
||||
cpu_pmu->stop = armv7pmu_stop;
|
||||
cpu_pmu->reset = armv7pmu_reset;
|
||||
cpu_pmu->max_period = (1LLU << 32) - 1;
|
||||
};
|
||||
|
||||
static void armv7_read_num_pmnc_events(void *info)
|
||||
@ -1638,6 +1644,7 @@ static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
|
||||
bool venum_event = EVENT_VENUM(hwc->config_base);
|
||||
bool krait_event = EVENT_CPU(hwc->config_base);
|
||||
|
||||
armv7pmu_clear_event_idx(cpuc, event);
|
||||
if (venum_event || krait_event) {
|
||||
bit = krait_event_to_bit(event, region, group);
|
||||
clear_bit(bit, cpuc->used_mask);
|
||||
@ -1967,6 +1974,7 @@ static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
|
||||
bool venum_event = EVENT_VENUM(hwc->config_base);
|
||||
bool scorpion_event = EVENT_CPU(hwc->config_base);
|
||||
|
||||
armv7pmu_clear_event_idx(cpuc, event);
|
||||
if (venum_event || scorpion_event) {
|
||||
bit = scorpion_event_to_bit(event, region, group);
|
||||
clear_bit(bit, cpuc->used_mask);
|
||||
@ -2030,6 +2038,7 @@ static struct platform_driver armv7_pmu_driver = {
|
||||
.driver = {
|
||||
.name = "armv7-pmu",
|
||||
.of_match_table = armv7_pmu_of_device_ids,
|
||||
.suppress_bind_attrs = true,
|
||||
},
|
||||
.probe = armv7_pmu_device_probe,
|
||||
};
|
||||
|
@ -292,6 +292,12 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
|
||||
}
|
||||
}
|
||||
|
||||
static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
|
||||
struct perf_event *event)
|
||||
{
|
||||
clear_bit(event->hw.idx, cpuc->used_mask);
|
||||
}
|
||||
|
||||
static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
|
||||
{
|
||||
unsigned long flags, val;
|
||||
@ -316,7 +322,7 @@ static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
|
||||
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||
}
|
||||
|
||||
static inline u32 xscale1pmu_read_counter(struct perf_event *event)
|
||||
static inline u64 xscale1pmu_read_counter(struct perf_event *event)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
int counter = hwc->idx;
|
||||
@ -337,7 +343,7 @@ static inline u32 xscale1pmu_read_counter(struct perf_event *event)
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val)
|
||||
static inline void xscale1pmu_write_counter(struct perf_event *event, u64 val)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
int counter = hwc->idx;
|
||||
@ -370,11 +376,11 @@ static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
|
||||
cpu_pmu->read_counter = xscale1pmu_read_counter;
|
||||
cpu_pmu->write_counter = xscale1pmu_write_counter;
|
||||
cpu_pmu->get_event_idx = xscale1pmu_get_event_idx;
|
||||
cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
|
||||
cpu_pmu->start = xscale1pmu_start;
|
||||
cpu_pmu->stop = xscale1pmu_stop;
|
||||
cpu_pmu->map_event = xscale_map_event;
|
||||
cpu_pmu->num_events = 3;
|
||||
cpu_pmu->max_period = (1LLU << 32) - 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -679,7 +685,7 @@ static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
|
||||
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||
}
|
||||
|
||||
static inline u32 xscale2pmu_read_counter(struct perf_event *event)
|
||||
static inline u64 xscale2pmu_read_counter(struct perf_event *event)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
int counter = hwc->idx;
|
||||
@ -706,7 +712,7 @@ static inline u32 xscale2pmu_read_counter(struct perf_event *event)
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
|
||||
static inline void xscale2pmu_write_counter(struct perf_event *event, u64 val)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
int counter = hwc->idx;
|
||||
@ -739,11 +745,11 @@ static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
|
||||
cpu_pmu->read_counter = xscale2pmu_read_counter;
|
||||
cpu_pmu->write_counter = xscale2pmu_write_counter;
|
||||
cpu_pmu->get_event_idx = xscale2pmu_get_event_idx;
|
||||
cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
|
||||
cpu_pmu->start = xscale2pmu_start;
|
||||
cpu_pmu->stop = xscale2pmu_stop;
|
||||
cpu_pmu->map_event = xscale_map_event;
|
||||
cpu_pmu->num_events = 5;
|
||||
cpu_pmu->max_period = (1LLU << 32) - 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -24,6 +24,7 @@ config ARM64
|
||||
select ARCH_HAS_SG_CHAIN
|
||||
select ARCH_HAS_STRICT_KERNEL_RWX
|
||||
select ARCH_HAS_STRICT_MODULE_RWX
|
||||
select ARCH_HAS_SYSCALL_WRAPPER
|
||||
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
|
||||
select ARCH_HAVE_NMI_SAFE_CMPXCHG
|
||||
select ARCH_INLINE_READ_LOCK if !PREEMPT
|
||||
@ -42,8 +43,19 @@ config ARM64
|
||||
select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
|
||||
select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
|
||||
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
|
||||
select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT
|
||||
select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT
|
||||
select ARCH_INLINE_SPIN_LOCK if !PREEMPT
|
||||
select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT
|
||||
select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT
|
||||
select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT
|
||||
select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT
|
||||
select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
|
||||
select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
|
||||
select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
|
||||
select ARCH_USE_CMPXCHG_LOCKREF
|
||||
select ARCH_USE_QUEUED_RWLOCKS
|
||||
select ARCH_USE_QUEUED_SPINLOCKS
|
||||
select ARCH_SUPPORTS_MEMORY_FAILURE
|
||||
select ARCH_SUPPORTS_ATOMIC_RMW
|
||||
select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG
|
||||
@ -97,6 +109,7 @@ config ARM64
|
||||
select HAVE_ARCH_MMAP_RND_BITS
|
||||
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
|
||||
select HAVE_ARCH_SECCOMP_FILTER
|
||||
select HAVE_ARCH_STACKLEAK
|
||||
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
|
||||
select HAVE_ARCH_TRACEHOOK
|
||||
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
|
||||
@ -128,6 +141,7 @@ config ARM64
|
||||
select HAVE_PERF_USER_STACK_DUMP
|
||||
select HAVE_REGS_AND_STACK_ACCESS_API
|
||||
select HAVE_RCU_TABLE_FREE
|
||||
select HAVE_RSEQ
|
||||
select HAVE_STACKPROTECTOR
|
||||
select HAVE_SYSCALL_TRACEPOINTS
|
||||
select HAVE_KPROBES
|
||||
@ -773,6 +787,9 @@ config ARCH_SPARSEMEM_DEFAULT
|
||||
config ARCH_SELECT_MEMORY_MODEL
|
||||
def_bool ARCH_SPARSEMEM_ENABLE
|
||||
|
||||
config ARCH_FLATMEM_ENABLE
|
||||
def_bool !NUMA
|
||||
|
||||
config HAVE_ARCH_PFN_VALID
|
||||
def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
|
||||
|
||||
@ -1244,6 +1261,7 @@ config EFI
|
||||
bool "UEFI runtime support"
|
||||
depends on OF && !CPU_BIG_ENDIAN
|
||||
depends on KERNEL_MODE_NEON
|
||||
select ARCH_SUPPORTS_ACPI
|
||||
select LIBFDT
|
||||
select UCS2_STRING
|
||||
select EFI_PARAMS_FROM_FDT
|
||||
|
@@ -60,15 +60,16 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__AARCH64EB__
AS += -EB
# We must use the linux target here, since distributions don't tend to package
# the ELF linker scripts with binutils, and this results in a build failure.
LDFLAGS += -EB -maarch64linuxb
# Prefer the baremetal ELF build target, but not all toolchains include
# it so fall back to the standard linux version if needed.
LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
UTS_MACHINE := aarch64_be
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__AARCH64EL__
AS += -EL
LDFLAGS += -EL -maarch64linux # See comment above
# Same as above, prefer ELF but fall back to linux target if needed.
LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux)
UTS_MACHINE := aarch64
endif
@@ -16,6 +16,7 @@ generic-y += mm-arch-hooks.h
generic-y += msi.h
generic-y += preempt.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += rwsem.h
generic-y += segment.h
generic-y += serial.h
@ -12,10 +12,12 @@
|
||||
#ifndef _ASM_ACPI_H
|
||||
#define _ASM_ACPI_H
|
||||
|
||||
#include <linux/efi.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/psci.h>
|
||||
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/smp_plat.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
@ -29,18 +31,22 @@
|
||||
|
||||
/* Basic configuration for ACPI */
|
||||
#ifdef CONFIG_ACPI
|
||||
pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
|
||||
|
||||
/* ACPI table mapping after acpi_permanent_mmap is set */
|
||||
static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
|
||||
acpi_size size)
|
||||
{
|
||||
/*
|
||||
* EFI's reserve_regions() call adds memory with the WB attribute
|
||||
* to memblock via early_init_dt_add_memory_arch().
|
||||
*/
|
||||
if (!memblock_is_memory(phys))
|
||||
return ioremap(phys, size);
|
||||
/* For normal memory we already have a cacheable mapping. */
|
||||
if (memblock_is_map_memory(phys))
|
||||
return (void __iomem *)__phys_to_virt(phys);
|
||||
|
||||
return ioremap_cache(phys, size);
|
||||
/*
|
||||
* We should still honor the memory's attribute here because
|
||||
* crash dump kernel possibly excludes some ACPI (reclaim)
|
||||
* regions from memblock list.
|
||||
*/
|
||||
return __ioremap(phys, size, __acpi_get_mem_attribute(phys));
|
||||
}
|
||||
#define acpi_os_ioremap acpi_os_ioremap
|
||||
|
||||
@ -129,15 +135,20 @@ static inline const char *acpi_get_enable_method(int cpu)
|
||||
* for compatibility.
|
||||
*/
|
||||
#define acpi_disable_cmcff 1
|
||||
pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr);
|
||||
static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
|
||||
{
|
||||
return __acpi_get_mem_attribute(addr);
|
||||
}
|
||||
#endif /* CONFIG_ACPI_APEI */
|
||||
|
||||
#ifdef CONFIG_ACPI_NUMA
|
||||
int arm64_acpi_numa_init(void);
|
||||
int acpi_numa_get_nid(unsigned int cpu, u64 hwid);
|
||||
int acpi_numa_get_nid(unsigned int cpu);
|
||||
void acpi_map_cpus_to_nodes(void);
|
||||
#else
|
||||
static inline int arm64_acpi_numa_init(void) { return -ENOSYS; }
|
||||
static inline int acpi_numa_get_nid(unsigned int cpu, u64 hwid) { return NUMA_NO_NODE; }
|
||||
static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
|
||||
static inline void acpi_map_cpus_to_nodes(void) { }
|
||||
#endif /* CONFIG_ACPI_NUMA */
|
||||
|
||||
#define ACPI_TABLE_UPGRADE_MAX_PHYS MEMBLOCK_ALLOC_ACCESSIBLE
|
||||
|
@ -128,6 +128,19 @@ do { \
|
||||
__u.__val; \
|
||||
})
|
||||
|
||||
#define smp_cond_load_relaxed(ptr, cond_expr) \
|
||||
({ \
|
||||
typeof(ptr) __PTR = (ptr); \
|
||||
typeof(*ptr) VAL; \
|
||||
for (;;) { \
|
||||
VAL = READ_ONCE(*__PTR); \
|
||||
if (cond_expr) \
|
||||
break; \
|
||||
__cmpwait_relaxed(__PTR, VAL); \
|
||||
} \
|
||||
VAL; \
|
||||
})
|
||||
|
||||
#define smp_cond_load_acquire(ptr, cond_expr) \
|
||||
({ \
|
||||
typeof(ptr) __PTR = (ptr); \
|
||||
|
@ -21,12 +21,16 @@
|
||||
#define CTR_L1IP_SHIFT 14
|
||||
#define CTR_L1IP_MASK 3
|
||||
#define CTR_DMINLINE_SHIFT 16
|
||||
#define CTR_IMINLINE_SHIFT 0
|
||||
#define CTR_ERG_SHIFT 20
|
||||
#define CTR_CWG_SHIFT 24
|
||||
#define CTR_CWG_MASK 15
|
||||
#define CTR_IDC_SHIFT 28
|
||||
#define CTR_DIC_SHIFT 29
|
||||
|
||||
#define CTR_CACHE_MINLINE_MASK \
|
||||
(0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
|
||||
|
||||
#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
|
||||
|
||||
#define ICACHE_POLICY_VPIPT 0
|
||||
|
@ -19,6 +19,7 @@
|
||||
#ifndef __ASM_CACHEFLUSH_H
|
||||
#define __ASM_CACHEFLUSH_H
|
||||
|
||||
#include <linux/kgdb.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
/*
|
||||
@ -71,7 +72,7 @@
|
||||
* - kaddr - page address
|
||||
* - size - region size
|
||||
*/
|
||||
extern void flush_icache_range(unsigned long start, unsigned long end);
|
||||
extern void __flush_icache_range(unsigned long start, unsigned long end);
|
||||
extern int invalidate_icache_range(unsigned long start, unsigned long end);
|
||||
extern void __flush_dcache_area(void *addr, size_t len);
|
||||
extern void __inval_dcache_area(void *addr, size_t len);
|
||||
@ -81,6 +82,30 @@ extern void __clean_dcache_area_pou(void *addr, size_t len);
|
||||
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
|
||||
extern void sync_icache_aliases(void *kaddr, unsigned long len);
|
||||
|
||||
static inline void flush_icache_range(unsigned long start, unsigned long end)
|
||||
{
|
||||
__flush_icache_range(start, end);
|
||||
|
||||
/*
|
||||
* IPI all online CPUs so that they undergo a context synchronization
|
||||
* event and are forced to refetch the new instructions.
|
||||
*/
|
||||
#ifdef CONFIG_KGDB
|
||||
/*
|
||||
* KGDB performs cache maintenance with interrupts disabled, so we
|
||||
* will deadlock trying to IPI the secondary CPUs. In theory, we can
|
||||
* set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that
|
||||
* just means that KGDB will elide the maintenance altogether! As it
|
||||
* turns out, KGDB uses IPIs to round-up the secondary CPUs during
|
||||
* the patching operation, so we don't need extra IPIs here anyway.
|
||||
* In which case, add a KGDB-specific bodge and return early.
|
||||
*/
|
||||
if (kgdb_connected && irqs_disabled())
|
||||
return;
|
||||
#endif
|
||||
kick_all_cpus_sync();
|
||||
}
|
||||
|
||||
static inline void flush_cache_mm(struct mm_struct *mm)
|
||||
{
|
||||
}
|
||||
|
@ -49,7 +49,8 @@
|
||||
#define ARM64_HAS_CACHE_DIC 28
|
||||
#define ARM64_HW_DBM 29
|
||||
#define ARM64_SSBD 30
|
||||
#define ARM64_MISMATCHED_CACHE_TYPE 31
|
||||
|
||||
#define ARM64_NCAPS 31
|
||||
#define ARM64_NCAPS 32
|
||||
|
||||
#endif /* __ASM_CPUCAPS_H */
|
||||
|
@ -16,13 +16,15 @@
|
||||
#ifndef __ASM_FP_H
|
||||
#define __ASM_FP_H
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/errno.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/sigcontext.h>
|
||||
#include <asm/sysreg.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/build_bug.h>
|
||||
#include <linux/cache.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/stddef.h>
|
||||
@ -102,6 +104,16 @@ extern int sve_set_vector_length(struct task_struct *task,
|
||||
extern int sve_set_current_vl(unsigned long arg);
|
||||
extern int sve_get_current_vl(void);
|
||||
|
||||
static inline void sve_user_disable(void)
|
||||
{
|
||||
sysreg_clear_set(cpacr_el1, CPACR_EL1_ZEN_EL0EN, 0);
|
||||
}
|
||||
|
||||
static inline void sve_user_enable(void)
|
||||
{
|
||||
sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN);
|
||||
}
|
||||
|
||||
/*
|
||||
* Probing and setup functions.
|
||||
* Calls to these functions must be serialised with one another.
|
||||
@ -128,6 +140,9 @@ static inline int sve_get_current_vl(void)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline void sve_user_disable(void) { BUILD_BUG(); }
|
||||
static inline void sve_user_enable(void) { BUILD_BUG(); }
|
||||
|
||||
static inline void sve_init_vq_map(void) { }
|
||||
static inline void sve_update_vq_map(void) { }
|
||||
static inline int sve_verify_vq_map(void) { return 0; }
|
||||
|
@ -446,8 +446,6 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
|
||||
s32 aarch64_get_branch_offset(u32 insn);
|
||||
u32 aarch64_set_branch_offset(u32 insn, s32 offset);
|
||||
|
||||
bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
|
||||
|
||||
int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
|
||||
int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
|
||||
|
||||
|
@ -140,7 +140,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
|
||||
|
||||
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
|
||||
*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -190,8 +190,8 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
|
||||
u32 mode;
|
||||
|
||||
if (vcpu_mode_is_32bit(vcpu)) {
|
||||
mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
|
||||
return mode > COMPAT_PSR_MODE_USR;
|
||||
mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
|
||||
return mode > PSR_AA32_MODE_USR;
|
||||
}
|
||||
|
||||
mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
|
||||
@ -329,7 +329,7 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
|
||||
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (vcpu_mode_is_32bit(vcpu)) {
|
||||
*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
|
||||
*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
|
||||
} else {
|
||||
u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
|
||||
sctlr |= (1 << 25);
|
||||
@ -340,7 +340,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
|
||||
static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (vcpu_mode_is_32bit(vcpu))
|
||||
return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
|
||||
return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);
|
||||
|
||||
return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
|
||||
}
|
||||
|
@ -19,11 +19,4 @@
|
||||
void kernel_neon_begin(void);
|
||||
void kernel_neon_end(void);
|
||||
|
||||
/*
|
||||
* Temporary macro to allow the crypto code to compile. Note that the
|
||||
* semantics of kernel_neon_begin_partial() are now different from the
|
||||
* original as it does not allow being called in an interrupt context.
|
||||
*/
|
||||
#define kernel_neon_begin_partial(num_regs) kernel_neon_begin()
|
||||
|
||||
#endif /* ! __ASM_NEON_H */
|
||||
|
@ -35,10 +35,14 @@ void __init numa_set_distance(int from, int to, int distance);
|
||||
void __init numa_free_distance(void);
|
||||
void __init early_map_cpu_to_node(unsigned int cpu, int nid);
|
||||
void numa_store_cpu_info(unsigned int cpu);
|
||||
void numa_add_cpu(unsigned int cpu);
|
||||
void numa_remove_cpu(unsigned int cpu);
|
||||
|
||||
#else /* CONFIG_NUMA */
|
||||
|
||||
static inline void numa_store_cpu_info(unsigned int cpu) { }
|
||||
static inline void numa_add_cpu(unsigned int cpu) { }
|
||||
static inline void numa_remove_cpu(unsigned int cpu) { }
|
||||
static inline void arm64_numa_init(void) { }
|
||||
static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
|
||||
|
||||
|
@ -182,12 +182,12 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
|
||||
unsigned long sp)
|
||||
{
|
||||
start_thread_common(regs, pc);
|
||||
regs->pstate = COMPAT_PSR_MODE_USR;
|
||||
regs->pstate = PSR_AA32_MODE_USR;
|
||||
if (pc & 1)
|
||||
regs->pstate |= COMPAT_PSR_T_BIT;
|
||||
regs->pstate |= PSR_AA32_T_BIT;
|
||||
|
||||
#ifdef __AARCH64EB__
|
||||
regs->pstate |= COMPAT_PSR_E_BIT;
|
||||
regs->pstate |= PSR_AA32_E_BIT;
|
||||
#endif
|
||||
|
||||
regs->compat_sp = sp;
|
||||
@ -266,5 +266,20 @@ extern void __init minsigstksz_setup(void);
|
||||
#define SVE_SET_VL(arg) sve_set_current_vl(arg)
|
||||
#define SVE_GET_VL() sve_get_current_vl()
|
||||
|
||||
/*
|
||||
* For CONFIG_GCC_PLUGIN_STACKLEAK
|
||||
*
|
||||
* These need to be macros because otherwise we get stuck in a nightmare
|
||||
* of header definitions for the use of task_stack_page.
|
||||
*/
|
||||
|
||||
#define current_top_of_stack() \
|
||||
({ \
|
||||
struct stack_info _info; \
|
||||
BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info)); \
|
||||
_info.high; \
|
||||
})
|
||||
#define on_thread_stack() (on_task_stack(current, current_stack_pointer, NULL))
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* __ASM_PROCESSOR_H */
|
||||
|
@ -35,36 +35,39 @@
|
||||
#define COMPAT_PTRACE_GETHBPREGS 29
|
||||
#define COMPAT_PTRACE_SETHBPREGS 30
|
||||
|
||||
/* AArch32 CPSR bits */
|
||||
#define COMPAT_PSR_MODE_MASK 0x0000001f
|
||||
#define COMPAT_PSR_MODE_USR 0x00000010
|
||||
#define COMPAT_PSR_MODE_FIQ 0x00000011
|
||||
#define COMPAT_PSR_MODE_IRQ 0x00000012
|
||||
#define COMPAT_PSR_MODE_SVC 0x00000013
|
||||
#define COMPAT_PSR_MODE_ABT 0x00000017
|
||||
#define COMPAT_PSR_MODE_HYP 0x0000001a
|
||||
#define COMPAT_PSR_MODE_UND 0x0000001b
|
||||
#define COMPAT_PSR_MODE_SYS 0x0000001f
|
||||
#define COMPAT_PSR_T_BIT 0x00000020
|
||||
#define COMPAT_PSR_F_BIT 0x00000040
|
||||
#define COMPAT_PSR_I_BIT 0x00000080
|
||||
#define COMPAT_PSR_A_BIT 0x00000100
|
||||
#define COMPAT_PSR_E_BIT 0x00000200
|
||||
#define COMPAT_PSR_J_BIT 0x01000000
|
||||
#define COMPAT_PSR_Q_BIT 0x08000000
|
||||
#define COMPAT_PSR_V_BIT 0x10000000
|
||||
#define COMPAT_PSR_C_BIT 0x20000000
|
||||
#define COMPAT_PSR_Z_BIT 0x40000000
|
||||
#define COMPAT_PSR_N_BIT 0x80000000
|
||||
#define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
|
||||
#define COMPAT_PSR_GE_MASK 0x000f0000
|
||||
/* SPSR_ELx bits for exceptions taken from AArch32 */
|
||||
#define PSR_AA32_MODE_MASK 0x0000001f
|
||||
#define PSR_AA32_MODE_USR 0x00000010
|
||||
#define PSR_AA32_MODE_FIQ 0x00000011
|
||||
#define PSR_AA32_MODE_IRQ 0x00000012
|
||||
#define PSR_AA32_MODE_SVC 0x00000013
|
||||
#define PSR_AA32_MODE_ABT 0x00000017
|
||||
#define PSR_AA32_MODE_HYP 0x0000001a
|
||||
#define PSR_AA32_MODE_UND 0x0000001b
|
||||
#define PSR_AA32_MODE_SYS 0x0000001f
|
||||
#define PSR_AA32_T_BIT 0x00000020
|
||||
#define PSR_AA32_F_BIT 0x00000040
|
||||
#define PSR_AA32_I_BIT 0x00000080
|
||||
#define PSR_AA32_A_BIT 0x00000100
|
||||
#define PSR_AA32_E_BIT 0x00000200
|
||||
#define PSR_AA32_DIT_BIT 0x01000000
|
||||
#define PSR_AA32_Q_BIT 0x08000000
|
||||
#define PSR_AA32_V_BIT 0x10000000
|
||||
#define PSR_AA32_C_BIT 0x20000000
|
||||
#define PSR_AA32_Z_BIT 0x40000000
|
||||
#define PSR_AA32_N_BIT 0x80000000
|
||||
#define PSR_AA32_IT_MASK 0x0600fc00 /* If-Then execution state mask */
|
||||
#define PSR_AA32_GE_MASK 0x000f0000
|
||||
|
||||
#ifdef CONFIG_CPU_BIG_ENDIAN
|
||||
#define COMPAT_PSR_ENDSTATE COMPAT_PSR_E_BIT
|
||||
#define PSR_AA32_ENDSTATE PSR_AA32_E_BIT
|
||||
#else
|
||||
#define COMPAT_PSR_ENDSTATE 0
|
||||
#define PSR_AA32_ENDSTATE 0
|
||||
#endif
|
||||
|
||||
/* AArch32 CPSR bits, as seen in AArch32 */
|
||||
#define COMPAT_PSR_DIT_BIT 0x00200000
|
||||
|
||||
/*
|
||||
* These are 'magic' values for PTRACE_PEEKUSR that return info about where a
|
||||
* process is located in memory.
|
||||
@ -111,6 +114,30 @@
|
||||
#define compat_sp_fiq regs[29]
|
||||
#define compat_lr_fiq regs[30]
|
||||
|
||||
static inline unsigned long compat_psr_to_pstate(const unsigned long psr)
|
||||
{
|
||||
unsigned long pstate;
|
||||
|
||||
pstate = psr & ~COMPAT_PSR_DIT_BIT;
|
||||
|
||||
if (psr & COMPAT_PSR_DIT_BIT)
|
||||
pstate |= PSR_AA32_DIT_BIT;
|
||||
|
||||
return pstate;
|
||||
}
|
||||
|
||||
static inline unsigned long pstate_to_compat_psr(const unsigned long pstate)
|
||||
{
|
||||
unsigned long psr;
|
||||
|
||||
psr = pstate & ~PSR_AA32_DIT_BIT;
|
||||
|
||||
if (pstate & PSR_AA32_DIT_BIT)
|
||||
psr |= COMPAT_PSR_DIT_BIT;
|
||||
|
||||
return psr;
|
||||
}
|
||||
|
||||
/*
|
||||
* This struct defines the way the registers are stored on the stack during an
|
||||
* exception. Note that sizeof(struct pt_regs) has to be a multiple of 16 (for
|
||||
@ -156,7 +183,7 @@ static inline void forget_syscall(struct pt_regs *regs)
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
#define compat_thumb_mode(regs) \
|
||||
(((regs)->pstate & COMPAT_PSR_T_BIT))
|
||||
(((regs)->pstate & PSR_AA32_T_BIT))
|
||||
#else
|
||||
#define compat_thumb_mode(regs) (0)
|
||||
#endif
|
||||
|
@ -40,15 +40,18 @@ asmlinkage unsigned long __sdei_handler(struct pt_regs *regs,
|
||||
unsigned long sdei_arch_get_entry_point(int conduit);
|
||||
#define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x)
|
||||
|
||||
bool _on_sdei_stack(unsigned long sp);
|
||||
static inline bool on_sdei_stack(unsigned long sp)
|
||||
struct stack_info;
|
||||
|
||||
bool _on_sdei_stack(unsigned long sp, struct stack_info *info);
|
||||
static inline bool on_sdei_stack(unsigned long sp,
|
||||
struct stack_info *info)
|
||||
{
|
||||
if (!IS_ENABLED(CONFIG_VMAP_STACK))
|
||||
return false;
|
||||
if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
|
||||
return false;
|
||||
if (in_nmi())
|
||||
return _on_sdei_stack(sp);
|
||||
return _on_sdei_stack(sp, info);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
@ -16,123 +16,8 @@
|
||||
#ifndef __ASM_SPINLOCK_H
|
||||
#define __ASM_SPINLOCK_H
|
||||
|
||||
#include <asm/lse.h>
|
||||
#include <asm/spinlock_types.h>
|
||||
#include <asm/processor.h>
|
||||
|
||||
/*
|
||||
* Spinlock implementation.
|
||||
*
|
||||
* The memory barriers are implicit with the load-acquire and store-release
|
||||
* instructions.
|
||||
*/
|
||||
|
||||
static inline void arch_spin_lock(arch_spinlock_t *lock)
|
||||
{
|
||||
unsigned int tmp;
|
||||
arch_spinlock_t lockval, newval;
|
||||
|
||||
asm volatile(
|
||||
/* Atomically increment the next ticket. */
|
||||
ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" prfm pstl1strm, %3\n"
|
||||
"1: ldaxr %w0, %3\n"
|
||||
" add %w1, %w0, %w5\n"
|
||||
" stxr %w2, %w1, %3\n"
|
||||
" cbnz %w2, 1b\n",
|
||||
/* LSE atomics */
|
||||
" mov %w2, %w5\n"
|
||||
" ldadda %w2, %w0, %3\n"
|
||||
__nops(3)
|
||||
)
|
||||
|
||||
/* Did we get the lock? */
|
||||
" eor %w1, %w0, %w0, ror #16\n"
|
||||
" cbz %w1, 3f\n"
|
||||
/*
|
||||
* No: spin on the owner. Send a local event to avoid missing an
|
||||
* unlock before the exclusive load.
|
||||
*/
|
||||
" sevl\n"
|
||||
"2: wfe\n"
|
||||
" ldaxrh %w2, %4\n"
|
||||
" eor %w1, %w2, %w0, lsr #16\n"
|
||||
" cbnz %w1, 2b\n"
|
||||
/* We got the lock. Critical section starts here. */
|
||||
"3:"
|
||||
: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
|
||||
: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
|
||||
: "memory");
|
||||
}
|
||||
|
||||
static inline int arch_spin_trylock(arch_spinlock_t *lock)
|
||||
{
|
||||
unsigned int tmp;
|
||||
arch_spinlock_t lockval;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" prfm pstl1strm, %2\n"
|
||||
"1: ldaxr %w0, %2\n"
|
||||
" eor %w1, %w0, %w0, ror #16\n"
|
||||
" cbnz %w1, 2f\n"
|
||||
" add %w0, %w0, %3\n"
|
||||
" stxr %w1, %w0, %2\n"
|
||||
" cbnz %w1, 1b\n"
|
||||
"2:",
|
||||
/* LSE atomics */
|
||||
" ldr %w0, %2\n"
|
||||
" eor %w1, %w0, %w0, ror #16\n"
|
||||
" cbnz %w1, 1f\n"
|
||||
" add %w1, %w0, %3\n"
|
||||
" casa %w0, %w1, %2\n"
|
||||
" sub %w1, %w1, %3\n"
|
||||
" eor %w1, %w1, %w0\n"
|
||||
"1:")
|
||||
: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
|
||||
: "I" (1 << TICKET_SHIFT)
|
||||
: "memory");
|
||||
|
||||
return !tmp;
|
||||
}
|
||||
|
||||
static inline void arch_spin_unlock(arch_spinlock_t *lock)
|
||||
{
|
||||
unsigned long tmp;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" ldrh %w1, %0\n"
|
||||
" add %w1, %w1, #1\n"
|
||||
" stlrh %w1, %0",
|
||||
/* LSE atomics */
|
||||
" mov %w1, #1\n"
|
||||
" staddlh %w1, %0\n"
|
||||
__nops(1))
|
||||
: "=Q" (lock->owner), "=&r" (tmp)
|
||||
:
|
||||
: "memory");
|
||||
}
|
||||
|
||||
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
|
||||
{
|
||||
return lock.owner == lock.next;
|
||||
}
|
||||
|
||||
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
|
||||
{
|
||||
return !arch_spin_value_unlocked(READ_ONCE(*lock));
|
||||
}
|
||||
|
||||
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
|
||||
{
|
||||
arch_spinlock_t lockval = READ_ONCE(*lock);
|
||||
return (lockval.next - lockval.owner) > 1;
|
||||
}
|
||||
#define arch_spin_is_contended arch_spin_is_contended
|
||||
|
||||
#include <asm/qrwlock.h>
|
||||
#include <asm/qspinlock.h>
|
||||
|
||||
/* See include/linux/spinlock.h */
|
||||
#define smp_mb__after_spinlock() smp_mb()
|
||||
|
@ -20,22 +20,7 @@
|
||||
# error "please don't include this file directly"
|
||||
#endif
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
#define TICKET_SHIFT 16
|
||||
|
||||
typedef struct {
|
||||
#ifdef __AARCH64EB__
|
||||
u16 next;
|
||||
u16 owner;
|
||||
#else
|
||||
u16 owner;
|
||||
u16 next;
|
||||
#endif
|
||||
} __aligned(4) arch_spinlock_t;
|
||||
|
||||
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 }
|
||||
|
||||
#include <asm-generic/qspinlock_types.h>
|
||||
#include <asm-generic/qrwlock_types.h>
|
||||
|
||||
#endif
|
||||
|
@ -32,6 +32,21 @@ struct stackframe {
|
||||
#endif
|
||||
};
|
||||
|
||||
enum stack_type {
|
||||
STACK_TYPE_UNKNOWN,
|
||||
STACK_TYPE_TASK,
|
||||
STACK_TYPE_IRQ,
|
||||
STACK_TYPE_OVERFLOW,
|
||||
STACK_TYPE_SDEI_NORMAL,
|
||||
STACK_TYPE_SDEI_CRITICAL,
|
||||
};
|
||||
|
||||
struct stack_info {
|
||||
unsigned long low;
|
||||
unsigned long high;
|
||||
enum stack_type type;
|
||||
};
|
||||
|
||||
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
|
||||
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
|
||||
int (*fn)(struct stackframe *, void *), void *data);
|
||||
@ -39,7 +54,8 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);
|
||||
|
||||
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
|
||||
|
||||
static inline bool on_irq_stack(unsigned long sp)
|
||||
static inline bool on_irq_stack(unsigned long sp,
|
||||
struct stack_info *info)
|
||||
{
|
||||
unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
|
||||
unsigned long high = low + IRQ_STACK_SIZE;
|
||||
@ -47,46 +63,79 @@ static inline bool on_irq_stack(unsigned long sp)
|
||||
if (!low)
|
||||
return false;
|
||||
|
||||
return (low <= sp && sp < high);
|
||||
if (sp < low || sp >= high)
|
||||
return false;
|
||||
|
||||
if (info) {
|
||||
info->low = low;
|
||||
info->high = high;
|
||||
info->type = STACK_TYPE_IRQ;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp)
|
||||
static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp,
|
||||
struct stack_info *info)
|
||||
{
|
||||
unsigned long low = (unsigned long)task_stack_page(tsk);
|
||||
unsigned long high = low + THREAD_SIZE;
|
||||
|
||||
return (low <= sp && sp < high);
|
||||
if (sp < low || sp >= high)
|
||||
return false;
|
||||
|
||||
if (info) {
|
||||
info->low = low;
|
||||
info->high = high;
|
||||
info->type = STACK_TYPE_TASK;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
|
||||
|
||||
static inline bool on_overflow_stack(unsigned long sp)
|
||||
static inline bool on_overflow_stack(unsigned long sp,
|
||||
struct stack_info *info)
|
||||
{
|
||||
unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
|
||||
unsigned long high = low + OVERFLOW_STACK_SIZE;
|
||||
|
||||
return (low <= sp && sp < high);
|
||||
if (sp < low || sp >= high)
|
||||
return false;
|
||||
|
||||
if (info) {
|
||||
info->low = low;
|
||||
info->high = high;
|
||||
info->type = STACK_TYPE_OVERFLOW;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
#else
|
||||
static inline bool on_overflow_stack(unsigned long sp) { return false; }
|
||||
static inline bool on_overflow_stack(unsigned long sp,
|
||||
struct stack_info *info) { return false; }
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* We can only safely access per-cpu stacks from current in a non-preemptible
|
||||
* context.
|
||||
*/
|
||||
static inline bool on_accessible_stack(struct task_struct *tsk, unsigned long sp)
|
||||
static inline bool on_accessible_stack(struct task_struct *tsk,
|
||||
unsigned long sp,
|
||||
struct stack_info *info)
|
||||
{
|
||||
if (on_task_stack(tsk, sp))
|
||||
if (on_task_stack(tsk, sp, info))
|
||||
return true;
|
||||
if (tsk != current || preemptible())
|
||||
return false;
|
||||
if (on_irq_stack(sp))
|
||||
if (on_irq_stack(sp, info))
|
||||
return true;
|
||||
if (on_overflow_stack(sp))
|
||||
if (on_overflow_stack(sp, info))
|
||||
return true;
|
||||
if (on_sdei_stack(sp))
|
||||
if (on_sdei_stack(sp, info))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
|
@ -20,7 +20,13 @@
|
||||
#include <linux/compat.h>
|
||||
#include <linux/err.h>
|
||||
|
||||
extern const void *sys_call_table[];
|
||||
typedef long (*syscall_fn_t)(struct pt_regs *regs);
|
||||
|
||||
extern const syscall_fn_t sys_call_table[];
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
extern const syscall_fn_t compat_sys_call_table[];
|
||||
#endif
|
||||
|
||||
static inline int syscall_get_nr(struct task_struct *task,
|
||||
struct pt_regs *regs)
|
||||
|
arch/arm64/include/asm/syscall_wrapper.h (new file, 80 lines)
@@ -0,0 +1,80 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* syscall_wrapper.h - arm64 specific wrappers to syscall definitions
|
||||
*
|
||||
* Based on arch/x86/include/asm_syscall_wrapper.h
|
||||
*/
|
||||
|
||||
#ifndef __ASM_SYSCALL_WRAPPER_H
|
||||
#define __ASM_SYSCALL_WRAPPER_H
|
||||
|
||||
#define SC_ARM64_REGS_TO_ARGS(x, ...) \
|
||||
__MAP(x,__SC_ARGS \
|
||||
,,regs->regs[0],,regs->regs[1],,regs->regs[2] \
|
||||
,,regs->regs[3],,regs->regs[4],,regs->regs[5])
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
|
||||
#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
|
||||
asmlinkage long __arm64_compat_sys##name(const struct pt_regs *regs); \
|
||||
ALLOW_ERROR_INJECTION(__arm64_compat_sys##name, ERRNO); \
|
||||
static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
|
||||
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
|
||||
asmlinkage long __arm64_compat_sys##name(const struct pt_regs *regs) \
|
||||
{ \
|
||||
return __se_compat_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \
|
||||
} \
|
||||
static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
|
||||
{ \
|
||||
return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
|
||||
} \
|
||||
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
|
||||
|
||||
#define COMPAT_SYSCALL_DEFINE0(sname) \
|
||||
asmlinkage long __arm64_compat_sys_##sname(void); \
|
||||
ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \
|
||||
asmlinkage long __arm64_compat_sys_##sname(void)
|
||||
|
||||
#define COND_SYSCALL_COMPAT(name) \
|
||||
cond_syscall(__arm64_compat_sys_##name);
|
||||
|
||||
#define COMPAT_SYS_NI(name) \
|
||||
SYSCALL_ALIAS(__arm64_compat_sys_##name, sys_ni_posix_timers);
|
||||
|
||||
#endif /* CONFIG_COMPAT */
|
||||
|
||||
#define __SYSCALL_DEFINEx(x, name, ...) \
|
||||
asmlinkage long __arm64_sys##name(const struct pt_regs *regs); \
|
||||
ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO); \
|
||||
static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
|
||||
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
|
||||
asmlinkage long __arm64_sys##name(const struct pt_regs *regs) \
|
||||
{ \
|
||||
return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \
|
||||
} \
|
||||
static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
|
||||
{ \
|
||||
long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
|
||||
__MAP(x,__SC_TEST,__VA_ARGS__); \
|
||||
__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
|
||||
return ret; \
|
||||
} \
|
||||
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
|
||||
|
||||
#ifndef SYSCALL_DEFINE0
|
||||
#define SYSCALL_DEFINE0(sname) \
|
||||
SYSCALL_METADATA(_##sname, 0); \
|
||||
asmlinkage long __arm64_sys_##sname(void); \
|
||||
ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \
|
||||
asmlinkage long __arm64_sys_##sname(void)
|
||||
#endif
|
||||
|
||||
#ifndef COND_SYSCALL
|
||||
#define COND_SYSCALL(name) cond_syscall(__arm64_sys_##name)
|
||||
#endif
|
||||
|
||||
#ifndef SYS_NI
|
||||
#define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers);
|
||||
#endif
|
||||
|
||||
#endif /* __ASM_SYSCALL_WRAPPER_H */
|
@ -436,7 +436,8 @@
|
||||
#define SCTLR_EL2_RES0 ((1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | \
|
||||
(1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
|
||||
(1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
|
||||
(1 << 27) | (1 << 30) | (1 << 31))
|
||||
(1 << 27) | (1 << 30) | (1 << 31) | \
|
||||
(0xffffffffUL << 32))
|
||||
|
||||
#ifdef CONFIG_CPU_BIG_ENDIAN
|
||||
#define ENDIAN_SET_EL2 SCTLR_ELx_EE
|
||||
@ -452,9 +453,9 @@
|
||||
SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
|
||||
ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
|
||||
|
||||
/* Check all the bits are accounted for */
|
||||
#define SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != ~0)
|
||||
|
||||
#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
|
||||
#error "Inconsistent SCTLR_EL2 set/clear bits"
|
||||
#endif
|
||||
|
||||
/* SCTLR_EL1 specific flags. */
|
||||
#define SCTLR_EL1_UCI (1 << 26)
|
||||
@ -473,7 +474,8 @@
|
||||
#define SCTLR_EL1_RES1 ((1 << 11) | (1 << 20) | (1 << 22) | (1 << 28) | \
|
||||
(1 << 29))
|
||||
#define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \
|
||||
(1 << 27) | (1 << 30) | (1 << 31))
|
||||
(1 << 27) | (1 << 30) | (1 << 31) | \
|
||||
(0xffffffffUL << 32))
|
||||
|
||||
#ifdef CONFIG_CPU_BIG_ENDIAN
|
||||
#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
|
||||
@ -492,8 +494,9 @@
|
||||
SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
|
||||
SCTLR_EL1_RES0)
|
||||
|
||||
/* Check all the bits are accounted for */
|
||||
#define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0)
|
||||
#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
|
||||
#error "Inconsistent SCTLR_EL1 set/clear bits"
|
||||
#endif
|
||||
|
||||
/* id_aa64isar0 */
|
||||
#define ID_AA64ISAR0_TS_SHIFT 52
|
||||
@ -739,19 +742,6 @@ asm(
|
||||
write_sysreg(__scs_new, sysreg); \
|
||||
} while (0)
|
||||
|
||||
static inline void config_sctlr_el1(u32 clear, u32 set)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS;
|
||||
SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS;
|
||||
|
||||
val = read_sysreg(sctlr_el1);
|
||||
val &= ~clear;
|
||||
val |= set;
|
||||
write_sysreg(val, sctlr_el1);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* __ASM_SYSREG_H */
|
||||
|
@ -218,6 +218,13 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
|
||||
dsb(ish);
|
||||
}
|
||||
|
||||
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
|
||||
{
|
||||
unsigned long addr = __TLBI_VADDR(kaddr, 0);
|
||||
|
||||
__tlbi(vaae1is, addr);
|
||||
dsb(ish);
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
@ -11,7 +11,7 @@ struct cpu_topology {
|
||||
int llc_id;
|
||||
cpumask_t thread_sibling;
|
||||
cpumask_t core_sibling;
|
||||
cpumask_t llc_siblings;
|
||||
cpumask_t llc_sibling;
|
||||
};
|
||||
|
||||
extern struct cpu_topology cpu_topology[NR_CPUS];
|
||||
@ -20,9 +20,11 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
|
||||
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
|
||||
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
|
||||
#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
|
||||
#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
|
||||
|
||||
void init_cpu_topology(void);
|
||||
void store_cpu_topology(unsigned int cpuid);
|
||||
void remove_cpu_topology(unsigned int cpuid);
|
||||
const struct cpumask *cpu_coregroup_mask(int cpu);
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
|
@ -43,7 +43,7 @@
|
||||
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
|
||||
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
|
||||
|
||||
#define __NR_compat_syscalls 398
|
||||
#define __NR_compat_syscalls 399
|
||||
#endif
|
||||
|
||||
#define __ARCH_WANT_SYS_CLONE
|
||||
|
@ -260,7 +260,7 @@ __SYSCALL(117, sys_ni_syscall)
|
||||
#define __NR_fsync 118
|
||||
__SYSCALL(__NR_fsync, sys_fsync)
|
||||
#define __NR_sigreturn 119
|
||||
__SYSCALL(__NR_sigreturn, compat_sys_sigreturn_wrapper)
|
||||
__SYSCALL(__NR_sigreturn, compat_sys_sigreturn)
|
||||
#define __NR_clone 120
|
||||
__SYSCALL(__NR_clone, sys_clone)
|
||||
#define __NR_setdomainname 121
|
||||
@ -368,7 +368,7 @@ __SYSCALL(__NR_getresgid, sys_getresgid16)
|
||||
#define __NR_prctl 172
|
||||
__SYSCALL(__NR_prctl, sys_prctl)
|
||||
#define __NR_rt_sigreturn 173
|
||||
__SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn_wrapper)
|
||||
__SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn)
|
||||
#define __NR_rt_sigaction 174
|
||||
__SYSCALL(__NR_rt_sigaction, compat_sys_rt_sigaction)
|
||||
#define __NR_rt_sigprocmask 175
|
||||
@ -382,9 +382,9 @@ __SYSCALL(__NR_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo)
|
||||
#define __NR_rt_sigsuspend 179
|
||||
__SYSCALL(__NR_rt_sigsuspend, compat_sys_rt_sigsuspend)
|
||||
#define __NR_pread64 180
|
||||
__SYSCALL(__NR_pread64, compat_sys_pread64_wrapper)
|
||||
__SYSCALL(__NR_pread64, compat_sys_aarch32_pread64)
|
||||
#define __NR_pwrite64 181
|
||||
__SYSCALL(__NR_pwrite64, compat_sys_pwrite64_wrapper)
|
||||
__SYSCALL(__NR_pwrite64, compat_sys_aarch32_pwrite64)
|
||||
#define __NR_chown 182
|
||||
__SYSCALL(__NR_chown, sys_chown16)
|
||||
#define __NR_getcwd 183
|
||||
@ -406,11 +406,11 @@ __SYSCALL(__NR_vfork, sys_vfork)
|
||||
#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
|
||||
__SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit) /* SuS compliant getrlimit */
|
||||
#define __NR_mmap2 192
|
||||
__SYSCALL(__NR_mmap2, compat_sys_mmap2_wrapper)
|
||||
__SYSCALL(__NR_mmap2, compat_sys_aarch32_mmap2)
|
||||
#define __NR_truncate64 193
|
||||
__SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper)
|
||||
__SYSCALL(__NR_truncate64, compat_sys_aarch32_truncate64)
|
||||
#define __NR_ftruncate64 194
|
||||
__SYSCALL(__NR_ftruncate64, compat_sys_ftruncate64_wrapper)
|
||||
__SYSCALL(__NR_ftruncate64, compat_sys_aarch32_ftruncate64)
|
||||
#define __NR_stat64 195
|
||||
__SYSCALL(__NR_stat64, sys_stat64)
|
||||
#define __NR_lstat64 196
|
||||
@ -472,7 +472,7 @@ __SYSCALL(223, sys_ni_syscall)
|
||||
#define __NR_gettid 224
|
||||
__SYSCALL(__NR_gettid, sys_gettid)
|
||||
#define __NR_readahead 225
|
||||
__SYSCALL(__NR_readahead, compat_sys_readahead_wrapper)
|
||||
__SYSCALL(__NR_readahead, compat_sys_aarch32_readahead)
|
||||
#define __NR_setxattr 226
|
||||
__SYSCALL(__NR_setxattr, sys_setxattr)
|
||||
#define __NR_lsetxattr 227
|
||||
@ -554,15 +554,15 @@ __SYSCALL(__NR_clock_getres, compat_sys_clock_getres)
|
||||
#define __NR_clock_nanosleep 265
|
||||
__SYSCALL(__NR_clock_nanosleep, compat_sys_clock_nanosleep)
|
||||
#define __NR_statfs64 266
|
||||
__SYSCALL(__NR_statfs64, compat_sys_statfs64_wrapper)
|
||||
__SYSCALL(__NR_statfs64, compat_sys_aarch32_statfs64)
|
||||
#define __NR_fstatfs64 267
|
||||
__SYSCALL(__NR_fstatfs64, compat_sys_fstatfs64_wrapper)
|
||||
__SYSCALL(__NR_fstatfs64, compat_sys_aarch32_fstatfs64)
|
||||
#define __NR_tgkill 268
|
||||
__SYSCALL(__NR_tgkill, sys_tgkill)
|
||||
#define __NR_utimes 269
|
||||
__SYSCALL(__NR_utimes, compat_sys_utimes)
|
||||
#define __NR_arm_fadvise64_64 270
|
||||
__SYSCALL(__NR_arm_fadvise64_64, compat_sys_fadvise64_64_wrapper)
|
||||
__SYSCALL(__NR_arm_fadvise64_64, compat_sys_aarch32_fadvise64_64)
|
||||
#define __NR_pciconfig_iobase 271
|
||||
__SYSCALL(__NR_pciconfig_iobase, sys_pciconfig_iobase)
|
||||
#define __NR_pciconfig_read 272
|
||||
@ -704,7 +704,7 @@ __SYSCALL(__NR_get_robust_list, compat_sys_get_robust_list)
|
||||
#define __NR_splice 340
|
||||
__SYSCALL(__NR_splice, sys_splice)
|
||||
#define __NR_sync_file_range2 341
|
||||
__SYSCALL(__NR_sync_file_range2, compat_sys_sync_file_range2_wrapper)
|
||||
__SYSCALL(__NR_sync_file_range2, compat_sys_aarch32_sync_file_range2)
|
||||
#define __NR_tee 342
|
||||
__SYSCALL(__NR_tee, sys_tee)
|
||||
#define __NR_vmsplice 343
|
||||
@ -726,7 +726,7 @@ __SYSCALL(__NR_timerfd_create, sys_timerfd_create)
|
||||
#define __NR_eventfd 351
|
||||
__SYSCALL(__NR_eventfd, sys_eventfd)
|
||||
#define __NR_fallocate 352
|
||||
__SYSCALL(__NR_fallocate, compat_sys_fallocate_wrapper)
|
||||
__SYSCALL(__NR_fallocate, compat_sys_aarch32_fallocate)
|
||||
#define __NR_timerfd_settime 353
|
||||
__SYSCALL(__NR_timerfd_settime, compat_sys_timerfd_settime)
|
||||
#define __NR_timerfd_gettime 354
|
||||
@ -817,6 +817,8 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
|
||||
__SYSCALL(__NR_pkey_free, sys_pkey_free)
|
||||
#define __NR_statx 397
|
||||
__SYSCALL(__NR_statx, sys_statx)
|
||||
#define __NR_rseq 398
|
||||
__SYSCALL(__NR_rseq, sys_rseq)
|
||||
|
||||
/*
|
||||
* Please add new compat syscalls above this comment and update
|
||||
|
@ -18,7 +18,8 @@ arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
|
||||
hyp-stub.o psci.o cpu_ops.o insn.o \
|
||||
return_address.o cpuinfo.o cpu_errata.o \
|
||||
cpufeature.o alternative.o cacheinfo.o \
|
||||
smp.o smp_spin_table.o topology.o smccc-call.o
|
||||
smp.o smp_spin_table.o topology.o smccc-call.o \
|
||||
syscall.o
|
||||
|
||||
extra-$(CONFIG_EFI) := efi-entry.o
|
||||
|
||||
@ -27,7 +28,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
|
||||
$(call if_changed,objcopy)
|
||||
|
||||
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
|
||||
sys_compat.o entry32.o
|
||||
sys_compat.o
|
||||
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
|
||||
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
|
||||
arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/efi.h>
|
||||
#include <linux/efi-bgrt.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/irq.h>
|
||||
@ -29,13 +30,9 @@
|
||||
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/cpu_ops.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/smp_plat.h>
|
||||
|
||||
#ifdef CONFIG_ACPI_APEI
|
||||
# include <linux/efi.h>
|
||||
# include <asm/pgtable.h>
|
||||
#endif
|
||||
|
||||
int acpi_noirq = 1; /* skip ACPI IRQ initialization */
|
||||
int acpi_disabled = 1;
|
||||
EXPORT_SYMBOL(acpi_disabled);
|
||||
@ -239,8 +236,7 @@ done:
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ACPI_APEI
|
||||
pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
|
||||
pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
|
||||
{
|
||||
/*
|
||||
* According to "Table 8 Map: EFI memory types to AArch64 memory
|
||||
@ -261,4 +257,3 @@ pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
|
||||
return __pgprot(PROT_NORMAL_NC);
|
||||
return __pgprot(PROT_DEVICE_nGnRnE);
|
||||
}
|
||||
#endif
|
||||
|
@ -26,36 +26,73 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/topology.h>
|
||||
|
||||
#include <acpi/processor.h>
|
||||
#include <asm/numa.h>
|
||||
|
||||
static int cpus_in_srat;
|
||||
static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE };
|
||||
|
||||
struct __node_cpu_hwid {
|
||||
u32 node_id; /* logical node containing this CPU */
|
||||
u64 cpu_hwid; /* MPIDR for this CPU */
|
||||
};
|
||||
|
||||
static struct __node_cpu_hwid early_node_cpu_hwid[NR_CPUS] = {
|
||||
[0 ... NR_CPUS - 1] = {NUMA_NO_NODE, PHYS_CPUID_INVALID} };
|
||||
|
||||
int acpi_numa_get_nid(unsigned int cpu, u64 hwid)
|
||||
int __init acpi_numa_get_nid(unsigned int cpu)
|
||||
{
|
||||
int i;
|
||||
return acpi_early_node_map[cpu];
|
||||
}
|
||||
|
||||
for (i = 0; i < cpus_in_srat; i++) {
|
||||
if (hwid == early_node_cpu_hwid[i].cpu_hwid)
|
||||
return early_node_cpu_hwid[i].node_id;
|
||||
}
|
||||
static inline int get_cpu_for_acpi_id(u32 uid)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
return NUMA_NO_NODE;
|
||||
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
|
||||
if (uid == get_acpi_id_for_cpu(cpu))
|
||||
return cpu;
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int __init acpi_parse_gicc_pxm(struct acpi_subtable_header *header,
|
||||
const unsigned long end)
|
||||
{
|
||||
struct acpi_srat_gicc_affinity *pa;
|
||||
int cpu, pxm, node;
|
||||
|
||||
if (srat_disabled())
|
||||
return -EINVAL;
|
||||
|
||||
pa = (struct acpi_srat_gicc_affinity *)header;
|
||||
if (!pa)
|
||||
return -EINVAL;
|
||||
|
||||
if (!(pa->flags & ACPI_SRAT_GICC_ENABLED))
|
||||
return 0;
|
||||
|
||||
pxm = pa->proximity_domain;
|
||||
node = pxm_to_node(pxm);
|
||||
|
||||
/*
|
||||
* If we can't map the UID to a logical cpu this
|
||||
* means that the UID is not part of possible cpus
|
||||
* so we do not need a NUMA mapping for it, skip
|
||||
* the SRAT entry and keep parsing.
|
||||
*/
|
||||
cpu = get_cpu_for_acpi_id(pa->acpi_processor_uid);
|
||||
if (cpu < 0)
|
||||
return 0;
|
||||
|
||||
acpi_early_node_map[cpu] = node;
|
||||
pr_info("SRAT: PXM %d -> MPIDR 0x%llx -> Node %d\n", pxm,
|
||||
cpu_logical_map(cpu), node);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void __init acpi_map_cpus_to_nodes(void)
|
||||
{
|
||||
acpi_table_parse_entries(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat),
|
||||
ACPI_SRAT_TYPE_GICC_AFFINITY,
|
||||
acpi_parse_gicc_pxm, 0);
|
||||
}
|
||||
|
||||
/* Callback for Proximity Domain -> ACPI processor UID mapping */
|
||||
void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa)
|
||||
{
|
||||
int pxm, node;
|
||||
phys_cpuid_t mpidr;
|
||||
|
||||
if (srat_disabled())
|
||||
return;
|
||||
@ -70,12 +107,6 @@ void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa)
|
||||
if (!(pa->flags & ACPI_SRAT_GICC_ENABLED))
|
||||
return;
|
||||
|
||||
if (cpus_in_srat >= NR_CPUS) {
|
||||
pr_warn_once("SRAT: cpu_to_node_map[%d] is too small, may not be able to use all cpus\n",
|
||||
NR_CPUS);
|
||||
return;
|
||||
}
|
||||
|
||||
pxm = pa->proximity_domain;
|
||||
node = acpi_map_pxm_to_node(pxm);
|
||||
|
||||
@ -85,20 +116,7 @@ void __init acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa)
|
||||
return;
|
||||
}
|
||||
|
||||
mpidr = acpi_map_madt_entry(pa->acpi_processor_uid);
|
||||
if (mpidr == PHYS_CPUID_INVALID) {
|
||||
pr_err("SRAT: PXM %d with ACPI ID %d has no valid MPIDR in MADT\n",
|
||||
pxm, pa->acpi_processor_uid);
|
||||
bad_srat();
|
||||
return;
|
||||
}
|
||||
|
||||
early_node_cpu_hwid[cpus_in_srat].node_id = node;
|
||||
early_node_cpu_hwid[cpus_in_srat].cpu_hwid = mpidr;
|
||||
node_set(node, numa_nodes_parsed);
|
||||
cpus_in_srat++;
|
||||
pr_info("SRAT: PXM %d -> MPIDR 0x%Lx -> Node %d\n",
|
||||
pxm, mpidr, node);
|
||||
}
|
||||
|
||||
int __init arm64_acpi_numa_init(void)
|
||||
|
@@ -47,11 +47,11 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
	unsigned long replptr;

	if (kernel_text_address(pc))
		return 1;
		return true;

	replptr = (unsigned long)ALT_REPL_PTR(alt);
	if (pc >= replptr && pc <= (replptr + alt->alt_len))
		return 0;
		return false;

	/*
	 * Branching into *another* alternate sequence is doomed, and
|
||||
|
@ -441,8 +441,8 @@ static struct undef_hook swp_hooks[] = {
|
||||
{
|
||||
.instr_mask = 0x0fb00ff0,
|
||||
.instr_val = 0x01000090,
|
||||
.pstate_mask = COMPAT_PSR_MODE_MASK,
|
||||
.pstate_val = COMPAT_PSR_MODE_USR,
|
||||
.pstate_mask = PSR_AA32_MODE_MASK,
|
||||
.pstate_val = PSR_AA32_MODE_USR,
|
||||
.fn = swp_handler
|
||||
},
|
||||
{ }
|
||||
@ -511,9 +511,9 @@ ret:
|
||||
static int cp15_barrier_set_hw_mode(bool enable)
|
||||
{
|
||||
if (enable)
|
||||
config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
|
||||
sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_CP15BEN);
|
||||
else
|
||||
config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
|
||||
sysreg_clear_set(sctlr_el1, SCTLR_EL1_CP15BEN, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -521,15 +521,15 @@ static struct undef_hook cp15_barrier_hooks[] = {
|
||||
{
|
||||
.instr_mask = 0x0fff0fdf,
|
||||
.instr_val = 0x0e070f9a,
|
||||
.pstate_mask = COMPAT_PSR_MODE_MASK,
|
||||
.pstate_val = COMPAT_PSR_MODE_USR,
|
||||
.pstate_mask = PSR_AA32_MODE_MASK,
|
||||
.pstate_val = PSR_AA32_MODE_USR,
|
||||
.fn = cp15barrier_handler,
|
||||
},
|
||||
{
|
||||
.instr_mask = 0x0fff0fff,
|
||||
.instr_val = 0x0e070f95,
|
||||
.pstate_mask = COMPAT_PSR_MODE_MASK,
|
||||
.pstate_val = COMPAT_PSR_MODE_USR,
|
||||
.pstate_mask = PSR_AA32_MODE_MASK,
|
||||
.pstate_val = PSR_AA32_MODE_USR,
|
||||
.fn = cp15barrier_handler,
|
||||
},
|
||||
{ }
|
||||
@ -548,9 +548,9 @@ static int setend_set_hw_mode(bool enable)
|
||||
return -EINVAL;
|
||||
|
||||
if (enable)
|
||||
config_sctlr_el1(SCTLR_EL1_SED, 0);
|
||||
sysreg_clear_set(sctlr_el1, SCTLR_EL1_SED, 0);
|
||||
else
|
||||
config_sctlr_el1(0, SCTLR_EL1_SED);
|
||||
sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_SED);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -562,10 +562,10 @@ static int compat_setend_handler(struct pt_regs *regs, u32 big_endian)
|
||||
|
||||
if (big_endian) {
|
||||
insn = "setend be";
|
||||
regs->pstate |= COMPAT_PSR_E_BIT;
|
||||
regs->pstate |= PSR_AA32_E_BIT;
|
||||
} else {
|
||||
insn = "setend le";
|
||||
regs->pstate &= ~COMPAT_PSR_E_BIT;
|
||||
regs->pstate &= ~PSR_AA32_E_BIT;
|
||||
}
|
||||
|
||||
trace_instruction_emulation(insn, regs->pc);
|
||||
@ -593,16 +593,16 @@ static struct undef_hook setend_hooks[] = {
|
||||
{
|
||||
.instr_mask = 0xfffffdff,
|
||||
.instr_val = 0xf1010000,
|
||||
.pstate_mask = COMPAT_PSR_MODE_MASK,
|
||||
.pstate_val = COMPAT_PSR_MODE_USR,
|
||||
.pstate_mask = PSR_AA32_MODE_MASK,
|
||||
.pstate_val = PSR_AA32_MODE_USR,
|
||||
.fn = a32_setend_handler,
|
||||
},
|
||||
{
|
||||
/* Thumb mode */
|
||||
.instr_mask = 0x0000fff7,
|
||||
.instr_val = 0x0000b650,
|
||||
.pstate_mask = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_MASK),
|
||||
.pstate_val = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_USR),
|
||||
.pstate_mask = (PSR_AA32_T_BIT | PSR_AA32_MODE_MASK),
|
||||
.pstate_val = (PSR_AA32_T_BIT | PSR_AA32_MODE_USR),
|
||||
.fn = t16_setend_handler,
|
||||
},
|
||||
{}
|
||||
|
@@ -16,13 +16,14 @@
void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
	unsigned long arg0, unsigned long arg1, unsigned long arg2);

static inline void __noreturn cpu_soft_restart(unsigned long el2_switch,
	unsigned long entry, unsigned long arg0, unsigned long arg1,
	unsigned long arg2)
static inline void __noreturn cpu_soft_restart(unsigned long entry,
	unsigned long arg0,
	unsigned long arg1,
	unsigned long arg2)
{
	typeof(__cpu_soft_restart) *restart;

	el2_switch = el2_switch && !is_kernel_in_hyp_mode() &&
	unsigned long el2_switch = !is_kernel_in_hyp_mode() &&
		is_hyp_mode_available();
	restart = (void *)__pa_symbol(__cpu_soft_restart);
|
||||
|
||||
|
@ -65,19 +65,24 @@ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
|
||||
}
|
||||
|
||||
static bool
|
||||
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
|
||||
int scope)
|
||||
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
|
||||
int scope)
|
||||
{
|
||||
u64 mask = CTR_CACHE_MINLINE_MASK;
|
||||
|
||||
/* Skip matching the min line sizes for cache type check */
|
||||
if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
|
||||
mask ^= arm64_ftr_reg_ctrel0.strict_mask;
|
||||
|
||||
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
|
||||
return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
|
||||
(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
|
||||
return (read_cpuid_cachetype() & mask) !=
|
||||
(arm64_ftr_reg_ctrel0.sys_val & mask);
|
||||
}
|
||||
|
||||
static void
|
||||
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
|
||||
{
|
||||
/* Clear SCTLR_EL1.UCT */
|
||||
config_sctlr_el1(SCTLR_EL1_UCT, 0);
|
||||
sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
|
||||
}
|
||||
|
||||
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
|
||||
@ -101,7 +106,7 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
|
||||
for (i = 0; i < SZ_2K; i += 0x80)
|
||||
memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
|
||||
|
||||
flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
|
||||
__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
|
||||
}
|
||||
|
||||
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
|
||||
@ -613,7 +618,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
|
||||
{
|
||||
.desc = "Mismatched cache line size",
|
||||
.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
|
||||
.matches = has_mismatched_cache_line_size,
|
||||
.matches = has_mismatched_cache_type,
|
||||
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
|
||||
.cpu_enable = cpu_enable_trap_ctr_access,
|
||||
},
|
||||
{
|
||||
.desc = "Mismatched cache type",
|
||||
.capability = ARM64_MISMATCHED_CACHE_TYPE,
|
||||
.matches = has_mismatched_cache_type,
|
||||
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
|
||||
.cpu_enable = cpu_enable_trap_ctr_access,
|
||||
},
|
||||
@ -649,7 +661,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
|
||||
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
|
||||
{
|
||||
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
|
||||
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
|
||||
.cpu_enable = enable_smccc_arch_workaround_1,
|
||||
ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
|
||||
},
|
||||
@ -658,7 +669,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
|
||||
{
|
||||
.desc = "EL2 vector hardening",
|
||||
.capability = ARM64_HARDEN_EL2_VECTORS,
|
||||
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
|
||||
ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
|
||||
},
|
||||
#endif
|
||||
|
@ -214,7 +214,7 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
|
||||
* If we have differing I-cache policies, report it as the weakest - VIPT.
|
||||
*/
|
||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
|
||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
|
||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
|
||||
ARM64_FTR_END,
|
||||
};
|
||||
|
||||
@ -1723,7 +1723,7 @@ static int emulate_mrs(struct pt_regs *regs, u32 insn)
|
||||
static struct undef_hook mrs_hook = {
|
||||
.instr_mask = 0xfff00000,
|
||||
.instr_val = 0xd5300000,
|
||||
.pstate_mask = COMPAT_PSR_MODE_MASK,
|
||||
.pstate_mask = PSR_AA32_MODE_MASK,
|
||||
.pstate_val = PSR_MODE_EL0t,
|
||||
.fn = emulate_mrs,
|
||||
};
|
||||
|
@ -41,19 +41,9 @@
|
||||
* Context tracking subsystem. Used to instrument transitions
|
||||
* between user and kernel mode.
|
||||
*/
|
||||
.macro ct_user_exit, syscall = 0
|
||||
.macro ct_user_exit
|
||||
#ifdef CONFIG_CONTEXT_TRACKING
|
||||
bl context_tracking_user_exit
|
||||
.if \syscall == 1
|
||||
/*
|
||||
* Save/restore needed during syscalls. Restore syscall arguments from
|
||||
* the values already saved on stack during kernel_entry.
|
||||
*/
|
||||
ldp x0, x1, [sp]
|
||||
ldp x2, x3, [sp, #S_X2]
|
||||
ldp x4, x5, [sp, #S_X4]
|
||||
ldp x6, x7, [sp, #S_X6]
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
@ -63,6 +53,12 @@
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro clear_gp_regs
|
||||
.irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
|
||||
mov x\n, xzr
|
||||
.endr
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Bad Abort numbers
|
||||
*-----------------
|
||||
@ -140,20 +136,21 @@ alternative_else_nop_endif
|
||||
|
||||
// This macro corrupts x0-x3. It is the caller's duty
|
||||
// to save/restore them if required.
|
||||
.macro apply_ssbd, state, targ, tmp1, tmp2
|
||||
.macro apply_ssbd, state, tmp1, tmp2
|
||||
#ifdef CONFIG_ARM64_SSBD
|
||||
alternative_cb arm64_enable_wa2_handling
|
||||
b \targ
|
||||
b .L__asm_ssbd_skip\@
|
||||
alternative_cb_end
|
||||
ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
|
||||
cbz \tmp2, \targ
|
||||
cbz \tmp2, .L__asm_ssbd_skip\@
|
||||
ldr \tmp2, [tsk, #TSK_TI_FLAGS]
|
||||
tbnz \tmp2, #TIF_SSBD, \targ
|
||||
tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
|
||||
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
|
||||
mov w1, #\state
|
||||
alternative_cb arm64_update_smccc_conduit
|
||||
nop // Patched to SMC/HVC #0
|
||||
alternative_cb_end
|
||||
.L__asm_ssbd_skip\@:
|
||||
#endif
|
||||
.endm
|
||||
|
||||
@ -178,20 +175,14 @@ alternative_cb_end
|
||||
stp x28, x29, [sp, #16 * 14]
|
||||
|
||||
.if \el == 0
|
||||
clear_gp_regs
|
||||
mrs x21, sp_el0
|
||||
ldr_this_cpu tsk, __entry_task, x20 // Ensure MDSCR_EL1.SS is clear,
|
||||
ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
|
||||
disable_step_tsk x19, x20 // exceptions when scheduling.
|
||||
|
||||
apply_ssbd 1, 1f, x22, x23
|
||||
apply_ssbd 1, x22, x23
|
||||
|
||||
#ifdef CONFIG_ARM64_SSBD
|
||||
ldp x0, x1, [sp, #16 * 0]
|
||||
ldp x2, x3, [sp, #16 * 1]
|
||||
#endif
|
||||
1:
|
||||
|
||||
mov x29, xzr // fp pointed to user-space
|
||||
.else
|
||||
add x21, sp, #S_FRAME_SIZE
|
||||
get_thread_info tsk
|
||||
@ -331,8 +322,7 @@ alternative_if ARM64_WORKAROUND_845719
|
||||
alternative_else_nop_endif
|
||||
#endif
|
||||
3:
|
||||
apply_ssbd 0, 5f, x0, x1
|
||||
5:
|
||||
apply_ssbd 0, x0, x1
|
||||
.endif
|
||||
|
||||
msr elr_el1, x21 // set up the return data
|
||||
@ -720,14 +710,9 @@ el0_sync_compat:
|
||||
b.ge el0_dbg
|
||||
b el0_inv
|
||||
el0_svc_compat:
|
||||
/*
|
||||
* AArch32 syscall handling
|
||||
*/
|
||||
ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags
|
||||
adrp stbl, compat_sys_call_table // load compat syscall table pointer
|
||||
mov wscno, w7 // syscall number in w7 (r7)
|
||||
mov wsc_nr, #__NR_compat_syscalls
|
||||
b el0_svc_naked
|
||||
mov x0, sp
|
||||
bl el0_svc_compat_handler
|
||||
b ret_to_user
|
||||
|
||||
.align 6
|
||||
el0_irq_compat:
|
||||
@ -896,25 +881,6 @@ el0_error_naked:
|
||||
b ret_to_user
|
||||
ENDPROC(el0_error)
|
||||
|
||||
|
||||
/*
|
||||
* This is the fast syscall return path. We do as little as possible here,
|
||||
* and this includes saving x0 back into the kernel stack.
|
||||
*/
|
||||
ret_fast_syscall:
|
||||
disable_daif
|
||||
str x0, [sp, #S_X0] // returned x0
|
||||
ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for syscall tracing
|
||||
and x2, x1, #_TIF_SYSCALL_WORK
|
||||
cbnz x2, ret_fast_syscall_trace
|
||||
and x2, x1, #_TIF_WORK_MASK
|
||||
cbnz x2, work_pending
|
||||
enable_step_tsk x1, x2
|
||||
kernel_exit 0
|
||||
ret_fast_syscall_trace:
|
||||
enable_daif
|
||||
b __sys_trace_return_skipped // we already saved x0
|
||||
|
||||
/*
|
||||
* Ok, we need to do extra processing, enter the slow path.
|
||||
*/
|
||||
@ -936,6 +902,9 @@ ret_to_user:
|
||||
cbnz x2, work_pending
|
||||
finish_ret_to_user:
|
||||
enable_step_tsk x1, x2
|
||||
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
|
||||
bl stackleak_erase
|
||||
#endif
|
||||
kernel_exit 0
|
||||
ENDPROC(ret_to_user)
|
||||
|
||||
@ -944,85 +913,10 @@ ENDPROC(ret_to_user)
|
||||
*/
|
||||
.align 6
|
||||
el0_svc:
|
||||
ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags
|
||||
adrp stbl, sys_call_table // load syscall table pointer
|
||||
mov wscno, w8 // syscall number in w8
|
||||
mov wsc_nr, #__NR_syscalls
|
||||
|
||||
#ifdef CONFIG_ARM64_SVE
|
||||
alternative_if_not ARM64_SVE
|
||||
b el0_svc_naked
|
||||
alternative_else_nop_endif
|
||||
tbz x16, #TIF_SVE, el0_svc_naked // Skip unless TIF_SVE set:
|
||||
bic x16, x16, #_TIF_SVE // discard SVE state
|
||||
str x16, [tsk, #TSK_TI_FLAGS]
|
||||
|
||||
/*
|
||||
* task_fpsimd_load() won't be called to update CPACR_EL1 in
|
||||
* ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
|
||||
* happens if a context switch or kernel_neon_begin() or context
|
||||
* modification (sigreturn, ptrace) intervenes.
|
||||
* So, ensure that CPACR_EL1 is already correct for the fast-path case:
|
||||
*/
|
||||
mrs x9, cpacr_el1
|
||||
bic x9, x9, #CPACR_EL1_ZEN_EL0EN // disable SVE for el0
|
||||
msr cpacr_el1, x9 // synchronised by eret to el0
|
||||
#endif
|
||||
|
||||
el0_svc_naked: // compat entry point
|
||||
stp x0, xscno, [sp, #S_ORIG_X0] // save the original x0 and syscall number
|
||||
enable_daif
|
||||
ct_user_exit 1
|
||||
|
||||
tst x16, #_TIF_SYSCALL_WORK // check for syscall hooks
|
||||
b.ne __sys_trace
|
||||
cmp wscno, wsc_nr // check upper syscall limit
|
||||
b.hs ni_sys
|
||||
mask_nospec64 xscno, xsc_nr, x19 // enforce bounds for syscall number
|
||||
ldr x16, [stbl, xscno, lsl #3] // address in the syscall table
|
||||
blr x16 // call sys_* routine
|
||||
b ret_fast_syscall
|
||||
ni_sys:
|
||||
mov x0, sp
|
||||
bl do_ni_syscall
|
||||
b ret_fast_syscall
|
||||
ENDPROC(el0_svc)
|
||||
|
||||
/*
|
||||
* This is the really slow path. We're going to be doing context
|
||||
* switches, and waiting for our parent to respond.
|
||||
*/
|
||||
__sys_trace:
|
||||
cmp wscno, #NO_SYSCALL // user-issued syscall(-1)?
|
||||
b.ne 1f
|
||||
mov x0, #-ENOSYS // set default errno if so
|
||||
str x0, [sp, #S_X0]
|
||||
1: mov x0, sp
|
||||
bl syscall_trace_enter
|
||||
cmp w0, #NO_SYSCALL // skip the syscall?
|
||||
b.eq __sys_trace_return_skipped
|
||||
mov wscno, w0 // syscall number (possibly new)
|
||||
mov x1, sp // pointer to regs
|
||||
cmp wscno, wsc_nr // check upper syscall limit
|
||||
b.hs __ni_sys_trace
|
||||
ldp x0, x1, [sp] // restore the syscall args
|
||||
ldp x2, x3, [sp, #S_X2]
|
||||
ldp x4, x5, [sp, #S_X4]
|
||||
ldp x6, x7, [sp, #S_X6]
|
||||
ldr x16, [stbl, xscno, lsl #3] // address in the syscall table
|
||||
blr x16 // call sys_* routine
|
||||
|
||||
__sys_trace_return:
|
||||
str x0, [sp, #S_X0] // save returned x0
|
||||
__sys_trace_return_skipped:
|
||||
mov x0, sp
|
||||
bl syscall_trace_exit
|
||||
bl el0_svc_handler
|
||||
b ret_to_user
|
||||
|
||||
__ni_sys_trace:
|
||||
mov x0, sp
|
||||
bl do_ni_syscall
|
||||
b __sys_trace_return
|
||||
ENDPROC(el0_svc)
|
||||
|
||||
.popsection // .entry.text
|
||||
|
||||
@ -1137,14 +1031,6 @@ __entry_tramp_data_start:
|
||||
#endif /* CONFIG_RANDOMIZE_BASE */
|
||||
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
|
||||
|
||||
/*
|
||||
* Special system call wrappers.
|
||||
*/
|
||||
ENTRY(sys_rt_sigreturn_wrapper)
|
||||
mov x0, sp
|
||||
b sys_rt_sigreturn
|
||||
ENDPROC(sys_rt_sigreturn_wrapper)
|
||||
|
||||
/*
|
||||
* Register switch for AArch64. The callee-saved registers need to be saved
|
||||
* and restored. On entry:
|
||||
|
@ -1,121 +0,0 @@
|
||||
/*
|
||||
* Compat system call wrappers
|
||||
*
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
* Authors: Will Deacon <will.deacon@arm.com>
|
||||
* Catalin Marinas <catalin.marinas@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/const.h>
|
||||
|
||||
#include <asm/assembler.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/errno.h>
|
||||
#include <asm/page.h>
|
||||
|
||||
/*
|
||||
* System call wrappers for the AArch32 compatibility layer.
|
||||
*/
|
||||
|
||||
ENTRY(compat_sys_sigreturn_wrapper)
|
||||
mov x0, sp
|
||||
b compat_sys_sigreturn
|
||||
ENDPROC(compat_sys_sigreturn_wrapper)
|
||||
|
||||
ENTRY(compat_sys_rt_sigreturn_wrapper)
|
||||
mov x0, sp
|
||||
b compat_sys_rt_sigreturn
|
||||
ENDPROC(compat_sys_rt_sigreturn_wrapper)
|
||||
|
||||
ENTRY(compat_sys_statfs64_wrapper)
|
||||
mov w3, #84
|
||||
cmp w1, #88
|
||||
csel w1, w3, w1, eq
|
||||
b compat_sys_statfs64
|
||||
ENDPROC(compat_sys_statfs64_wrapper)
|
||||
|
||||
ENTRY(compat_sys_fstatfs64_wrapper)
|
||||
mov w3, #84
|
||||
cmp w1, #88
|
||||
csel w1, w3, w1, eq
|
||||
b compat_sys_fstatfs64
|
||||
ENDPROC(compat_sys_fstatfs64_wrapper)
|
||||
|
||||
/*
|
||||
* Note: off_4k (w5) is always in units of 4K. If we can't do the
|
||||
* requested offset because it is not page-aligned, we return -EINVAL.
|
||||
*/
|
||||
ENTRY(compat_sys_mmap2_wrapper)
|
||||
#if PAGE_SHIFT > 12
|
||||
tst w5, #~PAGE_MASK >> 12
|
||||
b.ne 1f
|
||||
lsr w5, w5, #PAGE_SHIFT - 12
|
||||
#endif
|
||||
b sys_mmap_pgoff
|
||||
1: mov x0, #-EINVAL
|
||||
ret
|
||||
ENDPROC(compat_sys_mmap2_wrapper)
|
||||
|
||||
/*
|
||||
* Wrappers for AArch32 syscalls that either take 64-bit parameters
|
||||
* in registers or that take 32-bit parameters which require sign
|
||||
* extension.
|
||||
*/
|
||||
ENTRY(compat_sys_pread64_wrapper)
|
||||
regs_to_64 x3, x4, x5
|
||||
b sys_pread64
|
||||
ENDPROC(compat_sys_pread64_wrapper)
|
||||
|
||||
ENTRY(compat_sys_pwrite64_wrapper)
|
||||
regs_to_64 x3, x4, x5
|
||||
b sys_pwrite64
|
||||
ENDPROC(compat_sys_pwrite64_wrapper)
|
||||
|
||||
ENTRY(compat_sys_truncate64_wrapper)
|
||||
regs_to_64 x1, x2, x3
|
||||
b sys_truncate
|
||||
ENDPROC(compat_sys_truncate64_wrapper)
|
||||
|
||||
ENTRY(compat_sys_ftruncate64_wrapper)
|
||||
regs_to_64 x1, x2, x3
|
||||
b sys_ftruncate
|
||||
ENDPROC(compat_sys_ftruncate64_wrapper)
|
||||
|
||||
ENTRY(compat_sys_readahead_wrapper)
|
||||
regs_to_64 x1, x2, x3
|
||||
mov w2, w4
|
||||
b sys_readahead
|
||||
ENDPROC(compat_sys_readahead_wrapper)
|
||||
|
||||
ENTRY(compat_sys_fadvise64_64_wrapper)
|
||||
mov w6, w1
|
||||
regs_to_64 x1, x2, x3
|
||||
regs_to_64 x2, x4, x5
|
||||
mov w3, w6
|
||||
b sys_fadvise64_64
|
||||
ENDPROC(compat_sys_fadvise64_64_wrapper)
|
||||
|
||||
ENTRY(compat_sys_sync_file_range2_wrapper)
|
||||
regs_to_64 x2, x2, x3
|
||||
regs_to_64 x3, x4, x5
|
||||
b sys_sync_file_range2
|
||||
ENDPROC(compat_sys_sync_file_range2_wrapper)
|
||||
|
||||
ENTRY(compat_sys_fallocate_wrapper)
|
||||
regs_to_64 x2, x2, x3
|
||||
regs_to_64 x3, x4, x5
|
||||
b sys_fallocate
|
||||
ENDPROC(compat_sys_fallocate_wrapper)
|
@ -159,25 +159,6 @@ static void sve_free(struct task_struct *task)
|
||||
__sve_free(task);
|
||||
}
|
||||
|
||||
static void change_cpacr(u64 val, u64 mask)
|
||||
{
|
||||
u64 cpacr = read_sysreg(CPACR_EL1);
|
||||
u64 new = (cpacr & ~mask) | val;
|
||||
|
||||
if (new != cpacr)
|
||||
write_sysreg(new, CPACR_EL1);
|
||||
}
|
||||
|
||||
static void sve_user_disable(void)
|
||||
{
|
||||
change_cpacr(0, CPACR_EL1_ZEN_EL0EN);
|
||||
}
|
||||
|
||||
static void sve_user_enable(void)
|
||||
{
|
||||
change_cpacr(CPACR_EL1_ZEN_EL0EN, CPACR_EL1_ZEN_EL0EN);
|
||||
}
|
||||
|
||||
/*
|
||||
* TIF_SVE controls whether a task can use SVE without trapping while
|
||||
* in userspace, and also the way a task's FPSIMD/SVE state is stored
|
||||
|
@ -149,20 +149,6 @@ int __kprobes aarch64_insn_write(void *addr, u32 insn)
|
||||
return __aarch64_insn_write(addr, cpu_to_le32(insn));
|
||||
}
|
||||
|
||||
static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
|
||||
{
|
||||
if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
|
||||
return false;
|
||||
|
||||
return aarch64_insn_is_b(insn) ||
|
||||
aarch64_insn_is_bl(insn) ||
|
||||
aarch64_insn_is_svc(insn) ||
|
||||
aarch64_insn_is_hvc(insn) ||
|
||||
aarch64_insn_is_smc(insn) ||
|
||||
aarch64_insn_is_brk(insn) ||
|
||||
aarch64_insn_is_nop(insn);
|
||||
}
|
||||
|
||||
bool __kprobes aarch64_insn_uses_literal(u32 insn)
|
||||
{
|
||||
/* ldr/ldrsw (literal), prfm */
|
||||
@ -189,22 +175,6 @@ bool __kprobes aarch64_insn_is_branch(u32 insn)
|
||||
aarch64_insn_is_bcond(insn);
|
||||
}
|
||||
|
||||
/*
|
||||
* ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
|
||||
* Section B2.6.5 "Concurrent modification and execution of instructions":
|
||||
* Concurrent modification and execution of instructions can lead to the
|
||||
* resulting instruction performing any behavior that can be achieved by
|
||||
* executing any sequence of instructions that can be executed from the
|
||||
* same Exception level, except where the instruction before modification
|
||||
* and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
|
||||
* or SMC instruction.
|
||||
*/
|
||||
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
|
||||
{
|
||||
return __aarch64_insn_hotpatch_safe(old_insn) &&
|
||||
__aarch64_insn_hotpatch_safe(new_insn);
|
||||
}
|
||||
|
||||
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
|
||||
{
|
||||
u32 *tp = addr;
|
||||
@ -216,8 +186,8 @@ int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
|
||||
|
||||
ret = aarch64_insn_write(tp, insn);
|
||||
if (ret == 0)
|
||||
flush_icache_range((uintptr_t)tp,
|
||||
(uintptr_t)tp + AARCH64_INSN_SIZE);
|
||||
__flush_icache_range((uintptr_t)tp,
|
||||
(uintptr_t)tp + AARCH64_INSN_SIZE);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -239,11 +209,6 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
|
||||
for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
|
||||
ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
|
||||
pp->new_insns[i]);
|
||||
/*
|
||||
* aarch64_insn_patch_text_nosync() calls flush_icache_range(),
|
||||
* which ends with "dsb; isb" pair guaranteeing global
|
||||
* visibility.
|
||||
*/
|
||||
/* Notify other processors with an additional increment. */
|
||||
atomic_inc(&pp->cpu_count);
|
||||
} else {
|
||||
@ -255,8 +220,7 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static
|
||||
int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
|
||||
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
|
||||
{
|
||||
struct aarch64_insn_patch patch = {
|
||||
.text_addrs = addrs,
|
||||
@ -272,34 +236,6 @@ int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
|
||||
cpu_online_mask);
|
||||
}
|
||||
|
||||
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
|
||||
{
|
||||
int ret;
|
||||
u32 insn;
|
||||
|
||||
/* Unsafe to patch multiple instructions without synchronizaiton */
|
||||
if (cnt == 1) {
|
||||
ret = aarch64_insn_read(addrs[0], &insn);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
|
||||
/*
|
||||
* ARMv8 architecture doesn't guarantee all CPUs see
|
||||
* the new instruction after returning from function
|
||||
* aarch64_insn_patch_text_nosync(). So send IPIs to
|
||||
* all other CPUs to achieve instruction
|
||||
* synchronization.
|
||||
*/
|
||||
ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
|
||||
kick_all_cpus_sync();
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return aarch64_insn_patch_text_sync(addrs, insns, cnt);
|
||||
}
|
||||
|
||||
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
|
||||
u32 *maskp, int *shiftp)
|
||||
{
|
||||
|
@@ -184,8 +184,15 @@ void machine_kexec(struct kimage *kimage)

	/* Flush the reboot_code_buffer in preparation for its execution. */
	__flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
	flush_icache_range((uintptr_t)reboot_code_buffer,
			arm64_relocate_new_kernel_size);

	/*
	 * Although we've killed off the secondary CPUs, we don't update
	 * the online mask if we're handling a crash kernel and consequently
	 * need to avoid flush_icache_range(), which will attempt to IPI
	 * the offline CPUs. Therefore, we must use the __* variant here.
	 */
	__flush_icache_range((uintptr_t)reboot_code_buffer,
			arm64_relocate_new_kernel_size);

	/* Flush the kimage list and its buffers. */
	kexec_list_flush(kimage);
|
||||
@ -207,8 +214,7 @@ void machine_kexec(struct kimage *kimage)
|
||||
* relocation is complete.
|
||||
*/
|
||||
|
||||
cpu_soft_restart(kimage != kexec_crash_image,
|
||||
reboot_code_buffer_phys, kimage->head, kimage->start, 0);
|
||||
cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start, 0);
|
||||
|
||||
BUG(); /* Should never get here. */
|
||||
}
|
||||
@ -361,4 +367,5 @@ void arch_crash_save_vmcoreinfo(void)
|
||||
kimage_voffset);
|
||||
vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
|
||||
PHYS_OFFSET);
|
||||
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
|
||||
}
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include <asm/virt.h>
|
||||
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/clocksource.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/perf/arm_pmu.h>
|
||||
#include <linux/platform_device.h>
|
||||
@@ -446,9 +447,16 @@ static struct attribute_group armv8_pmuv3_events_attr_group = {
};

PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(long, "config1:0");

static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
{
	return event->attr.config1 & 0x1;
}

static struct attribute *armv8_pmuv3_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_long.attr,
	NULL,
};
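For context, this is roughly how the new "long" format bit is consumed from userspace. A hedged sketch, not taken from the patch: it assumes a single armv8_pmuv3 core PMU so PERF_TYPE_RAW resolves to it, and uses the architectural INST_RETIRED event (0x08); config1 bit 0 is the "long" attribute defined just above, which asks the driver for a 64-bit (chained) counter.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return (int)syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;	/* assumes the core armv8_pmuv3 PMU */
	attr.config = 0x08;		/* INST_RETIRED (architectural event) */
	attr.config1 = 1;		/* "long": ask for a 64-bit, chained counter */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile int i = 0; i < 1000000; i++)	/* some work to count */
		;
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions retired: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}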
|
||||
|
||||
@@ -465,6 +473,21 @@ static struct attribute_group armv8_pmuv3_format_attr_group = {
#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

/*
 * We must chain two programmable counters for 64 bit events,
 * except when we have allocated the 64bit cycle counter (for CPU
 * cycles event). This must be called only when the event has
 * a counter allocated.
 */
static inline bool armv8pmu_event_is_chained(struct perf_event *event)
{
	int idx = event->hw.idx;

	return !WARN_ON(idx < 0) &&
		armv8pmu_event_is_64bit(event) &&
		(idx != ARMV8_IDX_CYCLE_COUNTER);
}
|
||||
|
||||
/*
|
||||
* ARMv8 low level PMU access
|
||||
*/
|
||||
@ -503,34 +526,68 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
|
||||
return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
|
||||
}
|
||||
|
||||
static inline int armv8pmu_select_counter(int idx)
|
||||
static inline void armv8pmu_select_counter(int idx)
|
||||
{
|
||||
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
|
||||
write_sysreg(counter, pmselr_el0);
|
||||
isb();
|
||||
|
||||
return idx;
|
||||
}
|
||||
|
||||
static inline u32 armv8pmu_read_counter(struct perf_event *event)
|
||||
static inline u32 armv8pmu_read_evcntr(int idx)
|
||||
{
|
||||
armv8pmu_select_counter(idx);
|
||||
return read_sysreg(pmxevcntr_el0);
|
||||
}
|
||||
|
||||
static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
|
||||
{
|
||||
int idx = event->hw.idx;
|
||||
u64 val = 0;
|
||||
|
||||
val = armv8pmu_read_evcntr(idx);
|
||||
if (armv8pmu_event_is_chained(event))
|
||||
val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline u64 armv8pmu_read_counter(struct perf_event *event)
|
||||
{
|
||||
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
int idx = hwc->idx;
|
||||
u32 value = 0;
|
||||
u64 value = 0;
|
||||
|
||||
if (!armv8pmu_counter_valid(cpu_pmu, idx))
|
||||
pr_err("CPU%u reading wrong counter %d\n",
|
||||
smp_processor_id(), idx);
|
||||
else if (idx == ARMV8_IDX_CYCLE_COUNTER)
|
||||
value = read_sysreg(pmccntr_el0);
|
||||
else if (armv8pmu_select_counter(idx) == idx)
|
||||
value = read_sysreg(pmxevcntr_el0);
|
||||
else
|
||||
value = armv8pmu_read_hw_counter(event);
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
|
||||
static inline void armv8pmu_write_evcntr(int idx, u32 value)
|
||||
{
|
||||
armv8pmu_select_counter(idx);
|
||||
write_sysreg(value, pmxevcntr_el0);
|
||||
}
|
||||
|
||||
static inline void armv8pmu_write_hw_counter(struct perf_event *event,
|
||||
u64 value)
|
||||
{
|
||||
int idx = event->hw.idx;
|
||||
|
||||
if (armv8pmu_event_is_chained(event)) {
|
||||
armv8pmu_write_evcntr(idx, upper_32_bits(value));
|
||||
armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
|
||||
} else {
|
||||
armv8pmu_write_evcntr(idx, value);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void armv8pmu_write_counter(struct perf_event *event, u64 value)
|
||||
{
|
||||
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
@ -541,22 +598,43 @@ static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
|
||||
smp_processor_id(), idx);
|
||||
else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
|
||||
/*
|
||||
* Set the upper 32bits as this is a 64bit counter but we only
|
||||
* count using the lower 32bits and we want an interrupt when
|
||||
* it overflows.
|
||||
* The cycles counter is really a 64-bit counter.
|
||||
* When treating it as a 32-bit counter, we only count
|
||||
* the lower 32 bits, and set the upper 32-bits so that
|
||||
* we get an interrupt upon 32-bit overflow.
|
||||
*/
|
||||
u64 value64 = 0xffffffff00000000ULL | value;
|
||||
|
||||
write_sysreg(value64, pmccntr_el0);
|
||||
} else if (armv8pmu_select_counter(idx) == idx)
|
||||
write_sysreg(value, pmxevcntr_el0);
|
||||
if (!armv8pmu_event_is_64bit(event))
|
||||
value |= 0xffffffff00000000ULL;
|
||||
write_sysreg(value, pmccntr_el0);
|
||||
} else
|
||||
armv8pmu_write_hw_counter(event, value);
|
||||
}
|
||||
|
||||
static inline void armv8pmu_write_evtype(int idx, u32 val)
|
||||
{
|
||||
if (armv8pmu_select_counter(idx) == idx) {
|
||||
val &= ARMV8_PMU_EVTYPE_MASK;
|
||||
write_sysreg(val, pmxevtyper_el0);
|
||||
armv8pmu_select_counter(idx);
|
||||
val &= ARMV8_PMU_EVTYPE_MASK;
|
||||
write_sysreg(val, pmxevtyper_el0);
|
||||
}
|
||||
|
||||
static inline void armv8pmu_write_event_type(struct perf_event *event)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
int idx = hwc->idx;
|
||||
|
||||
/*
|
||||
* For chained events, the low counter is programmed to count
|
||||
* the event of interest and the high counter is programmed
|
||||
* with CHAIN event code with filters set to count at all ELs.
|
||||
*/
|
||||
if (armv8pmu_event_is_chained(event)) {
|
||||
u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
|
||||
ARMV8_PMU_INCLUDE_EL2;
|
||||
|
||||
armv8pmu_write_evtype(idx - 1, hwc->config_base);
|
||||
armv8pmu_write_evtype(idx, chain_evt);
|
||||
} else {
|
||||
armv8pmu_write_evtype(idx, hwc->config_base);
|
||||
}
|
||||
}
|
||||
|
||||
@ -567,6 +645,16 @@ static inline int armv8pmu_enable_counter(int idx)
|
||||
return idx;
|
||||
}
|
||||
|
||||
static inline void armv8pmu_enable_event_counter(struct perf_event *event)
|
||||
{
|
||||
int idx = event->hw.idx;
|
||||
|
||||
armv8pmu_enable_counter(idx);
|
||||
if (armv8pmu_event_is_chained(event))
|
||||
armv8pmu_enable_counter(idx - 1);
|
||||
isb();
|
||||
}
|
||||
|
||||
static inline int armv8pmu_disable_counter(int idx)
|
||||
{
|
||||
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
|
||||
@ -574,6 +662,16 @@ static inline int armv8pmu_disable_counter(int idx)
|
||||
return idx;
|
||||
}
|
||||
|
||||
static inline void armv8pmu_disable_event_counter(struct perf_event *event)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
int idx = hwc->idx;
|
||||
|
||||
if (armv8pmu_event_is_chained(event))
|
||||
armv8pmu_disable_counter(idx - 1);
|
||||
armv8pmu_disable_counter(idx);
|
||||
}
|
||||
|
||||
static inline int armv8pmu_enable_intens(int idx)
|
||||
{
|
||||
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
|
||||
@ -581,6 +679,11 @@ static inline int armv8pmu_enable_intens(int idx)
|
||||
return idx;
|
||||
}
|
||||
|
||||
static inline int armv8pmu_enable_event_irq(struct perf_event *event)
|
||||
{
|
||||
return armv8pmu_enable_intens(event->hw.idx);
|
||||
}
|
||||
|
||||
static inline int armv8pmu_disable_intens(int idx)
|
||||
{
|
||||
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
|
||||
@ -593,6 +696,11 @@ static inline int armv8pmu_disable_intens(int idx)
|
||||
return idx;
|
||||
}
|
||||
|
||||
static inline int armv8pmu_disable_event_irq(struct perf_event *event)
|
||||
{
|
||||
return armv8pmu_disable_intens(event->hw.idx);
|
||||
}
|
||||
|
||||
static inline u32 armv8pmu_getreset_flags(void)
|
||||
{
|
||||
u32 value;
|
||||
@ -610,10 +718,8 @@ static inline u32 armv8pmu_getreset_flags(void)
|
||||
static void armv8pmu_enable_event(struct perf_event *event)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
|
||||
struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
|
||||
int idx = hwc->idx;
|
||||
|
||||
/*
|
||||
* Enable counter and interrupt, and set the counter to count
|
||||
@ -624,22 +730,22 @@ static void armv8pmu_enable_event(struct perf_event *event)
|
||||
/*
|
||||
* Disable counter
|
||||
*/
|
||||
armv8pmu_disable_counter(idx);
|
||||
armv8pmu_disable_event_counter(event);
|
||||
|
||||
/*
|
||||
* Set event (if destined for PMNx counters).
|
||||
*/
|
||||
armv8pmu_write_evtype(idx, hwc->config_base);
|
||||
armv8pmu_write_event_type(event);
|
||||
|
||||
/*
|
||||
* Enable interrupt for this counter
|
||||
*/
|
||||
armv8pmu_enable_intens(idx);
|
||||
armv8pmu_enable_event_irq(event);
|
||||
|
||||
/*
|
||||
* Enable counter
|
||||
*/
|
||||
armv8pmu_enable_counter(idx);
|
||||
armv8pmu_enable_event_counter(event);
|
||||
|
||||
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||
}
|
||||
@ -647,10 +753,8 @@ static void armv8pmu_enable_event(struct perf_event *event)
|
||||
static void armv8pmu_disable_event(struct perf_event *event)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
|
||||
struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
|
||||
int idx = hwc->idx;
|
||||
|
||||
/*
|
||||
* Disable counter and interrupt
|
||||
@ -660,16 +764,38 @@ static void armv8pmu_disable_event(struct perf_event *event)
|
||||
/*
|
||||
* Disable counter
|
||||
*/
|
||||
armv8pmu_disable_counter(idx);
|
||||
armv8pmu_disable_event_counter(event);
|
||||
|
||||
/*
|
||||
* Disable interrupt for this counter
|
||||
*/
|
||||
armv8pmu_disable_intens(idx);
|
||||
armv8pmu_disable_event_irq(event);
|
||||
|
||||
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||
}
|
||||
|
||||
static void armv8pmu_start(struct arm_pmu *cpu_pmu)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
|
||||
|
||||
raw_spin_lock_irqsave(&events->pmu_lock, flags);
|
||||
/* Enable all counters */
|
||||
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
|
||||
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||
}
|
||||
|
||||
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
|
||||
|
||||
raw_spin_lock_irqsave(&events->pmu_lock, flags);
|
||||
/* Disable all counters */
|
||||
armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
|
||||
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||
}
|
||||
|
||||
static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
|
||||
{
|
||||
u32 pmovsr;
|
||||
@ -694,6 +820,11 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
|
||||
*/
|
||||
regs = get_irq_regs();
|
||||
|
||||
/*
|
||||
* Stop the PMU while processing the counter overflows
|
||||
* to prevent skews in group events.
|
||||
*/
|
||||
armv8pmu_stop(cpu_pmu);
|
||||
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
|
||||
struct perf_event *event = cpuc->events[idx];
|
||||
struct hw_perf_event *hwc;
|
||||
@ -718,6 +849,7 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
|
||||
if (perf_event_overflow(event, &data, regs))
|
||||
cpu_pmu->disable(event);
|
||||
}
|
||||
armv8pmu_start(cpu_pmu);
|
||||
|
||||
/*
|
||||
* Handle the pending perf events.
|
||||
@ -731,32 +863,42 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static void armv8pmu_start(struct arm_pmu *cpu_pmu)
|
||||
static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
|
||||
struct arm_pmu *cpu_pmu)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
|
||||
int idx;
|
||||
|
||||
raw_spin_lock_irqsave(&events->pmu_lock, flags);
|
||||
/* Enable all counters */
|
||||
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
|
||||
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||
for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx ++) {
|
||||
if (!test_and_set_bit(idx, cpuc->used_mask))
|
||||
return idx;
|
||||
}
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
|
||||
static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
|
||||
struct arm_pmu *cpu_pmu)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
|
||||
int idx;
|
||||
|
||||
raw_spin_lock_irqsave(&events->pmu_lock, flags);
|
||||
/* Disable all counters */
|
||||
armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
|
||||
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
|
||||
/*
|
||||
* Chaining requires two consecutive event counters, where
|
||||
* the lower idx must be even.
|
||||
*/
|
||||
for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
|
||||
if (!test_and_set_bit(idx, cpuc->used_mask)) {
|
||||
/* Check if the preceding even counter is available */
|
||||
if (!test_and_set_bit(idx - 1, cpuc->used_mask))
|
||||
return idx;
|
||||
/* Release the Odd counter */
|
||||
clear_bit(idx, cpuc->used_mask);
|
||||
}
|
||||
}
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
|
||||
struct perf_event *event)
|
||||
{
|
||||
int idx;
|
||||
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
|
||||
@ -770,13 +912,20 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
|
||||
/*
|
||||
* Otherwise use events counters
|
||||
*/
|
||||
for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
|
||||
if (!test_and_set_bit(idx, cpuc->used_mask))
|
||||
return idx;
|
||||
}
|
||||
if (armv8pmu_event_is_64bit(event))
|
||||
return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
|
||||
else
|
||||
return armv8pmu_get_single_idx(cpuc, cpu_pmu);
|
||||
}
|
||||
|
||||
/* The counters are all in use. */
|
||||
return -EAGAIN;
|
||||
static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
|
||||
struct perf_event *event)
|
||||
{
|
||||
int idx = event->hw.idx;
|
||||
|
||||
clear_bit(idx, cpuc->used_mask);
|
||||
if (armv8pmu_event_is_chained(event))
|
||||
clear_bit(idx - 1, cpuc->used_mask);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -851,6 +1000,9 @@ static int __armv8_pmuv3_map_event(struct perf_event *event,
|
||||
&armv8_pmuv3_perf_cache_map,
|
||||
ARMV8_PMU_EVTYPE_EVENT);
|
||||
|
||||
if (armv8pmu_event_is_64bit(event))
|
||||
event->hw.flags |= ARMPMU_EVT_64BIT;
|
||||
|
||||
/* Onl expose micro/arch events supported by this PMU */
|
||||
if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
|
||||
&& test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
|
||||
@ -957,10 +1109,10 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
cpu_pmu->read_counter = armv8pmu_read_counter,
|
||||
cpu_pmu->write_counter = armv8pmu_write_counter,
|
||||
cpu_pmu->get_event_idx = armv8pmu_get_event_idx,
|
||||
cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx,
|
||||
cpu_pmu->start = armv8pmu_start,
|
||||
cpu_pmu->stop = armv8pmu_stop,
|
||||
cpu_pmu->reset = armv8pmu_reset,
|
||||
cpu_pmu->max_period = (1LLU << 32) - 1,
|
||||
cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
|
||||
|
||||
return 0;
|
||||
@ -1127,3 +1279,32 @@ static int __init armv8_pmu_driver_init(void)
|
||||
return arm_pmu_acpi_probe(armv8_pmuv3_init);
|
||||
}
|
||||
device_initcall(armv8_pmu_driver_init)
|
||||
|
||||
void arch_perf_update_userpage(struct perf_event *event,
			struct perf_event_mmap_page *userpg, u64 now)
{
	u32 freq;
	u32 shift;

	/*
	 * Internal timekeeping for enabled/running/stopped times
	 * is always computed with the sched_clock.
	 */
	freq = arch_timer_get_rate();
	userpg->cap_user_time = 1;

	clocks_calc_mult_shift(&userpg->time_mult, &shift, freq,
			NSEC_PER_SEC, 0);
	/*
	 * time_shift is not expected to be greater than 31 due to
	 * the original published conversion algorithm shifting a
	 * 32-bit value (now specifies a 64-bit value) - refer
	 * perf_event_mmap_page documentation in perf_event.h.
	 */
	if (shift == 32) {
		shift = 31;
		userpg->time_mult >>= 1;
	}
	userpg->time_shift = (u16)shift;
	userpg->time_offset = -now;
}
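Userspace is expected to consume these fields as documented for struct perf_event_mmap_page in include/uapi/linux/perf_event.h. A rough sketch of that consumer side, not part of the patch, with some assumptions spelled out: the event's counter page has been mmap()ed, cap_user_time reads back as 1, and EL0 access to the virtual counter is enabled (which Linux normally allows).

#include <stdint.h>
#include <linux/perf_event.h>

/* Convert a raw CNTVCT_EL0 reading to nanoseconds using the mult/shift/
 * offset fields filled in by arch_perf_update_userpage() above.  The
 * seqcount-style retry loop and the arithmetic follow the
 * perf_event_mmap_page documentation. */
static uint64_t perf_page_time(const volatile struct perf_event_mmap_page *pc)
{
	uint64_t cyc, quot, rem, delta;
	uint32_t seq;

	do {
		seq = pc->lock;
		__asm__ volatile("" ::: "memory");

		/* ISB so the counter read is not hoisted before the loop. */
		__asm__ volatile("isb; mrs %0, cntvct_el0" : "=r" (cyc));
		quot  = cyc >> pc->time_shift;
		rem   = cyc & (((uint64_t)1 << pc->time_shift) - 1);
		delta = pc->time_offset + quot * pc->time_mult +
			((rem * pc->time_mult) >> pc->time_shift);

		__asm__ volatile("" ::: "memory");
	} while (pc->lock != seq);

	return delta;
}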
|
||||
|
@ -177,16 +177,16 @@ static void print_pstate(struct pt_regs *regs)
|
||||
if (compat_user_mode(regs)) {
|
||||
printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
|
||||
pstate,
|
||||
pstate & COMPAT_PSR_N_BIT ? 'N' : 'n',
|
||||
pstate & COMPAT_PSR_Z_BIT ? 'Z' : 'z',
|
||||
pstate & COMPAT_PSR_C_BIT ? 'C' : 'c',
|
||||
pstate & COMPAT_PSR_V_BIT ? 'V' : 'v',
|
||||
pstate & COMPAT_PSR_Q_BIT ? 'Q' : 'q',
|
||||
pstate & COMPAT_PSR_T_BIT ? "T32" : "A32",
|
||||
pstate & COMPAT_PSR_E_BIT ? "BE" : "LE",
|
||||
pstate & COMPAT_PSR_A_BIT ? 'A' : 'a',
|
||||
pstate & COMPAT_PSR_I_BIT ? 'I' : 'i',
|
||||
pstate & COMPAT_PSR_F_BIT ? 'F' : 'f');
|
||||
pstate & PSR_AA32_N_BIT ? 'N' : 'n',
|
||||
pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
|
||||
pstate & PSR_AA32_C_BIT ? 'C' : 'c',
|
||||
pstate & PSR_AA32_V_BIT ? 'V' : 'v',
|
||||
pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
|
||||
pstate & PSR_AA32_T_BIT ? "T32" : "A32",
|
||||
pstate & PSR_AA32_E_BIT ? "BE" : "LE",
|
||||
pstate & PSR_AA32_A_BIT ? 'A' : 'a',
|
||||
pstate & PSR_AA32_I_BIT ? 'I' : 'i',
|
||||
pstate & PSR_AA32_F_BIT ? 'F' : 'f');
|
||||
} else {
|
||||
printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n",
|
||||
pstate,
|
||||
@ -493,3 +493,25 @@ void arch_setup_new_exec(void)
|
||||
{
|
||||
current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
|
||||
void __used stackleak_check_alloca(unsigned long size)
|
||||
{
|
||||
unsigned long stack_left;
|
||||
unsigned long current_sp = current_stack_pointer;
|
||||
struct stack_info info;
|
||||
|
||||
BUG_ON(!on_accessible_stack(current, current_sp, &info));
|
||||
|
||||
stack_left = current_sp - info.low;
|
||||
|
||||
/*
|
||||
* There's a good chance we're almost out of stack space if this
|
||||
* is true. Using panic() over BUG() is more likely to give
|
||||
* reliable debugging output.
|
||||
*/
|
||||
if (size >= stack_left)
|
||||
panic("alloca() over the kernel stack boundary\n");
|
||||
}
|
||||
EXPORT_SYMBOL(stackleak_check_alloca);
|
||||
#endif
|
||||
|
@ -132,7 +132,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
|
||||
{
|
||||
return ((addr & ~(THREAD_SIZE - 1)) ==
|
||||
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
|
||||
on_irq_stack(addr);
|
||||
on_irq_stack(addr, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -277,19 +277,22 @@ static int ptrace_hbp_set_event(unsigned int note_type,
|
||||
|
||||
switch (note_type) {
|
||||
case NT_ARM_HW_BREAK:
|
||||
if (idx < ARM_MAX_BRP) {
|
||||
tsk->thread.debug.hbp_break[idx] = bp;
|
||||
err = 0;
|
||||
}
|
||||
if (idx >= ARM_MAX_BRP)
|
||||
goto out;
|
||||
idx = array_index_nospec(idx, ARM_MAX_BRP);
|
||||
tsk->thread.debug.hbp_break[idx] = bp;
|
||||
err = 0;
|
||||
break;
|
||||
case NT_ARM_HW_WATCH:
|
||||
if (idx < ARM_MAX_WRP) {
|
||||
tsk->thread.debug.hbp_watch[idx] = bp;
|
||||
err = 0;
|
||||
}
|
||||
if (idx >= ARM_MAX_WRP)
|
||||
goto out;
|
||||
idx = array_index_nospec(idx, ARM_MAX_WRP);
|
||||
tsk->thread.debug.hbp_watch[idx] = bp;
|
||||
err = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1076,6 +1079,7 @@ static int compat_gpr_get(struct task_struct *target,
|
||||
break;
|
||||
case 16:
|
||||
reg = task_pt_regs(target)->pstate;
|
||||
reg = pstate_to_compat_psr(reg);
|
||||
break;
|
||||
case 17:
|
||||
reg = task_pt_regs(target)->orig_x0;
|
||||
@ -1143,6 +1147,7 @@ static int compat_gpr_set(struct task_struct *target,
|
||||
newregs.pc = reg;
|
||||
break;
|
||||
case 16:
|
||||
reg = compat_psr_to_pstate(reg);
|
||||
newregs.pstate = reg;
|
||||
break;
|
||||
case 17:
|
||||
@ -1629,7 +1634,7 @@ static void tracehook_report_syscall(struct pt_regs *regs,
|
||||
regs->regs[regno] = saved_reg;
|
||||
}
|
||||
|
||||
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
|
||||
int syscall_trace_enter(struct pt_regs *regs)
|
||||
{
|
||||
if (test_thread_flag(TIF_SYSCALL_TRACE))
|
||||
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
|
||||
@ -1647,7 +1652,7 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
|
||||
return regs->syscallno;
|
||||
}
|
||||
|
||||
asmlinkage void syscall_trace_exit(struct pt_regs *regs)
|
||||
void syscall_trace_exit(struct pt_regs *regs)
|
||||
{
|
||||
audit_syscall_exit(regs);
|
||||
|
||||
@ -1656,18 +1661,24 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs)
|
||||
|
||||
if (test_thread_flag(TIF_SYSCALL_TRACE))
|
||||
tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
|
||||
|
||||
rseq_syscall(regs);
|
||||
}
|
||||
|
||||
/*
|
||||
* Bits which are always architecturally RES0 per ARM DDI 0487A.h
|
||||
* SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
|
||||
* We also take into account DIT (bit 24), which is not yet documented, and
|
||||
* treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
|
||||
* allocated an EL0 meaning in future.
|
||||
* Userspace cannot use these until they have an architectural meaning.
|
||||
* Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
|
||||
* We also reserve IL for the kernel; SS is handled dynamically.
|
||||
*/
|
||||
#define SPSR_EL1_AARCH64_RES0_BITS \
|
||||
(GENMASK_ULL(63,32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
|
||||
GENMASK_ULL(5, 5))
|
||||
(GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
|
||||
GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
|
||||
#define SPSR_EL1_AARCH32_RES0_BITS \
|
||||
(GENMASK_ULL(63,32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20,20))
|
||||
(GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20))
|
||||
|
||||
static int valid_compat_regs(struct user_pt_regs *regs)
|
||||
{
|
||||
@ -1675,15 +1686,15 @@ static int valid_compat_regs(struct user_pt_regs *regs)
|
||||
|
||||
if (!system_supports_mixed_endian_el0()) {
|
||||
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
|
||||
regs->pstate |= COMPAT_PSR_E_BIT;
|
||||
regs->pstate |= PSR_AA32_E_BIT;
|
||||
else
|
||||
regs->pstate &= ~COMPAT_PSR_E_BIT;
|
||||
regs->pstate &= ~PSR_AA32_E_BIT;
|
||||
}
|
||||
|
||||
if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
|
||||
(regs->pstate & COMPAT_PSR_A_BIT) == 0 &&
|
||||
(regs->pstate & COMPAT_PSR_I_BIT) == 0 &&
|
||||
(regs->pstate & COMPAT_PSR_F_BIT) == 0) {
|
||||
(regs->pstate & PSR_AA32_A_BIT) == 0 &&
|
||||
(regs->pstate & PSR_AA32_I_BIT) == 0 &&
|
||||
(regs->pstate & PSR_AA32_F_BIT) == 0) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -1691,11 +1702,11 @@ static int valid_compat_regs(struct user_pt_regs *regs)
|
||||
* Force PSR to a valid 32-bit EL0t, preserving the same bits as
|
||||
* arch/arm.
|
||||
*/
|
||||
regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
|
||||
COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT |
|
||||
COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK |
|
||||
COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT |
|
||||
COMPAT_PSR_T_BIT;
|
||||
regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
|
||||
PSR_AA32_C_BIT | PSR_AA32_V_BIT |
|
||||
PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
|
||||
PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
|
||||
PSR_AA32_T_BIT;
|
||||
regs->pstate |= PSR_MODE32_BIT;
|
||||
|
||||
return 0;
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include <asm/mmu.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/stacktrace.h>
|
||||
#include <asm/sysreg.h>
|
||||
#include <asm/vmap_stack.h>
|
||||
|
||||
@ -88,23 +89,52 @@ static int init_sdei_stacks(void)
|
||||
return err;
|
||||
}
|
||||
|
||||
bool _on_sdei_stack(unsigned long sp)
|
||||
static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
|
||||
{
|
||||
unsigned long low, high;
|
||||
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
|
||||
unsigned long high = low + SDEI_STACK_SIZE;
|
||||
|
||||
if (sp < low || sp >= high)
|
||||
return false;
|
||||
|
||||
if (info) {
|
||||
info->low = low;
|
||||
info->high = high;
|
||||
info->type = STACK_TYPE_SDEI_NORMAL;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
|
||||
{
|
||||
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
|
||||
unsigned long high = low + SDEI_STACK_SIZE;
|
||||
|
||||
if (sp < low || sp >= high)
|
||||
return false;
|
||||
|
||||
if (info) {
|
||||
info->low = low;
|
||||
info->high = high;
|
||||
info->type = STACK_TYPE_SDEI_CRITICAL;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
|
||||
{
|
||||
if (!IS_ENABLED(CONFIG_VMAP_STACK))
|
||||
return false;
|
||||
|
||||
low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
|
||||
high = low + SDEI_STACK_SIZE;
|
||||
|
||||
if (low <= sp && sp < high)
|
||||
if (on_sdei_critical_stack(sp, info))
|
||||
return true;
|
||||
|
||||
low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
|
||||
high = low + SDEI_STACK_SIZE;
|
||||
if (on_sdei_normal_stack(sp, info))
|
||||
return true;
|
||||
|
||||
return (low <= sp && sp < high);
|
||||
return false;
|
||||
}
|
||||
|
||||
unsigned long sdei_arch_get_entry_point(int conduit)
|
||||
|
@ -241,6 +241,44 @@ static void __init request_standard_resources(void)
|
||||
}
|
||||
}
|
||||
|
||||
static int __init reserve_memblock_reserved_regions(void)
|
||||
{
|
||||
phys_addr_t start, end, roundup_end = 0;
|
||||
struct resource *mem, *res;
|
||||
u64 i;
|
||||
|
||||
for_each_reserved_mem_region(i, &start, &end) {
|
||||
if (end <= roundup_end)
|
||||
continue; /* done already */
|
||||
|
||||
start = __pfn_to_phys(PFN_DOWN(start));
|
||||
end = __pfn_to_phys(PFN_UP(end)) - 1;
|
||||
roundup_end = end;
|
||||
|
||||
res = kzalloc(sizeof(*res), GFP_ATOMIC);
|
||||
if (WARN_ON(!res))
|
||||
return -ENOMEM;
|
||||
res->start = start;
|
||||
res->end = end;
|
||||
res->name = "reserved";
|
||||
res->flags = IORESOURCE_MEM;
|
||||
|
||||
mem = request_resource_conflict(&iomem_resource, res);
|
||||
/*
|
||||
* We expected memblock_reserve() regions to conflict with
|
||||
* memory created by request_standard_resources().
|
||||
*/
|
||||
if (WARN_ON_ONCE(!mem))
|
||||
continue;
|
||||
kfree(res);
|
||||
|
||||
reserve_region_with_split(mem, start, end, "reserved");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(reserve_memblock_reserved_regions);
|
||||
|
||||
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
|
||||
|
||||
void __init setup_arch(char **cmdline_p)
|
||||
|
@@ -539,8 +539,9 @@ static int restore_sigframe(struct pt_regs *regs,
	return err;
}

asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
@@ -802,6 +803,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
@@ -910,7 +913,7 @@ static void do_signal(struct pt_regs *regs)
}

asmlinkage void do_notify_resume(struct pt_regs *regs,
				 unsigned int thread_flags)
				 unsigned long thread_flags)
{
	/*
	 * The assembly code enters us with IRQs off, but it hasn't
@@ -940,6 +943,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
	if (thread_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		rseq_handle_notify_resume(NULL, regs);
	}

	if (thread_flags & _TIF_FOREIGN_FPSTATE)
@ -243,6 +243,7 @@ static int compat_restore_sigframe(struct pt_regs *regs,
|
||||
int err;
|
||||
sigset_t set;
|
||||
struct compat_aux_sigframe __user *aux;
|
||||
unsigned long psr;
|
||||
|
||||
err = get_sigset_t(&set, &sf->uc.uc_sigmask);
|
||||
if (err == 0) {
|
||||
@ -266,7 +267,9 @@ static int compat_restore_sigframe(struct pt_regs *regs,
|
||||
__get_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
|
||||
__get_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
|
||||
__get_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
|
||||
__get_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err);
|
||||
__get_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err);
|
||||
|
||||
regs->pstate = compat_psr_to_pstate(psr);
|
||||
|
||||
/*
|
||||
* Avoid compat_sys_sigreturn() restarting.
|
||||
@ -282,8 +285,9 @@ static int compat_restore_sigframe(struct pt_regs *regs,
|
||||
return err;
|
||||
}
|
||||
|
||||
asmlinkage int compat_sys_sigreturn(struct pt_regs *regs)
|
||||
COMPAT_SYSCALL_DEFINE0(sigreturn)
|
||||
{
|
||||
struct pt_regs *regs = current_pt_regs();
|
||||
struct compat_sigframe __user *frame;
|
||||
|
||||
/* Always make any pending restarted system calls return -EINTR */
|
||||
@ -312,8 +316,9 @@ badframe:
|
||||
return 0;
|
||||
}
|
||||
|
||||
asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs)
|
||||
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
|
||||
{
|
||||
struct pt_regs *regs = current_pt_regs();
|
||||
struct compat_rt_sigframe __user *frame;
|
||||
|
||||
/* Always make any pending restarted system calls return -EINTR */
|
||||
@ -372,22 +377,22 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
|
||||
{
|
||||
compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler);
|
||||
compat_ulong_t retcode;
|
||||
compat_ulong_t spsr = regs->pstate & ~(PSR_f | COMPAT_PSR_E_BIT);
|
||||
compat_ulong_t spsr = regs->pstate & ~(PSR_f | PSR_AA32_E_BIT);
|
||||
int thumb;
|
||||
|
||||
/* Check if the handler is written for ARM or Thumb */
|
||||
thumb = handler & 1;
|
||||
|
||||
if (thumb)
|
||||
spsr |= COMPAT_PSR_T_BIT;
|
||||
spsr |= PSR_AA32_T_BIT;
|
||||
else
|
||||
spsr &= ~COMPAT_PSR_T_BIT;
|
||||
spsr &= ~PSR_AA32_T_BIT;
|
||||
|
||||
/* The IT state must be cleared for both ARM and Thumb-2 */
|
||||
spsr &= ~COMPAT_PSR_IT_MASK;
|
||||
spsr &= ~PSR_AA32_IT_MASK;
|
||||
|
||||
/* Restore the original endianness */
|
||||
spsr |= COMPAT_PSR_ENDSTATE;
|
||||
spsr |= PSR_AA32_ENDSTATE;
|
||||
|
||||
if (ka->sa.sa_flags & SA_RESTORER) {
|
||||
retcode = ptr_to_compat(ka->sa.sa_restorer);
|
||||
@ -414,6 +419,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf,
|
||||
struct pt_regs *regs, sigset_t *set)
|
||||
{
|
||||
struct compat_aux_sigframe __user *aux;
|
||||
unsigned long psr = pstate_to_compat_psr(regs->pstate);
|
||||
int err = 0;
|
||||
|
||||
__put_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err);
|
||||
@ -432,7 +438,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf,
|
||||
__put_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
|
||||
__put_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
|
||||
__put_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
|
||||
__put_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err);
|
||||
__put_user_error(psr, &sf->uc.uc_mcontext.arm_cpsr, err);
|
||||
|
||||
__put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err);
|
||||
/* set the compat FSR WnR */
|
||||
|
@ -225,6 +225,7 @@ asmlinkage notrace void secondary_start_kernel(void)
|
||||
notify_cpu_starting(cpu);
|
||||
|
||||
store_cpu_topology(cpu);
|
||||
numa_add_cpu(cpu);
|
||||
|
||||
/*
|
||||
* OK, now it's safe to let the boot CPU continue. Wait for
|
||||
@ -278,6 +279,9 @@ int __cpu_disable(void)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
remove_cpu_topology(cpu);
|
||||
numa_remove_cpu(cpu);
|
||||
|
||||
/*
|
||||
* Take this CPU offline. Once we clear this, we can't return,
|
||||
* and we must not schedule until we're ready to give up the cpu.
|
||||
@ -518,7 +522,6 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
|
||||
}
|
||||
bootcpu_valid = true;
|
||||
cpu_madt_gicc[0] = *processor;
|
||||
early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -541,8 +544,6 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
|
||||
*/
|
||||
acpi_set_mailbox_entry(cpu_count, processor);
|
||||
|
||||
early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count, hwid));
|
||||
|
||||
cpu_count++;
|
||||
}
|
||||
|
||||
@ -562,8 +563,34 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __init acpi_parse_and_init_cpus(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
/*
|
||||
* do a walk of MADT to determine how many CPUs
|
||||
* we have including disabled CPUs, and get information
|
||||
* we need for SMP init.
|
||||
*/
|
||||
acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
|
||||
acpi_parse_gic_cpu_interface, 0);
|
||||
|
||||
/*
|
||||
* In ACPI, SMP and CPU NUMA information is provided in separate
|
||||
* static tables, namely the MADT and the SRAT.
|
||||
*
|
||||
* Thus, it is simpler to first create the cpu logical map through
|
||||
* an MADT walk and then map the logical cpus to their node ids
|
||||
* as separate steps.
|
||||
*/
|
||||
acpi_map_cpus_to_nodes();
|
||||
|
||||
for (i = 0; i < nr_cpu_ids; i++)
|
||||
early_map_cpu_to_node(i, acpi_numa_get_nid(i));
|
||||
}
|
||||
#else
|
||||
#define acpi_table_parse_madt(...) do { } while (0)
|
||||
#define acpi_parse_and_init_cpus(...) do { } while (0)
|
||||
#endif
|
||||
|
||||
/*
|
||||
@ -636,13 +663,7 @@ void __init smp_init_cpus(void)
|
||||
if (acpi_disabled)
|
||||
of_parse_and_init_cpus();
|
||||
else
|
||||
/*
|
||||
* do a walk of MADT to determine how many CPUs
|
||||
* we have including disabled CPUs, and get information
|
||||
* we need for SMP init
|
||||
*/
|
||||
acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
|
||||
acpi_parse_gic_cpu_interface, 0);
|
||||
acpi_parse_and_init_cpus();
|
||||
|
||||
if (cpu_count > nr_cpu_ids)
|
||||
pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
|
||||
@ -679,6 +700,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
|
||||
this_cpu = smp_processor_id();
|
||||
store_cpu_topology(this_cpu);
|
||||
numa_store_cpu_info(this_cpu);
|
||||
numa_add_cpu(this_cpu);
|
||||
|
||||
/*
|
||||
* If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
|
||||
|
@@ -50,7 +50,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
	if (!tsk)
		tsk = current;

	if (!on_accessible_stack(tsk, fp))
	if (!on_accessible_stack(tsk, fp, NULL))
		return -EINVAL;

	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
@@ -25,11 +25,13 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <asm/cpufeature.h>

asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
			 unsigned long prot, unsigned long flags,
			 unsigned long fd, off_t off)
#include <asm/cpufeature.h>
#include <asm/syscall.h>

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, off_t, off)
{
	if (offset_in_page(off) != 0)
		return -EINVAL;
@@ -42,24 +44,25 @@ SYSCALL_DEFINE1(arm64_personality, unsigned int, personality)
	if (personality(personality) == PER_LINUX32 &&
	    !system_supports_32bit_el0())
		return -EINVAL;
	return sys_personality(personality);
	return ksys_personality(personality);
}

/*
 * Wrappers to pass the pt_regs argument.
 */
asmlinkage long sys_rt_sigreturn_wrapper(void);
#define sys_rt_sigreturn sys_rt_sigreturn_wrapper
#define sys_personality sys_arm64_personality

#undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = sym,
asmlinkage long sys_ni_syscall(const struct pt_regs *);
#define __arm64_sys_ni_syscall sys_ni_syscall

/*
 * The sys_call_table array must be 4K aligned to be accessible from
 * kernel/entry.S.
 */
void * const sys_call_table[__NR_syscalls] __aligned(4096) = {
	[0 ... __NR_syscalls - 1] = sys_ni_syscall,
#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
#include <asm/unistd.h>

#undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,

const syscall_fn_t sys_call_table[__NR_syscalls] = {
	[0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
#include <asm/unistd.h>
};
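Aside (editor's illustration, not part of the diff): after this change every sys_call_table entry takes a struct pt_regs pointer and unpacks its own arguments from the saved registers. A self-contained sketch of that dispatch shape, using made-up names rather than the kernel's SYSCALL_DEFINE machinery:

#include <stdio.h>

/* Hypothetical stand-in types and names; only the dispatch shape mirrors
 * the sys_call_table change above. */
struct fake_pt_regs { unsigned long regs[31]; };
typedef long (*syscall_fn_t)(const struct fake_pt_regs *);

static long my_sys_getpid(const struct fake_pt_regs *regs)
{
	(void)regs;			/* a real wrapper would unpack regs[0..5] */
	return 42;
}

static const syscall_fn_t table[] = { my_sys_getpid };

static long dispatch(unsigned long scno, const struct fake_pt_regs *regs)
{
	if (scno >= sizeof(table) / sizeof(table[0]))
		return -38;		/* out-of-range numbers fall back to "no such syscall" */
	return table[scno](regs);
}

int main(void)
{
	struct fake_pt_regs regs = { { 0 } };

	printf("%ld %ld\n", dispatch(0, &regs), dispatch(1, &regs));
	return 0;
}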
@ -22,31 +22,128 @@
|
||||
*/
|
||||
#define __COMPAT_SYSCALL_NR
|
||||
|
||||
#include <linux/compat.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/syscalls.h>
|
||||
|
||||
asmlinkage long compat_sys_sigreturn_wrapper(void);
|
||||
asmlinkage long compat_sys_rt_sigreturn_wrapper(void);
|
||||
asmlinkage long compat_sys_statfs64_wrapper(void);
|
||||
asmlinkage long compat_sys_fstatfs64_wrapper(void);
|
||||
asmlinkage long compat_sys_pread64_wrapper(void);
|
||||
asmlinkage long compat_sys_pwrite64_wrapper(void);
|
||||
asmlinkage long compat_sys_truncate64_wrapper(void);
|
||||
asmlinkage long compat_sys_ftruncate64_wrapper(void);
|
||||
asmlinkage long compat_sys_readahead_wrapper(void);
|
||||
asmlinkage long compat_sys_fadvise64_64_wrapper(void);
|
||||
asmlinkage long compat_sys_sync_file_range2_wrapper(void);
|
||||
asmlinkage long compat_sys_fallocate_wrapper(void);
|
||||
asmlinkage long compat_sys_mmap2_wrapper(void);
|
||||
#include <asm/syscall.h>
|
||||
|
||||
#undef __SYSCALL
|
||||
#define __SYSCALL(nr, sym) [nr] = sym,
|
||||
asmlinkage long compat_sys_sigreturn(void);
|
||||
asmlinkage long compat_sys_rt_sigreturn(void);
|
||||
|
||||
COMPAT_SYSCALL_DEFINE3(aarch32_statfs64, const char __user *, pathname,
|
||||
compat_size_t, sz, struct compat_statfs64 __user *, buf)
|
||||
{
|
||||
/*
|
||||
* 32-bit ARM applies an OABI compatibility fixup to statfs64 and
|
||||
* fstatfs64 regardless of whether OABI is in use, and therefore
|
||||
* arbitrary binaries may rely upon it, so we must do the same.
|
||||
* For more details, see commit:
|
||||
*
|
||||
* 713c481519f19df9 ("[ARM] 3108/2: old ABI compat: statfs64 and
|
||||
* fstatfs64")
|
||||
*/
|
||||
if (sz == 88)
|
||||
sz = 84;
|
||||
|
||||
return kcompat_sys_statfs64(pathname, sz, buf);
|
||||
}
|
||||
|
||||
COMPAT_SYSCALL_DEFINE3(aarch32_fstatfs64, unsigned int, fd, compat_size_t, sz,
|
||||
struct compat_statfs64 __user *, buf)
|
||||
{
|
||||
/* see aarch32_statfs64 */
|
||||
if (sz == 88)
|
||||
sz = 84;
|
||||
|
||||
return kcompat_sys_fstatfs64(fd, sz, buf);
|
||||
}
|
||||
|
||||
/*
 * The sys_call_table array must be 4K aligned to be accessible from
 * kernel/entry.S.
 * Note: off_4k is always in units of 4K. If we can't do the
 * requested offset because it is not page-aligned, we return -EINVAL.
 */
void * const compat_sys_call_table[__NR_compat_syscalls] __aligned(4096) = {
	[0 ... __NR_compat_syscalls - 1] = sys_ni_syscall,
COMPAT_SYSCALL_DEFINE6(aarch32_mmap2, unsigned long, addr, unsigned long, len,
		       unsigned long, prot, unsigned long, flags,
		       unsigned long, fd, unsigned long, off_4k)
{
	if (off_4k & (~PAGE_MASK >> 12))
		return -EINVAL;

	off_4k >>= (PAGE_SHIFT - 12);

	return ksys_mmap_pgoff(addr, len, prot, flags, fd, off_4k);
}

#ifdef CONFIG_CPU_BIG_ENDIAN
#define arg_u32p(name) u32, name##_hi, u32, name##_lo
#else
#define arg_u32p(name) u32, name##_lo, u32, name##_hi
#endif

#define arg_u64(name) (((u64)name##_hi << 32) | name##_lo)

COMPAT_SYSCALL_DEFINE6(aarch32_pread64, unsigned int, fd, char __user *, buf,
		       size_t, count, u32, __pad, arg_u32p(pos))
{
	return ksys_pread64(fd, buf, count, arg_u64(pos));
}

COMPAT_SYSCALL_DEFINE6(aarch32_pwrite64, unsigned int, fd,
		       const char __user *, buf, size_t, count, u32, __pad,
		       arg_u32p(pos))
{
	return ksys_pwrite64(fd, buf, count, arg_u64(pos));
}

COMPAT_SYSCALL_DEFINE4(aarch32_truncate64, const char __user *, pathname,
		       u32, __pad, arg_u32p(length))
{
	return ksys_truncate(pathname, arg_u64(length));
}

COMPAT_SYSCALL_DEFINE4(aarch32_ftruncate64, unsigned int, fd, u32, __pad,
		       arg_u32p(length))
{
	return ksys_ftruncate(fd, arg_u64(length));
}

COMPAT_SYSCALL_DEFINE5(aarch32_readahead, int, fd, u32, __pad,
		       arg_u32p(offset), size_t, count)
{
	return ksys_readahead(fd, arg_u64(offset), count);
}

COMPAT_SYSCALL_DEFINE6(aarch32_fadvise64_64, int, fd, int, advice,
		       arg_u32p(offset), arg_u32p(len))
{
	return ksys_fadvise64_64(fd, arg_u64(offset), arg_u64(len), advice);
}

COMPAT_SYSCALL_DEFINE6(aarch32_sync_file_range2, int, fd, unsigned int, flags,
		       arg_u32p(offset), arg_u32p(nbytes))
{
	return ksys_sync_file_range(fd, arg_u64(offset), arg_u64(nbytes),
				    flags);
}

COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode,
		       arg_u32p(offset), arg_u32p(len))
{
	return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len));
}

asmlinkage long sys_ni_syscall(const struct pt_regs *);
#define __arm64_sys_ni_syscall sys_ni_syscall

#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
#include <asm/unistd32.h>

#undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,

const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = {
	[0 ... __NR_compat_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
#include <asm/unistd32.h>
};
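Aside (editor's illustration, not from the diff): the arg_u32p()/arg_u64() helpers above split a 64-bit compat argument across two 32-bit registers and stitch it back together. A standalone sketch of the reassembly, with illustrative names:

#include <stdint.h>
#include <stdio.h>

/* Mirrors arg_u64(): the hi/lo halves arrive as separate 32-bit arguments. */
static uint64_t join_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* hi=0x1, lo=0x2 reassembles to 0x100000002 */
	printf("0x%llx\n", (unsigned long long)join_u64(0x1, 0x2));
	return 0;
}

Which register carries the high half depends on endianness, which is why arg_u32p() swaps the argument order under CONFIG_CPU_BIG_ENDIAN.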
arch/arm64/kernel/syscall.c (new file, 139 lines)
@@ -0,0 +1,139 @@
// SPDX-License-Identifier: GPL-2.0

#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/errno.h>
#include <linux/nospec.h>
#include <linux/ptrace.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

long compat_arm_syscall(struct pt_regs *regs);

long sys_ni_syscall(void);

asmlinkage long do_ni_syscall(struct pt_regs *regs)
{
#ifdef CONFIG_COMPAT
	long ret;
	if (is_compat_task()) {
		ret = compat_arm_syscall(regs);
		if (ret != -ENOSYS)
			return ret;
	}
#endif

	return sys_ni_syscall();
}

static long __invoke_syscall(struct pt_regs *regs, syscall_fn_t syscall_fn)
{
	return syscall_fn(regs);
}

static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
			   unsigned int sc_nr,
			   const syscall_fn_t syscall_table[])
{
	long ret;

	if (scno < sc_nr) {
		syscall_fn_t syscall_fn;
		syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
		ret = __invoke_syscall(regs, syscall_fn);
	} else {
		ret = do_ni_syscall(regs);
	}

	regs->regs[0] = ret;
}

static inline bool has_syscall_work(unsigned long flags)
{
	return unlikely(flags & _TIF_SYSCALL_WORK);
}

int syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_exit(struct pt_regs *regs);

static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
			   const syscall_fn_t syscall_table[])
{
	unsigned long flags = current_thread_info()->flags;

	regs->orig_x0 = regs->regs[0];
	regs->syscallno = scno;

	local_daif_restore(DAIF_PROCCTX);
	user_exit();

	if (has_syscall_work(flags)) {
		/* set default errno for user-issued syscall(-1) */
		if (scno == NO_SYSCALL)
			regs->regs[0] = -ENOSYS;
		scno = syscall_trace_enter(regs);
		if (scno == NO_SYSCALL)
			goto trace_exit;
	}

	invoke_syscall(regs, scno, sc_nr, syscall_table);

	/*
	 * The tracing status may have changed under our feet, so we have to
	 * check again. However, if we were tracing entry, then we always trace
	 * exit regardless, as the old entry assembly did.
	 */
	if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
		local_daif_mask();
		flags = current_thread_info()->flags;
		if (!has_syscall_work(flags)) {
			/*
			 * We're off to userspace, where interrupts are
			 * always enabled after we restore the flags from
			 * the SPSR.
			 */
			trace_hardirqs_on();
			return;
		}
		local_daif_restore(DAIF_PROCCTX);
	}

trace_exit:
	syscall_trace_exit(regs);
}

static inline void sve_user_discard(void)
{
	if (!system_supports_sve())
		return;

	clear_thread_flag(TIF_SVE);

	/*
	 * task_fpsimd_load() won't be called to update CPACR_EL1 in
	 * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
	 * happens if a context switch or kernel_neon_begin() or context
	 * modification (sigreturn, ptrace) intervenes.
	 * So, ensure that CPACR_EL1 is already correct for the fast-path case.
	 */
	sve_user_disable();
}

asmlinkage void el0_svc_handler(struct pt_regs *regs)
{
	sve_user_discard();
	el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
}

#ifdef CONFIG_COMPAT
asmlinkage void el0_svc_compat_handler(struct pt_regs *regs)
{
	el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
		       compat_sys_call_table);
}
#endif
@ -215,11 +215,16 @@ EXPORT_SYMBOL_GPL(cpu_topology);
|
||||
|
||||
const struct cpumask *cpu_coregroup_mask(int cpu)
|
||||
{
|
||||
const cpumask_t *core_mask = &cpu_topology[cpu].core_sibling;
|
||||
const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
|
||||
|
||||
/* Find the smaller of NUMA, core or LLC siblings */
|
||||
if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
|
||||
/* not numa in package, lets use the package siblings */
|
||||
core_mask = &cpu_topology[cpu].core_sibling;
|
||||
}
|
||||
if (cpu_topology[cpu].llc_id != -1) {
|
||||
if (cpumask_subset(&cpu_topology[cpu].llc_siblings, core_mask))
|
||||
core_mask = &cpu_topology[cpu].llc_siblings;
|
||||
if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
|
||||
core_mask = &cpu_topology[cpu].llc_sibling;
|
||||
}
|
||||
|
||||
return core_mask;
|
||||
@ -231,27 +236,25 @@ static void update_siblings_masks(unsigned int cpuid)
|
||||
int cpu;
|
||||
|
||||
/* update core and thread sibling masks */
|
||||
for_each_possible_cpu(cpu) {
|
||||
for_each_online_cpu(cpu) {
|
||||
cpu_topo = &cpu_topology[cpu];
|
||||
|
||||
if (cpuid_topo->llc_id == cpu_topo->llc_id) {
|
||||
cpumask_set_cpu(cpu, &cpuid_topo->llc_siblings);
|
||||
cpumask_set_cpu(cpuid, &cpu_topo->llc_siblings);
|
||||
cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
|
||||
cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
|
||||
}
|
||||
|
||||
if (cpuid_topo->package_id != cpu_topo->package_id)
|
||||
continue;
|
||||
|
||||
cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
|
||||
if (cpu != cpuid)
|
||||
cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
|
||||
cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
|
||||
|
||||
if (cpuid_topo->core_id != cpu_topo->core_id)
|
||||
continue;
|
||||
|
||||
cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
|
||||
if (cpu != cpuid)
|
||||
cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
|
||||
cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
|
||||
}
|
||||
}
|
||||
|
||||
@ -293,6 +296,19 @@ topology_populated:
|
||||
update_siblings_masks(cpuid);
|
||||
}
|
||||
|
||||
static void clear_cpu_topology(int cpu)
|
||||
{
|
||||
struct cpu_topology *cpu_topo = &cpu_topology[cpu];
|
||||
|
||||
cpumask_clear(&cpu_topo->llc_sibling);
|
||||
cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
|
||||
|
||||
cpumask_clear(&cpu_topo->core_sibling);
|
||||
cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
|
||||
cpumask_clear(&cpu_topo->thread_sibling);
|
||||
cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
|
||||
}
|
||||
|
||||
static void __init reset_cpu_topology(void)
|
||||
{
|
||||
unsigned int cpu;
|
||||
@ -303,18 +319,26 @@ static void __init reset_cpu_topology(void)
|
||||
cpu_topo->thread_id = -1;
|
||||
cpu_topo->core_id = 0;
|
||||
cpu_topo->package_id = -1;
|
||||
|
||||
cpu_topo->llc_id = -1;
|
||||
cpumask_clear(&cpu_topo->llc_siblings);
|
||||
cpumask_set_cpu(cpu, &cpu_topo->llc_siblings);
|
||||
|
||||
cpumask_clear(&cpu_topo->core_sibling);
|
||||
cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
|
||||
cpumask_clear(&cpu_topo->thread_sibling);
|
||||
cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
|
||||
clear_cpu_topology(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
void remove_cpu_topology(unsigned int cpu)
|
||||
{
|
||||
int sibling;
|
||||
|
||||
for_each_cpu(sibling, topology_core_cpumask(cpu))
|
||||
cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
|
||||
for_each_cpu(sibling, topology_sibling_cpumask(cpu))
|
||||
cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
|
||||
for_each_cpu(sibling, topology_llc_cpumask(cpu))
|
||||
cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
|
||||
|
||||
clear_cpu_topology(cpu);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
/*
|
||||
* Propagate the topology information of the processor_topology_node tree to the
|
||||
|
@ -411,7 +411,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
|
||||
|
||||
void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
|
||||
{
|
||||
config_sctlr_el1(SCTLR_EL1_UCI, 0);
|
||||
sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
|
||||
}
|
||||
|
||||
#define __user_cache_maint(insn, address, res) \
|
||||
@ -547,22 +547,6 @@ asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
|
||||
do_undefinstr(regs);
|
||||
}
|
||||
|
||||
long compat_arm_syscall(struct pt_regs *regs);
|
||||
|
||||
asmlinkage long do_ni_syscall(struct pt_regs *regs)
|
||||
{
|
||||
#ifdef CONFIG_COMPAT
|
||||
long ret;
|
||||
if (is_compat_task()) {
|
||||
ret = compat_arm_syscall(regs);
|
||||
if (ret != -ENOSYS)
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
return sys_ni_syscall();
|
||||
}
|
||||
|
||||
static const char *esr_class_str[] = {
|
||||
[0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC",
|
||||
[ESR_ELx_EC_UNKNOWN] = "Unknown/Uncategorized",
|
||||
|
@ -107,14 +107,14 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
||||
}
|
||||
|
||||
if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
|
||||
u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
|
||||
u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
|
||||
switch (mode) {
|
||||
case COMPAT_PSR_MODE_USR:
|
||||
case COMPAT_PSR_MODE_FIQ:
|
||||
case COMPAT_PSR_MODE_IRQ:
|
||||
case COMPAT_PSR_MODE_SVC:
|
||||
case COMPAT_PSR_MODE_ABT:
|
||||
case COMPAT_PSR_MODE_UND:
|
||||
case PSR_AA32_MODE_USR:
|
||||
case PSR_AA32_MODE_FIQ:
|
||||
case PSR_AA32_MODE_IRQ:
|
||||
case PSR_AA32_MODE_SVC:
|
||||
case PSR_AA32_MODE_ABT:
|
||||
case PSR_AA32_MODE_UND:
|
||||
case PSR_MODE_EL0t:
|
||||
case PSR_MODE_EL1t:
|
||||
case PSR_MODE_EL1h:
|
||||
|
@ -3,7 +3,8 @@
|
||||
# Makefile for Kernel-based Virtual Machine module, HYP part
|
||||
#
|
||||
|
||||
ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
|
||||
ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING \
|
||||
$(DISABLE_STACKLEAK_PLUGIN)
|
||||
|
||||
KVM=../../../../virt/kvm
|
||||
|
||||
|
@ -27,7 +27,7 @@
|
||||
static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (vcpu_mode_is_32bit(vcpu))
|
||||
return !!(read_sysreg_el2(spsr) & COMPAT_PSR_E_BIT);
|
||||
return !!(read_sysreg_el2(spsr) & PSR_AA32_E_BIT);
|
||||
|
||||
return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
|
||||
}
|
||||
|
@ -112,22 +112,22 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
|
||||
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
|
||||
{
|
||||
unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
|
||||
unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
|
||||
unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
|
||||
|
||||
switch (mode) {
|
||||
case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC:
|
||||
case PSR_AA32_MODE_USR ... PSR_AA32_MODE_SVC:
|
||||
mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
|
||||
break;
|
||||
|
||||
case COMPAT_PSR_MODE_ABT:
|
||||
case PSR_AA32_MODE_ABT:
|
||||
mode = 4;
|
||||
break;
|
||||
|
||||
case COMPAT_PSR_MODE_UND:
|
||||
case PSR_AA32_MODE_UND:
|
||||
mode = 5;
|
||||
break;
|
||||
|
||||
case COMPAT_PSR_MODE_SYS:
|
||||
case PSR_AA32_MODE_SYS:
|
||||
mode = 0; /* SYS maps to USR */
|
||||
break;
|
||||
|
||||
@ -143,13 +143,13 @@ unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
|
||||
*/
|
||||
static int vcpu_spsr32_mode(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
|
||||
unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
|
||||
switch (mode) {
|
||||
case COMPAT_PSR_MODE_SVC: return KVM_SPSR_SVC;
|
||||
case COMPAT_PSR_MODE_ABT: return KVM_SPSR_ABT;
|
||||
case COMPAT_PSR_MODE_UND: return KVM_SPSR_UND;
|
||||
case COMPAT_PSR_MODE_IRQ: return KVM_SPSR_IRQ;
|
||||
case COMPAT_PSR_MODE_FIQ: return KVM_SPSR_FIQ;
|
||||
case PSR_AA32_MODE_SVC: return KVM_SPSR_SVC;
|
||||
case PSR_AA32_MODE_ABT: return KVM_SPSR_ABT;
|
||||
case PSR_AA32_MODE_UND: return KVM_SPSR_UND;
|
||||
case PSR_AA32_MODE_IRQ: return KVM_SPSR_IRQ;
|
||||
case PSR_AA32_MODE_FIQ: return KVM_SPSR_FIQ;
|
||||
default: BUG();
|
||||
}
|
||||
}
|
||||
|
@ -42,8 +42,8 @@ static const struct kvm_regs default_regs_reset = {
|
||||
};
|
||||
|
||||
static const struct kvm_regs default_regs_reset32 = {
|
||||
.regs.pstate = (COMPAT_PSR_MODE_SVC | COMPAT_PSR_A_BIT |
|
||||
COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT),
|
||||
.regs.pstate = (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT |
|
||||
PSR_AA32_I_BIT | PSR_AA32_F_BIT),
|
||||
};
|
||||
|
||||
static bool cpu_has_32bit_el1(void)
|
||||
|
@ -35,7 +35,7 @@
|
||||
* - start - virtual start address of region
|
||||
* - end - virtual end address of region
|
||||
*/
|
||||
ENTRY(flush_icache_range)
|
||||
ENTRY(__flush_icache_range)
|
||||
/* FALLTHROUGH */
|
||||
|
||||
/*
|
||||
@ -77,7 +77,7 @@ alternative_else_nop_endif
|
||||
9:
|
||||
mov x0, #-EFAULT
|
||||
b 1b
|
||||
ENDPROC(flush_icache_range)
|
||||
ENDPROC(__flush_icache_range)
|
||||
ENDPROC(__flush_cache_user_range)
|
||||
|
||||
/*
|
||||
|
@ -874,7 +874,7 @@ void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
|
||||
*/
|
||||
WARN_ON_ONCE(in_interrupt());
|
||||
|
||||
config_sctlr_el1(SCTLR_EL1_SPAN, 0);
|
||||
sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
|
||||
asm(SET_PSTATE_PAN(1));
|
||||
}
|
||||
#endif /* CONFIG_ARM64_PAN */
|
||||
|
@ -66,6 +66,7 @@ void __sync_icache_dcache(pte_t pte)
|
||||
sync_icache_aliases(page_address(page),
|
||||
PAGE_SIZE << compound_order(page));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__sync_icache_dcache);
|
||||
|
||||
/*
|
||||
* This function is called when a page has been modified by the kernel. Mark
|
||||
@ -82,7 +83,7 @@ EXPORT_SYMBOL(flush_dcache_page);
|
||||
/*
|
||||
* Additional functions defined in assembly.
|
||||
*/
|
||||
EXPORT_SYMBOL(flush_icache_range);
|
||||
EXPORT_SYMBOL(__flush_icache_range);
|
||||
|
||||
#ifdef CONFIG_ARCH_HAS_PMEM_API
|
||||
void arch_wb_cache_pmem(void *addr, size_t size)
|
||||
|
@ -45,6 +45,7 @@
|
||||
#include <asm/memblock.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/ptdump.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
#define NO_BLOCK_MAPPINGS BIT(0)
|
||||
#define NO_CONT_MAPPINGS BIT(1)
|
||||
@ -977,12 +978,51 @@ int pmd_clear_huge(pmd_t *pmdp)
|
||||
return 1;
|
||||
}
|
||||
|
||||
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
|
||||
int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
|
||||
{
|
||||
return pud_none(*pud);
|
||||
pte_t *table;
|
||||
pmd_t pmd;
|
||||
|
||||
pmd = READ_ONCE(*pmdp);
|
||||
|
||||
/* No-op for empty entry and WARN_ON for valid entry */
|
||||
if (!pmd_present(pmd) || !pmd_table(pmd)) {
|
||||
VM_WARN_ON(!pmd_table(pmd));
|
||||
return 1;
|
||||
}
|
||||
|
||||
table = pte_offset_kernel(pmdp, addr);
|
||||
pmd_clear(pmdp);
|
||||
__flush_tlb_kernel_pgtable(addr);
|
||||
pte_free_kernel(NULL, table);
|
||||
return 1;
|
||||
}
|
||||
|
||||
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
|
||||
int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
|
||||
{
|
||||
return pmd_none(*pmd);
|
||||
pmd_t *table;
|
||||
pmd_t *pmdp;
|
||||
pud_t pud;
|
||||
unsigned long next, end;
|
||||
|
||||
pud = READ_ONCE(*pudp);
|
||||
|
||||
/* No-op for empty entry and WARN_ON for valid entry */
|
||||
if (!pud_present(pud) || !pud_table(pud)) {
|
||||
VM_WARN_ON(!pud_table(pud));
|
||||
return 1;
|
||||
}
|
||||
|
||||
table = pmd_offset(pudp, addr);
|
||||
pmdp = table;
|
||||
next = addr;
|
||||
end = addr + PUD_SIZE;
|
||||
do {
|
||||
pmd_free_pte_page(pmdp, next);
|
||||
} while (pmdp++, next += PMD_SIZE, next != end);
|
||||
|
||||
pud_clear(pudp);
|
||||
__flush_tlb_kernel_pgtable(addr);
|
||||
pmd_free(NULL, table);
|
||||
return 1;
|
||||
}
|
||||
|
@ -70,19 +70,32 @@ EXPORT_SYMBOL(cpumask_of_node);
|
||||
|
||||
#endif
|
||||
|
||||
static void map_cpu_to_node(unsigned int cpu, int nid)
|
||||
static void numa_update_cpu(unsigned int cpu, bool remove)
|
||||
{
|
||||
set_cpu_numa_node(cpu, nid);
|
||||
if (nid >= 0)
|
||||
int nid = cpu_to_node(cpu);
|
||||
|
||||
if (nid == NUMA_NO_NODE)
|
||||
return;
|
||||
|
||||
if (remove)
|
||||
cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]);
|
||||
else
|
||||
cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
|
||||
}
|
||||
|
||||
void numa_add_cpu(unsigned int cpu)
|
||||
{
|
||||
numa_update_cpu(cpu, false);
|
||||
}
|
||||
|
||||
void numa_remove_cpu(unsigned int cpu)
|
||||
{
|
||||
numa_update_cpu(cpu, true);
|
||||
}
|
||||
|
||||
void numa_clear_node(unsigned int cpu)
|
||||
{
|
||||
int nid = cpu_to_node(cpu);
|
||||
|
||||
if (nid >= 0)
|
||||
cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]);
|
||||
numa_remove_cpu(cpu);
|
||||
set_cpu_numa_node(cpu, NUMA_NO_NODE);
|
||||
}
|
||||
|
||||
@ -116,7 +129,7 @@ static void __init setup_node_to_cpumask_map(void)
|
||||
*/
|
||||
void numa_store_cpu_info(unsigned int cpu)
|
||||
{
|
||||
map_cpu_to_node(cpu, cpu_to_node_map[cpu]);
|
||||
set_cpu_numa_node(cpu, cpu_to_node_map[cpu]);
|
||||
}
|
||||
|
||||
void __init early_map_cpu_to_node(unsigned int cpu, int nid)
|
||||
|
@ -10,18 +10,7 @@ static int ptdump_show(struct seq_file *m, void *v)
|
||||
ptdump_walk_pgd(m, info);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ptdump_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, ptdump_show, inode->i_private);
|
||||
}
|
||||
|
||||
static const struct file_operations ptdump_fops = {
|
||||
.open = ptdump_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
DEFINE_SHOW_ATTRIBUTE(ptdump);
|
||||
|
||||
int ptdump_debugfs_register(struct ptdump_info *info, const char *name)
|
||||
{
|
||||
|
@ -16,6 +16,7 @@ config IA64
|
||||
select ARCH_MIGHT_HAVE_PC_SERIO
|
||||
select PCI if (!IA64_HP_SIM)
|
||||
select ACPI if (!IA64_HP_SIM)
|
||||
select ARCH_SUPPORTS_ACPI if (!IA64_HP_SIM)
|
||||
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
|
||||
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
|
||||
select HAVE_UNSTABLE_SCHED_CLOCK
|
||||
|
@ -75,6 +75,7 @@ config X86
|
||||
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
|
||||
select ARCH_MIGHT_HAVE_PC_PARPORT
|
||||
select ARCH_MIGHT_HAVE_PC_SERIO
|
||||
select ARCH_SUPPORTS_ACPI
|
||||
select ARCH_SUPPORTS_ATOMIC_RMW
|
||||
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
|
||||
select ARCH_USE_BUILTIN_BSWAP
|
||||
|
@ -5,11 +5,10 @@
|
||||
|
||||
menuconfig ACPI
|
||||
bool "ACPI (Advanced Configuration and Power Interface) Support"
|
||||
depends on !IA64_HP_SIM
|
||||
depends on IA64 || X86 || ARM64
|
||||
depends on ARCH_SUPPORTS_ACPI
|
||||
depends on PCI
|
||||
select PNP
|
||||
default y if (IA64 || X86)
|
||||
default y if X86
|
||||
help
|
||||
Advanced Configuration and Power Interface (ACPI) support for
|
||||
Linux requires an ACPI-compliant platform (hardware/firmware),
|
||||
@ -41,6 +40,9 @@ menuconfig ACPI
|
||||
<http://www.acpi.info>
|
||||
<http://www.uefi.org/acpi/specs>
|
||||
|
||||
config ARCH_SUPPORTS_ACPI
|
||||
bool
|
||||
|
||||
if ACPI
|
||||
|
||||
config ACPI_LEGACY_TABLES_LOOKUP
|
||||
|
@ -259,7 +259,6 @@ void __init efi_init(void)
|
||||
|
||||
reserve_regions();
|
||||
efi_esrt_init();
|
||||
efi_memmap_unmap();
|
||||
|
||||
memblock_reserve(params.mmap & PAGE_MASK,
|
||||
PAGE_ALIGN(params.mmap_size +
|
||||
|
@ -110,11 +110,20 @@ static int __init arm_enable_runtime_services(void)
|
||||
{
|
||||
u64 mapsize;
|
||||
|
||||
if (!efi_enabled(EFI_BOOT)) {
|
||||
if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) {
|
||||
pr_info("EFI services will not be available.\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
efi_memmap_unmap();
|
||||
|
||||
mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
|
||||
|
||||
if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
|
||||
pr_err("Failed to remap EFI memory map\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (efi_runtime_disabled()) {
|
||||
pr_info("EFI runtime services will be disabled.\n");
|
||||
return 0;
|
||||
@ -127,13 +136,6 @@ static int __init arm_enable_runtime_services(void)
|
||||
|
||||
pr_info("Remapping and enabling EFI services.\n");
|
||||
|
||||
mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
|
||||
|
||||
if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
|
||||
pr_err("Failed to remap EFI memory map\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (!efi_virtmap_init()) {
|
||||
pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
|
||||
return -ENOMEM;
|
||||
|
@ -11,7 +11,10 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
|
||||
-fPIC -fno-strict-aliasing -mno-red-zone \
|
||||
-mno-mmx -mno-sse -fshort-wchar
|
||||
|
||||
cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
|
||||
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
|
||||
# disable the stackleak plugin
|
||||
cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie \
|
||||
$(DISABLE_STACKLEAK_PLUGIN)
|
||||
cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
|
||||
-fno-builtin -fpic -mno-single-pic-base
|
||||
|
||||
@ -20,7 +23,7 @@ cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
|
||||
KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
|
||||
-D__NO_FORTIFY \
|
||||
$(call cc-option,-ffreestanding) \
|
||||
$(call cc-option,-fno-stack-protector)
|
||||
$(call cc-option,-fno-stack-protector) \
|
||||
|
||||
GCOV_PROFILE := n
|
||||
KASAN_SANITIZE := n
|
||||
|
@ -53,6 +53,16 @@ enum {
|
||||
CCI_IF_MAX,
|
||||
};
|
||||
|
||||
#define NUM_HW_CNTRS_CII_4XX 4
|
||||
#define NUM_HW_CNTRS_CII_5XX 8
|
||||
#define NUM_HW_CNTRS_MAX NUM_HW_CNTRS_CII_5XX
|
||||
|
||||
#define FIXED_HW_CNTRS_CII_4XX 1
|
||||
#define FIXED_HW_CNTRS_CII_5XX 0
|
||||
#define FIXED_HW_CNTRS_MAX FIXED_HW_CNTRS_CII_4XX
|
||||
|
||||
#define HW_CNTRS_MAX (NUM_HW_CNTRS_MAX + FIXED_HW_CNTRS_MAX)
|
||||
|
||||
struct event_range {
|
||||
u32 min;
|
||||
u32 max;
|
||||
@ -633,8 +643,7 @@ static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
|
||||
{
|
||||
int i;
|
||||
struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
|
||||
|
||||
DECLARE_BITMAP(mask, cci_pmu->num_cntrs);
|
||||
DECLARE_BITMAP(mask, HW_CNTRS_MAX);
|
||||
|
||||
bitmap_zero(mask, cci_pmu->num_cntrs);
|
||||
for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
|
||||
@ -940,7 +949,7 @@ static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
|
||||
static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
|
||||
{
|
||||
int i;
|
||||
DECLARE_BITMAP(saved_mask, cci_pmu->num_cntrs);
|
||||
DECLARE_BITMAP(saved_mask, HW_CNTRS_MAX);
|
||||
|
||||
bitmap_zero(saved_mask, cci_pmu->num_cntrs);
|
||||
pmu_save_counters(cci_pmu, saved_mask);
|
||||
@ -1245,7 +1254,7 @@ static int validate_group(struct perf_event *event)
|
||||
{
|
||||
struct perf_event *sibling, *leader = event->group_leader;
|
||||
struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
|
||||
unsigned long mask[BITS_TO_LONGS(cci_pmu->num_cntrs)];
|
||||
unsigned long mask[BITS_TO_LONGS(HW_CNTRS_MAX)];
|
||||
struct cci_pmu_hw_events fake_pmu = {
|
||||
/*
|
||||
* Initialise the fake PMU. We only need to populate the
|
||||
@ -1403,6 +1412,11 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
|
||||
char *name = model->name;
|
||||
u32 num_cntrs;
|
||||
|
||||
if (WARN_ON(model->num_hw_cntrs > NUM_HW_CNTRS_MAX))
|
||||
return -EINVAL;
|
||||
if (WARN_ON(model->fixed_hw_cntrs > FIXED_HW_CNTRS_MAX))
|
||||
return -EINVAL;
|
||||
|
||||
pmu_event_attr_group.attrs = model->event_attrs;
|
||||
pmu_format_attr_group.attrs = model->format_attrs;
|
||||
|
||||
@ -1455,8 +1469,8 @@ static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
|
||||
#ifdef CONFIG_ARM_CCI400_PMU
|
||||
[CCI400_R0] = {
|
||||
.name = "CCI_400",
|
||||
.fixed_hw_cntrs = 1, /* Cycle counter */
|
||||
.num_hw_cntrs = 4,
|
||||
.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */
|
||||
.num_hw_cntrs = NUM_HW_CNTRS_CII_4XX,
|
||||
.cntr_size = SZ_4K,
|
||||
.format_attrs = cci400_pmu_format_attrs,
|
||||
.event_attrs = cci400_r0_pmu_event_attrs,
|
||||
@ -1475,8 +1489,8 @@ static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
|
||||
},
|
||||
[CCI400_R1] = {
|
||||
.name = "CCI_400_r1",
|
||||
.fixed_hw_cntrs = 1, /* Cycle counter */
|
||||
.num_hw_cntrs = 4,
|
||||
.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */
|
||||
.num_hw_cntrs = NUM_HW_CNTRS_CII_4XX,
|
||||
.cntr_size = SZ_4K,
|
||||
.format_attrs = cci400_pmu_format_attrs,
|
||||
.event_attrs = cci400_r1_pmu_event_attrs,
|
||||
@ -1497,8 +1511,8 @@ static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
|
||||
#ifdef CONFIG_ARM_CCI5xx_PMU
|
||||
[CCI500_R0] = {
|
||||
.name = "CCI_500",
|
||||
.fixed_hw_cntrs = 0,
|
||||
.num_hw_cntrs = 8,
|
||||
.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX,
|
||||
.num_hw_cntrs = NUM_HW_CNTRS_CII_5XX,
|
||||
.cntr_size = SZ_64K,
|
||||
.format_attrs = cci5xx_pmu_format_attrs,
|
||||
.event_attrs = cci5xx_pmu_event_attrs,
|
||||
@ -1521,8 +1535,8 @@ static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
|
||||
},
|
||||
[CCI550_R0] = {
|
||||
.name = "CCI_550",
|
||||
.fixed_hw_cntrs = 0,
|
||||
.num_hw_cntrs = 8,
|
||||
.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX,
|
||||
.num_hw_cntrs = NUM_HW_CNTRS_CII_5XX,
|
||||
.cntr_size = SZ_64K,
|
||||
.format_attrs = cci5xx_pmu_format_attrs,
|
||||
.event_attrs = cci5xx_pmu_event_attrs,
|
||||
|
@ -1485,17 +1485,9 @@ static int arm_ccn_probe(struct platform_device *pdev)
|
||||
platform_set_drvdata(pdev, ccn);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (!res)
|
||||
return -EINVAL;
|
||||
|
||||
if (!devm_request_mem_region(ccn->dev, res->start,
|
||||
resource_size(res), pdev->name))
|
||||
return -EBUSY;
|
||||
|
||||
ccn->base = devm_ioremap(ccn->dev, res->start,
|
||||
resource_size(res));
|
||||
if (!ccn->base)
|
||||
return -EFAULT;
|
||||
ccn->base = devm_ioremap_resource(ccn->dev, res);
|
||||
if (IS_ERR(ccn->base))
|
||||
return PTR_ERR(ccn->base);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
|
||||
if (!res)
|
||||
|
@ -28,6 +28,14 @@
|
||||
static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
|
||||
static DEFINE_PER_CPU(int, cpu_irq);
|
||||
|
||||
static inline u64 arm_pmu_event_max_period(struct perf_event *event)
|
||||
{
|
||||
if (event->hw.flags & ARMPMU_EVT_64BIT)
|
||||
return GENMASK_ULL(63, 0);
|
||||
else
|
||||
return GENMASK_ULL(31, 0);
|
||||
}
|
||||
|
||||
static int
|
||||
armpmu_map_cache_event(const unsigned (*cache_map)
|
||||
[PERF_COUNT_HW_CACHE_MAX]
|
||||
@ -114,8 +122,10 @@ int armpmu_event_set_period(struct perf_event *event)
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
s64 left = local64_read(&hwc->period_left);
|
||||
s64 period = hwc->sample_period;
|
||||
u64 max_period;
|
||||
int ret = 0;
|
||||
|
||||
max_period = arm_pmu_event_max_period(event);
|
||||
if (unlikely(left <= -period)) {
|
||||
left = period;
|
||||
local64_set(&hwc->period_left, left);
|
||||
@ -136,12 +146,12 @@ int armpmu_event_set_period(struct perf_event *event)
|
||||
* effect we are reducing max_period to account for
|
||||
* interrupt latency (and we are being very conservative).
|
||||
*/
|
||||
if (left > (armpmu->max_period >> 1))
|
||||
left = armpmu->max_period >> 1;
|
||||
if (left > (max_period >> 1))
|
||||
left = (max_period >> 1);
|
||||
|
||||
local64_set(&hwc->prev_count, (u64)-left);
|
||||
|
||||
armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
|
||||
armpmu->write_counter(event, (u64)(-left) & max_period);
|
||||
|
||||
perf_event_update_userpage(event);
|
||||
|
||||
@ -153,6 +163,7 @@ u64 armpmu_event_update(struct perf_event *event)
|
||||
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
u64 delta, prev_raw_count, new_raw_count;
|
||||
u64 max_period = arm_pmu_event_max_period(event);
|
||||
|
||||
again:
|
||||
prev_raw_count = local64_read(&hwc->prev_count);
|
||||
@ -162,7 +173,7 @@ again:
|
||||
new_raw_count) != prev_raw_count)
|
||||
goto again;
|
||||
|
||||
delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
|
||||
delta = (new_raw_count - prev_raw_count) & max_period;
|
||||
|
||||
local64_add(delta, &event->count);
|
||||
local64_sub(delta, &hwc->period_left);
|
||||
@ -227,11 +238,10 @@ armpmu_del(struct perf_event *event, int flags)
|
||||
|
||||
armpmu_stop(event, PERF_EF_UPDATE);
|
||||
hw_events->events[idx] = NULL;
|
||||
clear_bit(idx, hw_events->used_mask);
|
||||
if (armpmu->clear_event_idx)
|
||||
armpmu->clear_event_idx(hw_events, event);
|
||||
|
||||
armpmu->clear_event_idx(hw_events, event);
|
||||
perf_event_update_userpage(event);
|
||||
/* Clear the allocated counter */
|
||||
hwc->idx = -1;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -360,6 +370,7 @@ __hw_perf_event_init(struct perf_event *event)
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
int mapping;
|
||||
|
||||
hwc->flags = 0;
|
||||
mapping = armpmu->map_event(event);
|
||||
|
||||
if (mapping < 0) {
|
||||
@ -402,7 +413,7 @@ __hw_perf_event_init(struct perf_event *event)
|
||||
* is far less likely to overtake the previous one unless
|
||||
* you have some serious IRQ latency issues.
|
||||
*/
|
||||
hwc->sample_period = armpmu->max_period >> 1;
|
||||
hwc->sample_period = arm_pmu_event_max_period(event) >> 1;
|
||||
hwc->last_period = hwc->sample_period;
|
||||
local64_set(&hwc->period_left, hwc->sample_period);
|
||||
}
|
||||
@ -654,14 +665,9 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
|
||||
int idx;
|
||||
|
||||
for (idx = 0; idx < armpmu->num_events; idx++) {
|
||||
/*
|
||||
* If the counter is not used skip it, there is no
|
||||
* need of stopping/restarting it.
|
||||
*/
|
||||
if (!test_bit(idx, hw_events->used_mask))
|
||||
continue;
|
||||
|
||||
event = hw_events->events[idx];
|
||||
if (!event)
|
||||
continue;
|
||||
|
||||
switch (cmd) {
|
||||
case CPU_PM_ENTER:
|
||||
|
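Aside (editor's illustration, not part of the diff): armpmu_event_update() above masks the raw counter difference with a per-event maximum so that a wrapped 32-bit counter still yields the right delta, while chained 64-bit events use the full-width mask. A standalone sketch of that arithmetic with illustrative names:

#include <stdint.h>
#include <stdio.h>

static uint64_t max_period(int is_64bit)
{
	/* GENMASK_ULL(63, 0) vs GENMASK_ULL(31, 0) in the kernel code */
	return is_64bit ? UINT64_MAX : UINT32_MAX;
}

static uint64_t delta(uint64_t prev, uint64_t now, int is_64bit)
{
	/* Masking keeps the subtraction correct when a 32-bit counter wraps. */
	return (now - prev) & max_period(is_64bit);
}

int main(void)
{
	/* A 32-bit counter that wrapped from 0xfffffff0 to 0x10 advanced by 0x20. */
	printf("0x%llx\n", (unsigned long long)delta(0xfffffff0ULL, 0x10ULL, 0));
	return 0;
}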
@ -160,7 +160,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
|
||||
static int armpmu_request_irqs(struct arm_pmu *armpmu)
|
||||
{
|
||||
struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
|
||||
int cpu, err;
|
||||
int cpu, err = 0;
|
||||
|
||||
for_each_cpu(cpu, &armpmu->supported_cpus) {
|
||||
int irq = per_cpu(hw_events->irq, cpu);
|
||||
|
@ -350,19 +350,21 @@ void hisi_uncore_pmu_disable(struct pmu *pmu)
|
||||
|
||||
/*
|
||||
* Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
|
||||
* If multi-threading is supported, SCCL_ID is in MPIDR[aff3] and CCL_ID
|
||||
* is in MPIDR[aff2]; if not, SCCL_ID is in MPIDR[aff2] and CCL_ID is
|
||||
* in MPIDR[aff1]. If this changes in future, this shall be updated.
|
||||
* If multi-threading is supported, CCL_ID is the low 3-bits in MPIDR[Aff2]
|
||||
* and SCCL_ID is the upper 5-bits of Aff2 field; if not, SCCL_ID
|
||||
* is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1].
|
||||
*/
|
||||
static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
|
||||
{
|
||||
u64 mpidr = read_cpuid_mpidr();
|
||||
|
||||
if (mpidr & MPIDR_MT_BITMASK) {
|
||||
int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
|
||||
|
||||
if (sccl_id)
|
||||
*sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3);
|
||||
*sccl_id = aff2 >> 3;
|
||||
if (ccl_id)
|
||||
*ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
|
||||
*ccl_id = aff2 & 0x7;
|
||||
} else {
|
||||
if (sccl_id)
|
||||
*sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
|
||||
|
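Aside (editor's sketch, not from the diff): with MT set, Aff2 is split so that its upper bits carry SCCL_ID and its low three bits carry CCL_ID, as the updated comment above describes. A standalone illustration of that bit slicing, with local macro definitions standing in for the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's MPIDR accessors (illustrative only). */
#define MPIDR_LEVEL_SHIFT(l)	((l) == 3 ? 32 : 8 * (l))
#define MPIDR_AFF(mpidr, l)	(((mpidr) >> MPIDR_LEVEL_SHIFT(l)) & 0xff)
#define MPIDR_MT_BIT		(1ULL << 24)

int main(void)
{
	uint64_t mpidr = MPIDR_MT_BIT | (0x2bULL << 16);	/* Aff2 = 0x2b */
	unsigned int aff2 = MPIDR_AFF(mpidr, 2);

	/* With MT: SCCL is the upper bits of Aff2, CCL the low 3 bits. */
	printf("sccl=%u ccl=%u\n", aff2 >> 3, aff2 & 0x7);	/* prints sccl=5 ccl=3 */
	return 0;
}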
fs/statfs.c (14 changed lines)
@@ -335,7 +335,7 @@ static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstat
|
||||
return 0;
|
||||
}
|
||||
|
||||
COMPAT_SYSCALL_DEFINE3(statfs64, const char __user *, pathname, compat_size_t, sz, struct compat_statfs64 __user *, buf)
|
||||
int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz, struct compat_statfs64 __user * buf)
|
||||
{
|
||||
struct kstatfs tmp;
|
||||
int error;
|
||||
@ -349,7 +349,12 @@ COMPAT_SYSCALL_DEFINE3(statfs64, const char __user *, pathname, compat_size_t, s
|
||||
return error;
|
||||
}
|
||||
|
||||
COMPAT_SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, compat_size_t, sz, struct compat_statfs64 __user *, buf)
|
||||
COMPAT_SYSCALL_DEFINE3(statfs64, const char __user *, pathname, compat_size_t, sz, struct compat_statfs64 __user *, buf)
|
||||
{
|
||||
return kcompat_sys_statfs64(pathname, sz, buf);
|
||||
}
|
||||
|
||||
int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct compat_statfs64 __user * buf)
|
||||
{
|
||||
struct kstatfs tmp;
|
||||
int error;
|
||||
@ -363,6 +368,11 @@ COMPAT_SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, compat_size_t, sz, struct co
|
||||
return error;
|
||||
}
|
||||
|
||||
COMPAT_SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, compat_size_t, sz, struct compat_statfs64 __user *, buf)
|
||||
{
|
||||
return kcompat_sys_fstatfs64(fd, sz, buf);
|
||||
}
|
||||
|
||||
/*
|
||||
* This is a copy of sys_ustat, just dealing with a structure layout.
|
||||
* Given how simple this syscall is that apporach is more maintainable
|
||||
|
@ -1019,6 +1019,17 @@ static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
|
||||
return ctv;
|
||||
}
|
||||
|
||||
/*
|
||||
* Kernel code should not call compat syscalls (i.e., compat_sys_xyzyyz())
|
||||
* directly. Instead, use one of the functions which work equivalently, such
|
||||
* as the kcompat_sys_xyzyyz() functions prototyped below.
|
||||
*/
|
||||
|
||||
int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz,
|
||||
struct compat_statfs64 __user * buf);
|
||||
int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
|
||||
struct compat_statfs64 __user * buf);
|
||||
|
||||
#else /* !CONFIG_COMPAT */
|
||||
|
||||
#define is_compat_task() (0)
|
||||
|
@ -25,6 +25,12 @@
|
||||
*/
|
||||
#define ARMPMU_MAX_HWEVENTS 32
|
||||
|
||||
/*
|
||||
* ARM PMU hw_event flags
|
||||
*/
|
||||
/* Event uses a 64bit counter */
|
||||
#define ARMPMU_EVT_64BIT 1
|
||||
|
||||
#define HW_OP_UNSUPPORTED 0xFFFF
|
||||
#define C(_x) PERF_COUNT_HW_CACHE_##_x
|
||||
#define CACHE_OP_UNSUPPORTED 0xFFFF
|
||||
@ -87,14 +93,13 @@ struct arm_pmu {
|
||||
struct perf_event *event);
|
||||
int (*set_event_filter)(struct hw_perf_event *evt,
|
||||
struct perf_event_attr *attr);
|
||||
u32 (*read_counter)(struct perf_event *event);
|
||||
void (*write_counter)(struct perf_event *event, u32 val);
|
||||
u64 (*read_counter)(struct perf_event *event);
|
||||
void (*write_counter)(struct perf_event *event, u64 val);
|
||||
void (*start)(struct arm_pmu *);
|
||||
void (*stop)(struct arm_pmu *);
|
||||
void (*reset)(void *);
|
||||
int (*map_event)(struct perf_event *event);
|
||||
int num_events;
|
||||
u64 max_period;
|
||||
bool secure_access; /* 32-bit ARM only */
|
||||
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
|
||||
DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
|
||||
|
@@ -81,6 +81,7 @@ union bpf_attr;
#include <linux/unistd.h>
#include <linux/quota.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <trace/syscall.h>

#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
@@ -1282,4 +1283,14 @@ static inline long ksys_truncate(const char __user *pathname, loff_t length)
	return do_sys_truncate(pathname, length);
}

static inline unsigned int ksys_personality(unsigned int personality)
{
	unsigned int old = current->personality;

	if (personality != 0xffffffff)
		set_personality(personality);

	return old;
}

#endif
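Aside (editor's illustration, not part of the diff): ksys_personality() above follows a query-or-set pattern, where the all-ones value only reads back the current personality. A userspace-flavoured sketch with illustrative names:

#include <stdint.h>
#include <stdio.h>

static uint32_t current_personality;	/* stands in for current->personality */

static uint32_t query_or_set(uint32_t persona)
{
	uint32_t old = current_personality;

	if (persona != 0xffffffff)	/* 0xffffffff means "just query" */
		current_personality = persona;

	return old;
}

int main(void)
{
	query_or_set(8);				/* set some personality value */
	printf("%u\n", query_or_set(0xffffffff));	/* prints 8, value unchanged */
	return 0;
}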
@@ -734,9 +734,11 @@ __SYSCALL(__NR_pkey_free, sys_pkey_free)
__SYSCALL(__NR_statx, sys_statx)
#define __NR_io_pgetevents 292
__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
#define __NR_rseq 293
__SYSCALL(__NR_rseq, sys_rseq)

#undef __NR_syscalls
#define __NR_syscalls 293
#define __NR_syscalls 294

/*
 * 32 bit systems traditionally used different
@ -5246,8 +5246,8 @@ void perf_event_update_userpage(struct perf_event *event)
|
||||
|
||||
userpg = rb->user_page;
|
||||
/*
|
||||
* Disable preemption so as to not let the corresponding user-space
|
||||
* spin too long if we get preempted.
|
||||
* Disable preemption to guarantee consistent time stamps are stored to
|
||||
* the user page.
|
||||
*/
|
||||
preempt_disable();
|
||||
++userpg->lock;
|
||||
|
@ -163,8 +163,8 @@ cc-ldoption = $(call try-run,\
|
||||
$(CC) $(1) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
|
||||
|
||||
# ld-option
|
||||
# Usage: LDFLAGS += $(call ld-option, -X)
|
||||
ld-option = $(call try-run, $(LD) $(LDFLAGS) $(1) -v,$(1),$(2))
|
||||
# Usage: LDFLAGS += $(call ld-option, -X, -Y)
|
||||
ld-option = $(call try-run, $(LD) $(LDFLAGS) $(1) -v,$(1),$(2),$(3))
|
||||
|
||||
# ar-option
|
||||
# Usage: KBUILD_ARFLAGS := $(call ar-option,D)
|
||||
|
@ -138,6 +138,26 @@ unsigned int yield_mod_cnt, nr_abort;
|
||||
"bne 222b\n\t" \
|
||||
"333:\n\t"
|
||||
|
||||
#elif defined(__AARCH64EL__)
|
||||
|
||||
#define RSEQ_INJECT_INPUT \
|
||||
, [loop_cnt_1] "Qo" (loop_cnt[1]) \
|
||||
, [loop_cnt_2] "Qo" (loop_cnt[2]) \
|
||||
, [loop_cnt_3] "Qo" (loop_cnt[3]) \
|
||||
, [loop_cnt_4] "Qo" (loop_cnt[4]) \
|
||||
, [loop_cnt_5] "Qo" (loop_cnt[5]) \
|
||||
, [loop_cnt_6] "Qo" (loop_cnt[6])
|
||||
|
||||
#define INJECT_ASM_REG RSEQ_ASM_TMP_REG32
|
||||
|
||||
#define RSEQ_INJECT_ASM(n) \
|
||||
" ldr " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n" \
|
||||
" cbz " INJECT_ASM_REG ", 333f\n" \
|
||||
"222:\n" \
|
||||
" sub " INJECT_ASM_REG ", " INJECT_ASM_REG ", #1\n" \
|
||||
" cbnz " INJECT_ASM_REG ", 222b\n" \
|
||||
"333:\n"
|
||||
|
||||
#elif __PPC__
|
||||
|
||||
#define RSEQ_INJECT_INPUT \
|
||||
|
tools/testing/selftests/rseq/rseq-arm64.h (new file, 594 lines)
@@ -0,0 +1,594 @@
|
||||
/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
|
||||
/*
|
||||
* rseq-arm64.h
|
||||
*
|
||||
* (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
|
||||
* (C) Copyright 2018 - Will Deacon <will.deacon@arm.com>
|
||||
*/
|
||||
|
||||
#define RSEQ_SIG 0xd428bc00 /* BRK #0x45E0 */
|
||||
|
||||
#define rseq_smp_mb() __asm__ __volatile__ ("dmb ish" ::: "memory")
|
||||
#define rseq_smp_rmb() __asm__ __volatile__ ("dmb ishld" ::: "memory")
|
||||
#define rseq_smp_wmb() __asm__ __volatile__ ("dmb ishst" ::: "memory")
|
||||
|
||||
#define rseq_smp_load_acquire(p) \
|
||||
__extension__ ({ \
|
||||
__typeof(*p) ____p1; \
|
||||
switch (sizeof(*p)) { \
|
||||
case 1: \
|
||||
asm volatile ("ldarb %w0, %1" \
|
||||
: "=r" (*(__u8 *)p) \
|
||||
: "Q" (*p) : "memory"); \
|
||||
break; \
|
||||
case 2: \
|
||||
asm volatile ("ldarh %w0, %1" \
|
||||
: "=r" (*(__u16 *)p) \
|
||||
: "Q" (*p) : "memory"); \
|
||||
break; \
|
||||
case 4: \
|
||||
asm volatile ("ldar %w0, %1" \
|
||||
: "=r" (*(__u32 *)p) \
|
||||
: "Q" (*p) : "memory"); \
|
||||
break; \
|
||||
case 8: \
|
||||
asm volatile ("ldar %0, %1" \
|
||||
: "=r" (*(__u64 *)p) \
|
||||
: "Q" (*p) : "memory"); \
|
||||
break; \
|
||||
} \
|
||||
____p1; \
|
||||
})
|
||||
|
||||
#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb()
|
||||
|
||||
#define rseq_smp_store_release(p, v) \
|
||||
do { \
|
||||
switch (sizeof(*p)) { \
|
||||
case 1: \
|
||||
asm volatile ("stlrb %w1, %0" \
|
||||
: "=Q" (*p) \
|
||||
: "r" ((__u8)v) \
|
||||
: "memory"); \
|
||||
break; \
|
||||
case 2: \
|
||||
asm volatile ("stlrh %w1, %0" \
|
||||
: "=Q" (*p) \
|
||||
: "r" ((__u16)v) \
|
||||
: "memory"); \
|
||||
break; \
|
||||
case 4: \
|
||||
asm volatile ("stlr %w1, %0" \
|
||||
: "=Q" (*p) \
|
||||
: "r" ((__u32)v) \
|
||||
: "memory"); \
|
||||
break; \
|
||||
case 8: \
|
||||
asm volatile ("stlr %1, %0" \
|
||||
: "=Q" (*p) \
|
||||
: "r" ((__u64)v) \
|
||||
: "memory"); \
|
||||
break; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
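Aside (editor's illustration, not part of the selftest header): the ldar/stlr based macros above implement acquire loads and release stores. For readers more used to C11 atomics, a rough standalone equivalent of the same pairing:

/* Not part of the selftest: a plain-C11 illustration of the
 * acquire/release semantics the ldar/stlr asm macros above provide. */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int data;
static _Atomic int ready;

static void producer(void)
{
	atomic_store_explicit(&data, 42, memory_order_relaxed);
	atomic_store_explicit(&ready, 1, memory_order_release);		/* like stlr */
}

static void consumer(void)
{
	if (atomic_load_explicit(&ready, memory_order_acquire))		/* like ldar */
		printf("%d\n", atomic_load_explicit(&data, memory_order_relaxed));
}

int main(void)
{
	producer();
	consumer();
	return 0;
}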
#ifdef RSEQ_SKIP_FASTPATH
#include "rseq-skip.h"
#else /* !RSEQ_SKIP_FASTPATH */

#define RSEQ_ASM_TMP_REG32 "w15"
#define RSEQ_ASM_TMP_REG "x15"
#define RSEQ_ASM_TMP_REG_2 "x14"

#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \
				post_commit_offset, abort_ip) \
	" .pushsection __rseq_table, \"aw\"\n" \
	" .balign 32\n" \
	__rseq_str(label) ":\n" \
	" .long " __rseq_str(version) ", " __rseq_str(flags) "\n" \
	" .quad " __rseq_str(start_ip) ", " \
		__rseq_str(post_commit_offset) ", " \
		__rseq_str(abort_ip) "\n" \
	" .popsection\n"

#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
	__RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
				(post_commit_ip - start_ip), abort_ip)

#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
	RSEQ_INJECT_ASM(1) \
	" adrp " RSEQ_ASM_TMP_REG ", " __rseq_str(cs_label) "\n" \
	" add " RSEQ_ASM_TMP_REG ", " RSEQ_ASM_TMP_REG \
		", :lo12:" __rseq_str(cs_label) "\n" \
	" str " RSEQ_ASM_TMP_REG ", %[" __rseq_str(rseq_cs) "]\n" \
	__rseq_str(label) ":\n"

#define RSEQ_ASM_DEFINE_ABORT(label, abort_label) \
	" b 222f\n" \
	" .inst " __rseq_str(RSEQ_SIG) "\n" \
	__rseq_str(label) ":\n" \
	" b %l[" __rseq_str(abort_label) "]\n" \
	"222:\n"

#define RSEQ_ASM_OP_STORE(value, var) \
	" str %[" __rseq_str(value) "], %[" __rseq_str(var) "]\n"

#define RSEQ_ASM_OP_STORE_RELEASE(value, var) \
	" stlr %[" __rseq_str(value) "], %[" __rseq_str(var) "]\n"

#define RSEQ_ASM_OP_FINAL_STORE(value, var, post_commit_label) \
	RSEQ_ASM_OP_STORE(value, var) \
	__rseq_str(post_commit_label) ":\n"

#define RSEQ_ASM_OP_FINAL_STORE_RELEASE(value, var, post_commit_label) \
	RSEQ_ASM_OP_STORE_RELEASE(value, var) \
	__rseq_str(post_commit_label) ":\n"

#define RSEQ_ASM_OP_CMPEQ(var, expect, label) \
	" ldr " RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n" \
	" sub " RSEQ_ASM_TMP_REG ", " RSEQ_ASM_TMP_REG \
		", %[" __rseq_str(expect) "]\n" \
	" cbnz " RSEQ_ASM_TMP_REG ", " __rseq_str(label) "\n"

#define RSEQ_ASM_OP_CMPEQ32(var, expect, label) \
	" ldr " RSEQ_ASM_TMP_REG32 ", %[" __rseq_str(var) "]\n" \
	" sub " RSEQ_ASM_TMP_REG32 ", " RSEQ_ASM_TMP_REG32 \
		", %w[" __rseq_str(expect) "]\n" \
	" cbnz " RSEQ_ASM_TMP_REG32 ", " __rseq_str(label) "\n"

#define RSEQ_ASM_OP_CMPNE(var, expect, label) \
	" ldr " RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n" \
	" sub " RSEQ_ASM_TMP_REG ", " RSEQ_ASM_TMP_REG \
		", %[" __rseq_str(expect) "]\n" \
	" cbz " RSEQ_ASM_TMP_REG ", " __rseq_str(label) "\n"

#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
	RSEQ_INJECT_ASM(2) \
	RSEQ_ASM_OP_CMPEQ32(current_cpu_id, cpu_id, label)

#define RSEQ_ASM_OP_R_LOAD(var) \
	" ldr " RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n"

#define RSEQ_ASM_OP_R_STORE(var) \
	" str " RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n"

#define RSEQ_ASM_OP_R_LOAD_OFF(offset) \
	" ldr " RSEQ_ASM_TMP_REG ", [" RSEQ_ASM_TMP_REG \
		", %[" __rseq_str(offset) "]]\n"

#define RSEQ_ASM_OP_R_ADD(count) \
	" add " RSEQ_ASM_TMP_REG ", " RSEQ_ASM_TMP_REG \
		", %[" __rseq_str(count) "]\n"

#define RSEQ_ASM_OP_R_FINAL_STORE(var, post_commit_label) \
	" str " RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n" \
	__rseq_str(post_commit_label) ":\n"

#define RSEQ_ASM_OP_R_BAD_MEMCPY(dst, src, len) \
	" cbz %[" __rseq_str(len) "], 333f\n" \
	" mov " RSEQ_ASM_TMP_REG_2 ", %[" __rseq_str(len) "]\n" \
	"222: sub " RSEQ_ASM_TMP_REG_2 ", " RSEQ_ASM_TMP_REG_2 ", #1\n" \
	" ldrb " RSEQ_ASM_TMP_REG32 ", [%[" __rseq_str(src) "]" \
		", " RSEQ_ASM_TMP_REG_2 "]\n" \
	" strb " RSEQ_ASM_TMP_REG32 ", [%[" __rseq_str(dst) "]" \
		", " RSEQ_ASM_TMP_REG_2 "]\n" \
	" cbnz " RSEQ_ASM_TMP_REG_2 ", 222b\n" \
	"333:\n"

static inline __attribute__((always_inline))
int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
{
	RSEQ_INJECT_C(9)

	__asm__ __volatile__ goto (
		RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
		RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
		RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
		RSEQ_INJECT_ASM(3)
		RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
		RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
		RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
		RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
#endif
		RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
		RSEQ_INJECT_ASM(5)
		RSEQ_ASM_DEFINE_ABORT(4, abort)
		: /* gcc asm goto does not allow outputs */
		: [cpu_id] "r" (cpu),
		  [current_cpu_id] "Qo" (__rseq_abi.cpu_id),
		  [rseq_cs] "m" (__rseq_abi.rseq_cs),
		  [v] "Qo" (*v),
		  [expect] "r" (expect),
		  [newv] "r" (newv)
		  RSEQ_INJECT_INPUT
		: "memory", RSEQ_ASM_TMP_REG
		: abort, cmpfail
#ifdef RSEQ_COMPARE_TWICE
		  , error1, error2
#endif
	);

	return 0;
abort:
	RSEQ_INJECT_FAILED
	return -1;
cmpfail:
	return 1;
#ifdef RSEQ_COMPARE_TWICE
error1:
	rseq_bug("cpu_id comparison failed");
error2:
	rseq_bug("expected value comparison failed");
#endif
}

static inline __attribute__((always_inline))
int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
			       off_t voffp, intptr_t *load, int cpu)
{
	RSEQ_INJECT_C(9)

	__asm__ __volatile__ goto (
		RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
		RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
		RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
		RSEQ_INJECT_ASM(3)
		RSEQ_ASM_OP_CMPNE(v, expectnot, %l[cmpfail])
		RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
		RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
		RSEQ_ASM_OP_CMPNE(v, expectnot, %l[error2])
#endif
		RSEQ_ASM_OP_R_LOAD(v)
		RSEQ_ASM_OP_R_STORE(load)
		RSEQ_ASM_OP_R_LOAD_OFF(voffp)
		RSEQ_ASM_OP_R_FINAL_STORE(v, 3)
		RSEQ_INJECT_ASM(5)
		RSEQ_ASM_DEFINE_ABORT(4, abort)
		: /* gcc asm goto does not allow outputs */
		: [cpu_id] "r" (cpu),
		  [current_cpu_id] "Qo" (__rseq_abi.cpu_id),
		  [rseq_cs] "m" (__rseq_abi.rseq_cs),
		  [v] "Qo" (*v),
		  [expectnot] "r" (expectnot),
		  [load] "Qo" (*load),
		  [voffp] "r" (voffp)
		  RSEQ_INJECT_INPUT
		: "memory", RSEQ_ASM_TMP_REG
		: abort, cmpfail
#ifdef RSEQ_COMPARE_TWICE
		  , error1, error2
#endif
	);
	return 0;
abort:
	RSEQ_INJECT_FAILED
	return -1;
cmpfail:
	return 1;
#ifdef RSEQ_COMPARE_TWICE
error1:
	rseq_bug("cpu_id comparison failed");
error2:
	rseq_bug("expected value comparison failed");
#endif
}

static inline __attribute__((always_inline))
int rseq_addv(intptr_t *v, intptr_t count, int cpu)
{
	RSEQ_INJECT_C(9)

	__asm__ __volatile__ goto (
		RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
		RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
		RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
		RSEQ_INJECT_ASM(3)
#ifdef RSEQ_COMPARE_TWICE
		RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
#endif
		RSEQ_ASM_OP_R_LOAD(v)
		RSEQ_ASM_OP_R_ADD(count)
		RSEQ_ASM_OP_R_FINAL_STORE(v, 3)
		RSEQ_INJECT_ASM(4)
		RSEQ_ASM_DEFINE_ABORT(4, abort)
		: /* gcc asm goto does not allow outputs */
		: [cpu_id] "r" (cpu),
		  [current_cpu_id] "Qo" (__rseq_abi.cpu_id),
		  [rseq_cs] "m" (__rseq_abi.rseq_cs),
		  [v] "Qo" (*v),
		  [count] "r" (count)
		  RSEQ_INJECT_INPUT
		: "memory", RSEQ_ASM_TMP_REG
		: abort
#ifdef RSEQ_COMPARE_TWICE
		  , error1
#endif
	);
	return 0;
abort:
	RSEQ_INJECT_FAILED
	return -1;
#ifdef RSEQ_COMPARE_TWICE
error1:
	rseq_bug("cpu_id comparison failed");
#endif
}

static inline __attribute__((always_inline))
int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
				 intptr_t *v2, intptr_t newv2,
				 intptr_t newv, int cpu)
{
	RSEQ_INJECT_C(9)

	__asm__ __volatile__ goto (
		RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
		RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
		RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
		RSEQ_INJECT_ASM(3)
		RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
		RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
		RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
		RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
#endif
		RSEQ_ASM_OP_STORE(newv2, v2)
		RSEQ_INJECT_ASM(5)
		RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
		RSEQ_INJECT_ASM(6)
		RSEQ_ASM_DEFINE_ABORT(4, abort)
		: /* gcc asm goto does not allow outputs */
		: [cpu_id] "r" (cpu),
		  [current_cpu_id] "Qo" (__rseq_abi.cpu_id),
		  [rseq_cs] "m" (__rseq_abi.rseq_cs),
		  [expect] "r" (expect),
		  [v] "Qo" (*v),
		  [newv] "r" (newv),
		  [v2] "Qo" (*v2),
		  [newv2] "r" (newv2)
		  RSEQ_INJECT_INPUT
		: "memory", RSEQ_ASM_TMP_REG
		: abort, cmpfail
#ifdef RSEQ_COMPARE_TWICE
		  , error1, error2
#endif
	);

	return 0;
abort:
	RSEQ_INJECT_FAILED
	return -1;
cmpfail:
	return 1;
#ifdef RSEQ_COMPARE_TWICE
error1:
	rseq_bug("cpu_id comparison failed");
error2:
	rseq_bug("expected value comparison failed");
#endif
}

static inline __attribute__((always_inline))
int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
					 intptr_t *v2, intptr_t newv2,
					 intptr_t newv, int cpu)
{
	RSEQ_INJECT_C(9)

	__asm__ __volatile__ goto (
		RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
		RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
		RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
		RSEQ_INJECT_ASM(3)
		RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
		RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
		RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
		RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
#endif
		RSEQ_ASM_OP_STORE(newv2, v2)
		RSEQ_INJECT_ASM(5)
		RSEQ_ASM_OP_FINAL_STORE_RELEASE(newv, v, 3)
		RSEQ_INJECT_ASM(6)
		RSEQ_ASM_DEFINE_ABORT(4, abort)
		: /* gcc asm goto does not allow outputs */
		: [cpu_id] "r" (cpu),
		  [current_cpu_id] "Qo" (__rseq_abi.cpu_id),
		  [rseq_cs] "m" (__rseq_abi.rseq_cs),
		  [expect] "r" (expect),
		  [v] "Qo" (*v),
		  [newv] "r" (newv),
		  [v2] "Qo" (*v2),
		  [newv2] "r" (newv2)
		  RSEQ_INJECT_INPUT
		: "memory", RSEQ_ASM_TMP_REG
		: abort, cmpfail
#ifdef RSEQ_COMPARE_TWICE
		  , error1, error2
#endif
	);

	return 0;
abort:
	RSEQ_INJECT_FAILED
	return -1;
cmpfail:
	return 1;
#ifdef RSEQ_COMPARE_TWICE
error1:
	rseq_bug("cpu_id comparison failed");
error2:
	rseq_bug("expected value comparison failed");
#endif
}

static inline __attribute__((always_inline))
int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
			      intptr_t *v2, intptr_t expect2,
			      intptr_t newv, int cpu)
{
	RSEQ_INJECT_C(9)

	__asm__ __volatile__ goto (
		RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
		RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
		RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
		RSEQ_INJECT_ASM(3)
		RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
		RSEQ_INJECT_ASM(4)
		RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[cmpfail])
		RSEQ_INJECT_ASM(5)
#ifdef RSEQ_COMPARE_TWICE
		RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
		RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
		RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[error3])
#endif
		RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
		RSEQ_INJECT_ASM(6)
		RSEQ_ASM_DEFINE_ABORT(4, abort)
		: /* gcc asm goto does not allow outputs */
		: [cpu_id] "r" (cpu),
		  [current_cpu_id] "Qo" (__rseq_abi.cpu_id),
		  [rseq_cs] "m" (__rseq_abi.rseq_cs),
		  [v] "Qo" (*v),
		  [expect] "r" (expect),
		  [v2] "Qo" (*v2),
		  [expect2] "r" (expect2),
		  [newv] "r" (newv)
		  RSEQ_INJECT_INPUT
		: "memory", RSEQ_ASM_TMP_REG
		: abort, cmpfail
#ifdef RSEQ_COMPARE_TWICE
		  , error1, error2, error3
#endif
	);

	return 0;
abort:
	RSEQ_INJECT_FAILED
	return -1;
cmpfail:
	return 1;
#ifdef RSEQ_COMPARE_TWICE
error1:
	rseq_bug("cpu_id comparison failed");
error2:
	rseq_bug("expected value comparison failed");
error3:
	rseq_bug("2nd expected value comparison failed");
#endif
}

static inline __attribute__((always_inline))
int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
				 void *dst, void *src, size_t len,
				 intptr_t newv, int cpu)
{
	RSEQ_INJECT_C(9)

	__asm__ __volatile__ goto (
		RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
		RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
		RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
		RSEQ_INJECT_ASM(3)
		RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
		RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
		RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
		RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
#endif
		RSEQ_ASM_OP_R_BAD_MEMCPY(dst, src, len)
		RSEQ_INJECT_ASM(5)
		RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
		RSEQ_INJECT_ASM(6)
		RSEQ_ASM_DEFINE_ABORT(4, abort)
		: /* gcc asm goto does not allow outputs */
		: [cpu_id] "r" (cpu),
		  [current_cpu_id] "Qo" (__rseq_abi.cpu_id),
		  [rseq_cs] "m" (__rseq_abi.rseq_cs),
		  [expect] "r" (expect),
		  [v] "Qo" (*v),
		  [newv] "r" (newv),
		  [dst] "r" (dst),
		  [src] "r" (src),
		  [len] "r" (len)
		  RSEQ_INJECT_INPUT
		: "memory", RSEQ_ASM_TMP_REG, RSEQ_ASM_TMP_REG_2
		: abort, cmpfail
#ifdef RSEQ_COMPARE_TWICE
		  , error1, error2
#endif
	);

	return 0;
abort:
	RSEQ_INJECT_FAILED
	return -1;
cmpfail:
	return 1;
#ifdef RSEQ_COMPARE_TWICE
error1:
	rseq_bug("cpu_id comparison failed");
error2:
	rseq_bug("expected value comparison failed");
#endif
}

static inline __attribute__((always_inline))
int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
					 void *dst, void *src, size_t len,
					 intptr_t newv, int cpu)
{
	RSEQ_INJECT_C(9)

	__asm__ __volatile__ goto (
		RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
		RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
		RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
		RSEQ_INJECT_ASM(3)
		RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
		RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
		RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
		RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
#endif
		RSEQ_ASM_OP_R_BAD_MEMCPY(dst, src, len)
		RSEQ_INJECT_ASM(5)
		RSEQ_ASM_OP_FINAL_STORE_RELEASE(newv, v, 3)
		RSEQ_INJECT_ASM(6)
		RSEQ_ASM_DEFINE_ABORT(4, abort)
		: /* gcc asm goto does not allow outputs */
		: [cpu_id] "r" (cpu),
		  [current_cpu_id] "Qo" (__rseq_abi.cpu_id),
		  [rseq_cs] "m" (__rseq_abi.rseq_cs),
		  [expect] "r" (expect),
		  [v] "Qo" (*v),
		  [newv] "r" (newv),
		  [dst] "r" (dst),
		  [src] "r" (src),
		  [len] "r" (len)
		  RSEQ_INJECT_INPUT
		: "memory", RSEQ_ASM_TMP_REG, RSEQ_ASM_TMP_REG_2
		: abort, cmpfail
#ifdef RSEQ_COMPARE_TWICE
		  , error1, error2
#endif
	);

	return 0;
abort:
	RSEQ_INJECT_FAILED
	return -1;
cmpfail:
	return 1;
#ifdef RSEQ_COMPARE_TWICE
error1:
	rseq_bug("cpu_id comparison failed");
error2:
	rseq_bug("expected value comparison failed");
#endif
}

#endif /* !RSEQ_SKIP_FASTPATH */
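
Editor's illustrative sketch, not part of this commit: the fast paths above are consumed from user space by the rseq selftests. Assuming the selftests' rseq.h API (rseq_register_current_thread(), rseq_unregister_current_thread(), rseq_cpu_start()) together with rseq_cmpeqv_storev() defined above, a per-CPU counter increment might be driven like this:

#define _GNU_SOURCE
#include <sched.h>	/* CPU_SETSIZE */
#include <stdio.h>
#include "rseq.h"	/* selftests' rseq.h; pulls in rseq-arm64.h on arm64 */

static intptr_t percpu_count[CPU_SETSIZE];

static void percpu_inc(void)
{
	int ret;

	do {
		int cpu = rseq_cpu_start();	/* CPU we appear to run on */
		intptr_t old = percpu_count[cpu];

		/* 0: committed, 1: value changed under us, -1: aborted/migrated */
		ret = rseq_cmpeqv_storev(&percpu_count[cpu], old, old + 1, cpu);
	} while (ret);
}

int main(void)
{
	if (rseq_register_current_thread())
		return 1;
	percpu_inc();
	printf("count on cpu %d: %ld\n", rseq_cpu_start(),
	       (long)percpu_count[rseq_cpu_start()]);
	return rseq_unregister_current_thread() ? 1 : 0;
}

A nonzero return simply means the whole sequence must be retried, which is what lets the per-CPU data be updated without locks.
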
@@ -71,6 +71,8 @@ extern __thread volatile struct rseq __rseq_abi;
 #include <rseq-x86.h>
 #elif defined(__ARMEL__)
 #include <rseq-arm.h>
+#elif defined (__AARCH64EL__)
+#include <rseq-arm64.h>
 #elif defined(__PPC__)
 #include <rseq-ppc.h>
 #elif defined(__mips__)

@@ -108,9 +108,9 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
 {
 	unsigned long itbits, cond;
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	bool is_arm = !(cpsr & COMPAT_PSR_T_BIT);
+	bool is_arm = !(cpsr & PSR_AA32_T_BIT);
 
-	if (is_arm || !(cpsr & COMPAT_PSR_IT_MASK))
+	if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
 		return;
 
 	cond = (cpsr & 0xe000) >> 13;
@@ -123,7 +123,7 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
 	else
 		itbits = (itbits << 1) & 0x1f;
 
-	cpsr &= ~COMPAT_PSR_IT_MASK;
+	cpsr &= ~PSR_AA32_IT_MASK;
 	cpsr |= cond << 13;
 	cpsr |= (itbits & 0x1c) << (10 - 2);
 	cpsr |= (itbits & 0x3) << 25;
@@ -138,7 +138,7 @@ void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
 {
 	bool is_thumb;
 
-	is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT);
+	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
 	if (is_thumb && !is_wide_instr)
 		*vcpu_pc(vcpu) += 2;
 	else
@@ -164,16 +164,16 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 {
 	unsigned long cpsr;
 	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
-	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
+	bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT);
 	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
 	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
 
-	cpsr = mode | COMPAT_PSR_I_BIT;
+	cpsr = mode | PSR_AA32_I_BIT;
 
 	if (sctlr & (1 << 30))
-		cpsr |= COMPAT_PSR_T_BIT;
+		cpsr |= PSR_AA32_T_BIT;
 	if (sctlr & (1 << 25))
-		cpsr |= COMPAT_PSR_E_BIT;
+		cpsr |= PSR_AA32_E_BIT;
 
 	*vcpu_cpsr(vcpu) = cpsr;
 
@@ -192,7 +192,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 
 void kvm_inject_undef32(struct kvm_vcpu *vcpu)
 {
-	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
+	prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
 }
 
 /*
@@ -216,7 +216,7 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
 		fsr = &vcpu_cp15(vcpu, c5_DFSR);
 	}
 
-	prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
+	prepare_fault32(vcpu, PSR_AA32_MODE_ABT | PSR_AA32_A_BIT, vect_offset);
 
 	*far = addr;
 