Merge branches 'fixes', 'misc' and 'spectre' into for-next

commit 3e98d24098
@@ -45,35 +45,42 @@ config DEBUG_WX
 
 	  If in doubt, say "Y".
 
-# RMK wants arm kernels compiled with frame pointers or stack unwinding.
-# If you know what you are doing and are willing to live without stack
-# traces, you can get a slightly smaller kernel by setting this option to
-# n, but then RMK will have to kill you ;).
-config FRAME_POINTER
-	bool
-	depends on !THUMB2_KERNEL
-	default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER
+choice
+	prompt "Choose kernel unwinder"
+	default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER
+	default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER
 	help
-	  If you say N here, the resulting kernel will be slightly smaller and
-	  faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,
-	  when a problem occurs with the kernel, the information that is
-	  reported is severely limited.
+	  This determines which method will be used for unwinding kernel stack
+	  traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
+	  livepatch, lockdep, and more.
 
-config ARM_UNWIND
-	bool "Enable stack unwinding support (EXPERIMENTAL)"
+config UNWINDER_FRAME_POINTER
+	bool "Frame pointer unwinder"
+	depends on !THUMB2_KERNEL && !CC_IS_CLANG
+	select ARCH_WANT_FRAME_POINTERS
+	select FRAME_POINTER
+	help
+	  This option enables the frame pointer unwinder for unwinding
+	  kernel stack traces.
+
+config UNWINDER_ARM
+	bool "ARM EABI stack unwinder"
 	depends on AEABI
-	default y
+	select ARM_UNWIND
 	help
 	  This option enables stack unwinding support in the kernel
 	  using the information automatically generated by the
 	  compiler. The resulting kernel image is slightly bigger but
 	  the performance is not affected. Currently, this feature
-	  only works with EABI compilers. If unsure say Y.
+	  only works with EABI compilers.
 
-config OLD_MCOUNT
+endchoice
+
+config ARM_UNWIND
 	bool
-	depends on FUNCTION_TRACER && FRAME_POINTER
-	default y
+
+config FRAME_POINTER
+	bool
 
 config DEBUG_USER
 	bool "Verbose user fault messages"
@@ -74,7 +74,7 @@ endif
 arch-$(CONFIG_CPU_32v5)		=-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t)
 arch-$(CONFIG_CPU_32v4T)	=-D__LINUX_ARM_ARCH__=4 -march=armv4t
 arch-$(CONFIG_CPU_32v4)		=-D__LINUX_ARM_ARCH__=4 -march=armv4
-arch-$(CONFIG_CPU_32v3)		=-D__LINUX_ARM_ARCH__=3 -march=armv3
+arch-$(CONFIG_CPU_32v3)		=-D__LINUX_ARM_ARCH__=3 -march=armv3m
 
 # Evaluate arch cc-option calls now
 arch-y := $(arch-y)
@@ -264,13 +264,9 @@ platdirs := $(patsubst %,arch/arm/plat-%/,$(sort $(plat-y)))
 
 ifneq ($(CONFIG_ARCH_MULTIPLATFORM),y)
 ifneq ($(CONFIG_ARM_SINGLE_ARMV7M),y)
-ifeq ($(KBUILD_SRC),)
-KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(machdirs) $(platdirs))
-else
 KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs) $(platdirs))
-endif
 endif
 endif
 
 export TEXT_OFFSET GZFLAGS MMUEXT
@@ -114,6 +114,35 @@
 #endif
 		.endm
 
+		/*
+		 * Debug kernel copy by printing the memory addresses involved
+		 */
+		.macro dbgkc, begin, end, cbegin, cend
+#ifdef DEBUG
+		kputc	#'\n'
+		kputc	#'C'
+		kputc	#':'
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\begin, 8	/* Start of compressed kernel */
+		kputc	#'-'
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\end, 8		/* End of compressed kernel */
+		kputc	#'-'
+		kputc	#'>'
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\cbegin, 8	/* Start of kernel copy */
+		kputc	#'-'
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\cend, 8	/* End of kernel copy */
+		kputc	#'\n'
+		kputc	#'\r'
+#endif
+		.endm
+
 		.section ".start", #alloc, #execinstr
 /*
  * sort out different calling conventions
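
For orientation, the kputc/kphex sequence in the new dbgkc macro emits one line per kernel copy: the compressed-kernel range, then the range it is being copied to. A minimal C model of that output format, assuming DEBUG is defined (the function name and the addresses below are illustrative, not taken from the kernel):

	#include <stdio.h>

	/* Mirrors dbgkc's output: "C:0x<begin>-0x<end>->0x<cbegin>-0x<cend>" */
	static void dbgkc_model(unsigned long begin, unsigned long end,
				unsigned long cbegin, unsigned long cend)
	{
		printf("\nC:0x%08lx-0x%08lx->0x%08lx-0x%08lx\n\r",
		       begin, end, cbegin, cend);
	}

	int main(void)
	{
		/* hypothetical addresses, purely for illustration */
		dbgkc_model(0x00008000UL, 0x00482000UL,
			    0x04008000UL, 0x04482000UL);
		return 0;
	}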
@@ -450,6 +479,20 @@ dtb_check_done:
 		add	r6, r9, r5
 		add	r9, r9, r10
 
+#ifdef DEBUG
+		sub	r10, r6, r5
+		sub	r10, r9, r10
+		/*
+		 * We are about to copy the kernel to a new memory area.
+		 * The boundaries of the new memory area can be found in
+		 * r10 and r9, whilst r5 and r6 contain the boundaries
+		 * of the memory we are going to copy.
+		 * Calling dbgkc will help with the printing of this
+		 * information.
+		 */
+		dbgkc	r5, r6, r10, r9
+#endif
+
 1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
 		cmp	r6, r5
 		stmdb	r9!, {r0 - r3, r10 - r12, lr}
@@ -467,6 +467,17 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 #endif
 	.endm
 
+	.macro	uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
+#ifdef CONFIG_CPU_SPECTRE
+	sub	\tmp, \limit, #1
+	subs	\tmp, \tmp, \addr	@ tmp = limit - 1 - addr
+	addhs	\tmp, \tmp, #1		@ if (tmp >= 0) {
+	subhss	\tmp, \tmp, \size	@ tmp = limit - (addr + size) }
+	movlo	\addr, #0		@ if (tmp < 0) addr = NULL
+	csdb
+#endif
+	.endm
+
 	.macro	uaccess_disable, tmp, isb=1
 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
 	/*
@@ -16,9 +16,6 @@ extern void __gnu_mcount_nc(void);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 struct dyn_arch_ftrace {
-#ifdef CONFIG_OLD_MCOUNT
-	bool	old_mcount;
-#endif
 };
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
@@ -121,8 +121,8 @@ extern void vfp_flush_hwstate(struct thread_info *);
 struct user_vfp;
 struct user_vfp_exc;
 
-extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
-					   struct user_vfp_exc __user *);
+extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
+					   struct user_vfp_exc *);
 extern int vfp_restore_user_hwstate(struct user_vfp *,
 				    struct user_vfp_exc *);
 #endif
@@ -69,6 +69,14 @@ extern int __put_user_bad(void);
 static inline void set_fs(mm_segment_t fs)
 {
 	current_thread_info()->addr_limit = fs;
+
+	/*
+	 * Prevent a mispredicted conditional call to set_fs from forwarding
+	 * the wrong address limit to access_ok under speculation.
+	 */
+	dsb(nsh);
+	isb();
+
 	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
 }
 
@@ -91,6 +99,32 @@ static inline void set_fs(mm_segment_t fs)
 #define __inttype(x) \
 	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 
+/*
+ * Sanitise a uaccess pointer such that it becomes NULL if addr+size
+ * is above the current addr_limit.
+ */
+#define uaccess_mask_range_ptr(ptr, size)			\
+	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
+static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
+						    size_t size)
+{
+	void __user *safe_ptr = (void __user *)ptr;
+	unsigned long tmp;
+
+	asm volatile(
+	"	sub	%1, %3, #1\n"
+	"	subs	%1, %1, %0\n"
+	"	addhs	%1, %1, #1\n"
+	"	subhss	%1, %1, %2\n"
+	"	movlo	%0, #0\n"
+	: "+r" (safe_ptr), "=&r" (tmp)
+	: "r" (size), "r" (current_thread_info()->addr_limit)
+	: "cc");
+
+	csdb();
+	return safe_ptr;
+}
+
 /*
  * Single-value transfer routines.  They automatically use the right
  * size if we just have the right pointer type.  Note that the functions
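
The inline assembly above is deliberately branchless: the bound check is computed with conditionally executed instructions and followed by csdb(), so a mispredicted path cannot be steered past the check. A plain-C model of the arithmetic, under the assumption that limit holds the current addr_limit (the function name is made up for illustration; the real code must avoid these visible branches):

	#include <stddef.h>

	/* Pointer becomes NULL unless [addr, addr + size) lies below limit. */
	static const void *mask_range_ptr_model(const void *ptr, size_t size,
						unsigned long limit)
	{
		unsigned long addr = (unsigned long)ptr;

		if (addr > limit - 1)		/* sub/subs: addr itself too high */
			return NULL;		/* movlo */
		if (limit - addr < size)	/* addhs/subhss: size overruns limit */
			return NULL;		/* movlo */
		return ptr;
	}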
@@ -362,6 +396,14 @@ do {								\
 	__pu_err;						\
 })
 
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1.1, all accessors need to include
+ * verification of the address space.
+ */
+#define __put_user(x, ptr) put_user(x, ptr)
+
+#else
 #define __put_user(x, ptr)					\
 ({								\
 	long __pu_err = 0;					\
@@ -369,12 +411,6 @@ do {								\
 	__pu_err;						\
 })
 
-#define __put_user_error(x, ptr, err)				\
-({								\
-	__put_user_switch((x), (ptr), (err), __put_user_nocheck); \
-	(void) 0;						\
-})
-
 #define __put_user_nocheck(x, __pu_ptr, __err, __size)		\
 do {								\
 	unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
@@ -454,6 +490,7 @@ do {								\
 	: "r" (x), "i" (-EFAULT)				\
 	: "cc")
 
+#endif /* !CONFIG_CPU_SPECTRE */
 
 #ifdef CONFIG_MMU
 extern unsigned long __must_check
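
With __put_user_error() removed, callers either move to put_user(), which re-validates the destination pointer on every store, or stage the data in kernel memory and issue one bounds-checked __copy_to_user(), which is what the signal-frame and OABI hunks below do. A hedged sketch of the two replacement patterns (the struct and function here are hypothetical, not from this series):

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	struct demo_frame {		/* hypothetical user-visible layout */
		unsigned long magic;
		unsigned long size;
	};

	static int demo_fill(struct demo_frame __user *frame)
	{
		struct demo_frame kframe = {
			.magic = 0x12345678UL,	/* arbitrary demo values */
			.size  = sizeof(kframe),
		};

		/* Pattern 1: checked single-value accessor. */
		if (put_user(kframe.magic, &frame->magic))
			return -EFAULT;

		/* Pattern 2: build in kernel memory, one checked bulk copy. */
		if (__copy_to_user(frame, &kframe, sizeof(kframe)))
			return -EFAULT;

		return 0;
	}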
@@ -167,9 +167,6 @@ EXPORT_SYMBOL(_find_next_bit_be);
 #endif
 
 #ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_OLD_MCOUNT
-EXPORT_SYMBOL(mcount);
-#endif
 EXPORT_SYMBOL(__gnu_mcount_nc);
 #endif
 
@@ -296,16 +296,15 @@ __sys_trace:
 	cmp	scno, #-1		@ skip the syscall?
 	bne	2b
 	add	sp, sp, #S_OFF		@ restore stack
-	b	ret_slow_syscall
-
-__sys_trace_return:
-	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
+
+__sys_trace_return_nosave:
+	enable_irq_notrace
 	mov	r0, sp
 	bl	syscall_trace_exit
 	b	ret_slow_syscall
 
-__sys_trace_return_nosave:
-	enable_irq_notrace
+__sys_trace_return:
+	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
 	mov	r0, sp
 	bl	syscall_trace_exit
 	b	ret_slow_syscall
@@ -15,23 +15,8 @@
  * start of every function.  In mcount, apart from the function's address (in
  * lr), we need to get hold of the function's caller's address.
  *
- * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
- *
- *	bl	mcount
- *
- * These versions have the limitation that in order for the mcount routine to
- * be able to determine the function's caller's address, an APCS-style frame
- * pointer (which is set up with something like the code below) is required.
- *
- *	mov     ip, sp
- *	push    {fp, ip, lr, pc}
- *	sub     fp, ip, #4
- *
- * With EABI, these frame pointers are not available unless -mapcs-frame is
- * specified, and if building as Thumb-2, not even then.
- *
- * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
- * with call sites like:
+ * Newer GCCs (4.4+) solve this problem by using a version of mcount with call
+ * sites like:
  *
  *	push	{lr}
  *	bl	__gnu_mcount_nc
@@ -46,17 +31,10 @@
  * allows it to be clobbered in subroutines and doesn't use it to hold
  * parameters.)
  *
- * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
- * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
- * arch/arm/kernel/ftrace.c).
+ * When using dynamic ftrace, we patch out the mcount call by a "pop {lr}"
+ * instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c).
  */
 
-#ifndef CONFIG_OLD_MCOUNT
-#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
-#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
-#endif
-#endif
-
 .macro mcount_adjust_addr rd, rn
 	bic	\rd, \rn, #1		@ clear the Thumb bit if present
 	sub	\rd, \rd, #MCOUNT_INSN_SIZE
@@ -209,51 +187,6 @@ ftrace_graph_call\suffix:
 	mcount_exit
 .endm
 
-#ifdef CONFIG_OLD_MCOUNT
-/*
- * mcount
- */
-
-.macro mcount_enter
-	stmdb	sp!, {r0-r3, lr}
-.endm
-
-.macro mcount_get_lr reg
-	ldr	\reg, [fp, #-4]
-.endm
-
-.macro mcount_exit
-	ldr	lr, [fp, #-4]
-	ldmia	sp!, {r0-r3, pc}
-.endm
-
-ENTRY(mcount)
-#ifdef CONFIG_DYNAMIC_FTRACE
-	stmdb	sp!, {lr}
-	ldr	lr, [fp, #-4]
-	ldmia	sp!, {pc}
-#else
-	__mcount _old
-#endif
-ENDPROC(mcount)
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(ftrace_caller_old)
-	__ftrace_caller _old
-ENDPROC(ftrace_caller_old)
-#endif
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller_old)
-	__ftrace_graph_caller
-ENDPROC(ftrace_graph_caller_old)
-#endif
-
-.purgem mcount_enter
-.purgem mcount_get_lr
-.purgem mcount_exit
-#endif
-
 /*
  * __gnu_mcount_nc
  */
@@ -47,30 +47,6 @@ void arch_ftrace_update_code(int command)
 	stop_machine(__ftrace_modify_code, &command, NULL);
 }
 
-#ifdef CONFIG_OLD_MCOUNT
-#define OLD_MCOUNT_ADDR	((unsigned long) mcount)
-#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
-
-#define	OLD_NOP		0xe1a00000	/* mov r0, r0 */
-
-static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
-{
-	return rec->arch.old_mcount ? OLD_NOP : NOP;
-}
-
-static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
-{
-	if (!rec->arch.old_mcount)
-		return addr;
-
-	if (addr == MCOUNT_ADDR)
-		addr = OLD_MCOUNT_ADDR;
-	else if (addr == FTRACE_ADDR)
-		addr = OLD_FTRACE_ADDR;
-
-	return addr;
-}
-#else
 static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
 {
 	return NOP;
@@ -80,7 +56,6 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
 {
 	return addr;
 }
-#endif
 
 int ftrace_arch_code_modify_prepare(void)
 {
@@ -150,15 +125,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	}
 #endif
 
-#ifdef CONFIG_OLD_MCOUNT
-	if (!ret) {
-		pc = (unsigned long)&ftrace_call_old;
-		new = ftrace_call_replace(pc, (unsigned long)func);
-
-		ret = ftrace_modify_code(pc, 0, new, false);
-	}
-#endif
-
 	return ret;
 }
 
@@ -203,16 +169,6 @@ int ftrace_make_nop(struct module *mod,
 	new = ftrace_nop_replace(rec);
 	ret = ftrace_modify_code(ip, old, new, true);
 
-#ifdef CONFIG_OLD_MCOUNT
-	if (ret == -EINVAL && addr == MCOUNT_ADDR) {
-		rec->arch.old_mcount = true;
-
-		old = ftrace_call_replace(ip, adjust_address(rec, addr));
-		new = ftrace_nop_replace(rec);
-		ret = ftrace_modify_code(ip, old, new, true);
-	}
-#endif
-
 	return ret;
 }
 
@@ -290,13 +246,6 @@ static int ftrace_modify_graph_caller(bool enable)
 #endif
 
 
-#ifdef CONFIG_OLD_MCOUNT
-	if (!ret)
-		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
-					     ftrace_graph_caller_old,
-					     enable);
-#endif
-
 	return ret;
 }
 
@@ -77,8 +77,6 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
 		kframe->magic = IWMMXT_MAGIC;
 		kframe->size = IWMMXT_STORAGE_SIZE;
 		iwmmxt_task_copy(current_thread_info(), &kframe->storage);
-
-		err = __copy_to_user(frame, kframe, sizeof(*frame));
 	} else {
 		/*
 		 * For bug-compatibility with older kernels, some space
@@ -86,10 +84,14 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
 		 * Set the magic and size appropriately so that properly
 		 * written userspace can skip it reliably:
 		 */
-		__put_user_error(DUMMY_MAGIC, &frame->magic, err);
-		__put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
+		*kframe = (struct iwmmxt_sigframe) {
+			.magic = DUMMY_MAGIC,
+			.size  = IWMMXT_STORAGE_SIZE,
+		};
 	}
 
+	err = __copy_to_user(frame, kframe, sizeof(*kframe));
+
 	return err;
 }
 
@@ -135,17 +137,18 @@ static int restore_iwmmxt_context(char __user **auxp)
 
 static int preserve_vfp_context(struct vfp_sigframe __user *frame)
 {
-	const unsigned long magic = VFP_MAGIC;
-	const unsigned long size = VFP_STORAGE_SIZE;
+	struct vfp_sigframe kframe;
 	int err = 0;
 
-	__put_user_error(magic, &frame->magic, err);
-	__put_user_error(size, &frame->size, err);
+	memset(&kframe, 0, sizeof(kframe));
+	kframe.magic = VFP_MAGIC;
+	kframe.size = VFP_STORAGE_SIZE;
 
+	err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
 	if (err)
-		return -EFAULT;
+		return err;
 
-	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
+	return __copy_to_user(frame, &kframe, sizeof(kframe));
 }
 
 static int restore_vfp_context(char __user **auxp)
@@ -288,30 +291,35 @@ static int
 setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
 {
 	struct aux_sigframe __user *aux;
+	struct sigcontext context;
 	int err = 0;
 
-	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
-	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
-	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
-	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
-	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
-	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
-	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
-	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
-	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
-	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
-	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
-	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
-	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
-	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
-	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
-	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
-	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+	context = (struct sigcontext) {
+		.arm_r0        = regs->ARM_r0,
+		.arm_r1        = regs->ARM_r1,
+		.arm_r2        = regs->ARM_r2,
+		.arm_r3        = regs->ARM_r3,
+		.arm_r4        = regs->ARM_r4,
+		.arm_r5        = regs->ARM_r5,
+		.arm_r6        = regs->ARM_r6,
+		.arm_r7        = regs->ARM_r7,
+		.arm_r8        = regs->ARM_r8,
+		.arm_r9        = regs->ARM_r9,
+		.arm_r10       = regs->ARM_r10,
+		.arm_fp        = regs->ARM_fp,
+		.arm_ip        = regs->ARM_ip,
+		.arm_sp        = regs->ARM_sp,
+		.arm_lr        = regs->ARM_lr,
+		.arm_pc        = regs->ARM_pc,
+		.arm_cpsr      = regs->ARM_cpsr,
 
-	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
-	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
-	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
-	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
+		.trap_no       = current->thread.trap_no,
+		.error_code    = current->thread.error_code,
+		.fault_address = current->thread.address,
+		.oldmask       = set->sig[0],
+	};
+
+	err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
 
 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
 
@@ -328,7 +336,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
 	if (err == 0)
 		err |= preserve_vfp_context(&aux->vfp);
 #endif
-	__put_user_error(0, &aux->end_magic, err);
+	err |= __put_user(0, &aux->end_magic);
 
 	return err;
 }
@@ -491,7 +499,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 	/*
 	 * Set uc.uc_flags to a value which sc.trap_no would never have.
 	 */
-	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
+	err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
 
 	err |= setup_sigframe(frame, regs, set);
 	if (err == 0)
@@ -511,8 +519,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 
 	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
 
-	__put_user_error(0, &frame->sig.uc.uc_flags, err);
-	__put_user_error(NULL, &frame->sig.uc.uc_link, err);
+	err |= __put_user(0, &frame->sig.uc.uc_flags);
+	err |= __put_user(NULL, &frame->sig.uc.uc_link);
 
 	err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
 	err |= setup_sigframe(&frame->sig, regs, set);
@@ -277,6 +277,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
 				    int maxevents, int timeout)
 {
 	struct epoll_event *kbuf;
+	struct oabi_epoll_event e;
 	mm_segment_t fs;
 	long ret, err, i;
 
@@ -295,8 +296,11 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
 	set_fs(fs);
 	err = 0;
 	for (i = 0; i < ret; i++) {
-		__put_user_error(kbuf[i].events, &events->events, err);
-		__put_user_error(kbuf[i].data,   &events->data,   err);
+		e.events = kbuf[i].events;
+		e.data = kbuf[i].data;
+		err = __copy_to_user(events, &e, sizeof(e));
+		if (err)
+			break;
 		events++;
 	}
 	kfree(kbuf);
@@ -93,11 +93,7 @@ ENTRY(arm_copy_from_user)
 #ifdef CONFIG_CPU_SPECTRE
 	get_thread_info r3
 	ldr	r3, [r3, #TI_ADDR_LIMIT]
-	adds	ip, r1, r2	@ ip=addr+size
-	sub	r3, r3, #1	@ addr_limit - 1
-	cmpcc	ip, r3		@ if (addr+size > addr_limit - 1)
-	movcs	r1, #0		@ addr = NULL
-	csdb
+	uaccess_mask_range_ptr r1, r2, r3, ip
 #endif
 
 #include "copy_template.S"
@@ -94,6 +94,11 @@
 
 ENTRY(__copy_to_user_std)
 WEAK(arm_copy_to_user)
+#ifdef CONFIG_CPU_SPECTRE
+	get_thread_info r3
+	ldr	r3, [r3, #TI_ADDR_LIMIT]
+	uaccess_mask_range_ptr r0, r2, r3, ip
+#endif
 
 #include "copy_template.S"
 
@@ -108,4 +113,3 @@ ENDPROC(__copy_to_user_std)
 		rsb	r0, r0, r2
 		copy_abort_end
 	.popsection
-
@@ -152,7 +152,8 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 		n = __copy_to_user_std(to, from, n);
 		uaccess_restore(ua_flags);
 	} else {
-		n = __copy_to_user_memcpy(to, from, n);
+		n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
+					  from, n);
 	}
 	return n;
 }
@@ -553,12 +553,11 @@ void vfp_flush_hwstate(struct thread_info *thread)
  * Save the current VFP state into the provided structures and prepare
  * for entry into a new function (signal handler).
  */
-int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
-				    struct user_vfp_exc __user *ufp_exc)
+int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
+				    struct user_vfp_exc *ufp_exc)
 {
 	struct thread_info *thread = current_thread_info();
 	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
-	int err = 0;
 
 	/* Ensure that the saved hwstate is up-to-date. */
 	vfp_sync_hwstate(thread);
@@ -567,22 +566,19 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
 	 * Copy the floating point registers. There can be unused
 	 * registers see asm/hwcap.h for details.
 	 */
-	err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
-			      sizeof(hwstate->fpregs));
+	memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));
+
 	/*
 	 * Copy the status and control register.
 	 */
-	__put_user_error(hwstate->fpscr, &ufp->fpscr, err);
+	ufp->fpscr = hwstate->fpscr;
 
 	/*
 	 * Copy the exception registers.
 	 */
-	__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
-	__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
-	__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
-
-	if (err)
-		return -EFAULT;
+	ufp_exc->fpexc = hwstate->fpexc;
+	ufp_exc->fpinst = hwstate->fpinst;
+	ufp_exc->fpinst2 = ufp_exc->fpinst2;
 
 	/* Ensure that VFP is disabled. */
 	vfp_flush_hwstate(thread);
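
One detail worth flagging in the hunk above: ufp_exc->fpinst2 = ufp_exc->fpinst2; is a self-assignment, so fpinst2 is left at whatever the destination already held (zero, given the memset in preserve_vfp_context) rather than the hardware state. It reads as a typo for the hwstate copy, and it was later corrected upstream ("ARM: 8810/1: vfp: Fix wrong assignment to ufp_exc"); the intended line is presumably:

	ufp_exc->fpinst2 = hwstate->fpinst2;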
@@ -1179,7 +1179,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !X86
+	select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !X86
 	select KALLSYMS
 	select KALLSYMS_ALL
 
@@ -1590,7 +1590,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
 	depends on !X86_64
 	select STACKTRACE
-	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
 	help
 	  Provide stacktrace filter for fault-injection capabilities
 
@@ -1599,7 +1599,7 @@ config LATENCYTOP
 	depends on DEBUG_KERNEL
 	depends on STACKTRACE_SUPPORT
 	depends on PROC_FS
-	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
 	select KALLSYMS
 	select KALLSYMS_ALL
 	select STACKTRACE