mirror of
https://github.com/torvalds/linux.git
synced 2024-12-28 05:41:55 +00:00
819771cc28
In subsequent patches we'll alter the structure and usage of struct exception_table_entry. For inline assembly, we create these using the `_ASM_EXTABLE()` CPP macro defined in <asm/uaccess.h>, and for plain assembly code we use the `_asm_extable()` GAS macro defined in <asm/assembler.h>, which are largely identical save for different escaping and stringification requirements. This patch moves the common definitions to a new <asm/asm-extable.h> header, so that it's easier to keep the two in-sync, and to remove the implication that these are only used for uaccess helpers (as e.g. load_unaligned_zeropad() is only used on kernel memory, and depends upon `_ASM_EXTABLE()`). At the same time, a few minor modifications are made for clarity and in preparation for subsequent patches: * The structure creation is factored out into an `__ASM_EXTABLE_RAW()` macro. This will make it easier to support different fixup variants in subsequent patches without needing to update all users of `_ASM_EXTABLE()`, and makes it easier to see that the CPP and GAS variants of the macros are structurally identical. For the CPP macro, the stringification of fields is left to the wrapper macro, `_ASM_EXTABLE()`, as in subsequent patches it will be necessary to stringify fields in wrapper macros to safely concatenate strings which cannot be token-pasted together in CPP. * The fields of the structure are created separately on their own lines. This will make it easier to add/remove/modify individual fields clearly. * Additional parentheses are added around the use of macro arguments in field definitions to avoid any potential problems with evaluation due to operator precedence, and to make errors upon misuse clearer. * USER() is moved into <asm/asm-uaccess.h>, as it is not required by all assembly code, and is already referred to by comments in that file. There should be no functional change as a result of this patch. 
Signed-off-by: Mark Rutland <mark.rutland@arm.com> Reviewed-by: Ard Biesheuvel <ardb@kernel.org> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: James Morse <james.morse@arm.com> Cc: Robin Murphy <robin.murphy@arm.com> Cc: Will Deacon <will@kernel.org> Link: https://lore.kernel.org/r/20211019160219.5202-8-mark.rutland@arm.com Signed-off-by: Will Deacon <will@kernel.org>
56 lines
1.2 KiB
ArmAsm
56 lines
1.2 KiB
ArmAsm
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
/*
|
|
* Copyright (C) 2021 Arm Ltd.
|
|
*/
|
|
|
|
#include <linux/linkage.h>
|
|
#include <asm/asm-uaccess.h>
|
|
|
|
.text
|
|
|
|
/* Prototype: int __arch_clear_user(void *addr, size_t sz)
|
|
* Purpose : clear some user memory
|
|
* Params : addr - user memory address to clear
|
|
* : sz - number of bytes to clear
|
|
* Returns : number of bytes NOT cleared
|
|
*
|
|
* Alignment fixed up by hardware.
|
|
*/
|
|
|
|
.p2align 4
|
|
// Alignment is for the loop, but since the prologue (including BTI)
|
|
// is also 16 bytes we can keep any padding outside the function
|
|
SYM_FUNC_START(__arch_clear_user)
|
|
add x2, x0, x1
|
|
subs x1, x1, #8
|
|
b.mi 2f
|
|
1:
|
|
USER(9f, sttr xzr, [x0])
|
|
add x0, x0, #8
|
|
subs x1, x1, #8
|
|
b.hi 1b
|
|
USER(9f, sttr xzr, [x2, #-8])
|
|
mov x0, #0
|
|
ret
|
|
|
|
2: tbz x1, #2, 3f
|
|
USER(9f, sttr wzr, [x0])
|
|
USER(8f, sttr wzr, [x2, #-4])
|
|
mov x0, #0
|
|
ret
|
|
|
|
3: tbz x1, #1, 4f
|
|
USER(9f, sttrh wzr, [x0])
|
|
4: tbz x1, #0, 5f
|
|
USER(7f, sttrb wzr, [x2, #-1])
|
|
5: mov x0, #0
|
|
ret
|
|
|
|
// Exception fixups
|
|
7: sub x0, x2, #5 // Adjust for faulting on the final byte...
|
|
8: add x0, x0, #4 // ...or the second word of the 4-7 byte case
|
|
9: sub x0, x2, x0
|
|
ret
|
|
SYM_FUNC_END(__arch_clear_user)
|
|
EXPORT_SYMBOL(__arch_clear_user)
|