mirror of
https://github.com/torvalds/linux.git
synced 2024-11-16 00:52:01 +00:00
59d1ff3bfb
save_and_disable_irqs does not need to use mov + msr (which was introduced to work around a documentation bug which was propagated into binutils.) Use msr with an immediate constant, and if we're building for ARMv6 or later, use the new CPSID instruction. Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
67 lines
1.3 KiB
ARM assembly
#include <linux/config.h>
|
|
|
|
#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_32v6K)
|
|
.macro bitop, instr
|
|
mov r2, #1
|
|
and r3, r0, #7 @ Get bit offset
|
|
add r1, r1, r0, lsr #3 @ Get byte offset
|
|
mov r3, r2, lsl r3
|
|
1: ldrexb r2, [r1]
|
|
\instr r2, r2, r3
|
|
strexb r0, r2, [r1]
|
|
cmp r0, #0
|
|
bne 1b
|
|
mov pc, lr
|
|
.endm
|
|
|
|
.macro testop, instr, store
|
|
and r3, r0, #7 @ Get bit offset
|
|
mov r2, #1
|
|
add r1, r1, r0, lsr #3 @ Get byte offset
|
|
mov r3, r2, lsl r3 @ create mask
|
|
1: ldrexb r2, [r1]
|
|
ands r0, r2, r3 @ save old value of bit
|
|
\instr r2, r2, r3 @ toggle bit
|
|
strexb ip, r2, [r1]
|
|
cmp ip, #0
|
|
bne 1b
|
|
cmp r0, #0
|
|
movne r0, #1
|
|
2: mov pc, lr
|
|
.endm
|
|
#else
|
|
.macro bitop, instr
|
|
and r2, r0, #7
|
|
mov r3, #1
|
|
mov r3, r3, lsl r2
|
|
save_and_disable_irqs ip
|
|
ldrb r2, [r1, r0, lsr #3]
|
|
\instr r2, r2, r3
|
|
strb r2, [r1, r0, lsr #3]
|
|
restore_irqs ip
|
|
mov pc, lr
|
|
.endm
|
|
|
|
/**
|
|
* testop - implement a test_and_xxx_bit operation.
|
|
* @instr: operational instruction
|
|
* @store: store instruction
|
|
*
|
|
* Note: we can trivially conditionalise the store instruction
|
|
* to avoid dirting the data cache.
|
|
*/
|
|
.macro testop, instr, store
|
|
add r1, r1, r0, lsr #3
|
|
and r3, r0, #7
|
|
mov r0, #1
|
|
save_and_disable_irqs ip
|
|
ldrb r2, [r1]
|
|
tst r2, r0, lsl r3
|
|
\instr r2, r2, r0, lsl r3
|
|
\store r2, [r1]
|
|
restore_irqs ip
|
|
moveq r0, #0
|
|
mov pc, lr
|
|
.endm
|
|
#endif
|