parisc: Fix lightweight spinlock checks to not break futexes

The lightweight spinlock checks verify that the lock word of a spinlock
contains either the value 0 (spinlock locked) or a value with no bits
set other than those in __ARCH_SPIN_LOCK_UNLOCKED_VAL (spinlock
unlocked).
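
As an illustration, the condition the check enforces can be written in C
roughly as follows (a minimal sketch; spin_val_is_sane() is a made-up
name, only __ARCH_SPIN_LOCK_UNLOCKED_VAL comes from the kernel headers):

#define __ARCH_SPIN_LOCK_UNLOCKED_VAL	0x1a46

/* Sketch: a lock word is considered sane if it has no bits set outside
 * __ARCH_SPIN_LOCK_UNLOCKED_VAL. Both 0 (locked) and 0x1a46 (unlocked)
 * pass; any other value raises the break trap. */
static inline int spin_val_is_sane(unsigned int lock_val)
{
	return (lock_val & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL) == 0;
}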

This breaks the current LWS (light-weight syscall) code, which unlocks a
lock by writing the address of the lock into the lock word, an
optimization that saved one assembler instruction.
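
For example, with a hypothetical lock located at address 0x4ff80010 (the
address is made up; any typical kernel address behaves the same), the
old unlock sequence leaves a value in the lock word that the check
rejects. The arithmetic can be verified with a small stand-alone C
program:

#include <assert.h>

#define __ARCH_SPIN_LOCK_UNLOCKED_VAL	0x1a46u

int main(void)
{
	unsigned int locked   = 0;		/* passes the check */
	unsigned int unlocked = 0x1a46;		/* passes the check */
	unsigned int old_lws  = 0x4ff80010;	/* hypothetical lock address,
						 * as the old LWS unlock
						 * stored it */

	assert((locked   & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL) == 0);
	assert((unlocked & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL) == 0);
	/* the address has bits set outside 0x1a46, so the lightweight
	 * check fires and the futex operation breaks */
	assert((old_lws  & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL) != 0);
	return 0;
}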

Fix it by making spinlock_types.h accessible to asm code, changing the
LWS spinlock-unlocking code to write __ARCH_SPIN_LOCK_UNLOCKED_VAL into
the lock word, and adding the missing lightweight spinlock checks to the
LWS path. Finally, make the spinlock checks dependent on DEBUG_KERNEL.

Noticed-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
Tested-by: John David Anglin <dave.anglin@bell.net>
Cc: stable@vger.kernel.org # v6.4+
Fixes: 15e64ef652 ("parisc: Add lightweight spinlock checks")
commit a0f4b7879f (parent a027b2eca0)
Helge Deller <deller@gmx.de>, 2023-08-09 09:21:58 +02:00
4 changed files with 27 additions and 6 deletions

diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug

@@ -2,7 +2,7 @@
 #
 config LIGHTWEIGHT_SPINLOCK_CHECK
 	bool "Enable lightweight spinlock checks"
-	depends on SMP && !DEBUG_SPINLOCK
+	depends on DEBUG_KERNEL && SMP && !DEBUG_SPINLOCK
 	default y
 	help
 	  Add checks with low performance impact to the spinlock functions

diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h

@@ -7,8 +7,6 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
-#define SPINLOCK_BREAK_INSN	0x0000c006	/* break 6,6 */
-
 static inline void arch_spin_val_check(int lock_val)
 {
 	if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))

diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h

@@ -4,6 +4,10 @@
 #define __ARCH_SPIN_LOCK_UNLOCKED_VAL	0x1a46
 
+#define SPINLOCK_BREAK_INSN	0x0000c006	/* break 6,6 */
+
+#ifndef __ASSEMBLY__
+
 typedef struct {
 #ifdef CONFIG_PA20
 	volatile unsigned int slock;
@@ -27,6 +31,8 @@ typedef struct {
 	volatile unsigned int counter;
 } arch_rwlock_t;
 
+#endif /* __ASSEMBLY__ */
+
 #define __ARCH_RW_LOCK_UNLOCKED__       0x01000000
 #define __ARCH_RW_LOCK_UNLOCKED       { .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \
 					.counter = __ARCH_RW_LOCK_UNLOCKED__ }

diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S

@@ -39,6 +39,7 @@ registers).
 #include <asm/assembly.h>
 #include <asm/processor.h>
 #include <asm/cache.h>
+#include <asm/spinlock_types.h>
 
 #include <linux/linkage.h>
@@ -66,6 +67,16 @@ registers).
 	stw	\reg1, 0(%sr2,\reg2)
 	.endm
 
+	/* raise exception if spinlock content is not zero or
+	 * __ARCH_SPIN_LOCK_UNLOCKED_VAL */
+	.macro	spinlock_check spin_val,tmpreg
+#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
+	ldi	__ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmpreg
+	andcm,=	\spin_val, \tmpreg, %r0
+	.word	SPINLOCK_BREAK_INSN
+#endif
+	.endm
+
 	.text
 
 	.import syscall_exit,code
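
(For readers not fluent in PA-RISC assembly: andcm computes \spin_val
AND NOT \tmpreg, and the ,= completer nullifies the following
instruction when that result is zero. The .word SPINLOCK_BREAK_INSN
therefore only executes, and traps, when the lock word has bits set
outside __ARCH_SPIN_LOCK_UNLOCKED_VAL.)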
@@ -508,7 +519,8 @@ lws_start:
 lws_exit_noerror:
 	lws_pagefault_enable	%r1,%r21
-	stw,ma	%r20, 0(%sr2,%r20)
+	ldi	__ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21
+	stw,ma	%r21, 0(%sr2,%r20)
 	ssm	PSW_SM_I, %r0
 	b	lws_exit
 	copy	%r0, %r21
@@ -521,7 +533,8 @@ lws_wouldblock:
 lws_pagefault:
 	lws_pagefault_enable	%r1,%r21
-	stw,ma	%r20, 0(%sr2,%r20)
+	ldi	__ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21
+	stw,ma	%r21, 0(%sr2,%r20)
 	ssm	PSW_SM_I, %r0
 	ldo	3(%r0),%r28
 	b	lws_exit
@@ -619,6 +632,7 @@ lws_compare_and_swap:
 	/* Try to acquire the lock */
 	LDCW	0(%sr2,%r20), %r28
+	spinlock_check	%r28, %r21
 	comclr,<>	%r0, %r28, %r0
 	b,n	lws_wouldblock
@@ -772,6 +786,7 @@ cas2_lock_start:
 	/* Try to acquire the lock */
 	LDCW	0(%sr2,%r20), %r28
+	spinlock_check	%r28, %r21
 	comclr,<>	%r0, %r28, %r0
 	b,n	lws_wouldblock
@@ -1001,6 +1016,7 @@ atomic_xchg_start:
 	/* Try to acquire the lock */
 	LDCW	0(%sr2,%r20), %r28
+	spinlock_check	%r28, %r21
 	comclr,<>	%r0, %r28, %r0
 	b,n	lws_wouldblock
@@ -1199,6 +1215,7 @@ atomic_store_start:
 	/* Try to acquire the lock */
 	LDCW	0(%sr2,%r20), %r28
+	spinlock_check	%r28, %r21
 	comclr,<>	%r0, %r28, %r0
 	b,n	lws_wouldblock
@@ -1330,7 +1347,7 @@ ENTRY(lws_lock_start)
 	/* lws locks */
 	.rept 256
 	/* Keep locks aligned at 16-bytes */
-	.word 1
+	.word __ARCH_SPIN_LOCK_UNLOCKED_VAL
 	.word 0
 	.word 0
 	.word 0
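
A note on this last hunk: any non-zero lock word means "unlocked" for
the LDCW-based locks, so the old initializer .word 1 worked, but 1 has a
bit set outside __ARCH_SPIN_LOCK_UNLOCKED_VAL (0x1a46) and would now
trip the new spinlock_check on a freshly initialized lock. Hence the
initial value is switched to __ARCH_SPIN_LOCK_UNLOCKED_VAL as well.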