From 31552385f8e9d0869117014bf8e55ba0497e3ec8 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Sun, 15 Aug 2021 23:29:27 +0200
Subject: [PATCH] locking/spinlock/rt: Prepare for RT local_lock

Add the static and runtime initializer mechanics to support the RT
variant of local_lock, which requires the lock type in the lockdep map
to be set to LD_LOCK_PERCPU.

Signed-off-by: Thomas Gleixner
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Ingo Molnar
Link: https://lore.kernel.org/r/20210815211305.967526724@linutronix.de
---
 include/linux/spinlock_rt.h        | 24 ++++++++++++++++--------
 include/linux/spinlock_types.h     |  6 ++++++
 include/linux/spinlock_types_raw.h |  8 ++++++++
 kernel/locking/spinlock_rt.c       |  7 +++++--
 4 files changed, 35 insertions(+), 10 deletions(-)

diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index 4fc72199cc9d..835aedaf68ac 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -8,20 +8,28 @@
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
-				struct lock_class_key *key);
+				struct lock_class_key *key, bool percpu);
 #else
 static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
-				struct lock_class_key *key)
+				struct lock_class_key *key, bool percpu)
 {
 }
 #endif
 
-#define spin_lock_init(slock)				\
-do {							\
-	static struct lock_class_key __key;		\
-							\
-	rt_mutex_base_init(&(slock)->lock);		\
-	__rt_spin_lock_init(slock, #slock, &__key);	\
+#define spin_lock_init(slock)					\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	rt_mutex_base_init(&(slock)->lock);			\
+	__rt_spin_lock_init(slock, #slock, &__key, false);	\
+} while (0)
+
+#define local_spin_lock_init(slock)				\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	rt_mutex_base_init(&(slock)->lock);			\
+	__rt_spin_lock_init(slock, #slock, &__key, true);	\
 } while (0)
 
 extern void rt_spin_lock(spinlock_t *lock);
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 8a9aadbaf293..2dfa35ffec76 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -60,6 +60,12 @@ typedef struct spinlock {
 		SPIN_DEP_MAP_INIT(name)				\
 	}
 
+#define __LOCAL_SPIN_LOCK_UNLOCKED(name)			\
+	{							\
+		.lock = __RT_MUTEX_BASE_INITIALIZER(name.lock),	\
+		LOCAL_SPIN_DEP_MAP_INIT(name)			\
+	}
+
 #define DEFINE_SPINLOCK(name)					\
 	spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
 
diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
index a8a4330d7b78..91cb36b65a17 100644
--- a/include/linux/spinlock_types_raw.h
+++ b/include/linux/spinlock_types_raw.h
@@ -37,9 +37,17 @@ typedef struct raw_spinlock {
 		.name = #lockname,			\
 		.wait_type_inner = LD_WAIT_CONFIG,	\
 	}
+
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname)		\
+	.dep_map = {					\
+		.name = #lockname,			\
+		.wait_type_inner = LD_WAIT_CONFIG,	\
+		.lock_type = LD_LOCK_PERCPU,		\
+	}
 #else
 # define RAW_SPIN_DEP_MAP_INIT(lockname)
 # define SPIN_DEP_MAP_INIT(lockname)
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname)
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK
diff --git a/kernel/locking/spinlock_rt.c b/kernel/locking/spinlock_rt.c
index c36648bd765d..d2912e44d61f 100644
--- a/kernel/locking/spinlock_rt.c
+++ b/kernel/locking/spinlock_rt.c
@@ -120,10 +120,13 @@ EXPORT_SYMBOL(rt_spin_trylock_bh);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __rt_spin_lock_init(spinlock_t *lock, const char *name,
-			 struct lock_class_key *key)
+			 struct lock_class_key *key, bool percpu)
 {
+	u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;
+
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
+	lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
+			      LD_WAIT_INV, type);
 }
 EXPORT_SYMBOL(__rt_spin_lock_init);
 #endif
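
Usage sketch (not part of this patch; demo_lock and demo_init are made-up
names for illustration): on a PREEMPT_RT kernel the new helpers could be
used roughly as below to obtain a per-CPU spinlock_t whose lockdep class
is typed LD_LOCK_PERCPU. The actual RT local_lock_t implementation that
builds on these initializers is introduced in a follow-up change.

/*
 * Illustration only: per-CPU spinlock_t set up with the per-CPU aware
 * initializers added by this patch (PREEMPT_RT only).
 */
#include <linux/percpu.h>
#include <linux/spinlock.h>

/* Static init: LOCAL_SPIN_DEP_MAP_INIT() sets .lock_type = LD_LOCK_PERCPU */
static DEFINE_PER_CPU(spinlock_t, demo_lock) = __LOCAL_SPIN_LOCK_UNLOCKED(demo_lock);

/* Runtime init: passes percpu=true so lockdep types the class LD_LOCK_PERCPU */
static void demo_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		local_spin_lock_init(per_cpu_ptr(&demo_lock, cpu));
}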