mirror of https://github.com/torvalds/linux.git
synced 2024-11-10 22:21:40 +00:00
de8f5e4f2d
Extend lockdep to validate lock wait-type context.

The current wait-types are:

  LD_WAIT_FREE,    /* wait free, rcu etc.. */
  LD_WAIT_SPIN,    /* spin loops, raw_spinlock_t etc.. */
  LD_WAIT_CONFIG,  /* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */
  LD_WAIT_SLEEP,   /* sleeping locks, mutex_t etc.. */

where lockdep validates that the current lock (the one being acquired) fits in the current wait-context (as generated by the held stack).

This ensures that there is no attempt to acquire mutexes while holding spinlocks, to acquire spinlocks while holding raw_spinlocks and so on. In other words, it's a more fancy might_sleep().

Obviously RCU made the entire ordeal more complex than a simple single value test, because RCU can be acquired in (pretty much) any context, and while it presents a context to nested locks it is not the same as the one it got acquired in.

Therefore it's necessary to split the wait_type into two values, one representing the acquire (outer) and one representing the nested context (inner). For most 'normal' locks these two are the same.

[ To make static initialization easier we have the rule that: .outer == INV means .outer == .inner; because INV == 0. ]

It further means that it's required to find the minimal .inner of the held stack to compare against the .outer of the new lock, because while 'normal' RCU presents a CONFIG type to nested locks, if it is taken while already holding a SPIN type it obviously doesn't relax the rules.

Below is an example output generated by the trivial test code:

  raw_spin_lock(&foo);
  spin_lock(&bar);
  spin_unlock(&bar);
  raw_spin_unlock(&foo);

 [ BUG: Invalid wait context ]
 -----------------------------
 swapper/0/1 is trying to lock:
 ffffc90000013f20 (&bar){....}-{3:3}, at: kernel_init+0xdb/0x187
 other info that might help us debug this:
 1 lock held by swapper/0/1:
  #0: ffffc90000013ee0 (&foo){+.+.}-{2:2}, at: kernel_init+0xd1/0x187

The way to read it is to look at the new -{n:m} part in the lock description: -{3:3} for the attempted lock, and try and match that up to the held locks, which in this case is the one: -{2:2}. This tells us that the acquiring lock requires a more relaxed environment than presented by the lock stack.

Currently only the normal locks and RCU are converted; the rest of the lockdep users default to .inner = INV, which is ignored. More conversions can be done when desired.

The check for spinlock_t nesting is not enabled by default. It's a separate config option for now, as there are known problems which are currently being addressed. The config option allows identifying these problems and verifying that the solutions found indeed solve them. The config switch will be removed and the checks will be permanently enabled once the vast majority of issues have been addressed.

[ bigeasy: Move LD_WAIT_FREE,… out of CONFIG_LOCKDEP to avoid compile failure with CONFIG_DEBUG_SPINLOCK + !CONFIG_LOCKDEP ]
[ tglx: Add the config option ]

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200321113242.427089655@linutronix.de
231 lines
5.7 KiB
C
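To connect the splat above to the code in this file: a minimal sketch, assuming a CONFIG_DEBUG_SPINLOCK build in which raw_spin_lock_init() and spin_lock_init() end up calling __raw_spin_lock_init() below (that routing, and the wait types they pass, are assumptions based on the commit message rather than something visible in this file):

  static raw_spinlock_t foo;
  static spinlock_t bar;

  raw_spin_lock_init(&foo);   /* raw spinlock: stricter wait type */
  spin_lock_init(&bar);       /* spinlock_t: more relaxed wait type */

  raw_spin_lock(&foo);
  spin_lock(&bar);            /* CONFIG-type acquire in a SPIN-type context:
                               * the "Invalid wait context" splat above, when
                               * the separate nesting check is enabled */
  spin_unlock(&bar);
  raw_spin_unlock(&foo);

With the check enabled, the spin_lock(&bar) acquire is the -{3:3} attempt reported against the held -{2:2} &foo.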
/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>

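/*
 * The "inner" argument to __raw_spin_lock_init() is the lock's inner wait
 * type (see the commit message above). The value is supplied by the caller,
 * presumably LD_WAIT_SPIN from raw_spin_lock_init() and LD_WAIT_CONFIG from
 * spin_lock_init(); __rwlock_init() below hard-codes LD_WAIT_CONFIG.
 */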
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                          struct lock_class_key *key, short inner)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
#endif
        lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        lock->magic = SPINLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__raw_spin_lock_init);

void __rwlock_init(rwlock_t *lock, const char *name,
                   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
#endif
        lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
        lock->magic = RWLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__rwlock_init);

static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
        struct task_struct *owner = READ_ONCE(lock->owner);

        if (owner == SPINLOCK_OWNER_INIT)
                owner = NULL;
        printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
                msg, raw_smp_processor_id(),
                current->comm, task_pid_nr(current));
        printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
                        ".owner_cpu: %d\n",
                lock, READ_ONCE(lock->magic),
                owner ? owner->comm : "<none>",
                owner ? task_pid_nr(owner) : -1,
                READ_ONCE(lock->owner_cpu));
        dump_stack();
}

static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
        if (!debug_locks_off())
                return;

        spin_dump(lock, msg);
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
        SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
        SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
                                                        lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
        WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
        WRITE_ONCE(lock->owner, current);
}

static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
        SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
        SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
        SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                        lock, "wrong CPU");
        WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
        WRITE_ONCE(lock->owner_cpu, -1);
}

/*
 * We are now relying on the NMI watchdog to detect lockup instead of doing
 * the detection here with an unfair lock which can cause problem of its own.
 */
void do_raw_spin_lock(raw_spinlock_t *lock)
{
        debug_spin_lock_before(lock);
        arch_spin_lock(&lock->raw_lock);
        mmiowb_spin_lock();
        debug_spin_lock_after(lock);
}

int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        int ret = arch_spin_trylock(&lock->raw_lock);

        if (ret) {
                mmiowb_spin_lock();
                debug_spin_lock_after(lock);
        }
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void do_raw_spin_unlock(raw_spinlock_t *lock)
{
        mmiowb_spin_unlock();
        debug_spin_unlock(lock);
        arch_spin_unlock(&lock->raw_lock);
}

static void rwlock_bug(rwlock_t *lock, const char *msg)
{
        if (!debug_locks_off())
                return;

        printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
                msg, raw_smp_processor_id(), current->comm,
                task_pid_nr(current), lock);
        dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

void do_raw_read_lock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        arch_read_lock(&lock->raw_lock);
}

int do_raw_read_trylock(rwlock_t *lock)
{
        int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        arch_read_unlock(&lock->raw_lock);
}

static inline void debug_write_lock_before(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
        RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
                                                        lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
        WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
        WRITE_ONCE(lock->owner, current);
}

static inline void debug_write_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
        RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                        lock, "wrong CPU");
        WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
        WRITE_ONCE(lock->owner_cpu, -1);
}

void do_raw_write_lock(rwlock_t *lock)
{
        debug_write_lock_before(lock);
        arch_write_lock(&lock->raw_lock);
        debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
        int ret = arch_write_trylock(&lock->raw_lock);

        if (ret)
                debug_write_lock_after(lock);
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void do_raw_write_unlock(rwlock_t *lock)
{
        debug_write_unlock(lock);
        arch_write_unlock(&lock->raw_lock);
}
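For completeness, a hedged sketch of the debug checks above catching a double unlock; it assumes a CONFIG_DEBUG_SPINLOCK build (so that spin_unlock() goes through do_raw_spin_unlock()), and demo_lock is a made-up name:

  static DEFINE_SPINLOCK(demo_lock);      /* hypothetical lock */

  spin_lock(&demo_lock);
  spin_unlock(&demo_lock);
  spin_unlock(&demo_lock);                /* second unlock: debug_spin_unlock()
                                           * trips SPIN_BUG_ON(!raw_spin_is_locked(lock), ...)
                                           * and spin_dump() reports
                                           * "BUG: spinlock already unlocked" */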