commit 3d491679b8
Merge tag 'locking_urgent_for_v5.9_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Borislav Petkov:
 "Two fixes from the locking/urgent pile:

   - Fix lockdep's detection of "USED" <- "IN-NMI" inversions (Peter
     Zijlstra)

   - Make percpu-rwsem operations on the semaphore's ->read_count
     IRQ-safe because it can be used in an IRQ context (Hou Tao)"

* tag 'locking_urgent_for_v5.9_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/percpu-rwsem: Use this_cpu_{inc,dec}() for read_count
  locking/lockdep: Fix "USED" <- "IN-NMI" inversions
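
For background (editor's sketch, not part of the commits): the difference
between the __this_cpu_*() and this_cpu_*() accessors is IRQ-safety.
Simplified from the asm-generic/percpu.h fallbacks; real implementations
are per-architecture, and the *_sketch names below are made up:

    /*
     * __this_cpu_inc() sketch: caller guarantees only that preemption
     * is off. May compile to a plain load/add/store, which an IRQ can
     * split in the middle.
     */
    #define __this_cpu_inc_sketch(pcp)		\
    do {					\
    	(pcp) += 1;				\
    } while (0)

    /*
     * this_cpu_inc() sketch: must be IRQ-safe. The generic fallback
     * brackets the read-modify-write with raw_local_irq_save()/restore();
     * x86 instead emits a single incq %gs:(pcp), which an IRQ cannot
     * interrupt mid-update.
     */
    #define this_cpu_inc_sketch(pcp)		\
    do {					\
    	unsigned long __flags;			\
    	raw_local_irq_save(__flags);		\
    	(pcp) += 1;				\
    	raw_local_irq_restore(__flags);		\
    } while (0)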
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -60,7 +60,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
 	 * anything we did within this RCU-sched read-side critical section.
 	 */
 	if (likely(rcu_sync_is_idle(&sem->rss)))
-		__this_cpu_inc(*sem->read_count);
+		this_cpu_inc(*sem->read_count);
 	else
 		__percpu_down_read(sem, false); /* Unconditional memory barrier */
 	/*
@@ -79,7 +79,7 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 	 * Same as in percpu_down_read().
 	 */
 	if (likely(rcu_sync_is_idle(&sem->rss)))
-		__this_cpu_inc(*sem->read_count);
+		this_cpu_inc(*sem->read_count);
 	else
 		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
 	preempt_enable();
@@ -103,7 +103,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 	 * Same as in percpu_down_read().
 	 */
 	if (likely(rcu_sync_is_idle(&sem->rss))) {
-		__this_cpu_dec(*sem->read_count);
+		this_cpu_dec(*sem->read_count);
 	} else {
 		/*
 		 * slowpath; reader will only ever wake a single blocked
@@ -115,7 +115,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 		 * aggregate zero, as that is the only time it matters) they
 		 * will also see our critical section.
 		 */
-		__this_cpu_dec(*sem->read_count);
+		this_cpu_dec(*sem->read_count);
 		rcuwait_wake_up(&sem->writer);
 	}
 	preempt_enable();
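
Why the plain accessors are broken here (editor's sketch): percpu_up_read()
can run from IRQ context, e.g. via an aio completion calling
file_end_write(), while a task on the same CPU is midway through the
load/add/store of its own fast path:

    /*
     * CPU n, task context               CPU n, IRQ context
     *
     * percpu_down_read():
     *   tmp = *read_count;   // reads 1
     *                                   percpu_up_read():
     *                                     *read_count = 0;
     *   *read_count = tmp + 1;          // stores 2: the decrement is lost
     *
     * The per-CPU sum stays one too high forever, so a writer blocks
     * indefinitely waiting for readers_active_check() to see zero.
     */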
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3969,13 +3969,18 @@ static int separate_irq_context(struct task_struct *curr,
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		     enum lock_usage_bit new_bit)
 {
-	unsigned int new_mask = 1 << new_bit, ret = 1;
+	unsigned int old_mask, new_mask, ret = 1;
 
 	if (new_bit >= LOCK_USAGE_STATES) {
 		DEBUG_LOCKS_WARN_ON(1);
 		return 0;
 	}
 
+	if (new_bit == LOCK_USED && this->read)
+		new_bit = LOCK_USED_READ;
+
+	new_mask = 1 << new_bit;
+
 	/*
 	 * If already set then do not dirty the cacheline,
 	 * nor do any checks:
@@ -3988,13 +3993,22 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	/*
 	 * Make sure we didn't race:
 	 */
-	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
-		graph_unlock();
-		return 1;
-	}
+	if (unlikely(hlock_class(this)->usage_mask & new_mask))
+		goto unlock;
 
+	old_mask = hlock_class(this)->usage_mask;
 	hlock_class(this)->usage_mask |= new_mask;
 
+	/*
+	 * Save one usage_traces[] entry and map both LOCK_USED and
+	 * LOCK_USED_READ onto the same entry.
+	 */
+	if (new_bit == LOCK_USED || new_bit == LOCK_USED_READ) {
+		if (old_mask & (LOCKF_USED | LOCKF_USED_READ))
+			goto unlock;
+		new_bit = LOCK_USED;
+	}
+
 	if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
 		return 0;
 
@@ -4008,6 +4022,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		return 0;
 	}
 
+unlock:
 	graph_unlock();
 
 	/*
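
The mark_lock() hunks give read-only acquisition its own usage bit,
LOCK_USED_READ, with LOCK_USED and LOCK_USED_READ sharing a single
usage_traces[] entry (and the early-return path folded into the new
"unlock:" label). What this enables is the RCU pattern (editor's sketch):
a lock that is only ever taken for read may also be taken in NMI context,
since a reader interrupting another reader cannot deadlock:

    rcu_read_lock();	/* task context: records LOCK_USED_READ only */
    	/* <NMI> */
    	rcu_read_lock();	/* NMI: class has no LOCK_USED bit set,
    				 * so no "USED" <- "IN-NMI" inversion */
    	rcu_read_unlock();
    	/* </NMI> */
    rcu_read_unlock();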
@@ -4942,12 +4957,20 @@ static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock
 {
 #ifdef CONFIG_PROVE_LOCKING
 	struct lock_class *class = look_up_lock_class(lock, subclass);
+	unsigned long mask = LOCKF_USED;
 
 	/* if it doesn't have a class (yet), it certainly hasn't been used yet */
 	if (!class)
 		return;
 
-	if (!(class->usage_mask & LOCK_USED))
+	/*
+	 * READ locks only conflict with USED, such that if we only ever use
+	 * READ locks, there is no deadlock possible -- RCU.
+	 */
+	if (!hlock->read)
+		mask |= LOCKF_USED_READ;
+
+	if (!(class->usage_mask & mask))
 		return;
 
 	hlock->class_idx = class - lock_classes;
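
Note the bug in the removed line: LOCK_USED is a bit index from enum
lock_usage_bit, not a bit mask, so "class->usage_mask & LOCK_USED" tested
unrelated low bits and the "USED" <- "IN-NMI" check essentially never
fired. The replacement builds a proper mask from the LOCKF_* constants.
Sketch of the distinction, with a hypothetical bit number:

    enum { LOCK_USED_EXAMPLE = 8 };			 /* a bit index */
    #define LOCKF_USED_EXAMPLE (1U << LOCK_USED_EXAMPLE) /* its mask   */

    /* usage_mask & LOCK_USED_EXAMPLE  tests bit 3 (8 == 0b1000): wrong */
    /* usage_mask & LOCKF_USED_EXAMPLE tests bit 8: what was intended   */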
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -19,6 +19,7 @@ enum lock_usage_bit {
 #include "lockdep_states.h"
 #undef LOCKDEP_STATE
 	LOCK_USED,
+	LOCK_USED_READ,
 	LOCK_USAGE_STATES
 };
 
@@ -40,6 +41,7 @@ enum {
 #include "lockdep_states.h"
 #undef LOCKDEP_STATE
 	__LOCKF(USED)
+	__LOCKF(USED_READ)
 };
 
 #define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
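
For reference (editor's note; my recollection of the same header):
__LOCKF() derives each LOCKF_* mask from the matching LOCK_* bit index,
so the added enumerator expands along these lines:

    #define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE),

    __LOCKF(USED_READ)	/* => LOCKF_USED_READ = (1 << LOCK_USED_READ), */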
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -45,7 +45,7 @@ EXPORT_SYMBOL_GPL(percpu_free_rwsem);
 
 static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 {
-	__this_cpu_inc(*sem->read_count);
+	this_cpu_inc(*sem->read_count);
 
 	/*
 	 * Due to having preemption disabled the decrement happens on
@@ -71,7 +71,7 @@ static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 	if (likely(!atomic_read_acquire(&sem->block)))
 		return true;
 
-	__this_cpu_dec(*sem->read_count);
+	this_cpu_dec(*sem->read_count);
 
 	/* Prod writer to re-evaluate readers_active_check() */
 	rcuwait_wake_up(&sem->writer);
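
A minimal reader-side pairing these changes harden (editor's sketch;
demo_sem is a hypothetical semaphore). The slowpath above runs with only
preemption disabled, which was sufficient for __this_cpu_*() but not
against interrupts; with this_cpu_*(), x86 keeps a single-instruction
fast path while architectures on the generic fallback pay an irq
save/restore per reader operation:

    static DEFINE_STATIC_PERCPU_RWSEM(demo_sem);	/* hypothetical */

    /* task context */
    percpu_down_read(&demo_sem);	/* fast path: this_cpu_inc() */
    /* ... read-side critical section ... */
    percpu_up_read(&demo_sem);		/* fast path: this_cpu_dec() */

    /* This pairing is now safe even when the up_read() half runs in an
     * IRQ handler on the same CPU, as with file_end_write(). */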