Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and misc x86 updates from Ingo Molnar:
"Lots of changes in this cycle - in part because locking/core attracted
a number of related x86 low level work which was easier to handle in a
single tree:
- Linux Kernel Memory Consistency Model updates (Alan Stern, Paul E.
McKenney, Andrea Parri)
- lockdep scalability improvements and micro-optimizations (Waiman
Long)
- rwsem improvements (Waiman Long)
- spinlock micro-optimization (Matthew Wilcox)
- qspinlocks: Provide a liveness guarantee (more fairness) on x86.
(Peter Zijlstra)
- Add support for relative references in jump tables on arm64, x86
and s390 to optimize jump labels (Ard Biesheuvel, Heiko Carstens)
- Be a lot less permissive on weird (kernel address) uaccess faults
on x86: BUG() when uaccess helpers fault on kernel addresses (Jann
Horn)
- macrofy x86 asm statements to un-confuse the GCC inliner. (Nadav
Amit)
- ... and a handful of other smaller changes as well"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (57 commits)
locking/lockdep: Make global debug_locks* variables read-mostly
locking/lockdep: Fix debug_locks off performance problem
locking/pvqspinlock: Extend node size when pvqspinlock is configured
locking/qspinlock_stat: Count instances of nested lock slowpaths
locking/qspinlock, x86: Provide liveness guarantee
x86/asm: 'Simplify' GEN_*_RMWcc() macros
locking/qspinlock: Rework some comments
locking/qspinlock: Re-order code
locking/lockdep: Remove duplicated 'lock_class_ops' percpu array
x86/defconfig: Enable CONFIG_USB_XHCI_HCD=y
futex: Replace spin_is_locked() with lockdep
locking/lockdep: Make class->ops a percpu counter and move it under CONFIG_DEBUG_LOCKDEP=y
x86/jump-labels: Macrofy inline assembly code to work around GCC inlining bugs
x86/cpufeature: Macrofy inline assembly code to work around GCC inlining bugs
x86/extable: Macrofy inline assembly code to work around GCC inlining bugs
x86/paravirt: Work around GCC inlining bugs when compiling paravirt ops
x86/bug: Macrofy the BUG table section handling, to work around GCC inlining bugs
x86/alternatives: Macrofy lock prefixes to work around GCC inlining bugs
x86/refcount: Work around GCC inlining bug
x86/objtool: Use asm macros to work around GCC inlining bugs
...
@@ -138,7 +138,7 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
  * get freed - this significantly simplifies the debugging code.
  */
 unsigned long nr_lock_classes;
-static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
+struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
 static inline struct lock_class *hlock_class(struct held_lock *hlock)
 {
@@ -1391,7 +1391,9 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 
 	printk("%*s->", depth, "");
 	print_lock_name(class);
-	printk(KERN_CONT " ops: %lu", class->ops);
+#ifdef CONFIG_DEBUG_LOCKDEP
+	printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
+#endif
 	printk(KERN_CONT " {\n");
 
 	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
@@ -2147,76 +2149,6 @@ static int check_no_collision(struct task_struct *curr,
 	return 1;
 }
 
-/*
- * This is for building a chain between just two different classes,
- * instead of adding a new hlock upon current, which is done by
- * add_chain_cache().
- *
- * This can be called in any context with two classes, while
- * add_chain_cache() must be done within the lock owener's context
- * since it uses hlock which might be racy in another context.
- */
-static inline int add_chain_cache_classes(unsigned int prev,
-					  unsigned int next,
-					  unsigned int irq_context,
-					  u64 chain_key)
-{
-	struct hlist_head *hash_head = chainhashentry(chain_key);
-	struct lock_chain *chain;
-
-	/*
-	 * Allocate a new chain entry from the static array, and add
-	 * it to the hash:
-	 */
-
-	/*
-	 * We might need to take the graph lock, ensure we've got IRQs
-	 * disabled to make this an IRQ-safe lock.. for recursion reasons
-	 * lockdep won't complain about its own locking errors.
-	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-		return 0;
-
-	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
-		if (!debug_locks_off_graph_unlock())
-			return 0;
-
-		print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
-		dump_stack();
-		return 0;
-	}
-
-	chain = lock_chains + nr_lock_chains++;
-	chain->chain_key = chain_key;
-	chain->irq_context = irq_context;
-	chain->depth = 2;
-	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
-		chain->base = nr_chain_hlocks;
-		nr_chain_hlocks += chain->depth;
-		chain_hlocks[chain->base] = prev - 1;
-		chain_hlocks[chain->base + 1] = next -1;
-	}
-#ifdef CONFIG_DEBUG_LOCKDEP
-	/*
-	 * Important for check_no_collision().
-	 */
-	else {
-		if (!debug_locks_off_graph_unlock())
-			return 0;
-
-		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
-		dump_stack();
-		return 0;
-	}
-#endif
-
-	hlist_add_head_rcu(&chain->entry, hash_head);
-	debug_atomic_inc(chain_lookup_misses);
-	inc_chains();
-
-	return 1;
-}
-
 /*
  * Adds a dependency chain into chain hashtable. And must be called with
  * graph_lock held.
@@ -3262,6 +3194,10 @@ static int __lock_is_held(const struct lockdep_map *lock, int read);
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
  * We maintain the dependency maps and validate the locking attempt:
+ *
+ * The callers must make sure that IRQs are disabled before calling it,
+ * otherwise we could get an interrupt which would want to take locks,
+ * which would end up in lockdep again.
  */
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 			  int trylock, int read, int check, int hardirqs_off,
@@ -3279,14 +3215,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (unlikely(!debug_locks))
 		return 0;
 
-	/*
-	 * Lockdep should run with IRQs disabled, otherwise we could
-	 * get an interrupt which would want to take locks, which would
-	 * end up in lockdep and have you got a head-ache already?
-	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-		return 0;
-
 	if (!prove_locking || lock->key == &__lockdep_no_validate__)
 		check = 0;
 
@@ -3300,7 +3228,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		if (!class)
 			return 0;
 	}
-	atomic_inc((atomic_t *)&class->ops);
+
+	debug_class_ops_inc(class);
+
 	if (very_verbose(class)) {
 		printk("\nacquire class [%px] %s", class->key, class->name);
 		if (class->name_version > 1)
@@ -3543,6 +3473,9 @@ static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
 {
 	struct held_lock *hlock;
 
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+		return 0;
+
 	for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
 		if (!__lock_acquire(hlock->instance,
 				    hlock_class(hlock)->subclass,
@@ -3696,6 +3629,13 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	curr->lockdep_depth = i;
 	curr->curr_chain_key = hlock->prev_chain_key;
 
+	/*
+	 * The most likely case is when the unlock is on the innermost
+	 * lock. In this case, we are done!
+	 */
+	if (i == depth-1)
+		return 1;
+
 	if (reacquire_held_locks(curr, depth, i + 1))
 		return 0;
 
@@ -3703,10 +3643,14 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	 * We had N bottles of beer on the wall, we drank one, but now
 	 * there's not N-1 bottles of beer left on the wall...
 	 */
-	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
-		return 0;
+	DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth-1);
 
-	return 1;
+	/*
+	 * Since reacquire_held_locks() would have called check_chain_key()
+	 * indirectly via __lock_acquire(), we don't need to do it again
+	 * on return.
+	 */
+	return 0;
 }
 
 static int __lock_is_held(const struct lockdep_map *lock, int read)
@@ -4122,7 +4066,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
-	if (unlikely(!lock_stat))
+	if (unlikely(!lock_stat || !debug_locks))
 		return;
 
 	if (unlikely(current->lockdep_recursion))
@@ -4142,7 +4086,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
-	if (unlikely(!lock_stat))
+	if (unlikely(!lock_stat || !debug_locks))
 		return;
 
 	if (unlikely(current->lockdep_recursion))
@@ -152,9 +152,15 @@ struct lockdep_stats {
 	int	nr_find_usage_forwards_recursions;
 	int	nr_find_usage_backwards_checks;
 	int	nr_find_usage_backwards_recursions;
+
+	/*
+	 * Per lock class locking operation stat counts
+	 */
+	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
 };
 
 DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
+extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
 #define __debug_atomic_inc(ptr) \
 	this_cpu_inc(lockdep_stats.ptr);
@@ -179,9 +185,30 @@ DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
 	} \
 	__total; \
 })
+
+static inline void debug_class_ops_inc(struct lock_class *class)
+{
+	int idx;
+
+	idx = class - lock_classes;
+	__debug_atomic_inc(lock_class_ops[idx]);
+}
+
+static inline unsigned long debug_class_ops_read(struct lock_class *class)
+{
+	int idx, cpu;
+	unsigned long ops = 0;
+
+	idx = class - lock_classes;
+	for_each_possible_cpu(cpu)
+		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
+	return ops;
+}
+
 #else
 # define __debug_atomic_inc(ptr)	do { } while (0)
 # define debug_atomic_inc(ptr)		do { } while (0)
 # define debug_atomic_dec(ptr)		do { } while (0)
 # define debug_atomic_read(ptr)	0
+# define debug_class_ops_inc(ptr)	do { } while (0)
 #endif
@@ -68,7 +68,7 @@ static int l_show(struct seq_file *m, void *v)
 
 	seq_printf(m, "%p", class->key);
 #ifdef CONFIG_DEBUG_LOCKDEP
-	seq_printf(m, " OPS:%8ld", class->ops);
+	seq_printf(m, " OPS:%8ld", debug_class_ops_read(class));
 #endif
 #ifdef CONFIG_PROVE_LOCKING
 	seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
@@ -74,12 +74,24 @@
  */
 
 #include "mcs_spinlock.h"
-
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-#define MAX_NODES	8
-#else
 #define MAX_NODES	4
+
+/*
+ * On 64-bit architectures, the mcs_spinlock structure will be 16 bytes in
+ * size and four of them will fit nicely in one 64-byte cacheline. For
+ * pvqspinlock, however, we need more space for extra data. To accommodate
+ * that, we insert two more long words to pad it up to 32 bytes. IOW, only
+ * two of them can fit in a cacheline in this case. That is OK as it is rare
+ * to have more than 2 levels of slowpath nesting in actual use. We don't
+ * want to penalize pvqspinlocks to optimize for a rare case in native
+ * qspinlocks.
+ */
+struct qnode {
+	struct mcs_spinlock mcs;
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+	long reserved[2];
 #endif
+};
 
 /*
  * The pending bit spinning loop count.
@@ -101,7 +113,7 @@
  *
  * PV doubles the storage and uses the second cacheline for PV state.
  */
-static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
+static DEFINE_PER_CPU_ALIGNED(struct qnode, qnodes[MAX_NODES]);
 
 /*
  * We must be able to distinguish between no-tail and the tail at 0:0,
@@ -126,7 +138,13 @@ static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
 	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
 	int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
 
-	return per_cpu_ptr(&mcs_nodes[idx], cpu);
+	return per_cpu_ptr(&qnodes[idx].mcs, cpu);
 }
 
+static inline __pure
+struct mcs_spinlock *grab_mcs_node(struct mcs_spinlock *base, int idx)
+{
+	return &((struct qnode *)base + idx)->mcs;
+}
+
 #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
@@ -231,6 +249,20 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
 }
 #endif /* _Q_PENDING_BITS == 8 */
 
+/**
+ * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
+ * @lock : Pointer to queued spinlock structure
+ * Return: The previous lock value
+ *
+ * *,*,* -> *,1,*
+ */
+#ifndef queued_fetch_set_pending_acquire
+static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
+{
+	return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
+}
+#endif
+
 /**
  * set_locked - Set the lock bit and own the lock
  * @lock: Pointer to queued spinlock structure
@@ -326,43 +358,48 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	/*
 	 * trylock || pending
 	 *
-	 * 0,0,0 -> 0,0,1 ; trylock
-	 * 0,0,1 -> 0,1,1 ; pending
+	 * 0,0,* -> 0,1,* -> 0,0,1 pending, trylock
 	 */
-	val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
-	if (!(val & ~_Q_LOCKED_MASK)) {
-		/*
-		 * We're pending, wait for the owner to go away.
-		 *
-		 * *,1,1 -> *,1,0
-		 *
-		 * this wait loop must be a load-acquire such that we match the
-		 * store-release that clears the locked bit and create lock
-		 * sequentiality; this is because not all
-		 * clear_pending_set_locked() implementations imply full
-		 * barriers.
-		 */
-		if (val & _Q_LOCKED_MASK) {
-			atomic_cond_read_acquire(&lock->val,
-						 !(VAL & _Q_LOCKED_MASK));
-		}
+	val = queued_fetch_set_pending_acquire(lock);
 
-		/*
-		 * take ownership and clear the pending bit.
-		 *
-		 * *,1,0 -> *,0,1
-		 */
-		clear_pending_set_locked(lock);
-		qstat_inc(qstat_lock_pending, true);
-		return;
+	/*
+	 * If we observe contention, there is a concurrent locker.
+	 *
+	 * Undo and queue; our setting of PENDING might have made the
+	 * n,0,0 -> 0,0,0 transition fail and it will now be waiting
+	 * on @next to become !NULL.
+	 */
+	if (unlikely(val & ~_Q_LOCKED_MASK)) {
+
+		/* Undo PENDING if we set it. */
+		if (!(val & _Q_PENDING_MASK))
+			clear_pending(lock);
+
+		goto queue;
 	}
 
 	/*
-	 * If pending was clear but there are waiters in the queue, then
-	 * we need to undo our setting of pending before we queue ourselves.
+	 * We're pending, wait for the owner to go away.
+	 *
+	 * 0,1,1 -> 0,1,0
+	 *
+	 * this wait loop must be a load-acquire such that we match the
+	 * store-release that clears the locked bit and create lock
+	 * sequentiality; this is because not all
+	 * clear_pending_set_locked() implementations imply full
+	 * barriers.
 	 */
-	if (!(val & _Q_PENDING_MASK))
-		clear_pending(lock);
+	if (val & _Q_LOCKED_MASK)
+		atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK));
+
+	/*
+	 * take ownership and clear the pending bit.
+	 *
+	 * 0,1,0 -> 0,0,1
+	 */
+	clear_pending_set_locked(lock);
+	qstat_inc(qstat_lock_pending, true);
+	return;
 
 	/*
 	 * End of pending bit optimistic spinning and beginning of MCS
@@ -371,11 +408,16 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 queue:
 	qstat_inc(qstat_lock_slowpath, true);
 pv_queue:
-	node = this_cpu_ptr(&mcs_nodes[0]);
+	node = this_cpu_ptr(&qnodes[0].mcs);
 	idx = node->count++;
 	tail = encode_tail(smp_processor_id(), idx);
 
-	node += idx;
+	node = grab_mcs_node(node, idx);
 
+	/*
+	 * Keep counts of non-zero index values:
+	 */
+	qstat_inc(qstat_lock_idx1 + idx - 1, idx);
+
 	/*
 	 * Ensure that we increment the head node->count before initialising
@@ -476,16 +518,25 @@ locked:
 	 */
 
 	/*
-	 * In the PV case we might already have _Q_LOCKED_VAL set.
+	 * In the PV case we might already have _Q_LOCKED_VAL set, because
+	 * of lock stealing; therefore we must also allow:
 	 *
-	 * The atomic_cond_read_acquire() call above has provided the
-	 * necessary acquire semantics required for locking.
+	 * n,0,1 -> 0,0,1
+	 *
+	 * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the
+	 * above wait condition, therefore any concurrent setting of
+	 * PENDING will make the uncontended transition fail.
 	 */
-	if (((val & _Q_TAIL_MASK) == tail) &&
-	    atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
-		goto release; /* No contention */
+	if ((val & _Q_TAIL_MASK) == tail) {
+		if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
+			goto release; /* No contention */
+	}
 
-	/* Either somebody is queued behind us or _Q_PENDING_VAL is set */
+	/*
+	 * Either somebody is queued behind us or _Q_PENDING_VAL got set
+	 * which will then detect the remaining tail and queue behind us
+	 * ensuring we'll see a @next.
+	 */
 	set_locked(lock);
 
 	/*
@@ -501,7 +552,7 @@ release:
 	/*
 	 * release the node
 	 */
-	__this_cpu_dec(mcs_nodes[0].count);
+	__this_cpu_dec(qnodes[0].mcs.count);
 }
 EXPORT_SYMBOL(queued_spin_lock_slowpath);
 
@@ -49,8 +49,6 @@ enum vcpu_state {
 
 struct pv_node {
 	struct mcs_spinlock	mcs;
-	struct mcs_spinlock	__res[3];
-
 	int			cpu;
 	u8			state;
 };
@@ -281,7 +279,7 @@ static void pv_init_node(struct mcs_spinlock *node)
 {
 	struct pv_node *pn = (struct pv_node *)node;
 
-	BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(struct mcs_spinlock));
+	BUILD_BUG_ON(sizeof(struct pv_node) > sizeof(struct qnode));
 
 	pn->cpu = smp_processor_id();
 	pn->state = vcpu_running;
@@ -55,6 +55,9 @@ enum qlock_stats {
 	qstat_pv_wait_node,
 	qstat_lock_pending,
 	qstat_lock_slowpath,
+	qstat_lock_idx1,
+	qstat_lock_idx2,
+	qstat_lock_idx3,
 	qstat_num,	/* Total number of statistical counters */
 	qstat_reset_cnts = qstat_num,
 };
@@ -82,6 +85,9 @@ static const char * const qstat_names[qstat_num + 1] = {
 	[qstat_pv_wait_node]	   = "pv_wait_node",
 	[qstat_lock_pending]       = "lock_pending",
 	[qstat_lock_slowpath]      = "lock_slowpath",
+	[qstat_lock_idx1]	   = "lock_index1",
+	[qstat_lock_idx2]	   = "lock_index2",
+	[qstat_lock_idx3]	   = "lock_index3",
 	[qstat_reset_cnts]         = "reset_counters",
 };
 
@@ -1485,9 +1485,9 @@ void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
 	__rt_mutex_lock(lock, subclass);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
-#endif
 
-#ifndef CONFIG_DEBUG_LOCK_ALLOC
+#else /* !CONFIG_DEBUG_LOCK_ALLOC */
+
 /**
  * rt_mutex_lock - lock a rt_mutex
  *
@@ -180,7 +180,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 		 * but it gives the spinners an early indication that the
 		 * readers now have the lock.
 		 */
-		rwsem_set_reader_owned(sem);
+		__rwsem_set_reader_owned(sem, waiter->task);
 	}
 
 	/*
@@ -233,8 +233,19 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
 	waiter.type = RWSEM_WAITING_FOR_READ;
 
 	raw_spin_lock_irq(&sem->wait_lock);
-	if (list_empty(&sem->wait_list))
+	if (list_empty(&sem->wait_list)) {
+		/*
+		 * In case the wait queue is empty and the lock isn't owned
+		 * by a writer, this reader can exit the slowpath and return
+		 * immediately as its RWSEM_ACTIVE_READ_BIAS has already
+		 * been set in the count.
+		 */
+		if (atomic_long_read(&sem->count) >= 0) {
+			raw_spin_unlock_irq(&sem->wait_lock);
+			return sem;
+		}
 		adjustment += RWSEM_WAITING_BIAS;
+	}
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we're now waiting on the lock, but no longer actively locking */
@@ -117,8 +117,9 @@ EXPORT_SYMBOL(down_write_trylock);
 void up_read(struct rw_semaphore *sem)
 {
 	rwsem_release(&sem->dep_map, 1, _RET_IP_);
-	DEBUG_RWSEMS_WARN_ON(sem->owner != RWSEM_READER_OWNED);
+	DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED));
 
+	rwsem_clear_reader_owned(sem);
 	__up_read(sem);
 }
 
@@ -181,7 +182,7 @@ void down_read_non_owner(struct rw_semaphore *sem)
 	might_sleep();
 
 	__down_read(sem);
-	rwsem_set_reader_owned(sem);
+	__rwsem_set_reader_owned(sem, NULL);
 }
 
 EXPORT_SYMBOL(down_read_non_owner);
@@ -215,7 +216,7 @@ EXPORT_SYMBOL(down_write_killable_nested);
 
 void up_read_non_owner(struct rw_semaphore *sem)
 {
-	DEBUG_RWSEMS_WARN_ON(sem->owner != RWSEM_READER_OWNED);
+	DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED));
 	__up_read(sem);
 }
 
@@ -1,24 +1,30 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * The owner field of the rw_semaphore structure will be set to
- * RWSEM_READER_OWNED when a reader grabs the lock. A writer will clear
- * the owner field when it unlocks. A reader, on the other hand, will
- * not touch the owner field when it unlocks.
+ * The least significant 2 bits of the owner value has the following
+ * meanings when set.
+ *  - RWSEM_READER_OWNED (bit 0): The rwsem is owned by readers
+ *  - RWSEM_ANONYMOUSLY_OWNED (bit 1): The rwsem is anonymously owned,
+ *    i.e. the owner(s) cannot be readily determined. It can be reader
+ *    owned or the owning writer is indeterminate.
  *
- * In essence, the owner field now has the following 4 states:
- *  1) 0
- *     - lock is free or the owner hasn't set the field yet
- *  2) RWSEM_READER_OWNED
- *     - lock is currently or previously owned by readers (lock is free
- *       or not set by owner yet)
- *  3) RWSEM_ANONYMOUSLY_OWNED bit set with some other bits set as well
- *     - lock is owned by an anonymous writer, so spinning on the lock
- *       owner should be disabled.
- *  4) Other non-zero value
- *     - a writer owns the lock and other writers can spin on the lock owner.
+ * When a writer acquires a rwsem, it puts its task_struct pointer
+ * into the owner field. It is cleared after an unlock.
+ *
+ * When a reader acquires a rwsem, it will also puts its task_struct
+ * pointer into the owner field with both the RWSEM_READER_OWNED and
+ * RWSEM_ANONYMOUSLY_OWNED bits set. On unlock, the owner field will
+ * largely be left untouched. So for a free or reader-owned rwsem,
+ * the owner value may contain information about the last reader that
+ * acquires the rwsem. The anonymous bit is set because that particular
+ * reader may or may not still own the lock.
+ *
+ * That information may be helpful in debugging cases where the system
+ * seems to hang on a reader owned rwsem especially if only one reader
+ * is involved. Ideally we would like to track all the readers that own
+ * a rwsem, but the overhead is simply too big.
  */
-#define RWSEM_ANONYMOUSLY_OWNED	(1UL << 0)
-#define RWSEM_READER_OWNED	((struct task_struct *)RWSEM_ANONYMOUSLY_OWNED)
+#define RWSEM_READER_OWNED	(1UL << 0)
+#define RWSEM_ANONYMOUSLY_OWNED	(1UL << 1)
 
 #ifdef CONFIG_DEBUG_RWSEMS
 # define DEBUG_RWSEMS_WARN_ON(c)	DEBUG_LOCKS_WARN_ON(c)
@@ -44,15 +50,26 @@ static inline void rwsem_clear_owner(struct rw_semaphore *sem)
 	WRITE_ONCE(sem->owner, NULL);
 }
 
+/*
+ * The task_struct pointer of the last owning reader will be left in
+ * the owner field.
+ *
+ * Note that the owner value just indicates the task has owned the rwsem
+ * previously, it may not be the real owner or one of the real owners
+ * anymore when that field is examined, so take it with a grain of salt.
+ */
+static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
+					    struct task_struct *owner)
+{
+	unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED
+						 | RWSEM_ANONYMOUSLY_OWNED;
+
+	WRITE_ONCE(sem->owner, (struct task_struct *)val);
+}
+
 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
 {
-	/*
-	 * We check the owner value first to make sure that we will only
-	 * do a write to the rwsem cacheline when it is really necessary
-	 * to minimize cacheline contention.
-	 */
-	if (READ_ONCE(sem->owner) != RWSEM_READER_OWNED)
-		WRITE_ONCE(sem->owner, RWSEM_READER_OWNED);
+	__rwsem_set_reader_owned(sem, current);
 }
 
 /*
@@ -72,6 +89,25 @@ static inline bool rwsem_has_anonymous_owner(struct task_struct *owner)
 {
 	return (unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED;
 }
+
+#ifdef CONFIG_DEBUG_RWSEMS
+/*
+ * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
+ * is a task pointer in owner of a reader-owned rwsem, it will be the
+ * real owner or one of the real owners. The only exception is when the
+ * unlock is done by up_read_non_owner().
+ */
+#define rwsem_clear_reader_owned rwsem_clear_reader_owned
+static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
+{
+	unsigned long val = (unsigned long)current | RWSEM_READER_OWNED
+						   | RWSEM_ANONYMOUSLY_OWNED;
+	if (READ_ONCE(sem->owner) == (struct task_struct *)val)
+		cmpxchg_relaxed((unsigned long *)&sem->owner, val,
+				RWSEM_READER_OWNED | RWSEM_ANONYMOUSLY_OWNED);
+}
+#endif
+
 #else
 static inline void rwsem_set_owner(struct rw_semaphore *sem)
 {
@@ -81,7 +117,18 @@ static inline void rwsem_clear_owner(struct rw_semaphore *sem)
 {
 }
 
+static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
+					   struct task_struct *owner)
+{
+}
+
 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
 {
 }
 #endif
+
+#ifndef rwsem_clear_reader_owned
+static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
+{
+}
+#endif