mirror of https://github.com/torvalds/linux.git
1222109a53
Queued spinlock supports up to 4 levels of lock slowpath nesting: user
context, soft IRQ, hard IRQ and NMI. However, we are not sure how often
the nesting happens. So add 3 more per-CPU stat counters to track the
number of instances where the nesting index goes to 1, 2 and 3
respectively.

On a dual-socket 64-core 128-thread Zen server, the following were the
new stat counter values under different circumstances:

  State                             slowpath      index1   index2   index3
  -----                             --------      ------   ------   ------
  After bootup                        1,012,150       82        0        0
  After parallel build + perf-top   125,195,009       82        0        0

So the chance of having more than 2 levels of nesting is extremely low.

[ mingo: Minor changelog edits. ]

Signed-off-by: Waiman Long <longman@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Link: http://lkml.kernel.org/r/1539697507-28084-1-git-send-email-longman@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
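For orientation, the three new counters are driven from the lock slowpath in
qspinlock.c. A minimal sketch of the call site, based on the counter names and
the qstat_inc() helper defined in this file (idx and node->count are the
per-CPU MCS-node nesting fields in qspinlock.c; the surrounding slowpath code
is elided):

        idx = node->count++;
        ...
        /*
         * Count non-zero nesting index values: since qstat_inc(stat, cond)
         * only counts when cond is true, the common idx == 0 case costs
         * nothing, and idx 1..3 select lock_index1..lock_index3.
         */
        qstat_inc(qstat_lock_idx1 + idx - 1, idx);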
298 lines
7.9 KiB
C
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */

/*
 * When queued spinlock statistical counters are enabled, the following
 * debugfs files will be created for reporting the counter values:
 *
 * <debugfs>/qlockstat/
 *   pv_hash_hops       - average # of hops per hashing operation
 *   pv_kick_unlock     - # of vCPU kicks issued at unlock time
 *   pv_kick_wake       - # of vCPU kicks used for computing pv_latency_wake
 *   pv_latency_kick    - average latency (ns) of vCPU kick operation
 *   pv_latency_wake    - average latency (ns) from vCPU kick to wakeup
 *   pv_lock_stealing   - # of lock stealing operations
 *   pv_spurious_wakeup - # of spurious wakeups in non-head vCPUs
 *   pv_wait_again      - # of wait's after a queue head vCPU kick
 *   pv_wait_early      - # of early vCPU wait's
 *   pv_wait_head       - # of vCPU wait's at the queue head
 *   pv_wait_node       - # of vCPU wait's at a non-head queue node
 *   lock_pending       - # of locking operations via pending code
 *   lock_slowpath      - # of locking operations via MCS lock queue
 *   lock_index1        - # of slowpath entries with nesting index 1
 *   lock_index2        - # of slowpath entries with nesting index 2
 *   lock_index3        - # of slowpath entries with nesting index 3
 *
 * Writing to the "reset_counters" file will reset all the above counter
 * values.
 *
 * These statistical counters are implemented as per-cpu variables which are
 * summed and computed whenever the corresponding debugfs files are read. This
 * minimizes added overhead making the counters usable even in a production
 * environment.
 *
 * There may be slight difference between pv_kick_wake and pv_kick_unlock.
 */

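/*
 * Example: the counters can be read like ordinary files. A minimal,
 * hypothetical userspace reader (assuming debugfs is mounted at
 * /sys/kernel/debug and the kernel was built with
 * CONFIG_QUEUED_LOCK_STAT=y):
 *
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              char buf[64];
 *              ssize_t n;
 *              int fd = open("/sys/kernel/debug/qlockstat/lock_pending",
 *                            O_RDONLY);
 *
 *              if (fd < 0)
 *                      return 1;
 *              n = read(fd, buf, sizeof(buf) - 1);
 *              if (n > 0) {
 *                      buf[n] = '\0';      // counter value, newline-terminated
 *                      printf("lock_pending = %s", buf);
 *              }
 *              close(fd);
 *              return n > 0 ? 0 : 1;
 *      }
 */
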
enum qlock_stats {
        qstat_pv_hash_hops,
        qstat_pv_kick_unlock,
        qstat_pv_kick_wake,
        qstat_pv_latency_kick,
        qstat_pv_latency_wake,
        qstat_pv_lock_stealing,
        qstat_pv_spurious_wakeup,
        qstat_pv_wait_again,
        qstat_pv_wait_early,
        qstat_pv_wait_head,
        qstat_pv_wait_node,
        qstat_lock_pending,
        qstat_lock_slowpath,
        qstat_lock_idx1,
        qstat_lock_idx2,
        qstat_lock_idx3,
        qstat_num,      /* Total number of statistical counters */
        qstat_reset_cnts = qstat_num,
};

#ifdef CONFIG_QUEUED_LOCK_STAT
/*
 * Collect pvqspinlock statistics
 */
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/fs.h>

static const char * const qstat_names[qstat_num + 1] = {
        [qstat_pv_hash_hops]       = "pv_hash_hops",
        [qstat_pv_kick_unlock]     = "pv_kick_unlock",
        [qstat_pv_kick_wake]       = "pv_kick_wake",
        [qstat_pv_spurious_wakeup] = "pv_spurious_wakeup",
        [qstat_pv_latency_kick]    = "pv_latency_kick",
        [qstat_pv_latency_wake]    = "pv_latency_wake",
        [qstat_pv_lock_stealing]   = "pv_lock_stealing",
        [qstat_pv_wait_again]      = "pv_wait_again",
        [qstat_pv_wait_early]      = "pv_wait_early",
        [qstat_pv_wait_head]       = "pv_wait_head",
        [qstat_pv_wait_node]       = "pv_wait_node",
        [qstat_lock_pending]       = "lock_pending",
        [qstat_lock_slowpath]      = "lock_slowpath",
        [qstat_lock_idx1]          = "lock_index1",
        [qstat_lock_idx2]          = "lock_index2",
        [qstat_lock_idx3]          = "lock_index3",
        [qstat_reset_cnts]         = "reset_counters",
};

/*
 * Per-cpu counters
 */
static DEFINE_PER_CPU(unsigned long, qstats[qstat_num]);
static DEFINE_PER_CPU(u64, pv_kick_time);

/*
 * Function to read and return the qlock statistical counter values
 *
 * The following counters are handled specially:
 * 1. qstat_pv_latency_kick
 *    Average kick latency (ns) = pv_latency_kick/pv_kick_unlock
 * 2. qstat_pv_latency_wake
 *    Average wake latency (ns) = pv_latency_wake/pv_kick_wake
 * 3. qstat_pv_hash_hops
 *    Average hops/hash = pv_hash_hops/pv_kick_unlock
 */
static ssize_t qstat_read(struct file *file, char __user *user_buf,
                          size_t count, loff_t *ppos)
{
        char buf[64];
        int cpu, counter, len;
        u64 stat = 0, kicks = 0;

        /*
         * Get the counter ID stored in file->f_inode->i_private
         */
        counter = (long)file_inode(file)->i_private;

        if (counter >= qstat_num)
                return -EBADF;

        for_each_possible_cpu(cpu) {
                stat += per_cpu(qstats[counter], cpu);
                /*
                 * Need to sum additional counter for some of them
                 */
                switch (counter) {

                case qstat_pv_latency_kick:
                case qstat_pv_hash_hops:
                        kicks += per_cpu(qstats[qstat_pv_kick_unlock], cpu);
                        break;

                case qstat_pv_latency_wake:
                        kicks += per_cpu(qstats[qstat_pv_kick_wake], cpu);
                        break;
                }
        }

        if (counter == qstat_pv_hash_hops) {
                u64 frac = 0;

                if (kicks) {
                        frac = 100ULL * do_div(stat, kicks);
                        frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
                }

                /*
                 * Return a X.XX decimal number
                 */
                len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n", stat, frac);
        } else {
                /*
                 * Round to the nearest ns
                 */
                if ((counter == qstat_pv_latency_kick) ||
                    (counter == qstat_pv_latency_wake)) {
                        if (kicks)
                                stat = DIV_ROUND_CLOSEST_ULL(stat, kicks);
                }
                len = snprintf(buf, sizeof(buf) - 1, "%llu\n", stat);
        }

        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

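/*
 * Worked example of the pv_hash_hops fixed-point output above (editor's
 * illustration, not kernel code): with stat = 1225 total hops and
 * kicks = 500 unlock kicks, do_div(stat, kicks) leaves stat = 2 and
 * returns the remainder 225. Then frac = 100 * 225 = 22500, and
 * DIV_ROUND_CLOSEST_ULL(22500, 500) = 45, so the file reports "2.45",
 * i.e. an average of 2.45 hops per hashing operation.
 */
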
/*
 * Function to handle write request
 *
 * When counter = reset_cnts, reset all the counter values. Since the
 * counter updates aren't atomic, a counter that is concurrently being
 * incremented may escape the reset, so the counters are very likely,
 * but not guaranteed, to be all cleared.
 */
static ssize_t qstat_write(struct file *file, const char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        int cpu;

        /*
         * Get the counter ID stored in file->f_inode->i_private
         */
        if ((long)file_inode(file)->i_private != qstat_reset_cnts)
                return count;

        for_each_possible_cpu(cpu) {
                int i;
                unsigned long *ptr = per_cpu_ptr(qstats, cpu);

                for (i = 0 ; i < qstat_num; i++)
                        WRITE_ONCE(ptr[i], 0);
        }
        return count;
}

/*
 * Debugfs data structures
 */
static const struct file_operations fops_qstat = {
        .read = qstat_read,
        .write = qstat_write,
        .llseek = default_llseek,
};

/*
 * Initialize debugfs for the qspinlock statistical counters
 */
static int __init init_qspinlock_stat(void)
{
        struct dentry *d_qstat = debugfs_create_dir("qlockstat", NULL);
        int i;

        if (!d_qstat)
                goto out;

        /*
         * Create the debugfs files
         *
         * As reading from and writing to the stat files can be slow, only
         * root is allowed to do the read/write to limit impact to system
         * performance.
         */
        for (i = 0; i < qstat_num; i++)
                if (!debugfs_create_file(qstat_names[i], 0400, d_qstat,
                                         (void *)(long)i, &fops_qstat))
                        goto fail_undo;

        if (!debugfs_create_file(qstat_names[qstat_reset_cnts], 0200, d_qstat,
                                 (void *)(long)qstat_reset_cnts, &fops_qstat))
                goto fail_undo;

        return 0;
fail_undo:
        debugfs_remove_recursive(d_qstat);
out:
        pr_warn("Could not create 'qlockstat' debugfs entries\n");
        return -ENOMEM;
}
fs_initcall(init_qspinlock_stat);

/*
 * Increment the PV qspinlock statistical counters
 */
static inline void qstat_inc(enum qlock_stats stat, bool cond)
{
        if (cond)
                this_cpu_inc(qstats[stat]);
}

/*
 * PV hash hop count
 */
static inline void qstat_hop(int hopcnt)
{
        this_cpu_add(qstats[qstat_pv_hash_hops], hopcnt);
}

/*
 * Replacement function for pv_kick()
 */
static inline void __pv_kick(int cpu)
{
        u64 start = sched_clock();

        per_cpu(pv_kick_time, cpu) = start;
        pv_kick(cpu);
        this_cpu_add(qstats[qstat_pv_latency_kick], sched_clock() - start);
}

/*
 * Replacement function for pv_wait()
 */
static inline void __pv_wait(u8 *ptr, u8 val)
{
        u64 *pkick_time = this_cpu_ptr(&pv_kick_time);

        *pkick_time = 0;
        pv_wait(ptr, val);
        if (*pkick_time) {
                this_cpu_add(qstats[qstat_pv_latency_wake],
                             sched_clock() - *pkick_time);
                qstat_inc(qstat_pv_kick_wake, true);
        }
}

#define pv_kick(c)      __pv_kick(c)
#define pv_wait(p, v)   __pv_wait(p, v)

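/*
 * The two defines above make every subsequent pv_kick()/pv_wait() call
 * (notably in the paravirt qspinlock code compiled after this header)
 * go through the timing wrappers, while the wrappers themselves, being
 * defined before the macros, still bind to the real functions. A
 * minimal sketch of this wrap-by-macro-redefinition pattern, with
 * hypothetical names:
 *
 *      static inline void __timed_op(int arg)
 *      {
 *              u64 start = sched_clock();
 *
 *              op(arg);                // binds to the real op()
 *              account(sched_clock() - start);
 *      }
 *      #define op(arg) __timed_op(arg) // later callers get the wrapper
 */
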
#else /* CONFIG_QUEUED_LOCK_STAT */

static inline void qstat_inc(enum qlock_stats stat, bool cond) { }
static inline void qstat_hop(int hopcnt)                       { }

#endif /* CONFIG_QUEUED_LOCK_STAT */