linux-cpupower-5.5-rc1

This cpupower update for Linux 5.5-rc1 consists of bug fixes and
 improvements to make its measurements more accurate by removing the
 userspace-to-kernel transition and read_msr()-initiated IPI delays.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEPZKym/RZuOCGeA/kCwJExA0NQxwFAl3DZ5wACgkQCwJExA0N
 QxzJ3xAAoXBry7tZQdS5tqrNQG0kemxjjCmgGvnyGuAppFkjty6BgFV58B70NXBJ
 2BwE74KwMG9rKeugE63JS7Nq+U0b37mJ7UwwtjDRcWpUw2v7IXYJunPV+P3Cj3V/
 aNLuMBe+TLX4BLWuogpqQv3jTHlmwwB31AbuKKXnCaQVKmQc7OutPZ4jtDgx0Qh4
 O+1r/fMQkcZ2Z+XvPiomafcEYM7ZTY4pdjYFVJXMAOLmHhc8IBkNuAFfgIT61Qor
 mGXJizCLQrCik9NNgU+QcUSys7dN3lM9C/bEZLMFEVDzLVpi4TghsmBQX7449htr
 xz7nz2VyBfwQpILG/0IUBhfvA8/x2MhgD5oOzbv3BnIiVvAKTJ1G3PDNFHIFrq7T
 w9F6l8pW1ce+W8amNa49cpYZhYvDhY2Es4lMt2kvDVk4UhxehpCH2Wqu+hcPho7F
 DVtfXbpi89deV8+TmKJlXr/bWqCLer9FVAXZsxlN3cbZ8syfMnA40BaDMZMRG8pZ
 Cu8SxK+rbW7UavmGJYO61SxZfw0HQc1Lwg1/VKeTZPnvaJFQh3UbEBiiFO1YNjes
 60+we7WdOteHUAav4DU1s2nBT+/EThpHzJZr1FKVqW2Wz3dIu+SmLVWU6e5KY+r0
 gybaULqxweHYPCmUVKxlx6h+TGxV4HU2GmzmbVLD7VRk8vdtAP0=
 =wfXc
 -----END PGP SIGNATURE-----

Merge tag 'linux-cpupower-5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux

Pull cpupower utility updates for v5.5 from Shuah Khan:

"This cpupower update for Linux 5.5-rc1 consists of bug fixes and
 improvements to make its measurements more accurate by removing the
 userspace-to-kernel transition and read_msr()-initiated IPI delays."

* tag 'linux-cpupower-5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux:
  cpupower: ToDo: Update ToDo with ideas for per_cpu_schedule handling
  cpupower: mperf_monitor: Update cpupower to use the RDPRU instruction
  cpupower: mperf_monitor: Introduce per_cpu_schedule flag
  cpupower: Move needs_root variable into a sub-struct
  cpupower : Handle set and info subcommands correctly
  tools/power/cpupower: Fix initializer override in hsw_ext_cstates
commit 9581e24c3f
Rafael J. Wysocki, 2019-11-07 13:58:13 +01:00
13 changed files with 101 additions and 20 deletions

@@ -8,3 +8,17 @@ ToDos sorted by priority:
- Add another c1e debug idle monitor
-> Is by design racy with BIOS, but could be added
with a --force option and some "be careful" messages
- Add cpu_start()/cpu_stop() callbacks for monitor
-> This is to move the per_cpu logic from inside the
monitor to outside it. This can be given higher
priority in fork_it.
- Fork as many processes as there are CPUs in case the
per_cpu_schedule flag is set.
-> Bind forked process to each cpu.
-> Execute start measures via the forked processes on
each cpu.
-> Run test executable in a forked process.
-> Execute stop measures via the forked processes on
each cpu.
This would be ideal as it will not introduce noise in the
tested executable.
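As a rough illustration of the forking scheme described in the ToDo entry above (not the cpupower implementation), the following self-contained C program forks one measurement process per CPU, pins each with sched_setaffinity(2), runs the tested executable in its own forked process, and then tells the measurement processes to stop. start_measure()/stop_measure() are placeholder stubs, the pipe-based stop handshake is only one possible coordination mechanism, and "sleep 1" stands in for the tested executable:

/*
 * Sketch only: one measurement process per CPU, each pinned to its CPU,
 * with the tested executable running in a separate forked process.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

static void start_measure(int cpu) { printf("start measure on cpu %d\n", cpu); }
static void stop_measure(int cpu)  { printf("stop measure on cpu %d\n", cpu); }

static void measure_on_cpu(int cpu, int stop_fd)
{
    cpu_set_t set;
    char c;

    CPU_ZERO(&set);
    CPU_SET(cpu, &set);
    if (sched_setaffinity(0, sizeof(set), &set))    /* pin to one CPU */
        _exit(1);

    start_measure(cpu);
    if (read(stop_fd, &c, 1) < 0)   /* returns once the parent closes the pipe */
        _exit(1);
    stop_measure(cpu);
    _exit(0);
}

int main(void)
{
    int cpus = sysconf(_SC_NPROCESSORS_ONLN);
    int pipefd[2];
    pid_t test_pid;

    if (pipe(pipefd))
        return 1;

    for (int cpu = 0; cpu < cpus; cpu++) {
        if (fork() == 0) {
            close(pipefd[1]);               /* children keep only the read end */
            measure_on_cpu(cpu, pipefd[0]);
        }
    }

    test_pid = fork();
    if (test_pid == 0) {                    /* the tested executable */
        execlp("sleep", "sleep", "1", (char *)NULL);
        _exit(1);
    }
    waitpid(test_pid, NULL, 0);             /* wait for the test to finish */

    close(pipefd[1]);                       /* EOF tells the children to stop */
    while (wait(NULL) > 0)
        ;                                   /* reap the measurement children */
    return 0;
}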

@@ -10,6 +10,7 @@
#include <errno.h>
#include <string.h>
#include <getopt.h>
#include <sys/utsname.h>
#include "helpers/helpers.h"
#include "helpers/sysfs.h"
@@ -30,6 +31,7 @@ int cmd_info(int argc, char **argv)
extern char *optarg;
extern int optind, opterr, optopt;
unsigned int cpu;
struct utsname uts;
union {
struct {
@@ -39,6 +41,13 @@ int cmd_info(int argc, char **argv)
} params = {};
int ret = 0;
ret = uname(&uts);
if (!ret && (!strcmp(uts.machine, "ppc64le") ||
!strcmp(uts.machine, "ppc64"))) {
fprintf(stderr, _("Subcommand not supported on POWER.\n"));
return ret;
}
setlocale(LC_ALL, "");
textdomain(PACKAGE);

@@ -10,6 +10,7 @@
#include <errno.h>
#include <string.h>
#include <getopt.h>
#include <sys/utsname.h>
#include "helpers/helpers.h"
#include "helpers/sysfs.h"
@@ -31,6 +32,7 @@ int cmd_set(int argc, char **argv)
extern char *optarg;
extern int optind, opterr, optopt;
unsigned int cpu;
struct utsname uts;
union {
struct {
@@ -41,6 +43,13 @@ int cmd_set(int argc, char **argv)
int perf_bias = 0;
int ret = 0;
ret = uname(&uts);
if (!ret && (!strcmp(uts.machine, "ppc64le") ||
!strcmp(uts.machine, "ppc64"))) {
fprintf(stderr, _("Subcommand not supported on POWER.\n"));
return ret;
}
setlocale(LC_ALL, "");
textdomain(PACKAGE);

@@ -131,6 +131,10 @@ out:
if (ext_cpuid_level >= 0x80000007 &&
(cpuid_edx(0x80000007) & (1 << 9)))
cpu_info->caps |= CPUPOWER_CAP_AMD_CBP;
if (ext_cpuid_level >= 0x80000008 &&
cpuid_ebx(0x80000008) & (1 << 4))
cpu_info->caps |= CPUPOWER_CAP_AMD_RDPRU;
}
if (cpu_info->vendor == X86_VENDOR_INTEL) {
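For reference, the same capability bit (CPUID leaf 0x80000008, EBX bit 4) can be queried from a stand-alone program through the compiler's <cpuid.h> helper; a minimal sketch, assuming GCC or clang on x86:

/* Stand-alone check for AMD RDPRU support: CPUID leaf 0x80000008, EBX bit 4 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* __get_cpuid() returns 0 when the requested leaf is not available */
    if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx) && (ebx & (1 << 4)))
        puts("RDPRU supported");
    else
        puts("RDPRU not supported");
    return 0;
}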

@@ -69,6 +69,7 @@ enum cpupower_cpu_vendor {X86_VENDOR_UNKNOWN = 0, X86_VENDOR_INTEL,
#define CPUPOWER_CAP_HAS_TURBO_RATIO 0x00000010
#define CPUPOWER_CAP_IS_SNB 0x00000020
#define CPUPOWER_CAP_INTEL_IDA 0x00000040
#define CPUPOWER_CAP_AMD_RDPRU 0x00000080
#define CPUPOWER_AMD_CPBDIS 0x02000000

@@ -328,7 +328,7 @@ struct cpuidle_monitor amd_fam14h_monitor = {
.stop = amd_fam14h_stop,
.do_register = amd_fam14h_register,
.unregister = amd_fam14h_unregister,
.needs_root = 1,
.flags.needs_root = 1,
.overflow_s = OVERFLOW_MS / 1000,
};
#endif /* #if defined(__i386__) || defined(__x86_64__) */

@@ -207,6 +207,6 @@ struct cpuidle_monitor cpuidle_sysfs_monitor = {
.stop = cpuidle_stop,
.do_register = cpuidle_register,
.unregister = cpuidle_unregister,
.needs_root = 0,
.flags.needs_root = 0,
.overflow_s = UINT_MAX,
};

@@ -408,7 +408,7 @@ int cmd_monitor(int argc, char **argv)
dprint("Try to register: %s\n", all_monitors[num]->name);
test_mon = all_monitors[num]->do_register();
if (test_mon) {
if (test_mon->needs_root && !run_as_root) {
if (test_mon->flags.needs_root && !run_as_root) {
fprintf(stderr, _("Available monitor %s needs "
"root access\n"), test_mon->name);
continue;

@@ -60,7 +60,10 @@ struct cpuidle_monitor {
struct cpuidle_monitor* (*do_register) (void);
void (*unregister)(void);
unsigned int overflow_s;
int needs_root;
struct {
unsigned int needs_root:1;
unsigned int per_cpu_schedule:1;
} flags;
};
extern long long timespec_diff_us(struct timespec start, struct timespec end);
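A small self-contained illustration (not cpupower code; the type and names are invented) of the chained designated initializer that the monitor definitions below use for the new flags sub-struct:

#include <stdio.h>

struct monitor {
    const char *name;
    struct {
        unsigned int needs_root:1;
        unsigned int per_cpu_schedule:1;
    } flags;
};

static struct monitor demo_monitor = {
    .name = "demo",
    .flags.needs_root = 1,  /* chained designator into the nested struct */
};

int main(void)
{
    /* per_cpu_schedule can still be set at run time, as mperf_register() does */
    demo_monitor.flags.per_cpu_schedule = 1;
    printf("%s: needs_root=%d per_cpu_schedule=%d\n", demo_monitor.name,
           demo_monitor.flags.needs_root, demo_monitor.flags.per_cpu_schedule);
    return 0;
}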

@@ -39,7 +39,6 @@ static cstate_t hsw_ext_cstates[HSW_EXT_CSTATE_COUNT] = {
{
.name = "PC9",
.desc = N_("Processor Package C9"),
.desc = N_("Processor Package C2"),
.id = PC9,
.range = RANGE_PACKAGE,
.get_count_percent = hsw_ext_get_count_percent,
@@ -188,7 +187,7 @@ struct cpuidle_monitor intel_hsw_ext_monitor = {
.stop = hsw_ext_stop,
.do_register = hsw_ext_register,
.unregister = hsw_ext_unregister,
.needs_root = 1,
.flags.needs_root = 1,
.overflow_s = 922000000 /* 922337203 seconds TSC overflow
at 20GHz */
};

@@ -19,6 +19,10 @@
#define MSR_APERF 0xE8
#define MSR_MPERF 0xE7
#define RDPRU ".byte 0x0f, 0x01, 0xfd"
#define RDPRU_ECX_MPERF 0
#define RDPRU_ECX_APERF 1
#define MSR_TSC 0x10
#define MSR_AMD_HWCR 0xc0010015
@@ -86,15 +90,51 @@ static int mperf_get_tsc(unsigned long long *tsc)
return ret;
}
static int mperf_init_stats(unsigned int cpu)
static int get_aperf_mperf(int cpu, unsigned long long *aval,
unsigned long long *mval)
{
unsigned long long val;
unsigned long low_a, high_a;
unsigned long low_m, high_m;
int ret;
ret = read_msr(cpu, MSR_APERF, &val);
aperf_previous_count[cpu] = val;
ret |= read_msr(cpu, MSR_MPERF, &val);
mperf_previous_count[cpu] = val;
/*
* Running on the cpu from which we read the registers will
* prevent APERF/MPERF from going out of sync because of IPI
* latency introduced by read_msr()s.
*/
if (mperf_monitor.flags.per_cpu_schedule) {
if (bind_cpu(cpu))
return 1;
}
if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_RDPRU) {
asm volatile(RDPRU
: "=a" (low_a), "=d" (high_a)
: "c" (RDPRU_ECX_APERF));
asm volatile(RDPRU
: "=a" (low_m), "=d" (high_m)
: "c" (RDPRU_ECX_MPERF));
*aval = ((low_a) | (high_a) << 32);
*mval = ((low_m) | (high_m) << 32);
return 0;
}
ret = read_msr(cpu, MSR_APERF, aval);
ret |= read_msr(cpu, MSR_MPERF, mval);
return ret;
}
static int mperf_init_stats(unsigned int cpu)
{
unsigned long long aval, mval;
int ret;
ret = get_aperf_mperf(cpu, &aval, &mval);
aperf_previous_count[cpu] = aval;
mperf_previous_count[cpu] = mval;
is_valid[cpu] = !ret;
return 0;
@@ -102,13 +142,12 @@ static int mperf_init_stats(unsigned int cpu)
static int mperf_measure_stats(unsigned int cpu)
{
unsigned long long val;
unsigned long long aval, mval;
int ret;
ret = read_msr(cpu, MSR_APERF, &val);
aperf_current_count[cpu] = val;
ret |= read_msr(cpu, MSR_MPERF, &val);
mperf_current_count[cpu] = val;
ret = get_aperf_mperf(cpu, &aval, &mval);
aperf_current_count[cpu] = aval;
mperf_current_count[cpu] = mval;
is_valid[cpu] = !ret;
return 0;
@@ -305,6 +344,9 @@ struct cpuidle_monitor *mperf_register(void)
if (init_maxfreq_mode())
return NULL;
if (cpupower_cpu_info.vendor == X86_VENDOR_AMD)
mperf_monitor.flags.per_cpu_schedule = 1;
/* Free this at program termination */
is_valid = calloc(cpu_count, sizeof(int));
mperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
@@ -333,7 +375,7 @@ struct cpuidle_monitor mperf_monitor = {
.stop = mperf_stop,
.do_register = mperf_register,
.unregister = mperf_unregister,
.needs_root = 1,
.flags.needs_root = 1,
.overflow_s = 922000000 /* 922337203 seconds TSC overflow
at 20GHz */
};
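For context, an mperf-style monitor turns these counters into statistics by taking deltas over the measurement interval: MPERF ticks at the TSC rate only while the CPU is in C0 and APERF counts actual C0 cycles, so C0 residency is mperf_diff / tsc_diff and the average frequency while in C0 is max_freq * aperf_diff / mperf_diff. A toy calculation with made-up delta values (all names here are placeholders):

#include <stdio.h>

int main(void)
{
    unsigned long long tsc_diff   = 2000000000ULL;  /* TSC ticks in the interval */
    unsigned long long mperf_diff = 1000000000ULL;  /* ticks at TSC rate while in C0 */
    unsigned long long aperf_diff = 1500000000ULL;  /* actual cycles while in C0 */
    unsigned long long max_khz    = 2000000ULL;     /* reference (max) frequency in kHz */

    double c0_percent = 100.0 * mperf_diff / tsc_diff;
    unsigned long long avg_khz = max_khz * aperf_diff / mperf_diff;

    printf("C0 residency: %.2f%%\n", c0_percent);            /* 50.00% */
    printf("Average frequency in C0: %llu kHz\n", avg_khz);  /* 3000000 kHz */
    return 0;
}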

@@ -208,7 +208,7 @@ struct cpuidle_monitor intel_nhm_monitor = {
.stop = nhm_stop,
.do_register = intel_nhm_register,
.unregister = intel_nhm_unregister,
.needs_root = 1,
.flags.needs_root = 1,
.overflow_s = 922000000 /* 922337203 seconds TSC overflow
at 20GHz */
};

@@ -192,7 +192,7 @@ struct cpuidle_monitor intel_snb_monitor = {
.stop = snb_stop,
.do_register = snb_register,
.unregister = snb_unregister,
.needs_root = 1,
.flags.needs_root = 1,
.overflow_s = 922000000 /* 922337203 seconds TSC overflow
at 20GHz */
};