#include <stdio.h>
#include "evsel.h"
#include "stat.h"
#include "color.h"
#include "pmu.h"
perf stat: Output JSON MetricExpr metric

Add generic infrastructure to perf stat to output ratios for "MetricExpr"
entries in the event lists. Many events are more useful as ratios than in
raw form, typically some count in relation to total ticks.

Transfer the MetricExpr information from the alias to the evsel. We mark
the events that need to be collected for MetricExpr, and also link the
events using them with a pointer. The code is careful to always prefer
the right event in the same group, to minimize multiplexing errors. At
the moment only a single relation is supported.

Then add an rblist to the stat shadow code that remembers stats based on
the cpu and context.

Finally, update, retrieve and print these values similarly to the
existing hardcoded perf metrics, using the simple expression parser
added earlier to evaluate the expression.

Normally we just output the result without further commentary, but for
--metric-only this would lead to empty columns, so in that case the
original event is used as the description.

There is no attempt to automatically add the MetricExpr event if it is
missing; instead we suggest it to the user, because the tool does not
have enough information to reliably construct a group that is guaranteed
to schedule. So we leave that to the user.

% perf stat -a -I 1000 -e '{unc_p_clockticks,unc_p_freq_max_os_cycles}'
  1.000147889  800,085,181  unc_p_clockticks
  1.000147889   93,126,241  unc_p_freq_max_os_cycles  #  11.6
  2.000448381  800,218,217  unc_p_clockticks
  2.000448381  142,516,095  unc_p_freq_max_os_cycles  #  17.8
  3.000639852  800,243,057  unc_p_clockticks
  3.000639852  162,292,689  unc_p_freq_max_os_cycles  #  20.3

% perf stat -a -I 1000 -e '{unc_p_clockticks,unc_p_freq_max_os_cycles}' --metric-only
#        time  freq_max_os_cycles %
  1.000127077  0.9
  2.000301436  0.7
  3.000456379  0.0

v2: Change from DivideBy to MetricExpr
v3: Use expr__ prefix. Support more than one other event.
v4: Update description
v5: Only print warning message once for multiple PMUs.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: http://lkml.kernel.org/r/20170320201711.14142-11-andi@firstfloor.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
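As a minimal sketch of how such a ratio is evaluated, assuming the expr.h
parser interface used later in this file: the helper name, the event names
and the literal counts below are made up for illustration, but the expr__*
calls mirror the ones in generic_metric() further down.

	/*
	 * Hypothetical helper, only for illustration: evaluate a
	 * MetricExpr-style ratio from two already-averaged counts.
	 */
	static void example_eval_metric_expr(void)
	{
		struct parse_ctx pctx;
		const char *expr = "unc_p_freq_max_os_cycles / unc_p_clockticks * 100";
		double ratio;

		expr__ctx_init(&pctx);
		/* Feed the averaged counts of the events the expression refers to. */
		expr__add_id(&pctx, "unc_p_freq_max_os_cycles", 93126241.0);
		expr__add_id(&pctx, "unc_p_clockticks", 800085181.0);
		if (expr__parse(&ratio, &pctx, &expr) == 0)
			fprintf(stderr, "# %.1f\n", ratio); /* ~11.6, as in the first interval above */
	}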
#include "rblist.h"
#include "evlist.h"
#include "expr.h"
#include "metricgroup.h"
enum {
	CTX_BIT_USER	= 1 << 0,
	CTX_BIT_KERNEL	= 1 << 1,
	CTX_BIT_HV	= 1 << 2,
	CTX_BIT_HOST	= 1 << 3,
	CTX_BIT_IDLE	= 1 << 4,
	CTX_BIT_MAX	= 1 << 5,
};

#define NUM_CTX CTX_BIT_MAX
perf stat: Support metrics in --per-core/socket mode
Enable metrics printing in --per-core / --per-socket mode. We need to
save the shadow metrics in a unique place. Always use the first CPU in
the aggregation. Then use the same CPU to retrieve the shadow value
later.
Example output:
% perf stat --per-core -a ./BC1s
Performance counter stats for 'system wide':
S0-C0 2 2966.020381 task-clock (msec) # 2.004 CPUs utilized (100.00%)
S0-C0 2 49 context-switches # 0.017 K/sec (100.00%)
S0-C0 2 4 cpu-migrations # 0.001 K/sec (100.00%)
S0-C0 2 467 page-faults # 0.157 K/sec
S0-C0 2 4,599,061,773 cycles # 1.551 GHz (100.00%)
S0-C0 2 9,755,886,883 instructions # 2.12 insn per cycle (100.00%)
S0-C0 2 1,906,272,125 branches # 642.704 M/sec (100.00%)
S0-C0 2 81,180,867 branch-misses # 4.26% of all branches
S0-C1 2 2965.995373 task-clock (msec) # 2.003 CPUs utilized (100.00%)
S0-C1 2 62 context-switches # 0.021 K/sec (100.00%)
S0-C1 2 8 cpu-migrations # 0.003 K/sec (100.00%)
S0-C1 2 281 page-faults # 0.095 K/sec
S0-C1 2 6,347,290 cycles # 0.002 GHz (100.00%)
S0-C1 2 4,654,156 instructions # 0.73 insn per cycle (100.00%)
S0-C1 2 947,121 branches # 0.319 M/sec (100.00%)
S0-C1 2 37,322 branch-misses # 3.94% of all branches
1.480409747 seconds time elapsed
v2: Rebase to older patches
v3: Document shadow cpus. Fix aggr_get_id argument. Fix -A shadows (Jiri)
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: http://lkml.kernel.org/r/1456785386-19481-4-git-send-email-andi@firstfloor.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
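A minimal sketch of the "first CPU of the aggregation" idea described
above. The helper and the cpu_to_aggr_id mapping are hypothetical, for
illustration only; the real lookup is done by the stat printing code via
its aggregation callbacks, not in this file.

	/*
	 * Hypothetical sketch: given a mapping from CPU to aggregation id
	 * (socket or core), pick the lowest-numbered CPU with the same id
	 * and use that slot both to store and to read shadow stats.
	 */
	static int example_first_cpu_of_aggr(const int *cpu_to_aggr_id, int nr_cpus, int cpu)
	{
		int i;

		for (i = 0; i < nr_cpus; i++) {
			if (cpu_to_aggr_id[i] == cpu_to_aggr_id[cpu])
				return i;	/* first CPU sharing this core/socket */
		}
		return cpu;
	}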
/*
 * AGGR_GLOBAL: Use CPU 0
 * AGGR_SOCKET: Use first CPU of socket
 * AGGR_CORE: Use first CPU of core
 * AGGR_NONE: Use matching CPU
 * AGGR_THREAD: Not supported?
 */
static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
static struct stats runtime_cycles_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_front_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_back_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_branches_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_cacherefs_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_l1_dcache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_l1_icache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_ll_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_itlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_dtlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_cycles_in_tx_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_transaction_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_elision_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_total_slots[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_slots_issued[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_slots_retired[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_fetch_bubbles[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_recovery_bubbles[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_smi_num_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_aperf_stats[NUM_CTX][MAX_NR_CPUS];
static struct rblist runtime_saved_values;
static bool have_frontend_stalled;

struct stats walltime_nsecs_stats;
struct saved_value {
	struct rb_node rb_node;
	struct perf_evsel *evsel;
	int cpu;
	struct stats stats;
};

static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
{
	struct saved_value *a = container_of(rb_node,
					     struct saved_value,
					     rb_node);
	const struct saved_value *b = entry;

	if (a->cpu != b->cpu)
		return a->cpu - b->cpu;
	if (a->evsel == b->evsel)
		return 0;
	if ((char *)a->evsel < (char *)b->evsel)
		return -1;
	return +1;
}

static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
				       const void *entry)
{
	struct saved_value *nd = malloc(sizeof(struct saved_value));

	if (!nd)
		return NULL;
	memcpy(nd, entry, sizeof(struct saved_value));
	return &nd->rb_node;
}

static struct saved_value *saved_value_lookup(struct perf_evsel *evsel,
					      int cpu,
					      bool create)
{
	struct rb_node *nd;
	struct saved_value dm = {
		.cpu = cpu,
		.evsel = evsel,
	};

	nd = rblist__find(&runtime_saved_values, &dm);
	if (nd)
		return container_of(nd, struct saved_value, rb_node);
	if (create) {
		rblist__add_node(&runtime_saved_values, &dm);
		nd = rblist__find(&runtime_saved_values, &dm);
		if (nd)
			return container_of(nd, struct saved_value, rb_node);
	}
	return NULL;
}

void perf_stat__init_shadow_stats(void)
{
	have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
	rblist__init(&runtime_saved_values);
	runtime_saved_values.node_cmp = saved_value_cmp;
	runtime_saved_values.node_new = saved_value_new;
	/* No delete for now */
}

static int evsel_context(struct perf_evsel *evsel)
{
	int ctx = 0;

	if (evsel->attr.exclude_kernel)
		ctx |= CTX_BIT_KERNEL;
	if (evsel->attr.exclude_user)
		ctx |= CTX_BIT_USER;
	if (evsel->attr.exclude_hv)
		ctx |= CTX_BIT_HV;
	if (evsel->attr.exclude_host)
		ctx |= CTX_BIT_HOST;
	if (evsel->attr.exclude_idle)
		ctx |= CTX_BIT_IDLE;

	return ctx;
}

void perf_stat__reset_shadow_stats(void)
{
	struct rb_node *pos, *next;

	memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats));
	memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats));
	memset(runtime_stalled_cycles_front_stats, 0, sizeof(runtime_stalled_cycles_front_stats));
	memset(runtime_stalled_cycles_back_stats, 0, sizeof(runtime_stalled_cycles_back_stats));
	memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats));
	memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats));
	memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats));
	memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats));
	memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats));
	memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats));
	memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats));
	memset(runtime_cycles_in_tx_stats, 0,
	       sizeof(runtime_cycles_in_tx_stats));
	memset(runtime_transaction_stats, 0,
	       sizeof(runtime_transaction_stats));
	memset(runtime_elision_stats, 0, sizeof(runtime_elision_stats));
	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
	memset(runtime_topdown_total_slots, 0, sizeof(runtime_topdown_total_slots));
	memset(runtime_topdown_slots_retired, 0, sizeof(runtime_topdown_slots_retired));
	memset(runtime_topdown_slots_issued, 0, sizeof(runtime_topdown_slots_issued));
	memset(runtime_topdown_fetch_bubbles, 0, sizeof(runtime_topdown_fetch_bubbles));
	memset(runtime_topdown_recovery_bubbles, 0, sizeof(runtime_topdown_recovery_bubbles));
	memset(runtime_smi_num_stats, 0, sizeof(runtime_smi_num_stats));
	memset(runtime_aperf_stats, 0, sizeof(runtime_aperf_stats));

	next = rb_first(&runtime_saved_values.entries);
	while (next) {
		pos = next;
		next = rb_next(pos);
		memset(&container_of(pos, struct saved_value, rb_node)->stats,
		       0,
		       sizeof(struct stats));
	}
}

/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
				    int cpu)
{
	int ctx = evsel_context(counter);

	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
	    perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
		update_stats(&runtime_nsecs_stats[cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
		update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
		update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, TRANSACTION_START))
		update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, ELISION_START))
		update_stats(&runtime_elision_stats[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
		update_stats(&runtime_topdown_total_slots[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
		update_stats(&runtime_topdown_slots_issued[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
		update_stats(&runtime_topdown_slots_retired[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
		update_stats(&runtime_topdown_fetch_bubbles[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
		update_stats(&runtime_topdown_recovery_bubbles[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		update_stats(&runtime_branches_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
		update_stats(&runtime_cacherefs_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
		update_stats(&runtime_l1_dcache_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
		update_stats(&runtime_l1_icache_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
		update_stats(&runtime_ll_cache_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
		update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
		update_stats(&runtime_itlb_cache_stats[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, SMI_NUM))
		update_stats(&runtime_smi_num_stats[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, APERF))
		update_stats(&runtime_aperf_stats[ctx][cpu], count[0]);

	if (counter->collect_stat) {
		struct saved_value *v = saved_value_lookup(counter, cpu, true);

		update_stats(&v->stats, count[0]);
	}
}

/* used for get_ratio_color() */
enum grc_type {
	GRC_STALLED_CYCLES_FE,
	GRC_STALLED_CYCLES_BE,
	GRC_CACHE_MISSES,
	GRC_MAX_NR
};

static const char *get_ratio_color(enum grc_type type, double ratio)
{
	static const double grc_table[GRC_MAX_NR][3] = {
		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
		[GRC_CACHE_MISSES]	= { 20.0, 10.0,  5.0 },
	};
	const char *color = PERF_COLOR_NORMAL;

	if (ratio > grc_table[type][0])
		color = PERF_COLOR_RED;
	else if (ratio > grc_table[type][1])
		color = PERF_COLOR_MAGENTA;
	else if (ratio > grc_table[type][2])
		color = PERF_COLOR_YELLOW;

	return color;
}

static struct perf_evsel *perf_stat__find_event(struct perf_evlist *evsel_list,
						const char *name)
{
	struct perf_evsel *c2;

	evlist__for_each_entry (evsel_list, c2) {
		if (!strcasecmp(c2->name, name))
			return c2;
	}
	return NULL;
}

/* Mark MetricExpr target events, and link the events using them to those targets. */
void perf_stat__collect_metric_expr(struct perf_evlist *evsel_list)
{
	struct perf_evsel *counter, *leader, **metric_events, *oc;
	bool found;
	const char **metric_names;
	int i;
	int num_metric_names;

	evlist__for_each_entry(evsel_list, counter) {
		bool invalid = false;

		leader = counter->leader;
		if (!counter->metric_expr)
			continue;
		metric_events = counter->metric_events;
		if (!metric_events) {
			if (expr__find_other(counter->metric_expr, counter->name,
					     &metric_names, &num_metric_names) < 0)
				continue;

			metric_events = calloc(sizeof(struct perf_evsel *),
					       num_metric_names + 1);
			if (!metric_events)
				return;
			counter->metric_events = metric_events;
		}

		for (i = 0; i < num_metric_names; i++) {
			found = false;
			if (leader) {
				/* Search in group */
				for_each_group_member (oc, leader) {
					if (!strcasecmp(oc->name, metric_names[i])) {
						found = true;
						break;
					}
				}
			}
			if (!found) {
				/* Search ignoring groups */
				oc = perf_stat__find_event(evsel_list, metric_names[i]);
			}
			if (!oc) {
				/* Deduping one is good enough to handle duplicated PMUs. */
				static char *printed;

				/*
				 * Adding events automatically would be difficult, because
				 * it would risk creating groups that are not schedulable.
				 * perf stat doesn't understand all the scheduling constraints
				 * of events. So we ask the user instead to add the missing
				 * events.
				 */
				if (!printed || strcasecmp(printed, metric_names[i])) {
					fprintf(stderr,
						"Add %s event to groups to get metric expression for %s\n",
						metric_names[i],
						counter->name);
					printed = strdup(metric_names[i]);
				}
				invalid = true;
				continue;
			}
			metric_events[i] = oc;
			oc->collect_stat = true;
		}
		metric_events[i] = NULL;
		free(metric_names);
		if (invalid) {
			free(metric_events);
			counter->metric_events = NULL;
			counter->metric_expr = NULL;
		}
	}
}
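For reference, a small sketch of the other-event discovery used above: the
helper name, the expression and the event names are made up, but the
expr__find_other() call and the caller-frees-the-array convention mirror
the usage in perf_stat__collect_metric_expr().

	/*
	 * Hypothetical illustration: ask the expression parser which other
	 * event names a MetricExpr refers to, besides the event itself.
	 */
	static void example_find_other(void)
	{
		const char **other;
		int num_other, i;

		if (expr__find_other("unc_p_freq_max_os_cycles / unc_p_clockticks",
				     "unc_p_freq_max_os_cycles",
				     &other, &num_other) < 0)
			return;
		for (i = 0; i < num_other; i++)
			fprintf(stderr, "needs event: %s\n", other[i]); /* unc_p_clockticks */
		free(other);
	}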

static void print_stalled_cycles_frontend(int cpu,
					  struct perf_evsel *evsel, double avg,
					  struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_cycles_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);

	if (ratio)
		out->print_metric(out->ctx, color, "%7.2f%%", "frontend cycles idle",
				  ratio);
	else
		out->print_metric(out->ctx, NULL, NULL, "frontend cycles idle", 0);
}

static void print_stalled_cycles_backend(int cpu,
					 struct perf_evsel *evsel, double avg,
					 struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_cycles_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);

	out->print_metric(out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
}

static void print_branch_misses(int cpu,
				struct perf_evsel *evsel,
				double avg,
				struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_branches_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(out->ctx, color, "%7.2f%%", "of all branches", ratio);
}

static void print_l1_dcache_misses(int cpu,
				   struct perf_evsel *evsel,
				   double avg,
				   struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_l1_dcache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-dcache hits", ratio);
}

static void print_l1_icache_misses(int cpu,
				   struct perf_evsel *evsel,
				   double avg,
				   struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_l1_icache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-icache hits", ratio);
}

static void print_dtlb_cache_misses(int cpu,
				    struct perf_evsel *evsel,
				    double avg,
				    struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_dtlb_cache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all dTLB cache hits", ratio);
}

static void print_itlb_cache_misses(int cpu,
				    struct perf_evsel *evsel,
				    double avg,
				    struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_itlb_cache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all iTLB cache hits", ratio);
}

static void print_ll_cache_misses(int cpu,
				  struct perf_evsel *evsel,
				  double avg,
				  struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_ll_cache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all LL-cache hits", ratio);
}

/*
 * High level "TopDown" CPU core pipe line bottleneck break down.
 *
 * Basic concept following
 * Yasin, A Top Down Method for Performance analysis and Counter architecture
 * ISPASS14
 *
 * The CPU pipeline is divided into 4 areas that can be bottlenecks:
 *
 * Frontend -> Backend -> Retiring
 * BadSpeculation in addition means out of order execution that is thrown away
 * (for example branch mispredictions)
 * Frontend is instruction decoding.
 * Backend is execution, like computation and accessing data in memory
 * Retiring is good execution that is not directly bottlenecked
 *
 * The formulas are computed in slots.
 * A slot is an entry in the pipeline each for the pipeline width
 * (for example a 4-wide pipeline has 4 slots for each cycle)
 *
 * Formulas:
 * BadSpeculation = ((SlotsIssued - SlotsRetired) + RecoveryBubbles) /
 *			TotalSlots
 * Retiring = SlotsRetired / TotalSlots
 * FrontendBound = FetchBubbles / TotalSlots
 * BackendBound = 1.0 - BadSpeculation - Retiring - FrontendBound
 *
 * The kernel provides the mapping to the low level CPU events and any scaling
 * needed for the CPU pipeline width, for example:
 *
 * TotalSlots = Cycles * 4
 *
 * The scaling factor is communicated in the sysfs unit.
 *
 * In some cases the CPU may not be able to measure all the formulas due to
 * missing events. In this case multiple formulas are combined, as possible.
 *
 * Full TopDown supports more levels to sub-divide each area: for example
 * BackendBound into computing bound and memory bound. For now we only
 * support Level 1 TopDown.
 */
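A small worked sketch of these Level 1 formulas: the helper and the counts
below are made up for illustration; the td_*() helpers that follow compute
the same fractions from the runtime_topdown_* stats.

	/*
	 * Hypothetical example: a 4-wide CPU over 1,000,000 cycles.
	 * TotalSlots      = 1,000,000 * 4 = 4,000,000
	 * SlotsIssued     = 3,000,000
	 * SlotsRetired    = 2,500,000
	 * RecoveryBubbles =   300,000
	 * FetchBubbles    =   600,000
	 */
	static void example_topdown_level1(void)
	{
		double total_slots = 4000000.0;
		double bad_spec = ((3000000.0 - 2500000.0) + 300000.0) / total_slots; /* 0.20 */
		double retiring = 2500000.0 / total_slots;                            /* 0.625 */
		double fe_bound = 600000.0 / total_slots;                             /* 0.15 */
		double be_bound = 1.0 - bad_spec - retiring - fe_bound;               /* 0.025 */

		printf("retiring %.1f%% bad speculation %.1f%% fe bound %.1f%% be bound %.1f%%\n",
		       retiring * 100.0, bad_spec * 100.0,
		       fe_bound * 100.0, be_bound * 100.0);
	}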

static double sanitize_val(double x)
{
	if (x < 0 && x >= -0.02)
		return 0.0;
	return x;
}

static double td_total_slots(int ctx, int cpu)
{
	return avg_stats(&runtime_topdown_total_slots[ctx][cpu]);
}

static double td_bad_spec(int ctx, int cpu)
{
	double bad_spec = 0;
	double total_slots;
	double total;

	total = avg_stats(&runtime_topdown_slots_issued[ctx][cpu]) -
		avg_stats(&runtime_topdown_slots_retired[ctx][cpu]) +
		avg_stats(&runtime_topdown_recovery_bubbles[ctx][cpu]);
	total_slots = td_total_slots(ctx, cpu);
	if (total_slots)
		bad_spec = total / total_slots;
	return sanitize_val(bad_spec);
}

static double td_retiring(int ctx, int cpu)
{
	double retiring = 0;
	double total_slots = td_total_slots(ctx, cpu);
	double ret_slots = avg_stats(&runtime_topdown_slots_retired[ctx][cpu]);

	if (total_slots)
		retiring = ret_slots / total_slots;
	return retiring;
}

static double td_fe_bound(int ctx, int cpu)
{
	double fe_bound = 0;
	double total_slots = td_total_slots(ctx, cpu);
	double fetch_bub = avg_stats(&runtime_topdown_fetch_bubbles[ctx][cpu]);

	if (total_slots)
		fe_bound = fetch_bub / total_slots;
	return fe_bound;
}

static double td_be_bound(int ctx, int cpu)
{
	double sum = (td_fe_bound(ctx, cpu) +
		      td_bad_spec(ctx, cpu) +
		      td_retiring(ctx, cpu));
	if (sum == 0)
		return 0;
	return sanitize_val(1.0 - sum);
}

static void print_smi_cost(int cpu, struct perf_evsel *evsel,
			   struct perf_stat_output_ctx *out)
{
	double smi_num, aperf, cycles, cost = 0.0;
	int ctx = evsel_context(evsel);
	const char *color = NULL;

	smi_num = avg_stats(&runtime_smi_num_stats[ctx][cpu]);
	aperf = avg_stats(&runtime_aperf_stats[ctx][cpu]);
	cycles = avg_stats(&runtime_cycles_stats[ctx][cpu]);

	if ((cycles == 0) || (aperf == 0))
		return;

	if (smi_num)
		cost = (aperf - cycles) / aperf * 100.00;

	if (cost > 10)
		color = PERF_COLOR_RED;
	out->print_metric(out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
	out->print_metric(out->ctx, NULL, "%4.0f", "SMI#", smi_num);
}

static void generic_metric(const char *metric_expr,
			   struct perf_evsel **metric_events,
			   char *name,
			   const char *metric_name,
			   double avg,
			   int cpu,
			   struct perf_stat_output_ctx *out)
{
	print_metric_t print_metric = out->print_metric;
	struct parse_ctx pctx;
	double ratio;
	int i;
	void *ctxp = out->ctx;

	expr__ctx_init(&pctx);
	expr__add_id(&pctx, name, avg);
	for (i = 0; metric_events[i]; i++) {
		struct saved_value *v;

		v = saved_value_lookup(metric_events[i], cpu, false);
		if (!v)
			break;
		expr__add_id(&pctx, metric_events[i]->name, avg_stats(&v->stats));
	}
	if (!metric_events[i]) {
		const char *p = metric_expr;

		if (expr__parse(&ratio, &pctx, &p) == 0)
			print_metric(ctxp, NULL, "%8.1f",
				     metric_name ?
				     metric_name :
				     out->force_header ? name : "",
				     ratio);
		else
			print_metric(ctxp, NULL, NULL,
				     out->force_header ?
				     (metric_name ? metric_name : name) : "", 0);
	} else
		print_metric(ctxp, NULL, NULL, "", 0);
}

void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
				   double avg, int cpu,
				   struct perf_stat_output_ctx *out,
				   struct rblist *metric_events)
{
	void *ctxp = out->ctx;
	print_metric_t print_metric = out->print_metric;
	double total, ratio = 0.0, total2;
	const char *color = NULL;
	int ctx = evsel_context(evsel);
	struct metric_event *me;
	int num = 1;

	if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
		total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
		if (total) {
			ratio = avg / total;
			print_metric(ctxp, NULL, "%7.2f ",
					"insn per cycle", ratio);
		} else {
			print_metric(ctxp, NULL, NULL, "insn per cycle", 0);
		}
		total = avg_stats(&runtime_stalled_cycles_front_stats[ctx][cpu]);
		total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[ctx][cpu]));

		if (total && avg) {
perf stat: Implement CSV metrics output
Now support CSV output for metrics. With the new output callbacks this is
relatively straightforward, done by adding new callbacks. This makes it
easy to plot metrics from CSV files.
The new-line callback needs to know the number of fields, so that it can
skip them correctly.
Example output before:
% perf stat -x, true
0.200687,,task-clock,200687,100.00
0,,context-switches,200687,100.00
0,,cpu-migrations,200687,100.00
40,,page-faults,200687,100.00
730871,,cycles,203601,100.00
551056,,stalled-cycles-frontend,203601,100.00
<not supported>,,stalled-cycles-backend,0,100.00
385523,,instructions,203601,100.00
78028,,branches,203601,100.00
3946,,branch-misses,203601,100.00
After:
% perf stat -x, true
.502457,,task-clock,502457,100.00,0.485,CPUs utilized
0,,context-switches,502457,100.00,0.000,K/sec
0,,cpu-migrations,502457,100.00,0.000,K/sec
45,,page-faults,502457,100.00,0.090,M/sec
644692,,cycles,509102,100.00,1.283,GHz
423470,,stalled-cycles-frontend,509102,100.00,65.69,frontend cycles idle
<not supported>,,stalled-cycles-backend,0,100.00,,,,
492701,,instructions,509102,100.00,0.76,insn per cycle
,,,,,0.86,stalled cycles per insn
97767,,branches,509102,100.00,194.578,M/sec
4788,,branch-misses,509102,100.00,4.90,of all branches
or easier readable
$ perf stat -x, -o x.csv true
$ column -s, -t x.csv
0.490635 task-clock 490635 100.00 0.489 CPUs utilized
0 context-switches 490635 100.00 0.000 K/sec
0 cpu-migrations 490635 100.00 0.000 K/sec
45 page-faults 490635 100.00 0.092 M/sec
629080 cycles 497698 100.00 1.282 GHz
409498 stalled-cycles-frontend 497698 100.00 65.09 frontend cycles idle
<not supported> stalled-cycles-backend 0 100.00
491424 instructions 497698 100.00 0.78 insn per cycle
0.83 stalled cycles per insn
97278 branches 497698 100.00 198.270 M/sec
4569 branch-misses 497698 100.00 4.70 of all branches
Two new fields are added: metric value and metric name.
v2: Split out function argument changes
v3: Reenable metrics for real.
v4: Fix wrong hunk from refactoring.
v5: Remove extra "noise" printing (Jiri), but add it to the not counted case.
Print empty metrics for not counted.
v6: Avoid outputting metric on empty format.
v7: Print metric at the end
v8: Remove extra run, ena fields
v9: Avoid extra new line for unsupported counters
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Link: http://lkml.kernel.org/r/1456785386-19481-3-git-send-email-andi@firstfloor.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-02-29 22:36:21 +00:00
|
|
|
out->new_line(ctxp);
|
2015-06-03 14:25:59 +00:00
|
|
|
ratio = total / avg;
|
2016-01-30 17:06:49 +00:00
|
|
|
print_metric(ctxp, NULL, "%7.2f ",
|
|
|
|
"stalled cycles per insn",
|
|
|
|
ratio);
|
2016-03-01 18:57:52 +00:00
|
|
|
} else if (have_frontend_stalled) {
|
2016-01-30 17:06:49 +00:00
|
|
|
print_metric(ctxp, NULL, NULL,
|
|
|
|
"stalled cycles per insn", 0);
|
2015-06-03 14:25:59 +00:00
|
|
|
}
|
2016-01-30 17:06:49 +00:00
|
|
|
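        /*
         * Branch miss rate: branch-misses as a percentage of all branches,
         * computed by print_branch_misses() against the branches count
         * recorded in runtime_branches_stats.
         */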
        } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
                if (runtime_branches_stats[ctx][cpu].n != 0)
                        print_branch_misses(cpu, evsel, avg, out);
                else
                        print_metric(ctxp, NULL, NULL, "of all branches", 0);
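        /*
         * The next five branches match the generic cache events by their
         * raw attr.config encoding from the perf_event_open() ABI:
         *
         *      config = (perf_hw_cache_id) |
         *               (perf_hw_cache_op_id << 8) |
         *               (perf_hw_cache_op_result_id << 16)
         *
         * i.e. the read-miss flavour of L1d, L1i, dTLB, iTLB and LLC, and
         * print the miss rate relative to the corresponding accesses.
         */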
        } else if (
                evsel->attr.type == PERF_TYPE_HW_CACHE &&
                evsel->attr.config == (PERF_COUNT_HW_CACHE_L1D |
                                       (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                                       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16))) {
                if (runtime_l1_dcache_stats[ctx][cpu].n != 0)
                        print_l1_dcache_misses(cpu, evsel, avg, out);
                else
                        print_metric(ctxp, NULL, NULL, "of all L1-dcache hits", 0);
        } else if (
                evsel->attr.type == PERF_TYPE_HW_CACHE &&
                evsel->attr.config == (PERF_COUNT_HW_CACHE_L1I |
                                       (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                                       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16))) {
                if (runtime_l1_icache_stats[ctx][cpu].n != 0)
                        print_l1_icache_misses(cpu, evsel, avg, out);
                else
                        print_metric(ctxp, NULL, NULL, "of all L1-icache hits", 0);
        } else if (
                evsel->attr.type == PERF_TYPE_HW_CACHE &&
                evsel->attr.config == (PERF_COUNT_HW_CACHE_DTLB |
                                       (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                                       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16))) {
                if (runtime_dtlb_cache_stats[ctx][cpu].n != 0)
                        print_dtlb_cache_misses(cpu, evsel, avg, out);
                else
                        print_metric(ctxp, NULL, NULL, "of all dTLB cache hits", 0);
        } else if (
                evsel->attr.type == PERF_TYPE_HW_CACHE &&
                evsel->attr.config == (PERF_COUNT_HW_CACHE_ITLB |
                                       (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                                       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16))) {
                if (runtime_itlb_cache_stats[ctx][cpu].n != 0)
                        print_itlb_cache_misses(cpu, evsel, avg, out);
                else
                        print_metric(ctxp, NULL, NULL, "of all iTLB cache hits", 0);
        } else if (
                evsel->attr.type == PERF_TYPE_HW_CACHE &&
                evsel->attr.config == (PERF_COUNT_HW_CACHE_LL |
                                       (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                                       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16))) {
                if (runtime_ll_cache_stats[ctx][cpu].n != 0)
                        print_ll_cache_misses(cpu, evsel, avg, out);
                else
                        print_metric(ctxp, NULL, NULL, "of all LL-cache hits", 0);
        } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
                total = avg_stats(&runtime_cacherefs_stats[ctx][cpu]);

                if (total)
                        ratio = avg * 100 / total;

                if (runtime_cacherefs_stats[ctx][cpu].n != 0)
                        print_metric(ctxp, NULL, "%8.3f %%",
                                     "of all cache refs", ratio);
                else
                        print_metric(ctxp, NULL, NULL, "of all cache refs", 0);
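        /*
         * Stalled-cycles-frontend and stalled-cycles-backend are reported
         * by their helpers relative to total cycles ("frontend/backend
         * cycles idle").
         */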
        } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
                print_stalled_cycles_frontend(cpu, evsel, avg, out);
        } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
                print_stalled_cycles_backend(cpu, evsel, avg, out);
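        /*
         * Clock frequency: runtime_nsecs_stats holds the task-clock /
         * cpu-clock time in nanoseconds, so cycles / nanoseconds is
         * directly the frequency in GHz (e.g. 2.5e9 cycles over 1e9 ns
         * prints "2.500 GHz").
         */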
        } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
                total = avg_stats(&runtime_nsecs_stats[cpu]);

                if (total) {
                        ratio = avg / total;
                        print_metric(ctxp, NULL, "%8.3f", "GHz", ratio);
                } else {
                        print_metric(ctxp, NULL, NULL, "GHz", 0);
                }
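        /*
         * Transactional memory (TSX) metrics, assuming the usual -T event
         * set:
         *
         *      transactional cycles = 100 * cycles-in-tx / cycles
         *      aborted cycles       = 100 * (cycles-in-tx - cycles-in-tx-cp) / cycles
         *      cycles / transaction = cycles-in-tx / tx-start
         *      cycles / elision     = cycles-in-tx / el-start
         */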
        } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
                total = avg_stats(&runtime_cycles_stats[ctx][cpu]);

                if (total)
                        print_metric(ctxp, NULL, "%7.2f%%",
                                     "transactional cycles",
                                     100.0 * (avg / total));
                else
                        print_metric(ctxp, NULL, NULL, "transactional cycles",
                                     0);
        } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
                total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
                total2 = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
                if (total2 < avg)
                        total2 = avg;
                if (total)
                        print_metric(ctxp, NULL, "%7.2f%%", "aborted cycles",
                                     100.0 * ((total2 - avg) / total));
                else
                        print_metric(ctxp, NULL, NULL, "aborted cycles", 0);
        } else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
                total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);

                if (avg)
                        ratio = total / avg;

                if (runtime_cycles_in_tx_stats[ctx][cpu].n != 0)
                        print_metric(ctxp, NULL, "%8.0f",
                                     "cycles / transaction", ratio);
                else
                        print_metric(ctxp, NULL, NULL, "cycles / transaction",
                                     0);
        } else if (perf_stat_evsel__is(evsel, ELISION_START)) {
                total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);

                if (avg)
                        ratio = total / avg;

                print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
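        /*
         * CPU utilization: task-clock (or cpu-clock) nanoseconds divided by
         * wall-clock nanoseconds, i.e. the average number of CPUs kept busy.
         */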
        } else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK) ||
                   perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK)) {
                if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
                        print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
                                     avg / ratio);
                else
                        print_metric(ctxp, NULL, NULL, "CPUs utilized", 0);
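        /*
         * Top-down level 1 metrics (frontend bound, retiring, bad
         * speculation, backend bound), each a fraction of the total issue
         * slots as computed by the td_*() helpers.  The thresholds only
         * pick a highlight colour: red when a bottleneck share looks high
         * (> 20% frontend/backend bound, > 10% bad speculation), green
         * when retiring exceeds 70%.
         */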
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
                double fe_bound = td_fe_bound(ctx, cpu);

                if (fe_bound > 0.2)
                        color = PERF_COLOR_RED;
                print_metric(ctxp, color, "%8.1f%%", "frontend bound",
                             fe_bound * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
                double retiring = td_retiring(ctx, cpu);

                if (retiring > 0.7)
                        color = PERF_COLOR_GREEN;
                print_metric(ctxp, color, "%8.1f%%", "retiring",
                             retiring * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
                double bad_spec = td_bad_spec(ctx, cpu);

                if (bad_spec > 0.1)
                        color = PERF_COLOR_RED;
                print_metric(ctxp, color, "%8.1f%%", "bad speculation",
                             bad_spec * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
                double be_bound = td_be_bound(ctx, cpu);
                const char *name = "backend bound";
                static int have_recovery_bubbles = -1;

                /* In case the CPU does not support topdown-recovery-bubbles */
                if (have_recovery_bubbles < 0)
                        have_recovery_bubbles = pmu_have_event("cpu",
                                        "topdown-recovery-bubbles");
                if (!have_recovery_bubbles)
                        name = "backend bound/bad spec";

                if (be_bound > 0.2)
                        color = PERF_COLOR_RED;
                if (td_total_slots(ctx, cpu) > 0)
                        print_metric(ctxp, color, "%8.1f%%", name,
                                     be_bound * 100.);
                else
                        print_metric(ctxp, NULL, NULL, name, 0);
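        /*
         * An event with a MetricExpr attached (e.g. from a vendor JSON
         * alias) is evaluated by generic_metric(), which runs the
         * expression parser over the values collected for the events linked
         * on evsel->metric_events.
         */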
        } else if (evsel->metric_expr) {
                generic_metric(evsel->metric_expr, evsel->metric_events, evsel->name,
                               evsel->metric_name, avg, cpu, out);
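        /*
         * Fallback: print a plain rate.  1000.0 * count / nanoseconds is
         * the count per microsecond, i.e. millions per second, so the unit
         * is "M/sec"; rates below 0.001 are rescaled to "K/sec" (e.g.
         * 2,000,000 events over 1 second of task-clock prints "2.000 M/sec").
         */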
        } else if (runtime_nsecs_stats[cpu].n != 0) {
                char unit = 'M';
                char unit_buf[10];

                total = avg_stats(&runtime_nsecs_stats[cpu]);

                if (total)
                        ratio = 1000.0 * avg / total;
                if (ratio < 0.001) {
                        ratio *= 1000;
                        unit = 'K';
                }
                snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
                print_metric(ctxp, NULL, "%8.3f", unit_buf, ratio);
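        /*
         * The --smi-cost mode prints its metrics (SMI count and the cycle
         * cost attributed to system management interrupts) through
         * print_smi_cost().
         */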
        } else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
                print_smi_cost(cpu, evsel, out);
        } else {
                num = 0;
        }

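        /*
         * Metrics attached through metric groups are printed last, one per
         * line.  num counts every metric emitted above; if nothing at all
         * was printed, emit one empty metric so column-oriented output
         * (CSV, --metric-only) stays aligned.
         */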
        if ((me = metricgroup__lookup(metric_events, evsel, false)) != NULL) {
                struct metric_expr *mexp;

                list_for_each_entry(mexp, &me->head, nd) {
                        if (num++ > 0)
                                out->new_line(ctxp);
                        generic_metric(mexp->metric_expr, mexp->metric_events,
                                       evsel->name, mexp->metric_name,
                                       avg, cpu, out);
                }
        }
        if (num == 0)
                print_metric(ctxp, NULL, NULL, NULL, 0);
}