mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 22:21:40 +00:00
perf stat: Introduce config stat.bpf-counter-events
Currently, to use BPF to aggregate perf event counters, the user uses --bpf-counters option. Enable "use bpf by default" events with a config option, stat.bpf-counter-events. Events with name in the option will use BPF. This also enables mixed BPF event and regular event in the same session. For example: perf config stat.bpf-counter-events=instructions perf stat -e instructions,cs The second command will use BPF for "instructions" but not "cs". Signed-off-by: Song Liu <song@kernel.org> Cc: Jiri Olsa <jolsa@kernel.org> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Song Liu <songliubraving@fb.com> Link: https://lore.kernel.org/r/20210425214333.1090950-4-song@kernel.org Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in:
parent
fe3dd8263b
commit
112cb56164
@ -97,6 +97,8 @@ report::
|
||||
Use BPF programs to aggregate readings from perf_events. This
|
||||
allows multiple perf-stat sessions that are counting the same metric (cycles,
|
||||
instructions, etc.) to share hardware counters.
|
||||
To use BPF programs on common events by default, use
|
||||
"perf config stat.bpf-counter-events=<list_of_events>".
|
||||
|
||||
--bpf-attr-map::
|
||||
With option "--bpf-counters", different perf-stat sessions share
|
||||
|
@ -161,6 +161,7 @@ static const char *smi_cost_attrs = {
|
||||
};
|
||||
|
||||
static struct evlist *evsel_list;
|
||||
static bool all_counters_use_bpf = true;
|
||||
|
||||
static struct target target = {
|
||||
.uid = UINT_MAX,
|
||||
@ -401,6 +402,9 @@ static int read_affinity_counters(struct timespec *rs)
|
||||
struct affinity affinity;
|
||||
int i, ncpus, cpu;
|
||||
|
||||
if (all_counters_use_bpf)
|
||||
return 0;
|
||||
|
||||
if (affinity__setup(&affinity) < 0)
|
||||
return -1;
|
||||
|
||||
@ -415,6 +419,8 @@ static int read_affinity_counters(struct timespec *rs)
|
||||
evlist__for_each_entry(evsel_list, counter) {
|
||||
if (evsel__cpu_iter_skip(counter, cpu))
|
||||
continue;
|
||||
if (evsel__is_bpf(counter))
|
||||
continue;
|
||||
if (!counter->err) {
|
||||
counter->err = read_counter_cpu(counter, rs,
|
||||
counter->cpu_iter - 1);
|
||||
@ -431,6 +437,9 @@ static int read_bpf_map_counters(void)
|
||||
int err;
|
||||
|
||||
evlist__for_each_entry(evsel_list, counter) {
|
||||
if (!evsel__is_bpf(counter))
|
||||
continue;
|
||||
|
||||
err = bpf_counter__read(counter);
|
||||
if (err)
|
||||
return err;
|
||||
@ -441,14 +450,10 @@ static int read_bpf_map_counters(void)
|
||||
static void read_counters(struct timespec *rs)
|
||||
{
|
||||
struct evsel *counter;
|
||||
int err;
|
||||
|
||||
if (!stat_config.stop_read_counter) {
|
||||
if (target__has_bpf(&target))
|
||||
err = read_bpf_map_counters();
|
||||
else
|
||||
err = read_affinity_counters(rs);
|
||||
if (err < 0)
|
||||
if (read_bpf_map_counters() ||
|
||||
read_affinity_counters(rs))
|
||||
return;
|
||||
}
|
||||
|
||||
@ -537,12 +542,13 @@ static int enable_counters(void)
|
||||
struct evsel *evsel;
|
||||
int err;
|
||||
|
||||
if (target__has_bpf(&target)) {
|
||||
evlist__for_each_entry(evsel_list, evsel) {
|
||||
err = bpf_counter__enable(evsel);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
evlist__for_each_entry(evsel_list, evsel) {
|
||||
if (!evsel__is_bpf(evsel))
|
||||
continue;
|
||||
|
||||
err = bpf_counter__enable(evsel);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
if (stat_config.initial_delay < 0) {
|
||||
@ -786,11 +792,11 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
|
||||
if (affinity__setup(&affinity) < 0)
|
||||
return -1;
|
||||
|
||||
if (target__has_bpf(&target)) {
|
||||
evlist__for_each_entry(evsel_list, counter) {
|
||||
if (bpf_counter__load(counter, &target))
|
||||
return -1;
|
||||
}
|
||||
evlist__for_each_entry(evsel_list, counter) {
|
||||
if (bpf_counter__load(counter, &target))
|
||||
return -1;
|
||||
if (!evsel__is_bpf(counter))
|
||||
all_counters_use_bpf = false;
|
||||
}
|
||||
|
||||
evlist__for_each_cpu (evsel_list, i, cpu) {
|
||||
@ -807,6 +813,8 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
|
||||
continue;
|
||||
if (counter->reset_group || counter->errored)
|
||||
continue;
|
||||
if (evsel__is_bpf(counter))
|
||||
continue;
|
||||
try_again:
|
||||
if (create_perf_stat_counter(counter, &stat_config, &target,
|
||||
counter->cpu_iter - 1) < 0) {
|
||||
|
@ -790,7 +790,8 @@ int bpf_counter__load(struct evsel *evsel, struct target *target)
|
||||
{
|
||||
if (target->bpf_str)
|
||||
evsel->bpf_counter_ops = &bpf_program_profiler_ops;
|
||||
else if (target->use_bpf)
|
||||
else if (target->use_bpf ||
|
||||
evsel__match_bpf_counter_events(evsel->name))
|
||||
evsel->bpf_counter_ops = &bperf_ops;
|
||||
|
||||
if (evsel->bpf_counter_ops)
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include "util/hist.h" /* perf_hist_config */
|
||||
#include "util/llvm-utils.h" /* perf_llvm_config */
|
||||
#include "util/stat.h" /* perf_stat__set_big_num */
|
||||
#include "util/evsel.h" /* evsel__hw_names, evsel__use_bpf_counters */
|
||||
#include "build-id.h"
|
||||
#include "debug.h"
|
||||
#include "config.h"
|
||||
@ -460,6 +461,9 @@ static int perf_stat_config(const char *var, const char *value)
|
||||
if (!strcmp(var, "stat.no-csv-summary"))
|
||||
perf_stat__set_no_csv_summary(perf_config_bool(var, value));
|
||||
|
||||
if (!strcmp(var, "stat.bpf-counter-events"))
|
||||
evsel__bpf_counter_events = strdup(value);
|
||||
|
||||
/* Add other config variables here. */
|
||||
return 0;
|
||||
}
|
||||
|
@ -492,6 +492,28 @@ const char *evsel__hw_names[PERF_COUNT_HW_MAX] = {
|
||||
"ref-cycles",
|
||||
};
|
||||
|
||||
/*
 * Comma-separated list of event names that should be counted with BPF by
 * default; set from the stat.bpf-counter-events config variable.
 */
char *evsel__bpf_counter_events;

/*
 * evsel__match_bpf_counter_events - does @name appear as a full token in
 * evsel__bpf_counter_events?
 *
 * A match must line up on token boundaries: "cycles" matches the list
 * "instructions,cycles" but not "acycles".  Returns false when no list
 * has been configured.
 *
 * Note: strstr() only reports the first occurrence of @name, which may be
 * an embedded substring of another token (e.g. "cycles" inside
 * "acycles,cycles").  Walk every occurrence until one sits on token
 * boundaries, instead of testing only the first hit.
 */
bool evsel__match_bpf_counter_events(const char *name)
{
	int name_len;
	const char *ptr;

	if (!evsel__bpf_counter_events)
		return false;

	name_len = strlen(name);
	ptr = evsel__bpf_counter_events;

	while ((ptr = strstr(ptr, name)) != NULL) {
		/* token must start at the list head or just after a comma */
		bool head_ok = (ptr == evsel__bpf_counter_events) ||
			       (*(ptr - 1) == ',');
		/* token must end at a comma or at the end of the list */
		bool tail_ok = (ptr[name_len] == ',') ||
			       (ptr[name_len] == '\0');

		if (head_ok && tail_ok)
			return true;
		ptr++;
	}
	return false;
}
|
||||
|
||||
static const char *__evsel__hw_name(u64 config)
|
||||
{
|
||||
if (config < PERF_COUNT_HW_MAX && evsel__hw_names[config])
|
||||
|
@ -239,6 +239,11 @@ void evsel__calc_id_pos(struct evsel *evsel);
|
||||
|
||||
bool evsel__is_cache_op_valid(u8 type, u8 op);
|
||||
|
||||
static inline bool evsel__is_bpf(struct evsel *evsel)
|
||||
{
|
||||
return evsel->bpf_counter_ops != NULL;
|
||||
}
|
||||
|
||||
#define EVSEL__MAX_ALIASES 8
|
||||
|
||||
extern const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES];
|
||||
@ -246,6 +251,9 @@ extern const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALI
|
||||
extern const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES];
|
||||
extern const char *evsel__hw_names[PERF_COUNT_HW_MAX];
|
||||
extern const char *evsel__sw_names[PERF_COUNT_SW_MAX];
|
||||
extern char *evsel__bpf_counter_events;
|
||||
bool evsel__match_bpf_counter_events(const char *name);
|
||||
|
||||
int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size);
|
||||
const char *evsel__name(struct evsel *evsel);
|
||||
|
||||
|
@ -66,11 +66,6 @@ static inline bool target__has_cpu(struct target *target)
|
||||
return target->system_wide || target->cpu_list;
|
||||
}
|
||||
|
||||
static inline bool target__has_bpf(struct target *target)
|
||||
{
|
||||
return target->bpf_str || target->use_bpf;
|
||||
}
|
||||
|
||||
static inline bool target__none(struct target *target)
|
||||
{
|
||||
return !target__has_task(target) && !target__has_cpu(target);
|
||||
|
Loading…
Reference in New Issue
Block a user