perf evsel: Rename *perf_evsel__read*() to *evsel__read()
As those are 'struct evsel' methods, not part of tools/lib/perf/ (aka libperf), to which the perf_ prefix belongs.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit ea08969273 (parent 53fcfa6b8e)
@@ -259,7 +259,7 @@ static int read_single_counter(struct evsel *counter, int cpu,
 		count->val = val;
 		return 0;
 	}
-	return perf_evsel__read_counter(counter, cpu, thread);
+	return evsel__read_counter(counter, cpu, thread);
 }
 
 /*
@@ -284,7 +284,7 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
 
 		/*
 		 * The leader's group read loads data into its group members
-		 * (via perf_evsel__read_counter()) and sets their count->loaded.
+		 * (via evsel__read_counter()) and sets their count->loaded.
 		 */
 		if (!perf_counts__is_loaded(counter->counts, cpu, thread) &&
 		    read_single_counter(counter, cpu, thread, rs)) {
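
The comment in the hunk above carries the logic: when the group leader is read with a group read, every member's value arrives in one buffer and its count is marked loaded, so the per-counter fallback can be skipped. Below is a toy model of that flow, a sketch only; struct toy_count, group_read() and read_one() are made up here, with only the loaded-flag check mirroring the perf_counts__is_loaded() test above.

    /*
     * Toy model of the comment above, using made-up structures rather than
     * perf's own: one group read on the leader fills every member's count and
     * marks it loaded, so the per-counter path can skip those members.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_count {
        uint64_t val;
        bool loaded;
    };

    /* Stand-in for the leader's group read filling all members at once. */
    static void group_read(struct toy_count *counts, int nr)
    {
        for (int i = 0; i < nr; i++) {
            counts[i].val = 100 + i;   /* pretend kernel-provided values */
            counts[i].loaded = true;
        }
    }

    /* Stand-in for reading one counter individually (one syscall each). */
    static void read_one(struct toy_count *c, int idx)
    {
        printf("member %d read individually\n", idx);
        c->val = 100 + idx;
    }

    int main(void)
    {
        struct toy_count counts[3] = { { 0, false } };

        group_read(counts, 3);             /* the leader's group read */
        for (int i = 0; i < 3; i++) {
            if (!counts[i].loaded)         /* plays the perf_counts__is_loaded() role */
                read_one(&counts[i], i);
            counts[i].loaded = false;      /* reset before the next read interval */
        }
        printf("all members were covered by the single group read\n");
        return 0;
    }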
@@ -103,15 +103,15 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
 		if (cpus->map[cpu] >= CPU_SETSIZE)
 			continue;
 
-		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
-			pr_debug("perf_evsel__read_on_cpu\n");
+		if (evsel__read_on_cpu(evsel, cpu, 0) < 0) {
+			pr_debug("evsel__read_on_cpu\n");
 			err = -1;
 			break;
 		}
 
 		expected = nr_openat_calls + cpu;
 		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
-			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
+			pr_debug("evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
 				 expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
 			err = -1;
 		}
@@ -46,13 +46,13 @@ int test__openat_syscall_event(struct test *test __maybe_unused, int subtest __m
 		close(fd);
 	}
 
-	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
-		pr_debug("perf_evsel__read_on_cpu\n");
+	if (evsel__read_on_cpu(evsel, 0, 0) < 0) {
+		pr_debug("evsel__read_on_cpu\n");
 		goto out_close_fd;
 	}
 
 	if (perf_counts(evsel->counts, 0, 0)->val != nr_openat_calls) {
-		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
+		pr_debug("evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
 			 nr_openat_calls, perf_counts(evsel->counts, 0, 0)->val);
 		goto out_close_fd;
 	}
@@ -1317,8 +1317,7 @@ void perf_counts_values__scale(struct perf_counts_values *count,
 	*pscaled = scaled;
 }
 
-static int
-perf_evsel__read_one(struct evsel *evsel, int cpu, int thread)
+static int evsel__read_one(struct evsel *evsel, int cpu, int thread)
 {
 	struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread);
 
@@ -1378,8 +1377,7 @@ perf_evsel__process_group_data(struct evsel *leader,
 	return 0;
 }
 
-static int
-perf_evsel__read_group(struct evsel *leader, int cpu, int thread)
+static int evsel__read_group(struct evsel *leader, int cpu, int thread)
 {
 	struct perf_stat_evsel *ps = leader->stats;
 	u64 read_format = leader->core.attr.read_format;
@@ -1409,18 +1407,17 @@ perf_evsel__read_group(struct evsel *leader, int cpu, int thread)
 	return perf_evsel__process_group_data(leader, cpu, thread, data);
 }
 
-int perf_evsel__read_counter(struct evsel *evsel, int cpu, int thread)
+int evsel__read_counter(struct evsel *evsel, int cpu, int thread)
 {
 	u64 read_format = evsel->core.attr.read_format;
 
 	if (read_format & PERF_FORMAT_GROUP)
-		return perf_evsel__read_group(evsel, cpu, thread);
-	else
-		return perf_evsel__read_one(evsel, cpu, thread);
+		return evsel__read_group(evsel, cpu, thread);
+
+	return evsel__read_one(evsel, cpu, thread);
 }
 
-int __perf_evsel__read_on_cpu(struct evsel *evsel,
-			      int cpu, int thread, bool scale)
+int __evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread, bool scale)
 {
 	struct perf_counts_values count;
 	size_t nv = scale ? 3 : 1;
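
The renamed evsel__read_counter() dispatches on PERF_FORMAT_GROUP because the kernel's read() layout differs: a group read hands back all members in one buffer, while a plain read returns a single {value, time_enabled, time_running, id} record, which is what evsel__read_one() parses. Below is a hypothetical standalone sketch of that non-group layout, going through perf_event_open(2) directly rather than through perf's own helpers; the event choice and busy loop are arbitrary.

    /*
     * Hypothetical standalone sketch (not perf tool code): the non-group
     * read() layout that evsel__read_one() has to parse. With the read_format
     * below, the kernel returns four u64s: value, time_enabled, time_running, id.
     */
    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr;
        uint64_t buf[4];        /* value, time_enabled, time_running, id */
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_TASK_CLOCK;
        attr.disabled = 1;
        /* Non-group read_format, i.e. the evsel__read_one() case. */
        attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                           PERF_FORMAT_TOTAL_TIME_RUNNING |
                           PERF_FORMAT_ID;

        fd = perf_event_open(&attr, 0, -1, -1, 0);  /* this thread, any CPU */
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        for (volatile int i = 0; i < 1000000; i++)
            ;       /* burn a little CPU so the counter advances */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
            perror("read");
            close(fd);
            return 1;
        }
        printf("value=%llu enabled=%llu running=%llu id=%llu\n",
               (unsigned long long)buf[0], (unsigned long long)buf[1],
               (unsigned long long)buf[2], (unsigned long long)buf[3]);
        close(fd);
        return 0;
    }

With PERF_FORMAT_GROUP set on the leader instead, the same read() returns nr, the optional time fields, and then one {value, id} pair per group member, which is the buffer evsel__read_group() walks.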
@@ -265,35 +265,32 @@ static inline bool evsel__match2(struct evsel *e1, struct evsel *e2)
 	       (e1->core.attr.config == e2->core.attr.config);
 }
 
-int perf_evsel__read_counter(struct evsel *evsel, int cpu, int thread);
+int evsel__read_counter(struct evsel *evsel, int cpu, int thread);
 
-int __perf_evsel__read_on_cpu(struct evsel *evsel,
-			      int cpu, int thread, bool scale);
+int __evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread, bool scale);
 
 /**
- * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
+ * evsel__read_on_cpu - Read out the results on a CPU and thread
  *
  * @evsel - event selector to read value
  * @cpu - CPU of interest
  * @thread - thread of interest
  */
-static inline int perf_evsel__read_on_cpu(struct evsel *evsel,
-					  int cpu, int thread)
+static inline int evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread)
 {
-	return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
+	return __evsel__read_on_cpu(evsel, cpu, thread, false);
 }
 
 /**
- * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
+ * evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
  *
  * @evsel - event selector to read value
  * @cpu - CPU of interest
  * @thread - thread of interest
 */
-static inline int perf_evsel__read_on_cpu_scaled(struct evsel *evsel,
-						 int cpu, int thread)
+static inline int evsel__read_on_cpu_scaled(struct evsel *evsel, int cpu, int thread)
 {
-	return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
+	return __evsel__read_on_cpu(evsel, cpu, thread, true);
 }
 
 int perf_evsel__parse_sample(struct evsel *evsel, union perf_event *event,
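
The two inline wrappers differ only in the scale flag passed to __evsel__read_on_cpu(): the scaled variant reads three values (value, time_enabled, time_running, per the "nv = scale ? 3 : 1" in the evsel.c hunk above) so the count can be corrected when the event was multiplexed. Below is a rough sketch of that correction under the usual enabled/running extrapolation; scale_count() is illustrative, not the tool's perf_counts_values__scale().

    /*
     * Rough sketch of the multiplexing correction implied by scale=true;
     * scale_count() is illustrative, not the tool's perf_counts_values__scale().
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t scale_count(uint64_t val, uint64_t ena, uint64_t run)
    {
        if (!run || !ena)
            return 0;                 /* the counter never ran */
        if (run < ena)                /* multiplexed: extrapolate */
            return (uint64_t)((double)val * ena / run);
        return val;                   /* ran the whole time it was enabled */
    }

    int main(void)
    {
        /* Counted 1000 events while scheduled in for half of the enabled time. */
        printf("%llu\n", (unsigned long long)scale_count(1000, 200000, 100000));
        return 0;
    }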
@@ -19,8 +19,7 @@
  * However, if the leader is an AUX area event, then assume the event to sample
  * is the next event.
  */
-static struct evsel *perf_evsel__read_sampler(struct evsel *evsel,
-					      struct evlist *evlist)
+static struct evsel *evsel__read_sampler(struct evsel *evsel, struct evlist *evlist)
 {
 	struct evsel *leader = evsel->leader;
 
@@ -43,7 +42,7 @@ static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *ev
 	if (!leader->sample_read)
 		return;
 
-	read_sampler = perf_evsel__read_sampler(evsel, evlist);
+	read_sampler = evsel__read_sampler(evsel, evlist);
 
 	if (evsel == read_sampler)
 		return;