perf evsel: Rename *perf_evsel__*set_sample_*() to *evsel__*set_sample_*()
As they are 'struct evsel' methods, not part of tools/lib/perf/, aka libperf, to whom the perf_ prefix belongs.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit 862b2f8fbc
parent 347c751a64
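For readers skimming the diff below: the renamed helpers are thin wrappers that toggle PERF_SAMPLE_* flags in the event's attr.sample_type, via a token-pasting macro. The following standalone sketch mirrors that pattern outside the perf tree; struct fake_evsel and main() are made up for illustration, only the helper/macro shapes follow the evsel.h hunk near the bottom of this diff, and the in-tree helpers do extra bookkeeping (sample size, sample-id positions) that is omitted here.

/* Standalone sketch (not the in-tree implementation): the same
 * token-pasting macro + bitmask pattern as the renamed helpers.
 * Build: gcc -o sample_bits sample_bits.c (needs the Linux UAPI headers). */
#include <stdio.h>
#include <linux/perf_event.h>   /* PERF_SAMPLE_* flags */

struct fake_evsel {             /* stand-in for struct evsel / evsel->core.attr */
        struct perf_event_attr attr;
};

static void __evsel__set_sample_bit(struct fake_evsel *evsel, __u64 bit)
{
        evsel->attr.sample_type |= bit;
}

static void __evsel__reset_sample_bit(struct fake_evsel *evsel, __u64 bit)
{
        evsel->attr.sample_type &= ~bit;
}

/* Same shape as the tools/perf macros after this rename. */
#define evsel__set_sample_bit(evsel, bit) \
        __evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)
#define evsel__reset_sample_bit(evsel, bit) \
        __evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)

int main(void)
{
        struct fake_evsel evsel = { .attr = { .size = sizeof(struct perf_event_attr) } };

        evsel__set_sample_bit(&evsel, TIME);           /* PERF_SAMPLE_TIME */
        evsel__set_sample_bit(&evsel, CPU);            /* PERF_SAMPLE_CPU */
        evsel__reset_sample_bit(&evsel, BRANCH_STACK); /* PERF_SAMPLE_BRANCH_STACK */

        printf("sample_type = %#llx\n",
               (unsigned long long)evsel.attr.sample_type);
        return 0;
}

The call sites in the hunks below differ from this sketch only in that they pass a real struct evsel and rely on the in-tree helpers' extra bookkeeping.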
@@ -402,7 +402,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
                  * when a context switch happened.
                  */
                 if (!perf_cpu_map__empty(cpus)) {
-                        perf_evsel__set_sample_bit(cs_etm_evsel, CPU);
+                        evsel__set_sample_bit(cs_etm_evsel, CPU);

                         err = cs_etm_set_option(itr, cs_etm_evsel,
                                                 ETM_OPT_CTXTID | ETM_OPT_TS);
@@ -426,7 +426,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,

                 /* In per-cpu case, always need the time of mmap events etc */
                 if (!perf_cpu_map__empty(cpus))
-                        perf_evsel__set_sample_bit(tracking_evsel, TIME);
+                        evsel__set_sample_bit(tracking_evsel, TIME);
         }

 out:
@@ -120,9 +120,9 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
          */
         perf_evlist__to_front(evlist, arm_spe_evsel);

-        perf_evsel__set_sample_bit(arm_spe_evsel, CPU);
-        perf_evsel__set_sample_bit(arm_spe_evsel, TIME);
-        perf_evsel__set_sample_bit(arm_spe_evsel, TID);
+        evsel__set_sample_bit(arm_spe_evsel, CPU);
+        evsel__set_sample_bit(arm_spe_evsel, TIME);
+        evsel__set_sample_bit(arm_spe_evsel, TID);

         /* Add dummy event to keep tracking */
         err = parse_events(evlist, "dummy:u", NULL);
@@ -134,9 +134,9 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,

         tracking_evsel->core.attr.freq = 0;
         tracking_evsel->core.attr.sample_period = 1;
-        perf_evsel__set_sample_bit(tracking_evsel, TIME);
-        perf_evsel__set_sample_bit(tracking_evsel, CPU);
-        perf_evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
+        evsel__set_sample_bit(tracking_evsel, TIME);
+        evsel__set_sample_bit(tracking_evsel, CPU);
+        evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);

         return 0;
 }
@@ -224,7 +224,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
                  * AUX event.
                  */
                 if (!perf_cpu_map__empty(cpus))
-                        perf_evsel__set_sample_bit(intel_bts_evsel, CPU);
+                        evsel__set_sample_bit(intel_bts_evsel, CPU);
         }

         /* Add dummy event to keep tracking */
@@ -421,8 +421,8 @@ static int intel_pt_track_switches(struct evlist *evlist)

         evsel = evlist__last(evlist);

-        perf_evsel__set_sample_bit(evsel, CPU);
-        perf_evsel__set_sample_bit(evsel, TIME);
+        evsel__set_sample_bit(evsel, CPU);
+        evsel__set_sample_bit(evsel, TIME);

         evsel->core.system_wide = true;
         evsel->no_aux_samples = true;
@@ -802,10 +802,10 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
                 switch_evsel->no_aux_samples = true;
                 switch_evsel->immediate = true;

-                perf_evsel__set_sample_bit(switch_evsel, TID);
-                perf_evsel__set_sample_bit(switch_evsel, TIME);
-                perf_evsel__set_sample_bit(switch_evsel, CPU);
-                perf_evsel__reset_sample_bit(switch_evsel, BRANCH_STACK);
+                evsel__set_sample_bit(switch_evsel, TID);
+                evsel__set_sample_bit(switch_evsel, TIME);
+                evsel__set_sample_bit(switch_evsel, CPU);
+                evsel__reset_sample_bit(switch_evsel, BRANCH_STACK);

                 opts->record_switch_events = false;
                 ptr->have_sched_switch = 3;
@@ -839,7 +839,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
                  * AUX event.
                  */
                 if (!perf_cpu_map__empty(cpus))
-                        perf_evsel__set_sample_bit(intel_pt_evsel, CPU);
+                        evsel__set_sample_bit(intel_pt_evsel, CPU);
         }

         /* Add dummy event to keep tracking */
@@ -863,11 +863,11 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,

                 /* In per-cpu case, always need the time of mmap events etc */
                 if (!perf_cpu_map__empty(cpus)) {
-                        perf_evsel__set_sample_bit(tracking_evsel, TIME);
+                        evsel__set_sample_bit(tracking_evsel, TIME);
                         /* And the CPU for switch events */
-                        perf_evsel__set_sample_bit(tracking_evsel, CPU);
+                        evsel__set_sample_bit(tracking_evsel, CPU);
                 }
-                perf_evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
+                evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
         }

         /*
@@ -433,7 +433,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
                         total_nr_samples += nr_samples;
                         hists__collapse_resort(hists, NULL);
                         /* Don't sort callchain */
-                        perf_evsel__reset_sample_bit(pos, CALLCHAIN);
+                        evsel__reset_sample_bit(pos, CALLCHAIN);
                         perf_evsel__output_resort(pos, NULL);

                         if (symbol_conf.event_group &&
@@ -990,7 +990,7 @@ static void data_process(void)
                         data__fprintf();

                 /* Don't sort callchain for perf diff */
-                perf_evsel__reset_sample_bit(evsel_base, CALLCHAIN);
+                evsel__reset_sample_bit(evsel_base, CALLCHAIN);

                 hists__process(hists_base);
         }
@@ -1033,16 +1033,16 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
                 struct perf_event_attr *attr = &pos->core.attr;

                 /* make sure these *are* set */
-                perf_evsel__set_sample_bit(pos, TID);
-                perf_evsel__set_sample_bit(pos, TIME);
-                perf_evsel__set_sample_bit(pos, CPU);
-                perf_evsel__set_sample_bit(pos, RAW);
+                evsel__set_sample_bit(pos, TID);
+                evsel__set_sample_bit(pos, TIME);
+                evsel__set_sample_bit(pos, CPU);
+                evsel__set_sample_bit(pos, RAW);
                 /* make sure these are *not*; want as small a sample as possible */
-                perf_evsel__reset_sample_bit(pos, PERIOD);
-                perf_evsel__reset_sample_bit(pos, IP);
-                perf_evsel__reset_sample_bit(pos, CALLCHAIN);
-                perf_evsel__reset_sample_bit(pos, ADDR);
-                perf_evsel__reset_sample_bit(pos, READ);
+                evsel__reset_sample_bit(pos, PERIOD);
+                evsel__reset_sample_bit(pos, IP);
+                evsel__reset_sample_bit(pos, CALLCHAIN);
+                evsel__reset_sample_bit(pos, ADDR);
+                evsel__reset_sample_bit(pos, READ);
                 attr->mmap = 0;
                 attr->comm = 0;
                 attr->task = 0;
@@ -280,7 +280,7 @@ static int test1(struct evsel *evsel, struct machine *machine)

         symbol_conf.use_callchain = false;
         symbol_conf.cumulate_callchain = false;
-        perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
+        evsel__reset_sample_bit(evsel, CALLCHAIN);

         setup_sorting(NULL);
         callchain_register_param(&callchain_param);
@@ -427,7 +427,7 @@ static int test2(struct evsel *evsel, struct machine *machine)

         symbol_conf.use_callchain = true;
         symbol_conf.cumulate_callchain = false;
-        perf_evsel__set_sample_bit(evsel, CALLCHAIN);
+        evsel__set_sample_bit(evsel, CALLCHAIN);

         setup_sorting(NULL);
         callchain_register_param(&callchain_param);
@@ -485,7 +485,7 @@ static int test3(struct evsel *evsel, struct machine *machine)

         symbol_conf.use_callchain = false;
         symbol_conf.cumulate_callchain = true;
-        perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
+        evsel__reset_sample_bit(evsel, CALLCHAIN);

         setup_sorting(NULL);
         callchain_register_param(&callchain_param);
@@ -669,7 +669,7 @@ static int test4(struct evsel *evsel, struct machine *machine)

         symbol_conf.use_callchain = true;
         symbol_conf.cumulate_callchain = true;
-        perf_evsel__set_sample_bit(evsel, CALLCHAIN);
+        evsel__set_sample_bit(evsel, CALLCHAIN);

         setup_sorting(NULL);

@@ -86,7 +86,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
                 }

                 evsels[i]->core.attr.wakeup_events = 1;
-                perf_evsel__set_sample_id(evsels[i], false);
+                evsel__set_sample_id(evsels[i], false);

                 evlist__add(evlist, evsels[i]);

@@ -106,9 +106,9 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
          * Config the evsels, setting attr->comm on the first one, etc.
          */
         evsel = evlist__first(evlist);
-        perf_evsel__set_sample_bit(evsel, CPU);
-        perf_evsel__set_sample_bit(evsel, TID);
-        perf_evsel__set_sample_bit(evsel, TIME);
+        evsel__set_sample_bit(evsel, CPU);
+        evsel__set_sample_bit(evsel, TID);
+        evsel__set_sample_bit(evsel, TIME);
         perf_evlist__config(evlist, &opts, NULL);

         err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
@@ -394,8 +394,8 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_

         switch_evsel = evlist__last(evlist);

-        perf_evsel__set_sample_bit(switch_evsel, CPU);
-        perf_evsel__set_sample_bit(switch_evsel, TIME);
+        evsel__set_sample_bit(switch_evsel, CPU);
+        evsel__set_sample_bit(switch_evsel, TIME);

         switch_evsel->core.system_wide = true;
         switch_evsel->no_aux_samples = true;
@@ -412,8 +412,8 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
                 goto out_err;
         }

-        perf_evsel__set_sample_bit(cycles_evsel, CPU);
-        perf_evsel__set_sample_bit(cycles_evsel, TIME);
+        evsel__set_sample_bit(cycles_evsel, CPU);
+        evsel__set_sample_bit(cycles_evsel, TIME);

         /* Fourth event */
         err = parse_events(evlist, "dummy:u", NULL);
@@ -429,7 +429,7 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
         tracking_evsel->core.attr.freq = 0;
         tracking_evsel->core.attr.sample_period = 1;

-        perf_evsel__set_sample_bit(tracking_evsel, TIME);
+        evsel__set_sample_bit(tracking_evsel, TIME);

         /* Config events */
         perf_evlist__config(evlist, &opts, NULL);
@@ -705,10 +705,10 @@ static int auxtrace_validate_aux_sample_size(struct evlist *evlist,
                                 pr_err("Cannot add AUX area sampling because group leader is not an AUX area event\n");
                                 return -EINVAL;
                         }
-                        perf_evsel__set_sample_bit(evsel, AUX);
+                        evsel__set_sample_bit(evsel, AUX);
                         opts->auxtrace_sample_mode = true;
                 } else {
-                        perf_evsel__reset_sample_bit(evsel, AUX);
+                        evsel__reset_sample_bit(evsel, AUX);
                 }
         }

@@ -948,7 +948,7 @@ void __perf_evlist__set_sample_bit(struct evlist *evlist,
         struct evsel *evsel;

         evlist__for_each_entry(evlist, evsel)
-                __perf_evsel__set_sample_bit(evsel, bit);
+                __evsel__set_sample_bit(evsel, bit);
 }

 void __perf_evlist__reset_sample_bit(struct evlist *evlist,
@@ -957,7 +957,7 @@ void __perf_evlist__reset_sample_bit(struct evlist *evlist,
         struct evsel *evsel;

         evlist__for_each_entry(evlist, evsel)
-                __perf_evsel__reset_sample_bit(evsel, bit);
+                __evsel__reset_sample_bit(evsel, bit);
 }

 int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
@@ -184,7 +184,7 @@ void evsel__calc_id_pos(struct evsel *evsel)
         evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type);
 }

-void __perf_evsel__set_sample_bit(struct evsel *evsel,
+void __evsel__set_sample_bit(struct evsel *evsel,
                                   enum perf_event_sample_format bit)
 {
         if (!(evsel->core.attr.sample_type & bit)) {
@@ -194,7 +194,7 @@ void __perf_evsel__set_sample_bit(struct evsel *evsel,
         }
 }

-void __perf_evsel__reset_sample_bit(struct evsel *evsel,
+void __evsel__reset_sample_bit(struct evsel *evsel,
                                     enum perf_event_sample_format bit)
 {
         if (evsel->core.attr.sample_type & bit) {
@@ -204,14 +204,14 @@ void __perf_evsel__reset_sample_bit(struct evsel *evsel,
         }
 }

-void perf_evsel__set_sample_id(struct evsel *evsel,
+void evsel__set_sample_id(struct evsel *evsel,
                                bool can_sample_identifier)
 {
         if (can_sample_identifier) {
-                perf_evsel__reset_sample_bit(evsel, ID);
-                perf_evsel__set_sample_bit(evsel, IDENTIFIER);
+                evsel__reset_sample_bit(evsel, ID);
+                evsel__set_sample_bit(evsel, IDENTIFIER);
         } else {
-                perf_evsel__set_sample_bit(evsel, ID);
+                evsel__set_sample_bit(evsel, ID);
         }
         evsel->core.attr.read_format |= PERF_FORMAT_ID;
 }
@@ -689,7 +689,7 @@ static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *o
         bool function = perf_evsel__is_function_event(evsel);
         struct perf_event_attr *attr = &evsel->core.attr;

-        perf_evsel__set_sample_bit(evsel, CALLCHAIN);
+        evsel__set_sample_bit(evsel, CALLCHAIN);

         attr->sample_max_stack = param->max_stack;

@@ -704,7 +704,7 @@ static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *o
                                            "to get user callchain information. "
                                            "Falling back to framepointers.\n");
                         } else {
-                                perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
+                                evsel__set_sample_bit(evsel, BRANCH_STACK);
                                 attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
                                                         PERF_SAMPLE_BRANCH_CALL_STACK |
                                                         PERF_SAMPLE_BRANCH_NO_CYCLES |
@@ -718,8 +718,8 @@ static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *o

         if (param->record_mode == CALLCHAIN_DWARF) {
                 if (!function) {
-                        perf_evsel__set_sample_bit(evsel, REGS_USER);
-                        perf_evsel__set_sample_bit(evsel, STACK_USER);
+                        evsel__set_sample_bit(evsel, REGS_USER);
+                        evsel__set_sample_bit(evsel, STACK_USER);
                         if (opts->sample_user_regs && DWARF_MINIMAL_REGS != PERF_REGS_MASK) {
                                 attr->sample_regs_user |= DWARF_MINIMAL_REGS;
                                 pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
@@ -755,16 +755,16 @@ perf_evsel__reset_callgraph(struct evsel *evsel,
 {
         struct perf_event_attr *attr = &evsel->core.attr;

-        perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
+        evsel__reset_sample_bit(evsel, CALLCHAIN);
         if (param->record_mode == CALLCHAIN_LBR) {
-                perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
+                evsel__reset_sample_bit(evsel, BRANCH_STACK);
                 attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
                                               PERF_SAMPLE_BRANCH_CALL_STACK |
                                               PERF_SAMPLE_BRANCH_HW_INDEX);
         }
         if (param->record_mode == CALLCHAIN_DWARF) {
-                perf_evsel__reset_sample_bit(evsel, REGS_USER);
-                perf_evsel__reset_sample_bit(evsel, STACK_USER);
+                evsel__reset_sample_bit(evsel, REGS_USER);
+                evsel__reset_sample_bit(evsel, STACK_USER);
         }
 }

@@ -788,32 +788,32 @@ static void apply_config_terms(struct evsel *evsel,
                         if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
                                 attr->sample_period = term->val.period;
                                 attr->freq = 0;
-                                perf_evsel__reset_sample_bit(evsel, PERIOD);
+                                evsel__reset_sample_bit(evsel, PERIOD);
                         }
                         break;
                 case PERF_EVSEL__CONFIG_TERM_FREQ:
                         if (!(term->weak && opts->user_freq != UINT_MAX)) {
                                 attr->sample_freq = term->val.freq;
                                 attr->freq = 1;
-                                perf_evsel__set_sample_bit(evsel, PERIOD);
+                                evsel__set_sample_bit(evsel, PERIOD);
                         }
                         break;
                 case PERF_EVSEL__CONFIG_TERM_TIME:
                         if (term->val.time)
-                                perf_evsel__set_sample_bit(evsel, TIME);
+                                evsel__set_sample_bit(evsel, TIME);
                         else
-                                perf_evsel__reset_sample_bit(evsel, TIME);
+                                evsel__reset_sample_bit(evsel, TIME);
                         break;
                 case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
                         callgraph_buf = term->val.str;
                         break;
                 case PERF_EVSEL__CONFIG_TERM_BRANCH:
                         if (term->val.str && strcmp(term->val.str, "no")) {
-                                perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
+                                evsel__set_sample_bit(evsel, BRANCH_STACK);
                                 parse_branch_str(term->val.str,
                                                  &attr->branch_sample_type);
                         } else
-                                perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
+                                evsel__reset_sample_bit(evsel, BRANCH_STACK);
                         break;
                 case PERF_EVSEL__CONFIG_TERM_STACK_USER:
                         dump_size = term->val.stack_user;
@@ -892,8 +892,8 @@ static void apply_config_terms(struct evsel *evsel,
                 /* set perf-event callgraph */
                 if (param.enabled) {
                         if (sample_address) {
-                                perf_evsel__set_sample_bit(evsel, ADDR);
-                                perf_evsel__set_sample_bit(evsel, DATA_SRC);
+                                evsel__set_sample_bit(evsel, ADDR);
+                                evsel__set_sample_bit(evsel, DATA_SRC);
                                 evsel->core.attr.mmap_data = track;
                         }
                         evsel__config_callchain(evsel, opts, &param);
@@ -960,17 +960,17 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
         attr->inherit = !opts->no_inherit;
         attr->write_backward = opts->overwrite ? 1 : 0;

-        perf_evsel__set_sample_bit(evsel, IP);
-        perf_evsel__set_sample_bit(evsel, TID);
+        evsel__set_sample_bit(evsel, IP);
+        evsel__set_sample_bit(evsel, TID);

         if (evsel->sample_read) {
-                perf_evsel__set_sample_bit(evsel, READ);
+                evsel__set_sample_bit(evsel, READ);

                 /*
                  * We need ID even in case of single event, because
                  * PERF_SAMPLE_READ process ID specific data.
                  */
-                perf_evsel__set_sample_id(evsel, false);
+                evsel__set_sample_id(evsel, false);

                 /*
                  * Apply group format only if we belong to group
@@ -989,7 +989,7 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
         if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
                                      opts->user_interval != ULLONG_MAX)) {
                 if (opts->freq) {
-                        perf_evsel__set_sample_bit(evsel, PERIOD);
+                        evsel__set_sample_bit(evsel, PERIOD);
                         attr->freq = 1;
                         attr->sample_freq = opts->freq;
                 } else {
@@ -1009,7 +1009,7 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
         }

         if (opts->sample_address) {
-                perf_evsel__set_sample_bit(evsel, ADDR);
+                evsel__set_sample_bit(evsel, ADDR);
                 attr->mmap_data = track;
         }

@@ -1026,16 +1026,16 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,

         if (opts->sample_intr_regs) {
                 attr->sample_regs_intr = opts->sample_intr_regs;
-                perf_evsel__set_sample_bit(evsel, REGS_INTR);
+                evsel__set_sample_bit(evsel, REGS_INTR);
         }

         if (opts->sample_user_regs) {
                 attr->sample_regs_user |= opts->sample_user_regs;
-                perf_evsel__set_sample_bit(evsel, REGS_USER);
+                evsel__set_sample_bit(evsel, REGS_USER);
         }

         if (target__has_cpu(&opts->target) || opts->sample_cpu)
-                perf_evsel__set_sample_bit(evsel, CPU);
+                evsel__set_sample_bit(evsel, CPU);

         /*
          * When the user explicitly disabled time don't force it here.
@@ -1044,31 +1044,31 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
             (!perf_missing_features.sample_id_all &&
             (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
              opts->sample_time_set)))
-                perf_evsel__set_sample_bit(evsel, TIME);
+                evsel__set_sample_bit(evsel, TIME);

         if (opts->raw_samples && !evsel->no_aux_samples) {
-                perf_evsel__set_sample_bit(evsel, TIME);
-                perf_evsel__set_sample_bit(evsel, RAW);
-                perf_evsel__set_sample_bit(evsel, CPU);
+                evsel__set_sample_bit(evsel, TIME);
+                evsel__set_sample_bit(evsel, RAW);
+                evsel__set_sample_bit(evsel, CPU);
         }

         if (opts->sample_address)
-                perf_evsel__set_sample_bit(evsel, DATA_SRC);
+                evsel__set_sample_bit(evsel, DATA_SRC);

         if (opts->sample_phys_addr)
-                perf_evsel__set_sample_bit(evsel, PHYS_ADDR);
+                evsel__set_sample_bit(evsel, PHYS_ADDR);

         if (opts->no_buffering) {
                 attr->watermark = 0;
                 attr->wakeup_events = 1;
         }
         if (opts->branch_stack && !evsel->no_aux_samples) {
-                perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
+                evsel__set_sample_bit(evsel, BRANCH_STACK);
                 attr->branch_sample_type = opts->branch_stack;
         }

         if (opts->sample_weight)
-                perf_evsel__set_sample_bit(evsel, WEIGHT);
+                evsel__set_sample_bit(evsel, WEIGHT);

         attr->task = track;
         attr->mmap = track;
@@ -1082,14 +1082,14 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,

         if (opts->record_cgroup) {
                 attr->cgroup = track && !perf_missing_features.cgroup;
-                perf_evsel__set_sample_bit(evsel, CGROUP);
+                evsel__set_sample_bit(evsel, CGROUP);
         }

         if (opts->record_switch_events)
                 attr->context_switch = track;

         if (opts->sample_transaction)
-                perf_evsel__set_sample_bit(evsel, TRANSACTION);
+                evsel__set_sample_bit(evsel, TRANSACTION);

         if (opts->running_time) {
                 evsel->core.attr.read_format |=
@@ -1152,9 +1152,9 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
         /* The --period option takes the precedence. */
         if (opts->period_set) {
                 if (opts->period)
-                        perf_evsel__set_sample_bit(evsel, PERIOD);
+                        evsel__set_sample_bit(evsel, PERIOD);
                 else
-                        perf_evsel__reset_sample_bit(evsel, PERIOD);
+                        evsel__reset_sample_bit(evsel, PERIOD);
         }

         /*
@@ -1163,7 +1163,7 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
          * if BRANCH_STACK bit is set.
          */
         if (opts->initial_delay && is_dummy_event(evsel))
-                perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
+                evsel__reset_sample_bit(evsel, BRANCH_STACK);
 }

 int perf_evsel__set_filter(struct evsel *evsel, const char *filter)
@@ -214,19 +214,16 @@ const char *evsel__name(struct evsel *evsel);
 const char *evsel__group_name(struct evsel *evsel);
 int evsel__group_desc(struct evsel *evsel, char *buf, size_t size);

-void __perf_evsel__set_sample_bit(struct evsel *evsel,
-                                  enum perf_event_sample_format bit);
-void __perf_evsel__reset_sample_bit(struct evsel *evsel,
-                                    enum perf_event_sample_format bit);
+void __evsel__set_sample_bit(struct evsel *evsel, enum perf_event_sample_format bit);
+void __evsel__reset_sample_bit(struct evsel *evsel, enum perf_event_sample_format bit);

-#define perf_evsel__set_sample_bit(evsel, bit) \
-        __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)
+#define evsel__set_sample_bit(evsel, bit) \
+        __evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)

-#define perf_evsel__reset_sample_bit(evsel, bit) \
-        __perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
+#define evsel__reset_sample_bit(evsel, bit) \
+        __evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)

-void perf_evsel__set_sample_id(struct evsel *evsel,
-                               bool use_sample_identifier);
+void evsel__set_sample_id(struct evsel *evsel, bool use_sample_identifier);

 int perf_evsel__set_filter(struct evsel *evsel, const char *filter);
 int perf_evsel__append_tp_filter(struct evsel *evsel, const char *filter);
@@ -123,7 +123,7 @@ void perf_evlist__config(struct evlist *evlist, struct record_opts *opts,

         if (sample_id) {
                 evlist__for_each_entry(evlist, evsel)
-                        perf_evsel__set_sample_id(evsel, use_sample_identifier);
+                        evsel__set_sample_id(evsel, use_sample_identifier);
         }

         perf_evlist__set_id_pos(evlist);
@@ -108,7 +108,7 @@ int perf_evlist__start_sb_thread(struct evlist *evlist, struct target *target)
         bool can_sample_identifier = perf_can_sample_identifier();

         evlist__for_each_entry(evlist, counter)
-                perf_evsel__set_sample_id(counter, can_sample_identifier);
+                evsel__set_sample_id(counter, can_sample_identifier);

         perf_evlist__set_id_pos(evlist);
 }
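As a closing aside, the evsel__set_sample_id() hunk earlier in this diff (@@ -204,14) boils down to choosing between PERF_SAMPLE_IDENTIFIER and PERF_SAMPLE_ID and always requesting PERF_FORMAT_ID. A minimal, self-contained sketch of that choice; set_sample_id() here is a made-up stand-in that works directly on a perf_event_attr, not the perf tool function:

/* Sketch only: mirrors the logic shown in the evsel.c hunk for
 * evsel__set_sample_id(), applied straight to a perf_event_attr. */
#include <stdbool.h>
#include <stdio.h>
#include <linux/perf_event.h>

static void set_sample_id(struct perf_event_attr *attr, bool can_sample_identifier)
{
        if (can_sample_identifier) {
                /* Kernels that support it: a fixed-position identifier in each record. */
                attr->sample_type &= ~PERF_SAMPLE_ID;
                attr->sample_type |= PERF_SAMPLE_IDENTIFIER;
        } else {
                /* Fallback for kernels without PERF_SAMPLE_IDENTIFIER. */
                attr->sample_type |= PERF_SAMPLE_ID;
        }
        attr->read_format |= PERF_FORMAT_ID;
}

int main(void)
{
        struct perf_event_attr attr = { .size = sizeof(attr) };

        set_sample_id(&attr, true);
        printf("sample_type = %#llx, read_format = %#llx\n",
               (unsigned long long)attr.sample_type,
               (unsigned long long)attr.read_format);
        return 0;
}

The callers shown in the last two hunks pass perf_can_sample_identifier() as that boolean, which is how the tool picks between the two sample-id layouts at runtime.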