perf/bpf: Reorder bpf_overflow_handler() ahead of __perf_event_overflow()
This will allow __perf_event_overflow() to call bpf_overflow_handler().

Signed-off-by: Kyle Huey <khuey@kylehuey.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20240412015019.7060-2-khuey@kylehuey.com
parent acf68d98ca
commit 4c03fe11b9
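The patch below is a pure code move; the point is that a later change can invoke the BPF handler straight from the generic overflow path once it is defined earlier in the file. The fragment below is a minimal illustrative sketch of such a call site and is not part of this commit: the event->prog test and the fallback through the handler pointer are assumptions about how a follow-up might be wired.

#ifdef CONFIG_BPF_SYSCALL
	/*
	 * Illustrative only: with bpf_overflow_handler() defined ahead of
	 * __perf_event_overflow(), it could be called directly here instead
	 * of only through the event->overflow_handler function pointer.
	 */
	if (event->prog)
		bpf_overflow_handler(event, data, regs);
	else
#endif
		READ_ONCE(event->overflow_handler)(event, data, regs);

The presumable payoff is that a BPF handler would no longer have to be installed by overwriting event->overflow_handler at attach time, but that rework is outside the scope of this patch.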
kernel/events/core.c

@@ -9563,6 +9563,98 @@ static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
 	return true;
 }
 
+#ifdef CONFIG_BPF_SYSCALL
+static void bpf_overflow_handler(struct perf_event *event,
+				 struct perf_sample_data *data,
+				 struct pt_regs *regs)
+{
+	struct bpf_perf_event_data_kern ctx = {
+		.data = data,
+		.event = event,
+	};
+	struct bpf_prog *prog;
+	int ret = 0;
+
+	ctx.regs = perf_arch_bpf_user_pt_regs(regs);
+	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
+		goto out;
+	rcu_read_lock();
+	prog = READ_ONCE(event->prog);
+	if (prog) {
+		perf_prepare_sample(data, event, regs);
+		ret = bpf_prog_run(prog, &ctx);
+	}
+	rcu_read_unlock();
+out:
+	__this_cpu_dec(bpf_prog_active);
+	if (!ret)
+		return;
+
+	event->orig_overflow_handler(event, data, regs);
+}
+
+static int perf_event_set_bpf_handler(struct perf_event *event,
+				      struct bpf_prog *prog,
+				      u64 bpf_cookie)
+{
+	if (event->overflow_handler_context)
+		/* hw breakpoint or kernel counter */
+		return -EINVAL;
+
+	if (event->prog)
+		return -EEXIST;
+
+	if (prog->type != BPF_PROG_TYPE_PERF_EVENT)
+		return -EINVAL;
+
+	if (event->attr.precise_ip &&
+	    prog->call_get_stack &&
+	    (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) ||
+	     event->attr.exclude_callchain_kernel ||
+	     event->attr.exclude_callchain_user)) {
+		/*
+		 * On perf_event with precise_ip, calling bpf_get_stack()
+		 * may trigger unwinder warnings and occasional crashes.
+		 * bpf_get_[stack|stackid] works around this issue by using
+		 * callchain attached to perf_sample_data. If the
+		 * perf_event does not full (kernel and user) callchain
+		 * attached to perf_sample_data, do not allow attaching BPF
+		 * program that calls bpf_get_[stack|stackid].
+		 */
+		return -EPROTO;
+	}
+
+	event->prog = prog;
+	event->bpf_cookie = bpf_cookie;
+	event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
+	WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
+	return 0;
+}
+
+static void perf_event_free_bpf_handler(struct perf_event *event)
+{
+	struct bpf_prog *prog = event->prog;
+
+	if (!prog)
+		return;
+
+	WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
+	event->prog = NULL;
+	bpf_prog_put(prog);
+}
+#else
+static int perf_event_set_bpf_handler(struct perf_event *event,
+				      struct bpf_prog *prog,
+				      u64 bpf_cookie)
+{
+	return -EOPNOTSUPP;
+}
+
+static void perf_event_free_bpf_handler(struct perf_event *event)
+{
+}
+#endif
+
 /*
  * Generic event overflow handling, sampling.
  */
@@ -10441,97 +10533,6 @@ static void perf_event_free_filter(struct perf_event *event)
 	ftrace_profile_free_filter(event);
 }
 
-#ifdef CONFIG_BPF_SYSCALL
-static void bpf_overflow_handler(struct perf_event *event,
-				 struct perf_sample_data *data,
-				 struct pt_regs *regs)
-{
-	struct bpf_perf_event_data_kern ctx = {
-		.data = data,
-		.event = event,
-	};
-	struct bpf_prog *prog;
-	int ret = 0;
-
-	ctx.regs = perf_arch_bpf_user_pt_regs(regs);
-	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
-		goto out;
-	rcu_read_lock();
-	prog = READ_ONCE(event->prog);
-	if (prog) {
-		perf_prepare_sample(data, event, regs);
-		ret = bpf_prog_run(prog, &ctx);
-	}
-	rcu_read_unlock();
-out:
-	__this_cpu_dec(bpf_prog_active);
-	if (!ret)
-		return;
-
-	event->orig_overflow_handler(event, data, regs);
-}
-
-static int perf_event_set_bpf_handler(struct perf_event *event,
-				      struct bpf_prog *prog,
-				      u64 bpf_cookie)
-{
-	if (event->overflow_handler_context)
-		/* hw breakpoint or kernel counter */
-		return -EINVAL;
-
-	if (event->prog)
-		return -EEXIST;
-
-	if (prog->type != BPF_PROG_TYPE_PERF_EVENT)
-		return -EINVAL;
-
-	if (event->attr.precise_ip &&
-	    prog->call_get_stack &&
-	    (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) ||
-	     event->attr.exclude_callchain_kernel ||
-	     event->attr.exclude_callchain_user)) {
-		/*
-		 * On perf_event with precise_ip, calling bpf_get_stack()
-		 * may trigger unwinder warnings and occasional crashes.
-		 * bpf_get_[stack|stackid] works around this issue by using
-		 * callchain attached to perf_sample_data. If the
-		 * perf_event does not full (kernel and user) callchain
-		 * attached to perf_sample_data, do not allow attaching BPF
-		 * program that calls bpf_get_[stack|stackid].
-		 */
-		return -EPROTO;
-	}
-
-	event->prog = prog;
-	event->bpf_cookie = bpf_cookie;
-	event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
-	WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
-	return 0;
-}
-
-static void perf_event_free_bpf_handler(struct perf_event *event)
-{
-	struct bpf_prog *prog = event->prog;
-
-	if (!prog)
-		return;
-
-	WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
-	event->prog = NULL;
-	bpf_prog_put(prog);
-}
-#else
-static int perf_event_set_bpf_handler(struct perf_event *event,
-				      struct bpf_prog *prog,
-				      u64 bpf_cookie)
-{
-	return -EOPNOTSUPP;
-}
-static void perf_event_free_bpf_handler(struct perf_event *event)
-{
-}
-#endif
-
 /*
  * returns true if the event is a tracepoint, or a kprobe/upprobe created
  * with perf_event_open()
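For reference, the attach path that ends in perf_event_set_bpf_handler() is reached from user space via the PERF_EVENT_IOC_SET_BPF ioctl on a non-tracepoint perf event. The program below is a rough, self-contained sketch using libbpf; "sample_prog.bpf.o" and "on_sample" are hypothetical names for a pre-built BPF_PROG_TYPE_PERF_EVENT object and program, and error handling is omitted.

/* Rough sketch: attach a perf_event-type BPF program to a sampling event.
 * File/program names are placeholders; no error checking. */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.size		= sizeof(attr),
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.sample_period	= 1000000,
	};
	struct bpf_object *obj;
	struct bpf_program *prog;
	int perf_fd, prog_fd;

	/* Sampling event: all processes on CPU 0 (needs privileges). */
	perf_fd = syscall(SYS_perf_event_open, &attr, -1, 0, -1, 0);

	/* Load the (assumed) BPF_PROG_TYPE_PERF_EVENT program. */
	obj = bpf_object__open_file("sample_prog.bpf.o", NULL);
	bpf_object__load(obj);
	prog = bpf_object__find_program_by_name(obj, "on_sample");
	prog_fd = bpf_program__fd(prog);

	/*
	 * For a non-tracepoint event like this one, this ioctl is routed to
	 * perf_event_set_bpf_handler(), which installs bpf_overflow_handler()
	 * as the event's overflow handler.
	 */
	ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	ioctl(perf_fd, PERF_EVENT_IOC_ENABLE, 0);

	pause();	/* the BPF program now runs on each sample */
	return 0;
}

Detaching the program (or closing the event fd) eventually reaches perf_event_free_bpf_handler(), which restores the original overflow handler and drops the program reference.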