forked from Minki/linux
commit 41e9a8046c
$ trace_event tests attaching a BPF program to HW_CPU_CYCLES, SW_CPU_CLOCK, HW_CACHE_L1D and other events. It runs 'dd' in the background while the BPF program collects user and kernel stack traces on counter overflow. User space expects to see sys_read and sys_write in the kernel stack. $ tracex6 tests reading of various perf counters from a BPF program. Both tests were refactored to increase coverage and be more accurate. Signed-off-by: Teng Qin <qinteng@fb.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net>
42 lines · 912 B · C
#include <linux/ptrace.h>
|
|
#include <linux/version.h>
|
|
#include <uapi/linux/bpf.h>
|
|
#include "bpf_helpers.h"
|
|
|
|
/* Perf-event array: each slot is expected to hold a perf event fd installed
 * by the user-space loader (indexed by CPU id in bpf_prog1 below), so the
 * program can sample counters via bpf_perf_event_read().
 * NOTE(review): 64 caps the number of CPUs/events supported -- confirm
 * against the user-space side.
 */
struct bpf_map_def SEC("maps") counters = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(u32),
	.max_entries = 64,
};
|
|
/* Hash map storing the most recent raw counter reading per CPU:
 * key = CPU id (int), value = u64 counter value written by bpf_prog1.
 * User space reads this map to check the sampled values.
 */
struct bpf_map_def SEC("maps") values = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(int),
	.value_size = sizeof(u64),
	.max_entries = 64,
};
|
|
|
|
/* Attached as a kprobe on htab_map_get_next_key(), i.e. it fires whenever
 * user space iterates a hash map; on each hit it samples the perf counter
 * for the current CPU and records the raw value in the "values" map.
 */
SEC("kprobe/htab_map_get_next_key")
int bpf_prog1(struct pt_regs *ctx)
{
	u32 key = bpf_get_smp_processor_id();
	u64 count, *val;
	s64 error;

	count = bpf_perf_event_read(&counters, key);
	/* On failure bpf_perf_event_read() returns a negative errno encoded
	 * in the u64; reinterpret as signed and treat anything in the
	 * -22..-2 range (-EINVAL .. -ENOENT, presumably covering "no event
	 * installed for this CPU" and similar errors) as a silent skip.
	 */
	error = (s64)count;
	if (error <= -2 && error >= -22)
		return 0;

	/* Record the latest reading keyed by CPU id: update in place when
	 * the slot exists, otherwise create it (BPF_NOEXIST = insert only).
	 */
	val = bpf_map_lookup_elem(&values, &key);
	if (val)
		*val = count;
	else
		bpf_map_update_elem(&values, &key, &count, BPF_NOEXIST);

	return 0;
}
|
|
|
|
/* Loader metadata consumed by the kernel at program load time: the license
 * string (GPL is required to use GPL-only BPF helpers) and the kernel
 * version the program was built against.
 */
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;