bpf: Count the number of times recursion was prevented

Add a per-program counter for the number of times the recursion
prevention mechanism was triggered and expose it via show_fdinfo
and bpf_prog_info. Teach bpftool to print it.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20210210033634.62081-7-alexei.starovoitov@gmail.com
commit 9ed9e9ba23
parent 406c557edc
Author:    Alexei Starovoitov <ast@kernel.org>
Date:      2021-02-09 19:36:31 -08:00
Committer: Daniel Borkmann <daniel@iogearbox.net>
6 files changed, 33 insertions(+), 6 deletions(-)
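For orientation, here is a minimal userspace sketch of consuming the new
counter through the existing BPF_OBJ_GET_INFO_BY_FD path. It assumes libbpf
and UAPI headers that already carry this patch, and that prog_fd is a valid
program fd obtained elsewhere; the helper name is illustrative:

#include <bpf/bpf.h>     /* bpf_obj_get_info_by_fd() */
#include <linux/bpf.h>   /* struct bpf_prog_info */
#include <stdio.h>
#include <string.h>

static void print_recursion_misses(int prog_fd)
{
	struct bpf_prog_info info;
	__u32 len = sizeof(info);

	memset(&info, 0, sizeof(info));
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &len)) {
		perror("bpf_obj_get_info_by_fd");
		return;
	}
	/* Reads back as 0 on kernels that predate this patch. */
	printf("recursion_misses: %llu\n",
	       (unsigned long long)info.recursion_misses);
}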

diff --git a/include/linux/filter.h b/include/linux/filter.h

@@ -543,6 +543,7 @@ struct bpf_binary_header {
 struct bpf_prog_stats {
 	u64 cnt;
 	u64 nsecs;
+	u64 misses;
 	struct u64_stats_sync syncp;
 } __aligned(2 * sizeof(u64));

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h

@@ -4501,6 +4501,7 @@ struct bpf_prog_info {
 	__aligned_u64 prog_tags;
 	__u64 run_time_ns;
 	__u64 run_cnt;
+	__u64 recursion_misses;
 } __attribute__((aligned(8)));

 struct bpf_map_info {
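Appending the field at the tail is what keeps this UAPI change compatible:
bpf_prog_get_info_by_fd() copies back min(kernel, caller) struct sizes and
reports that length, so old binaries are unaffected. A hedged sketch (helper
name illustrative) of how userspace can tell whether the kernel actually
reported the field, using the info_len the kernel writes back:

#include <stddef.h>      /* offsetof() */
#include <linux/bpf.h>   /* struct bpf_prog_info, patched copy */

/* 'len' is the info_len value the kernel wrote back after
 * BPF_OBJ_GET_INFO_BY_FD. A pre-patch kernel reports a length that
 * ends exactly where recursion_misses would begin. */
static int kernel_reports_misses(__u32 len)
{
	return len > offsetof(struct bpf_prog_info, recursion_misses);
}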

diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c

@@ -1731,25 +1731,28 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
 static void bpf_prog_get_stats(const struct bpf_prog *prog,
 			       struct bpf_prog_stats *stats)
 {
-	u64 nsecs = 0, cnt = 0;
+	u64 nsecs = 0, cnt = 0, misses = 0;
 	int cpu;

 	for_each_possible_cpu(cpu) {
 		const struct bpf_prog_stats *st;
 		unsigned int start;
-		u64 tnsecs, tcnt;
+		u64 tnsecs, tcnt, tmisses;

 		st = per_cpu_ptr(prog->stats, cpu);
 		do {
 			start = u64_stats_fetch_begin_irq(&st->syncp);
 			tnsecs = st->nsecs;
 			tcnt = st->cnt;
+			tmisses = st->misses;
 		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
 		nsecs += tnsecs;
 		cnt += tcnt;
+		misses += tmisses;
 	}
 	stats->nsecs = nsecs;
 	stats->cnt = cnt;
+	stats->misses = misses;
 }

 #ifdef CONFIG_PROC_FS
@@ -1768,14 +1771,16 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
 		   "memlock:\t%llu\n"
 		   "prog_id:\t%u\n"
 		   "run_time_ns:\t%llu\n"
-		   "run_cnt:\t%llu\n",
+		   "run_cnt:\t%llu\n"
+		   "recursion_misses:\t%llu\n",
 		   prog->type,
 		   prog->jited,
 		   prog_tag,
 		   prog->pages * 1ULL << PAGE_SHIFT,
 		   prog->aux->id,
 		   stats.nsecs,
-		   stats.cnt);
+		   stats.cnt,
+		   stats.misses);
 }
 #endif
@@ -3438,6 +3443,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
 	bpf_prog_get_stats(prog, &stats);
 	info.run_time_ns = stats.nsecs;
 	info.run_cnt = stats.cnt;
+	info.recursion_misses = stats.misses;

 	if (!bpf_capable()) {
 		info.jited_prog_len = 0;
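The fdinfo side is plain procfs text, one "key:\tvalue" line per field. As a
usage sketch (hypothetical helper, not part of the patch), the new line can be
scraped like any other fdinfo field:

#include <stdio.h>

/* Scan /proc/<pid>/fdinfo/<fd> for the new line; returns -1 if the
 * line is absent (e.g. on a pre-patch kernel) or unreadable. */
static long long read_recursion_misses(int pid, int fd)
{
	char path[64], line[128];
	long long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/fdinfo/%d", pid, fd);
	f = fopen(path, "r");
	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "recursion_misses: %lld", &val) == 1)
			break;
	fclose(f);
	return val;
}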

diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c

@@ -394,6 +394,16 @@ static u64 notrace bpf_prog_start_time(void)
 	return start;
 }

+static void notrace inc_misses_counter(struct bpf_prog *prog)
+{
+	struct bpf_prog_stats *stats;
+
+	stats = this_cpu_ptr(prog->stats);
+	u64_stats_update_begin(&stats->syncp);
+	stats->misses++;
+	u64_stats_update_end(&stats->syncp);
+}
+
 /* The logic is similar to BPF_PROG_RUN, but with an explicit
  * rcu_read_lock() and migrate_disable() which are required
  * for the trampoline. The macro is split into
@@ -412,8 +422,10 @@ u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
 {
 	rcu_read_lock();
 	migrate_disable();
-	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
+	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
+		inc_misses_counter(prog);
 		return 0;
+	}
 	return bpf_prog_start_time();
 }
@@ -451,8 +463,10 @@ u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
 	rcu_read_lock_trace();
 	migrate_disable();
 	might_fault();
-	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
+	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
+		inc_misses_counter(prog);
 		return 0;
+	}
 	return bpf_prog_start_time();
 }
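The guard itself is worth spelling out: prog->active is a per-CPU counter, so
__this_cpu_inc_return() returning anything other than 1 means the same program
is already mid-flight on this CPU; the trampoline then skips the program body
and, with this patch, accounts the skip. A standalone model of that guard, for
illustration only (per-thread state standing in for per-CPU state, without the
notrace/migrate_disable constraints of the real code):

static __thread int active;			/* models prog->active */
static __thread unsigned long long misses;	/* models stats->misses */

static int prog_enter(void)
{
	if (++active != 1) {	/* already running here: recursion */
		misses++;	/* what inc_misses_counter() records */
		return 0;	/* caller skips the program body */
	}
	return 1;		/* ok to run */
}

static void prog_exit(void)
{
	active--;		/* always paired with prog_enter() */
}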

diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c

@@ -368,6 +368,8 @@ static void print_prog_header_json(struct bpf_prog_info *info)
 		jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
 		jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
 	}
+	if (info->recursion_misses)
+		jsonw_uint_field(json_wtr, "recursion_misses", info->recursion_misses);
 }

 static void print_prog_json(struct bpf_prog_info *info, int fd)
@@ -446,6 +448,8 @@ static void print_prog_header_plain(struct bpf_prog_info *info)
 	if (info->run_time_ns)
 		printf(" run_time_ns %lld run_cnt %lld",
 		       info->run_time_ns, info->run_cnt);
+	if (info->recursion_misses)
+		printf(" recursion_misses %lld", info->recursion_misses);

 	printf("\n");
 }
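Both printers emit the field only when it is non-zero, so output is unchanged
for programs that never recursed. Illustratively (made-up program id and
counter values, other header fields elided), the plain mode would then show:

# bpftool prog show id 7
7: tracing  name my_prog  ...  run_time_ns 8520 run_cnt 12 recursion_misses 3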

diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h

@@ -4501,6 +4501,7 @@ struct bpf_prog_info {
 	__aligned_u64 prog_tags;
 	__u64 run_time_ns;
 	__u64 run_cnt;
+	__u64 recursion_misses;
 } __attribute__((aligned(8)));

 struct bpf_map_info {