7cf245a37e
Fix all files in samples/bpf to include libbpf header files with the bpf/
prefix, to be consistent with external users of the library. Also ensure
that all includes of exported libbpf header files (those that are exported
on 'make install' of the library) use bracketed includes instead of quoted.
To make sure no new files are introduced that don't include the bpf/
prefix in their includes, remove tools/lib/bpf from the include path entirely
and use tools/lib instead.
Fixes: 6910d7d386 ("selftests/bpf: Ensure bpf_helper_defs.h are taken from selftests dir")
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Link: https://lore.kernel.org/bpf/157952560911.1683545.8795966751309534150.stgit@toke.dk
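
As an illustration of the convention described above, the include lines change from the quoted, un-prefixed form to the bracketed form with the bpf/ prefix (a minimal before/after sketch; the full list of files touched by the commit is not reproduced here):

	/* before: quoted include, found only because tools/lib/bpf was on the include path */
	#include "bpf_helpers.h"

	/* after: bracketed include with the bpf/ prefix, as external libbpf users write it */
	#include <bpf/bpf_helpers.h>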
258 lines
6.6 KiB
C
/* SPDX-License-Identifier: GPL-2.0
 * Copyright(c) 2017-2018 Jesper Dangaard Brouer, Red Hat Inc.
 *
 * XDP monitor tool, based on tracepoints
 */
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct bpf_map_def SEC("maps") redirect_err_cnt = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(u64),
	.max_entries = 2,
	/* TODO: have entries for all possible errno's */
};

#define XDP_UNKNOWN	XDP_REDIRECT + 1
struct bpf_map_def SEC("maps") exception_cnt = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(u64),
	.max_entries = XDP_UNKNOWN + 1,
};

/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct xdp_redirect_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int prog_id;		// offset:8;  size:4; signed:1;
	u32 act;		// offset:12; size:4; signed:0;
	int ifindex;		// offset:16; size:4; signed:1;
	int err;		// offset:20; size:4; signed:1;
	int to_ifindex;		// offset:24; size:4; signed:1;
	u32 map_id;		// offset:28; size:4; signed:0;
	int map_index;		// offset:32; size:4; signed:1;
};				// offset:36

enum {
	XDP_REDIRECT_SUCCESS = 0,
	XDP_REDIRECT_ERROR = 1
};

static __always_inline
int xdp_redirect_collect_stat(struct xdp_redirect_ctx *ctx)
{
	u32 key = XDP_REDIRECT_ERROR;
	int err = ctx->err;
	u64 *cnt;

	if (!err)
		key = XDP_REDIRECT_SUCCESS;

	cnt = bpf_map_lookup_elem(&redirect_err_cnt, &key);
	if (!cnt)
		return 1;
	*cnt += 1;

	return 0; /* Indicate event was filtered (no further processing) */
	/*
	 * Returning 1 here would allow e.g. a perf-record tracepoint
	 * to see and record these events, but it doesn't work well
	 * in practice, as stopping perf-record also unloads this
	 * bpf_prog.  Plus, there is additional overhead of doing so.
	 */
}

SEC("tracepoint/xdp/xdp_redirect_err")
|
|
int trace_xdp_redirect_err(struct xdp_redirect_ctx *ctx)
|
|
{
|
|
return xdp_redirect_collect_stat(ctx);
|
|
}
|
|
|
|
|
|
SEC("tracepoint/xdp/xdp_redirect_map_err")
|
|
int trace_xdp_redirect_map_err(struct xdp_redirect_ctx *ctx)
|
|
{
|
|
return xdp_redirect_collect_stat(ctx);
|
|
}
|
|
|
|
/* Likely unloaded when prog starts */
|
|
SEC("tracepoint/xdp/xdp_redirect")
|
|
int trace_xdp_redirect(struct xdp_redirect_ctx *ctx)
|
|
{
|
|
return xdp_redirect_collect_stat(ctx);
|
|
}
|
|
|
|
/* Likely unloaded when prog starts */
|
|
SEC("tracepoint/xdp/xdp_redirect_map")
|
|
int trace_xdp_redirect_map(struct xdp_redirect_ctx *ctx)
|
|
{
|
|
return xdp_redirect_collect_stat(ctx);
|
|
}
|
|
|
|
/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_exception/format
|
|
* Code in: kernel/include/trace/events/xdp.h
|
|
*/
|
|
struct xdp_exception_ctx {
|
|
u64 __pad; // First 8 bytes are not accessible by bpf code
|
|
int prog_id; // offset:8; size:4; signed:1;
|
|
u32 act; // offset:12; size:4; signed:0;
|
|
int ifindex; // offset:16; size:4; signed:1;
|
|
};
|
|
|
|
SEC("tracepoint/xdp/xdp_exception")
|
|
int trace_xdp_exception(struct xdp_exception_ctx *ctx)
|
|
{
|
|
u64 *cnt;
|
|
u32 key;
|
|
|
|
key = ctx->act;
|
|
if (key > XDP_REDIRECT)
|
|
key = XDP_UNKNOWN;
|
|
|
|
cnt = bpf_map_lookup_elem(&exception_cnt, &key);
|
|
if (!cnt)
|
|
return 1;
|
|
*cnt += 1;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/* Common stats data record shared with _user.c */
struct datarec {
	u64 processed;
	u64 dropped;
	u64 info;
	u64 err;
};
#define MAX_CPUS 64

struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(struct datarec),
	.max_entries = MAX_CPUS,
};

struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(struct datarec),
	.max_entries = 1,
};

/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct cpumap_enqueue_ctx {
	u64 __pad;			// First 8 bytes are not accessible by bpf code
	int map_id;			// offset:8;  size:4; signed:1;
	u32 act;			// offset:12; size:4; signed:0;
	int cpu;			// offset:16; size:4; signed:1;
	unsigned int drops;		// offset:20; size:4; signed:0;
	unsigned int processed;		// offset:24; size:4; signed:0;
	int to_cpu;			// offset:28; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_cpumap_enqueue")
int trace_xdp_cpumap_enqueue(struct cpumap_enqueue_ctx *ctx)
{
	u32 to_cpu = ctx->to_cpu;
	struct datarec *rec;

	if (to_cpu >= MAX_CPUS)
		return 1;

	rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &to_cpu);
	if (!rec)
		return 0;
	rec->processed += ctx->processed;
	rec->dropped += ctx->drops;

	/* Record bulk events, then userspace can calc average bulk size */
	if (ctx->processed > 0)
		rec->info += 1;

	return 0;
}

/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_kthread/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct cpumap_kthread_ctx {
	u64 __pad;			// First 8 bytes are not accessible by bpf code
	int map_id;			// offset:8;  size:4; signed:1;
	u32 act;			// offset:12; size:4; signed:0;
	int cpu;			// offset:16; size:4; signed:1;
	unsigned int drops;		// offset:20; size:4; signed:0;
	unsigned int processed;		// offset:24; size:4; signed:0;
	int sched;			// offset:28; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_cpumap_kthread")
int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
{
	struct datarec *rec;
	u32 key = 0;

	rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &key);
	if (!rec)
		return 0;
	rec->processed += ctx->processed;
	rec->dropped += ctx->drops;

	/* Count times kthread yielded CPU via schedule call */
	if (ctx->sched)
		rec->info++;

	return 0;
}

struct bpf_map_def SEC("maps") devmap_xmit_cnt = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(struct datarec),
	.max_entries = 1,
};

/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_devmap_xmit/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct devmap_xmit_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int from_ifindex;	// offset:8;  size:4; signed:1;
	u32 act;		// offset:12; size:4; signed:0;
	int to_ifindex;		// offset:16; size:4; signed:1;
	int drops;		// offset:20; size:4; signed:1;
	int sent;		// offset:24; size:4; signed:1;
	int err;		// offset:28; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_devmap_xmit")
int trace_xdp_devmap_xmit(struct devmap_xmit_ctx *ctx)
{
	struct datarec *rec;
	u32 key = 0;

	rec = bpf_map_lookup_elem(&devmap_xmit_cnt, &key);
	if (!rec)
		return 0;
	rec->processed += ctx->sent;
	rec->dropped += ctx->drops;

	/* Record bulk events, then userspace can calc average bulk size */
	rec->info += 1;

	/* Record error cases, where no frames were sent */
	if (ctx->err)
		rec->err++;

	/* Catch API error where drv ndo_xdp_xmit sent more than count */
	if (ctx->drops < 0)
		rec->err++;

	return 1;
}
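
The counters above live in per-CPU maps, so the user-space side (samples/bpf/xdp_monitor_user.c, not shown here) has to read one value per possible CPU and sum them. Below is a minimal, hypothetical sketch of that pattern with libbpf; the object path, the attach loop, and the two-second sleep are assumptions for illustration and do not reproduce the real loader:

	/* Sketch only: load the object, attach the tracepoints, then read and
	 * sum the per-CPU redirect_err_cnt counters.  The object filename
	 * "xdp_monitor_kern.o" is an assumption; error handling is minimal
	 * and attach links are deliberately not tracked.
	 */
	#include <stdio.h>
	#include <unistd.h>
	#include <bpf/bpf.h>
	#include <bpf/libbpf.h>

	int main(void)
	{
		int nr_cpus = libbpf_num_possible_cpus();
		__u32 key = 1;			/* XDP_REDIRECT_ERROR */
		struct bpf_program *prog;
		struct bpf_object *obj;
		__u64 sum = 0;
		int map_fd, i;

		if (nr_cpus < 0)
			return 1;

		__u64 values[nr_cpus];		/* per-CPU map: one value per possible CPU */

		obj = bpf_object__open_file("xdp_monitor_kern.o", NULL);
		if (libbpf_get_error(obj) || bpf_object__load(obj))
			return 1;

		/* Attach each program based on its SEC("tracepoint/...") name */
		bpf_object__for_each_program(prog, obj)
			if (libbpf_get_error(bpf_program__attach(prog)))
				return 1;

		sleep(2);			/* let some redirect events accumulate */

		map_fd = bpf_object__find_map_fd_by_name(obj, "redirect_err_cnt");
		if (map_fd < 0 || bpf_map_lookup_elem(map_fd, &key, values))
			return 1;

		for (i = 0; i < nr_cpus; i++)	/* sum the per-CPU slots */
			sum += values[i];

		printf("xdp_redirect errors: %llu\n", (unsigned long long)sum);
		bpf_object__close(obj);
		return 0;
	}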