For BPF_PROG_TYPE_TRACING, the bpf_prog's ctx is an array of u64. This patch borrows the idea from BPF_CALL_x in filter.h to convert a u64 to the arg type of the traced function. The new BPF_TRACE_x has an arg to specify the return type of a bpf_prog. It will be used in the future TCP-ops bpf_prog that may return "void". The new macros are defined in the new header file "bpf_trace_helpers.h". It is under selftests/bpf/ for now. It could be moved to libbpf later after seeing more upcoming non-tracing use cases. The tests are changed to use these new macros also. Hence, the k[s]u8/16/32/64 are no longer needed and they are removed from the bpf_helpers.h. Signed-off-by: Martin KaFai Lau <kafai@fb.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Link: https://lore.kernel.org/bpf/20191123202504.1502696-1-kafai@fb.com
83 lines
1.9 KiB
C
83 lines
1.9 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/* Copyright (c) 2019 Facebook */
|
|
#include <linux/bpf.h>
|
|
#include "bpf_helpers.h"
|
|
#include "bpf_trace_helpers.h"
|
|
|
|
struct sk_buff {
|
|
unsigned int len;
|
|
};
|
|
|
|
/* Set to 1 when the fexit probe below observes the expected packet
 * length and return value; read back by the user-space test harness
 * through the program's global data section.
 */
static volatile __u64 test_result;

/* fexit program attached to test_pkt_access(). BPF_TRACE_2 (from
 * bpf_trace_helpers.h) converts the raw u64 ctx array of a
 * BPF_PROG_TYPE_TRACING program into typed arguments: 'skb' is the
 * traced function's first argument, 'ret' is its return value.
 */
BPF_TRACE_2("fexit/test_pkt_access", test_main,
	    struct sk_buff *, skb, int, ret)
{
	int len;

	/* CO-RE: relocate the skb->len field offset against the
	 * running kernel's struct sk_buff layout at load time.
	 */
	__builtin_preserve_access_index(({
		len = skb->len;
	}));
	/* The harness is expected to drive test_pkt_access() with a
	 * 74-byte packet and a 0 return — anything else fails the test
	 * by leaving test_result at 0.
	 */
	if (len != 74 || ret != 0)
		return 0;
	test_result = 1;
	return 0;
}
|
|
|
|
/* Success flag for the test_pkt_access_subprog1 probe; read by the
 * user-space harness through the global data section.
 */
static volatile __u64 test_result_subprog1;

/* fexit program attached to the bpf-to-bpf subprogram
 * test_pkt_access_subprog1(). Same shape as test_main above, but the
 * traced subprogram is expected to return 148 (2 * skb->len for the
 * 74-byte test packet).
 */
BPF_TRACE_2("fexit/test_pkt_access_subprog1", test_subprog1,
	    struct sk_buff *, skb, int, ret)
{
	int len;

	/* CO-RE relocation of the skb->len offset, as in test_main() */
	__builtin_preserve_access_index(({
		len = skb->len;
	}));
	if (len != 74 || ret != 148)
		return 0;
	test_result_subprog1 = 1;
	return 0;
}
|
|
|
|
/* Though test_pkt_access_subprog2() is defined in C as:
|
|
* static __attribute__ ((noinline))
|
|
* int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb)
|
|
* {
|
|
* return skb->len * val;
|
|
* }
|
|
* llvm optimizations remove 'int val' argument and generate BPF assembly:
|
|
* r0 = *(u32 *)(r1 + 0)
|
|
* w0 <<= 1
|
|
* exit
|
|
* In such case the verifier falls back to conservative and
|
|
* tracing program can access arguments and return value as u64
|
|
* instead of accurate types.
|
|
*/
|
|
/* Raw layout of the tracing program ctx when the verifier falls back
 * to conservative mode (see the comment above): up to 5 u64-sized
 * arguments followed by the traced function's u64 return value.
 * This must mirror the kernel's trampoline ctx layout exactly.
 */
struct args_subprog2 {
	__u64 args[5];
	__u64 ret;
};
/* Success flag for the test_pkt_access_subprog2 probe; read by the
 * user-space harness through the global data section.
 */
static volatile __u64 test_result_subprog2;
|
/* fexit program for test_pkt_access_subprog2(). Because llvm dropped
 * the subprogram's 'int val' argument (see comment above), the
 * verifier cannot recover accurate types, so the raw u64 ctx layout
 * is accessed by hand instead of via BPF_TRACE_x.
 */
SEC("fexit/test_pkt_access_subprog2")
int test_subprog2(struct args_subprog2 *ctx)
{
	struct sk_buff *skb = (void *)ctx->args[0];
	__u64 retval;
	int pkt_len;

	/* args[0] is only a u64 here, so skb->len must be fetched with
	 * an explicit probe read; preserve_access_index still performs
	 * the CO-RE relocation of the field offset.
	 */
	bpf_probe_read_kernel(&pkt_len, sizeof(pkt_len),
			      __builtin_preserve_access_index(&skb->len));

	/* The object is loaded with BPF_F_TEST_RND_HI32, which
	 * randomizes the upper 32 bits after BPF_ALU32 insns — here
	 * 'w0 <<= 1'. Only the low 32 bits of the saved return value
	 * are meaningful, so mask the rest off. That is expected and
	 * correct behavior, not a bug in the trampoline.
	 */
	retval = (__u32)ctx->ret;

	if (pkt_len == 74 && retval == 148)
		test_result_subprog2 = 1;

	return 0;
}
|
|
/* Tracing programs must declare a GPL-compatible license to load */
char _license[] SEC("license") = "GPL";
|