// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
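
/* The fexit programs below attach to test_pkt_access() and to its
 * bpf-to-bpf called subprograms, while the freplace programs extend
 * global functions of the attach target. Each program records success in
 * a test_* global, which the userspace side of the test presumably reads
 * after driving the target with its fixed 74-byte test packet.
 */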
#include <linux/stddef.h>
#include <linux/ipv6.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_tracing.h>
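
/* Minimal local definition of struct sk_buff: only the one field this
 * test reads. Accesses are wrapped in __builtin_preserve_access_index(),
 * so CO-RE relocations match the offset of 'len' against the kernel's
 * real struct sk_buff at load time.
 */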
struct sk_buff {
	unsigned int len;
};
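
/* Each fexit program runs when the traced function returns; the last
 * BPF_PROG() argument ('ret') is that function's return value, visible
 * read-only to the tracing program. On success the program sets its
 * result flag for userspace to check.
 */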
__u64 test_result = 0;
SEC("fexit/test_pkt_access")
|
|
|
|
int BPF_PROG(test_main, struct sk_buff *skb, int ret)
|
2019-11-14 18:57:20 +00:00
|
|
|
{
|
|
|
|
int len;
|
|
|
|
|
|
|
|
__builtin_preserve_access_index(({
|
|
|
|
len = skb->len;
|
|
|
|
}));
|
2019-11-23 20:25:04 +00:00
|
|
|
if (len != 74 || ret != 0)
|
2019-11-14 18:57:20 +00:00
|
|
|
return 0;
|
|
|
|
test_result = 1;
|
|
|
|
return 0;
|
|
|
|
}
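
/* 148 == 74 * 2: the traced subprog is expected to return twice the skb
 * length for the test packet, so any other value means fexit saw the
 * wrong arguments or return value.
 */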
__u64 test_result_subprog1 = 0;
SEC("fexit/test_pkt_access_subprog1")
|
|
|
|
int BPF_PROG(test_subprog1, struct sk_buff *skb, int ret)
|
2019-11-14 18:57:20 +00:00
|
|
|
{
|
|
|
|
int len;
|
|
|
|
|
|
|
|
__builtin_preserve_access_index(({
|
|
|
|
len = skb->len;
|
|
|
|
}));
|
2019-11-23 20:25:04 +00:00
|
|
|
if (len != 74 || ret != 148)
|
2019-11-14 18:57:20 +00:00
|
|
|
return 0;
|
|
|
|
test_result_subprog1 = 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Though test_pkt_access_subprog2() is defined in C as:
|
|
|
|
* static __attribute__ ((noinline))
|
|
|
|
* int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb)
|
|
|
|
* {
|
|
|
|
* return skb->len * val;
|
|
|
|
* }
|
|
|
|
* llvm optimizations remove 'int val' argument and generate BPF assembly:
|
|
|
|
* r0 = *(u32 *)(r1 + 0)
|
|
|
|
* w0 <<= 1
|
|
|
|
* exit
|
|
|
|
* In such case the verifier falls back to conservative and
|
|
|
|
* tracing program can access arguments and return value as u64
|
|
|
|
* instead of accurate types.
|
|
|
|
*/
|
|
|
|
struct args_subprog2 {
|
2019-11-23 20:25:04 +00:00
|
|
|
__u64 args[5];
|
|
|
|
__u64 ret;
|
2019-11-14 18:57:20 +00:00
|
|
|
};
__u64 test_result_subprog2 = 0;
SEC("fexit/test_pkt_access_subprog2")
|
|
|
|
int test_subprog2(struct args_subprog2 *ctx)
|
|
|
|
{
|
|
|
|
struct sk_buff *skb = (void *)ctx->args[0];
|
|
|
|
__u64 ret;
|
|
|
|
int len;
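
	/* ctx->args[0] is only a raw u64 to the verifier here, so 'skb' is
	 * not a trusted BTF pointer and cannot be dereferenced directly;
	 * read the field with bpf_probe_read_kernel() instead, keeping the
	 * offset CO-RE-relocatable via __builtin_preserve_access_index().
	 */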
	bpf_probe_read_kernel(&len, sizeof(len),
			      __builtin_preserve_access_index(&skb->len));

	ret = ctx->ret;
	/* bpf_prog_load() loads "test_pkt_access.o" with BPF_F_TEST_RND_HI32
	 * which randomizes upper 32 bits after BPF_ALU32 insns.
	 * Hence after 'w0 <<= 1' upper bits of $rax are random.
	 * That is expected and correct. Trim them.
	 */
	ret = (__u32) ret;
	if (len != 74 || ret != 148)
		return 0;
	test_result_subprog2 = 1;
	return 0;
}
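
/* Unlike subprog2 above, accurate BTF types are available for this
 * subprog, so BPF_PROG() can name all three arguments directly. The
 * caller is expected to pass val == 3 and the traced function to return
 * skb->len * val.
 */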
__u64 test_result_subprog3 = 0;
SEC("fexit/test_pkt_access_subprog3")
|
|
|
|
int BPF_PROG(test_subprog3, int val, struct sk_buff *skb, int ret)
|
2020-01-10 06:41:21 +00:00
|
|
|
{
|
|
|
|
int len;
|
|
|
|
|
|
|
|
__builtin_preserve_access_index(({
|
|
|
|
len = skb->len;
|
|
|
|
}));
|
|
|
|
if (len != 74 || ret != 74 * val || val != 3)
|
|
|
|
return 0;
|
|
|
|
test_result_subprog3 = 1;
|
|
|
|
return 0;
|
|
|
|
}
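
/* The freplace programs below replace the body of a global function in
 * the attach target, so each must keep the contract its callers rely on;
 * the hard-coded return values match what the replaced functions would
 * have produced for the test packet (see the per-return comments).
 */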
__u64 test_get_skb_len = 0;
SEC("freplace/get_skb_len")
|
|
|
|
int new_get_skb_len(struct __sk_buff *skb)
|
|
|
|
{
|
|
|
|
int len = skb->len;
|
|
|
|
|
|
|
|
if (len != 74)
|
|
|
|
return 0;
|
|
|
|
test_get_skb_len = 1;
|
|
|
|
return 74; /* original get_skb_len() returns skb->len */
|
|
|
|
}
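
/* The test traffic is assumed to be Ethernet + IPv6 + TCP (nexthdr 6)
 * with a 123-byte IPv6 payload. The same header is validated twice: once
 * via direct packet access and once via bpf_skb_load_bytes(), to prove
 * both access paths work from an extension program.
 */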
__u64 test_get_skb_ifindex = 0;
SEC("freplace/get_skb_ifindex")
|
|
|
|
int new_get_skb_ifindex(int val, struct __sk_buff *skb, int var)
|
|
|
|
{
|
|
|
|
void *data_end = (void *)(long)skb->data_end;
|
|
|
|
void *data = (void *)(long)skb->data;
|
|
|
|
struct ipv6hdr ip6, *ip6p;
|
|
|
|
int ifindex = skb->ifindex;
|
|
|
|
__u32 eth_proto;
|
|
|
|
__u32 nh_off;
|
|
|
|
|
|
|
|
/* check that BPF extension can read packet via direct packet access */
|
|
|
|
if (data + 14 + sizeof(ip6) > data_end)
|
|
|
|
return 0;
|
|
|
|
ip6p = data + 14;
|
|
|
|
|
|
|
|
if (ip6p->nexthdr != 6 || ip6p->payload_len != __bpf_constant_htons(123))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* check that legacy packet access helper works too */
|
|
|
|
if (bpf_skb_load_bytes(skb, 14, &ip6, sizeof(ip6)) < 0)
|
|
|
|
return 0;
|
|
|
|
ip6p = &ip6;
|
|
|
|
if (ip6p->nexthdr != 6 || ip6p->payload_len != __bpf_constant_htons(123))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (ifindex != 1 || val != 3 || var != 1)
|
|
|
|
return 0;
|
|
|
|
test_get_skb_ifindex = 1;
|
|
|
|
return 3; /* original get_skb_ifindex() returns val * ifindex * var */
|
|
|
|
}
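
/* get_constant() in the target returns val - 122, so for the val == 123
 * used by the test the expected value is 1, which is exactly what
 * test_get_constant has just been set to.
 */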
volatile __u64 test_get_constant = 0;
SEC("freplace/get_constant")
|
|
|
|
int new_get_constant(long val)
|
|
|
|
{
|
|
|
|
if (val != 123)
|
|
|
|
return 0;
|
|
|
|
test_get_constant = 1;
|
|
|
|
return test_get_constant; /* original get_constant() returns val - 122 */
|
|
|
|
}

char _license[] SEC("license") = "GPL";