Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2020-09-01

The following pull-request contains BPF updates for your *net-next* tree.

There are two small conflicts when pulling, resolve as follows:

1) Merge conflict in tools/lib/bpf/libbpf.c between 88a8212028 ("libbpf: Factor
   out common ELF operations and improve logging") in bpf-next and 1e891e513e
   ("libbpf: Fix map index used in error message") in net-next. Resolve by taking
   the hunk in bpf-next:

        [...]
        scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
        data = elf_sec_data(obj, scn);
        if (!scn || !data) {
                pr_warn("elf: failed to get %s map definitions for %s\n",
                        MAPS_ELF_SEC, obj->path);
                return -EINVAL;
        }
        [...]

2) Merge conflict in drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c between
   9647c57b11 ("xsk: i40e: ice: ixgbe: mlx5: Test for dma_need_sync earlier for
   better performance") in bpf-next and e20f0dbf20 ("net/mlx5e: RX, Add a prefetch
   command for small L1_CACHE_BYTES") in net-next. Resolve the two locations by retaining
   net_prefetch() and taking xsk_buff_dma_sync_for_cpu() from bpf-next. Should look like:

        [...]
        xdp_set_data_meta_invalid(xdp);
        xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
        net_prefetch(xdp->data);
        [...]

We've added 133 non-merge commits during the last 14 day(s) which contain
a total of 246 files changed, 13832 insertions(+), 3105 deletions(-).

The main changes are:

1) Initial support for sleepable BPF programs along with bpf_copy_from_user() helper
   for tracing to reliably access user memory, from Alexei Starovoitov.

2) Add BPF infra for writing and parsing TCP header options, from Martin KaFai Lau.

3) bpf_d_path() helper for returning full path for given 'struct path', from Jiri Olsa.

4) AF_XDP support for shared umems between devices and queues, from Magnus Karlsson.

5) Initial prep work for full BPF-to-BPF call support in libbpf, from Andrii Nakryiko.

6) Generalize bpf_sk_storage map & add local storage for inodes, from KP Singh.

7) Implement sockmap/hash updates from BPF context, from Lorenz Bauer.

8) BPF xor verification for scalar types & add BPF link iterator, from Yonghong Song.

9) Use target's prog type for BPF_PROG_TYPE_EXT prog verification, from Udip Pant.

10) Rework BPF tracing samples to use libbpf loader, from Daniel T. Lee.

11) Fix xdpsock sample to really cycle through all buffers, from Weqaar Janjua.

12) Improve type safety for tun/veth XDP frame handling, from Maciej Żenczykowski.

13) Various smaller cleanups and improvements all over the place.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller
2020-09-01 13:05:08 -07:00
246 changed files with 13815 additions and 3088 deletions

View File

@@ -316,7 +316,7 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \
$(TRUNNER_BPF_PROGS_DIR)/%.c \
$(TRUNNER_BPF_PROGS_DIR)/*.h \
$$(INCLUDE_DIR)/vmlinux.h \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
$(wildcard $(BPFDIR)/bpf_*.h) | $(TRUNNER_OUTPUT)
$$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
$(TRUNNER_BPF_CFLAGS), \
$(TRUNNER_BPF_LDFLAGS))

View File

@@ -43,3 +43,24 @@ This is due to a llvm BPF backend bug. The fix
https://reviews.llvm.org/D78466
has been pushed to llvm 10.x release branch and will be
available in 10.0.1. The fix is available in llvm 11.0.0 trunk.
BPF CO-RE-based tests and Clang version
=======================================
A set of selftests use BPF target-specific built-ins, which might require
bleeding-edge Clang versions (Clang 12 nightly at this time).
A few sub-tests of the core_reloc test suite (part of the test_progs test runner) require
the following built-ins, listed with corresponding Clang diffs introducing
them to Clang/LLVM. These sub-tests are going to be skipped if Clang is too
old to support them, they shouldn't cause build failures or runtime test
failures:
- __builtin_btf_type_id() ([0], [1], [2]);
- __builtin_preserve_type_info(), __builtin_preserve_enum_value() ([3], [4]).
[0] https://reviews.llvm.org/D74572
[1] https://reviews.llvm.org/D74668
[2] https://reviews.llvm.org/D85174
[3] https://reviews.llvm.org/D83878
[4] https://reviews.llvm.org/D83242

View File

@@ -317,6 +317,7 @@ extern const struct bench bench_trig_tp;
extern const struct bench bench_trig_rawtp;
extern const struct bench bench_trig_kprobe;
extern const struct bench bench_trig_fentry;
extern const struct bench bench_trig_fentry_sleep;
extern const struct bench bench_trig_fmodret;
extern const struct bench bench_rb_libbpf;
extern const struct bench bench_rb_custom;
@@ -338,6 +339,7 @@ static const struct bench *benchs[] = {
&bench_trig_rawtp,
&bench_trig_kprobe,
&bench_trig_fentry,
&bench_trig_fentry_sleep,
&bench_trig_fmodret,
&bench_rb_libbpf,
&bench_rb_custom,

View File

@@ -90,6 +90,12 @@ static void trigger_fentry_setup()
attach_bpf(ctx.skel->progs.bench_trigger_fentry);
}
/* Setup hook for the sleepable-fentry benchmark: perform the common
 * context setup, then attach the bench_trigger_fentry_sleep program.
 * Fix: use a proper (void) prototype instead of an empty, non-prototype
 * parameter list.
 */
static void trigger_fentry_sleep_setup(void)
{
	setup_ctx();
	attach_bpf(ctx.skel->progs.bench_trigger_fentry_sleep);
}
static void trigger_fmodret_setup()
{
setup_ctx();
@@ -155,6 +161,17 @@ const struct bench bench_trig_fentry = {
.report_final = hits_drops_report_final,
};
/* Benchmark descriptor for the sleepable-fentry trigger benchmark;
 * reuses the shared trigger producer/consumer threads and the generic
 * hits/drops reporting, differing from bench_trig_fentry only in the
 * setup hook that attaches the sleepable program.
 */
const struct bench bench_trig_fentry_sleep = {
.name = "trig-fentry-sleep",
.validate = trigger_validate,
.setup = trigger_fentry_sleep_setup,
.producer_thread = trigger_producer,
.consumer_thread = trigger_consumer,
.measure = trigger_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_trig_fmodret = {
.name = "trig-fmodret",
.validate = trigger_validate,

View File

@@ -104,6 +104,43 @@ error_close:
return -1;
}
/* Connect to the address @server_fd is bound to using TCP Fast Open:
 * the @data payload (@data_len bytes) is carried in the SYN via
 * sendto(MSG_FASTOPEN).  Returns the connected client fd on success,
 * -1 on failure (the socket is closed via save_errno_close — which
 * presumably preserves errno; confirm against its definition).
 */
int fastopen_connect(int server_fd, const char *data, unsigned int data_len,
int timeout_ms)
{
struct sockaddr_storage addr;
socklen_t addrlen = sizeof(addr);
struct sockaddr_in *addr_in;
int fd, ret;
/* learn the server's bound address/port so we can connect to it */
if (getsockname(server_fd, (struct sockaddr *)&addr, &addrlen)) {
log_err("Failed to get server addr");
return -1;
}
addr_in = (struct sockaddr_in *)&addr;
fd = socket(addr_in->sin_family, SOCK_STREAM, 0);
if (fd < 0) {
log_err("Failed to create client socket");
return -1;
}
if (settimeo(fd, timeout_ms))
goto error_close;
/* MSG_FASTOPEN performs the implicit connect and sends @data in the
 * SYN.  NOTE(review): ret is promoted to unsigned for the comparison
 * below; a -1 error return still compares unequal, so a short or
 * failed send is treated as failure either way.
 */
ret = sendto(fd, data, data_len, MSG_FASTOPEN, (struct sockaddr *)&addr,
addrlen);
if (ret != data_len) {
log_err("sendto(data, %u) != %d\n", data_len, ret);
goto error_close;
}
return fd;
error_close:
save_errno_close(fd);
return -1;
}
static int connect_fd_to_addr(int fd,
const struct sockaddr_storage *addr,
socklen_t addrlen)

View File

@@ -37,6 +37,8 @@ int start_server(int family, int type, const char *addr, __u16 port,
int timeout_ms);
int connect_to_fd(int server_fd, int timeout_ms);
int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms);
int fastopen_connect(int server_fd, const char *data, unsigned int data_len,
int timeout_ms);
int make_sockaddr(int family, const char *addr_str, __u16 port,
struct sockaddr_storage *addr, socklen_t *len);

View File

@@ -53,7 +53,7 @@ static int kern_sync_rcu(void)
return err;
}
void test_btf_map_in_map(void)
static void test_lookup_update(void)
{
int err, key = 0, val, i;
struct test_btf_map_in_map *skel;
@@ -143,3 +143,36 @@ void test_btf_map_in_map(void)
cleanup:
test_btf_map_in_map__destroy(skel);
}
/* Verify inner-map size compatibility rules for BTF map-in-map:
 * per the CHECK expectations below, the outer sockarr must accept an
 * inner map of a different size, while the outer array map must reject
 * one.
 */
static void test_diff_size(void)
{
struct test_btf_map_in_map *skel;
int err, inner_map_fd, zero = 0;
skel = test_btf_map_in_map__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
return;
/* different-size inner sockarr: update must succeed (err == 0) */
inner_map_fd = bpf_map__fd(skel->maps.sockarr_sz2);
err = bpf_map_update_elem(bpf_map__fd(skel->maps.outer_sockarr), &zero,
&inner_map_fd, 0);
CHECK(err, "outer_sockarr inner map size check",
"cannot use a different size inner_map\n");
/* different-size inner array map: update must fail (err != 0) */
inner_map_fd = bpf_map__fd(skel->maps.inner_map_sz2);
err = bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &zero,
&inner_map_fd, 0);
CHECK(!err, "outer_arr inner map size check",
"incorrectly updated with a different size inner_map\n");
test_btf_map_in_map__destroy(skel);
}
void test_btf_map_in_map(void)
{
if (test__start_subtest("lookup_update"))
test_lookup_update();
if (test__start_subtest("diff_size"))
test_diff_size();
}

View File

@@ -3,6 +3,9 @@
#include "progs/core_reloc_types.h"
#include <sys/mman.h>
#include <sys/syscall.h>
#include <bpf/btf.h>
static int duration = 0;
#define STRUCT_TO_CHAR_PTR(struct_name) (const char *)&(struct struct_name)
@@ -177,14 +180,13 @@
.fails = true, \
}
#define EXISTENCE_CASE_COMMON(name) \
#define FIELD_EXISTS_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_existence.o", \
.btf_src_file = "btf__core_reloc_" #name ".o", \
.relaxed_core_relocs = true
.btf_src_file = "btf__core_reloc_" #name ".o" \
#define EXISTENCE_ERR_CASE(name) { \
EXISTENCE_CASE_COMMON(name), \
#define FIELD_EXISTS_ERR_CASE(name) { \
FIELD_EXISTS_CASE_COMMON(name), \
.fails = true, \
}
@@ -253,6 +255,61 @@
.fails = true, \
}
#define TYPE_BASED_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_type_based.o", \
.btf_src_file = "btf__core_reloc_" #name ".o" \
#define TYPE_BASED_CASE(name, ...) { \
TYPE_BASED_CASE_COMMON(name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_type_based_output) \
__VA_ARGS__, \
.output_len = sizeof(struct core_reloc_type_based_output), \
}
#define TYPE_BASED_ERR_CASE(name) { \
TYPE_BASED_CASE_COMMON(name), \
.fails = true, \
}
#define TYPE_ID_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_type_id.o", \
.btf_src_file = "btf__core_reloc_" #name ".o" \
#define TYPE_ID_CASE(name, setup_fn) { \
TYPE_ID_CASE_COMMON(name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_type_id_output) {}, \
.output_len = sizeof(struct core_reloc_type_id_output), \
.setup = setup_fn, \
}
#define TYPE_ID_ERR_CASE(name) { \
TYPE_ID_CASE_COMMON(name), \
.fails = true, \
}
#define ENUMVAL_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_enumval.o", \
.btf_src_file = "btf__core_reloc_" #name ".o" \
#define ENUMVAL_CASE(name, ...) { \
ENUMVAL_CASE_COMMON(name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_enumval_output) \
__VA_ARGS__, \
.output_len = sizeof(struct core_reloc_enumval_output), \
}
#define ENUMVAL_ERR_CASE(name) { \
ENUMVAL_CASE_COMMON(name), \
.fails = true, \
}
struct core_reloc_test_case;
typedef int (*setup_test_fn)(struct core_reloc_test_case *test);
struct core_reloc_test_case {
const char *case_name;
const char *bpf_obj_file;
@@ -264,8 +321,136 @@ struct core_reloc_test_case {
bool fails;
bool relaxed_core_relocs;
bool direct_raw_tp;
setup_test_fn setup;
};
/* Look up the type named @name of BTF kind @kind in @btf.  Returns the
 * positive type ID on success; logs through CHECK and returns -1 when
 * the type is absent or the lookup errors out (id <= 0).
 */
static int find_btf_type(const struct btf *btf, const char *name, __u32 kind)
{
int id;
id = btf__find_by_name_kind(btf, name, kind);
if (CHECK(id <= 0, "find_type_id", "failed to find '%s', kind %d: %d\n", name, kind, id))
return -1;
return id;
}
/* Compute the expected *local* BTF type IDs for a type_id test case by
 * scanning the test object's own BTF.  Anonymous marker types (a
 * struct/union whose first member is "marker_field", an enum whose
 * first value is "MARKER_ENUM_VAL", plus specific pointer/array shapes
 * built around _Bool) are found by a linear scan over all types; named
 * types are resolved with find_btf_type().  targ_btf is parsed only to
 * verify the target BTF file is parseable — it is freed without
 * further use here.  Returns 0 on success, -EINVAL if either BTF file
 * fails to parse.
 */
static int setup_type_id_case_local(struct core_reloc_test_case *test)
{
struct core_reloc_type_id_output *exp = (void *)test->output;
struct btf *local_btf = btf__parse(test->bpf_obj_file, NULL);
struct btf *targ_btf = btf__parse(test->btf_src_file, NULL);
const struct btf_type *t;
const char *name;
int i;
if (CHECK(IS_ERR(local_btf), "local_btf", "failed: %ld\n", PTR_ERR(local_btf)) ||
CHECK(IS_ERR(targ_btf), "targ_btf", "failed: %ld\n", PTR_ERR(targ_btf))) {
btf__free(local_btf);
btf__free(targ_btf);
return -EINVAL;
}
/* -1 means "anonymous marker type not found" until proven otherwise */
exp->local_anon_struct = -1;
exp->local_anon_union = -1;
exp->local_anon_enum = -1;
exp->local_anon_func_proto_ptr = -1;
exp->local_anon_void_ptr = -1;
exp->local_anon_arr = -1;
/* BTF type IDs are 1-based, hence the loop bounds */
for (i = 1; i <= btf__get_nr_types(local_btf); i++)
{
t = btf__type_by_id(local_btf, i);
/* we are interested only in anonymous types */
if (t->name_off)
continue;
if (btf_is_struct(t) && btf_vlen(t) &&
(name = btf__name_by_offset(local_btf, btf_members(t)[0].name_off)) &&
strcmp(name, "marker_field") == 0) {
exp->local_anon_struct = i;
} else if (btf_is_union(t) && btf_vlen(t) &&
(name = btf__name_by_offset(local_btf, btf_members(t)[0].name_off)) &&
strcmp(name, "marker_field") == 0) {
exp->local_anon_union = i;
} else if (btf_is_enum(t) && btf_vlen(t) &&
(name = btf__name_by_offset(local_btf, btf_enum(t)[0].name_off)) &&
strcmp(name, "MARKER_ENUM_VAL") == 0) {
exp->local_anon_enum = i;
} else if (btf_is_ptr(t) && (t = btf__type_by_id(local_btf, t->type))) {
/* note: t is re-pointed at the pointee from here on */
if (btf_is_func_proto(t) && (t = btf__type_by_id(local_btf, t->type)) &&
btf_is_int(t) && (name = btf__name_by_offset(local_btf, t->name_off)) &&
strcmp(name, "_Bool") == 0) {
/* ptr -> func_proto -> _Bool */
exp->local_anon_func_proto_ptr = i;
} else if (btf_is_void(t)) {
/* ptr -> void */
exp->local_anon_void_ptr = i;
}
} else if (btf_is_array(t) && (t = btf__type_by_id(local_btf, btf_array(t)->type)) &&
btf_is_int(t) && (name = btf__name_by_offset(local_btf, t->name_off)) &&
strcmp(name, "_Bool") == 0) {
/* _Bool[] */
exp->local_anon_arr = i;
}
}
exp->local_struct = find_btf_type(local_btf, "a_struct", BTF_KIND_STRUCT);
exp->local_union = find_btf_type(local_btf, "a_union", BTF_KIND_UNION);
exp->local_enum = find_btf_type(local_btf, "an_enum", BTF_KIND_ENUM);
exp->local_int = find_btf_type(local_btf, "int", BTF_KIND_INT);
exp->local_struct_typedef = find_btf_type(local_btf, "named_struct_typedef", BTF_KIND_TYPEDEF);
exp->local_func_proto_typedef = find_btf_type(local_btf, "func_proto_typedef", BTF_KIND_TYPEDEF);
exp->local_arr_typedef = find_btf_type(local_btf, "arr_typedef", BTF_KIND_TYPEDEF);
btf__free(local_btf);
btf__free(targ_btf);
return 0;
}
static int setup_type_id_case_success(struct core_reloc_test_case *test) {
struct core_reloc_type_id_output *exp = (void *)test->output;
struct btf *targ_btf = btf__parse(test->btf_src_file, NULL);
int err;
err = setup_type_id_case_local(test);
if (err)
return err;
targ_btf = btf__parse(test->btf_src_file, NULL);
exp->targ_struct = find_btf_type(targ_btf, "a_struct", BTF_KIND_STRUCT);
exp->targ_union = find_btf_type(targ_btf, "a_union", BTF_KIND_UNION);
exp->targ_enum = find_btf_type(targ_btf, "an_enum", BTF_KIND_ENUM);
exp->targ_int = find_btf_type(targ_btf, "int", BTF_KIND_INT);
exp->targ_struct_typedef = find_btf_type(targ_btf, "named_struct_typedef", BTF_KIND_TYPEDEF);
exp->targ_func_proto_typedef = find_btf_type(targ_btf, "func_proto_typedef", BTF_KIND_TYPEDEF);
exp->targ_arr_typedef = find_btf_type(targ_btf, "arr_typedef", BTF_KIND_TYPEDEF);
btf__free(targ_btf);
return 0;
}
/* Failure-case setup: the target types are expected to be missing, so
 * after computing the local type IDs every target ID is forced to 0.
 */
static int setup_type_id_case_failure(struct core_reloc_test_case *test)
{
	struct core_reloc_type_id_output *exp = (void *)test->output;
	int err = setup_type_id_case_local(test);

	if (err)
		return err;

	exp->targ_struct = exp->targ_union = exp->targ_enum = 0;
	exp->targ_int = exp->targ_struct_typedef = 0;
	exp->targ_func_proto_typedef = exp->targ_arr_typedef = 0;

	return 0;
}
static struct core_reloc_test_case test_cases[] = {
/* validate we can find kernel image and use its BTF for relocs */
{
@@ -364,7 +549,7 @@ static struct core_reloc_test_case test_cases[] = {
/* validate field existence checks */
{
EXISTENCE_CASE_COMMON(existence),
FIELD_EXISTS_CASE_COMMON(existence),
.input = STRUCT_TO_CHAR_PTR(core_reloc_existence) {
.a = 1,
.b = 2,
@@ -388,7 +573,7 @@ static struct core_reloc_test_case test_cases[] = {
.output_len = sizeof(struct core_reloc_existence_output),
},
{
EXISTENCE_CASE_COMMON(existence___minimal),
FIELD_EXISTS_CASE_COMMON(existence___minimal),
.input = STRUCT_TO_CHAR_PTR(core_reloc_existence___minimal) {
.a = 42,
},
@@ -408,12 +593,12 @@ static struct core_reloc_test_case test_cases[] = {
.output_len = sizeof(struct core_reloc_existence_output),
},
EXISTENCE_ERR_CASE(existence__err_int_sz),
EXISTENCE_ERR_CASE(existence__err_int_type),
EXISTENCE_ERR_CASE(existence__err_int_kind),
EXISTENCE_ERR_CASE(existence__err_arr_kind),
EXISTENCE_ERR_CASE(existence__err_arr_value_type),
EXISTENCE_ERR_CASE(existence__err_struct_type),
FIELD_EXISTS_ERR_CASE(existence__err_int_sz),
FIELD_EXISTS_ERR_CASE(existence__err_int_type),
FIELD_EXISTS_ERR_CASE(existence__err_int_kind),
FIELD_EXISTS_ERR_CASE(existence__err_arr_kind),
FIELD_EXISTS_ERR_CASE(existence__err_arr_value_type),
FIELD_EXISTS_ERR_CASE(existence__err_struct_type),
/* bitfield relocation checks */
BITFIELDS_CASE(bitfields, {
@@ -452,11 +637,117 @@ static struct core_reloc_test_case test_cases[] = {
/* size relocation checks */
SIZE_CASE(size),
SIZE_CASE(size___diff_sz),
SIZE_ERR_CASE(size___err_ambiguous),
/* validate type existence and size relocations */
TYPE_BASED_CASE(type_based, {
.struct_exists = 1,
.union_exists = 1,
.enum_exists = 1,
.typedef_named_struct_exists = 1,
.typedef_anon_struct_exists = 1,
.typedef_struct_ptr_exists = 1,
.typedef_int_exists = 1,
.typedef_enum_exists = 1,
.typedef_void_ptr_exists = 1,
.typedef_func_proto_exists = 1,
.typedef_arr_exists = 1,
.struct_sz = sizeof(struct a_struct),
.union_sz = sizeof(union a_union),
.enum_sz = sizeof(enum an_enum),
.typedef_named_struct_sz = sizeof(named_struct_typedef),
.typedef_anon_struct_sz = sizeof(anon_struct_typedef),
.typedef_struct_ptr_sz = sizeof(struct_ptr_typedef),
.typedef_int_sz = sizeof(int_typedef),
.typedef_enum_sz = sizeof(enum_typedef),
.typedef_void_ptr_sz = sizeof(void_ptr_typedef),
.typedef_func_proto_sz = sizeof(func_proto_typedef),
.typedef_arr_sz = sizeof(arr_typedef),
}),
TYPE_BASED_CASE(type_based___all_missing, {
/* all zeros */
}),
TYPE_BASED_CASE(type_based___diff_sz, {
.struct_exists = 1,
.union_exists = 1,
.enum_exists = 1,
.typedef_named_struct_exists = 1,
.typedef_anon_struct_exists = 1,
.typedef_struct_ptr_exists = 1,
.typedef_int_exists = 1,
.typedef_enum_exists = 1,
.typedef_void_ptr_exists = 1,
.typedef_func_proto_exists = 1,
.typedef_arr_exists = 1,
.struct_sz = sizeof(struct a_struct___diff_sz),
.union_sz = sizeof(union a_union___diff_sz),
.enum_sz = sizeof(enum an_enum___diff_sz),
.typedef_named_struct_sz = sizeof(named_struct_typedef___diff_sz),
.typedef_anon_struct_sz = sizeof(anon_struct_typedef___diff_sz),
.typedef_struct_ptr_sz = sizeof(struct_ptr_typedef___diff_sz),
.typedef_int_sz = sizeof(int_typedef___diff_sz),
.typedef_enum_sz = sizeof(enum_typedef___diff_sz),
.typedef_void_ptr_sz = sizeof(void_ptr_typedef___diff_sz),
.typedef_func_proto_sz = sizeof(func_proto_typedef___diff_sz),
.typedef_arr_sz = sizeof(arr_typedef___diff_sz),
}),
TYPE_BASED_CASE(type_based___incompat, {
.enum_exists = 1,
.enum_sz = sizeof(enum an_enum),
}),
TYPE_BASED_CASE(type_based___fn_wrong_args, {
.struct_exists = 1,
.struct_sz = sizeof(struct a_struct),
}),
/* BTF_TYPE_ID_LOCAL/BTF_TYPE_ID_TARGET tests */
TYPE_ID_CASE(type_id, setup_type_id_case_success),
TYPE_ID_CASE(type_id___missing_targets, setup_type_id_case_failure),
/* Enumerator value existence and value relocations */
ENUMVAL_CASE(enumval, {
.named_val1_exists = true,
.named_val2_exists = true,
.named_val3_exists = true,
.anon_val1_exists = true,
.anon_val2_exists = true,
.anon_val3_exists = true,
.named_val1 = 1,
.named_val2 = 2,
.anon_val1 = 0x10,
.anon_val2 = 0x20,
}),
ENUMVAL_CASE(enumval___diff, {
.named_val1_exists = true,
.named_val2_exists = true,
.named_val3_exists = true,
.anon_val1_exists = true,
.anon_val2_exists = true,
.anon_val3_exists = true,
.named_val1 = 101,
.named_val2 = 202,
.anon_val1 = 0x11,
.anon_val2 = 0x22,
}),
ENUMVAL_CASE(enumval___val3_missing, {
.named_val1_exists = true,
.named_val2_exists = true,
.named_val3_exists = false,
.anon_val1_exists = true,
.anon_val2_exists = true,
.anon_val3_exists = false,
.named_val1 = 111,
.named_val2 = 222,
.anon_val1 = 0x111,
.anon_val2 = 0x222,
}),
ENUMVAL_ERR_CASE(enumval___err_missing),
};
struct data {
char in[256];
char out[256];
bool skip;
uint64_t my_pid_tgid;
};
@@ -472,7 +763,7 @@ void test_core_reloc(void)
struct bpf_object_load_attr load_attr = {};
struct core_reloc_test_case *test_case;
const char *tp_name, *probe_name;
int err, duration = 0, i, equal;
int err, i, equal;
struct bpf_link *link = NULL;
struct bpf_map *data_map;
struct bpf_program *prog;
@@ -488,11 +779,13 @@ void test_core_reloc(void)
if (!test__start_subtest(test_case->case_name))
continue;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
.relaxed_core_relocs = test_case->relaxed_core_relocs,
);
if (test_case->setup) {
err = test_case->setup(test_case);
if (CHECK(err, "test_setup", "test #%d setup failed: %d\n", i, err))
continue;
}
obj = bpf_object__open_file(test_case->bpf_obj_file, &opts);
obj = bpf_object__open_file(test_case->bpf_obj_file, NULL);
if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n",
test_case->bpf_obj_file, PTR_ERR(obj)))
continue;
@@ -515,15 +808,10 @@ void test_core_reloc(void)
load_attr.log_level = 0;
load_attr.target_btf_path = test_case->btf_src_file;
err = bpf_object__load_xattr(&load_attr);
if (test_case->fails) {
CHECK(!err, "obj_load_fail",
"should fail to load prog '%s'\n", probe_name);
if (err) {
if (!test_case->fails)
CHECK(false, "obj_load", "failed to load prog '%s': %d\n", probe_name, err);
goto cleanup;
} else {
if (CHECK(err, "obj_load",
"failed to load prog '%s': %d\n",
probe_name, err))
goto cleanup;
}
data_map = bpf_object__find_map_by_name(obj, "test_cor.bss");
@@ -551,6 +839,16 @@ void test_core_reloc(void)
/* trigger test run */
usleep(1);
if (data->skip) {
test__skip();
goto cleanup;
}
if (test_case->fails) {
CHECK(false, "obj_load_fail", "should fail to load prog '%s'\n", probe_name);
goto cleanup;
}
equal = memcmp(data->out, test_case->output,
test_case->output_len) == 0;
if (CHECK(!equal, "check_result",

View File

@@ -0,0 +1,147 @@
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <test_progs.h>
#include <sys/stat.h>
#include <linux/sched.h>
#include <sys/syscall.h>
#define MAX_PATH_LEN 128
#define MAX_FILES 7
#include "test_d_path.skel.h"
static int duration;
/* Ground truth collected from userspace: cnt fds recorded so far, and
 * for each one the path resolved via its /proc/<pid>/fd/ symlink.
 * Compared later against what the BPF d_path helper produced.
 */
static struct {
__u32 cnt;
char paths[MAX_FILES][MAX_PATH_LEN];
} src;
/* Record the path of @fd (as seen by process @pid) into the next free
 * slot of src.paths by resolving the /proc/<pid>/fd/<fd> symlink.
 * Returns the path length on success, -1 on error or when the table is
 * full.
 *
 * Fixes: bound src.cnt by MAX_FILES instead of writing past the table,
 * and NUL-terminate the readlink() result (readlink does not terminate
 * it) since the paths are later consumed with strncmp()/strlen().
 */
static int set_pathname(int fd, pid_t pid)
{
	char buf[MAX_PATH_LEN];
	ssize_t len;

	if (src.cnt >= MAX_FILES)
		return -1;
	snprintf(buf, sizeof(buf), "/proc/%d/fd/%d", pid, fd);
	len = readlink(buf, src.paths[src.cnt], MAX_PATH_LEN - 1);
	if (len < 0)
		return -1;
	src.paths[src.cnt][len] = '\0';
	src.cnt++;
	return len;
}
/* Open one fd on each filesystem flavor the d_path test covers (pipe,
 * socket, procfs, /dev, a regular file that is then deleted, and an
 * O_PATH directory), record the expected path of each via
 * set_pathname(), then fstat() them all (fires the vfs_getattr probe)
 * and close() them all (fires the filp_close probe).  Returns a
 * negative value on failure.
 */
static int trigger_fstat_events(pid_t pid)
{
int sockfd = -1, procfd = -1, devfd = -1;
int localfd = -1, indicatorfd = -1;
int pipefd[2] = { -1, -1 };
struct stat fileStat;
int ret = -1;
/* pipe: unmountable pseudo-filesystem */
if (CHECK(pipe(pipefd) < 0, "trigger", "pipe failed\n"))
return ret;
/* socket: unmountable pseudo-filesystem */
sockfd = socket(AF_INET, SOCK_STREAM, 0);
if (CHECK(sockfd < 0, "trigger", "socket failed\n"))
goto out_close;
/* mountable pseudo-filesystems */
procfd = open("/proc/self/comm", O_RDONLY);
if (CHECK(procfd < 0, "trigger", "open /proc/self/comm failed\n"))
goto out_close;
devfd = open("/dev/urandom", O_RDONLY);
if (CHECK(devfd < 0, "trigger", "open /dev/urandom failed\n"))
goto out_close;
localfd = open("/tmp/d_path_loadgen.txt", O_CREAT | O_RDONLY, 0644);
if (CHECK(localfd < 0, "trigger", "open /tmp/d_path_loadgen.txt failed\n"))
goto out_close;
/* bpf_d_path will return path with (deleted) */
remove("/tmp/d_path_loadgen.txt");
indicatorfd = open("/tmp/", O_PATH);
if (CHECK(indicatorfd < 0, "trigger", "open /tmp/ failed\n"))
goto out_close;
/* record the userspace view of every path before triggering probes */
ret = set_pathname(pipefd[0], pid);
if (CHECK(ret < 0, "trigger", "set_pathname failed for pipe[0]\n"))
goto out_close;
ret = set_pathname(pipefd[1], pid);
if (CHECK(ret < 0, "trigger", "set_pathname failed for pipe[1]\n"))
goto out_close;
ret = set_pathname(sockfd, pid);
if (CHECK(ret < 0, "trigger", "set_pathname failed for socket\n"))
goto out_close;
ret = set_pathname(procfd, pid);
if (CHECK(ret < 0, "trigger", "set_pathname failed for proc\n"))
goto out_close;
ret = set_pathname(devfd, pid);
if (CHECK(ret < 0, "trigger", "set_pathname failed for dev\n"))
goto out_close;
ret = set_pathname(localfd, pid);
if (CHECK(ret < 0, "trigger", "set_pathname failed for file\n"))
goto out_close;
ret = set_pathname(indicatorfd, pid);
if (CHECK(ret < 0, "trigger", "set_pathname failed for dir\n"))
goto out_close;
/* triggers vfs_getattr */
fstat(pipefd[0], &fileStat);
fstat(pipefd[1], &fileStat);
fstat(sockfd, &fileStat);
fstat(procfd, &fileStat);
fstat(devfd, &fileStat);
fstat(localfd, &fileStat);
fstat(indicatorfd, &fileStat);
out_close:
/* triggers filp_close */
close(pipefd[0]);
close(pipefd[1]);
close(sockfd);
close(procfd);
close(devfd);
close(localfd);
close(indicatorfd);
return ret;
}
/* Verify that the BPF d_path helper reconstructs, for every recorded
 * fd, the same path userspace sees via /proc/<pid>/fd — from both the
 * stat (vfs_getattr) and the close (filp_close) probes — and that the
 * helper's return value matches the path length.
 *
 * Fix: the rets_close[i] assertion compared against
 * strlen(bss->paths_stat[i]) and reported itself as a "stat return"
 * failure while printing the close path's length — compare and report
 * the close path consistently.
 */
void test_d_path(void)
{
	struct test_d_path__bss *bss;
	struct test_d_path *skel;
	int err;

	skel = test_d_path__open_and_load();
	if (CHECK(!skel, "setup", "d_path skeleton failed\n"))
		goto cleanup;

	err = test_d_path__attach(skel);
	if (CHECK(err, "setup", "attach failed: %d\n", err))
		goto cleanup;

	bss = skel->bss;
	bss->my_pid = getpid();

	err = trigger_fstat_events(bss->my_pid);
	if (err < 0)
		goto cleanup;

	for (int i = 0; i < MAX_FILES; i++) {
		CHECK(strncmp(src.paths[i], bss->paths_stat[i], MAX_PATH_LEN),
		      "check",
		      "failed to get stat path[%d]: %s vs %s\n",
		      i, src.paths[i], bss->paths_stat[i]);
		CHECK(strncmp(src.paths[i], bss->paths_close[i], MAX_PATH_LEN),
		      "check",
		      "failed to get close path[%d]: %s vs %s\n",
		      i, src.paths[i], bss->paths_close[i]);
		/* The d_path helper returns size plus NUL char, hence + 1 */
		CHECK(bss->rets_stat[i] != strlen(bss->paths_stat[i]) + 1,
		      "check",
		      "failed to match stat return [%d]: %d vs %zd [%s]\n",
		      i, bss->rets_stat[i], strlen(bss->paths_stat[i]) + 1,
		      bss->paths_stat[i]);
		CHECK(bss->rets_close[i] != strlen(bss->paths_close[i]) + 1,
		      "check",
		      "failed to match close return [%d]: %d vs %zd [%s]\n",
		      i, bss->rets_close[i], strlen(bss->paths_close[i]) + 1,
		      bss->paths_close[i]);
	}

cleanup:
	test_d_path__destroy(skel);
}

View File

@@ -123,6 +123,7 @@ static void test_func_replace(void)
"freplace/get_skb_len",
"freplace/get_skb_ifindex",
"freplace/get_constant",
"freplace/test_pkt_write_access_subprog",
};
test_fexit_bpf2bpf_common("./fexit_bpf2bpf.o",
"./test_pkt_access.o",
@@ -141,10 +142,77 @@ static void test_func_replace_verify(void)
prog_name, false);
}
/* Replace the cls_redirect program from test_cls_redirect.o with the
 * freplace variant in freplace_cls_redirect.o via the common
 * fexit_bpf2bpf machinery (sockmap-update scenario, per the test name —
 * confirm against freplace_cls_redirect.c).
 */
static void test_func_sockmap_update(void)
{
const char *prog_name[] = {
"freplace/cls_redirect",
};
test_fexit_bpf2bpf_common("./freplace_cls_redirect.o",
"./test_cls_redirect.o",
ARRAY_SIZE(prog_name),
prog_name, false);
}
/* Common negative-load helper: @target_obj_file must load fine, while
 * @obj_file — opened as an freplace of the target — must FAIL at
 * bpf_object__load().  Used for both the invalid-return-code and the
 * incompatible-map scenarios (see callers).
 */
static void test_obj_load_failure_common(const char *obj_file,
const char *target_obj_file)
{
/*
 * standalone test that asserts failure to load freplace prog
 * because of invalid return code.
 */
struct bpf_object *obj = NULL, *pkt_obj;
int err, pkt_fd;
__u32 duration = 0;
err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
&pkt_obj, &pkt_fd);
/* the target prog should load fine */
if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",
target_obj_file, err, errno))
return;
/* open the freplace object attached to the target prog's fd */
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
.attach_prog_fd = pkt_fd,
);
obj = bpf_object__open_file(obj_file, &opts);
if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
"failed to open %s: %ld\n", obj_file,
PTR_ERR(obj)))
goto close_prog;
/* It should fail to load the program */
err = bpf_object__load(obj);
if (CHECK(!err, "bpf_obj_load should fail", "err %d\n", err))
goto close_prog;
close_prog:
if (!IS_ERR_OR_NULL(obj))
bpf_object__close(obj);
bpf_object__close(pkt_obj);
}
/* An freplace program whose return code is invalid for the target's
 * attach point must be rejected at load time.
 */
static void test_func_replace_return_code(void)
{
/* test invalid return code in the replaced program */
test_obj_load_failure_common("./freplace_connect_v4_prog.o",
"./connect4_prog.o");
}
/* An freplace program using a spin-lock map value incompatible with
 * the target program must be rejected at load time.
 */
static void test_func_map_prog_compatibility(void)
{
/* test with spin lock map value in the replaced program */
test_obj_load_failure_common("./freplace_attach_probe.o",
"./test_attach_probe.o");
}
/* Entry point: run every fexit_bpf2bpf scenario in sequence. */
void test_fexit_bpf2bpf(void)
{
test_target_no_callees();
test_target_yes_callees();
test_func_replace();
test_func_replace_verify();
test_func_sockmap_update();
test_func_replace_return_code();
test_func_map_prog_compatibility();
}

View File

@@ -7,6 +7,8 @@
#include "test_perf_buffer.skel.h"
#include "bpf/libbpf_internal.h"
static int duration;
/* AddressSanitizer sometimes crashes due to data dereference below, due to
* this being mmap()'ed memory. Disable instrumentation with
* no_sanitize_address attribute
@@ -24,13 +26,31 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size)
CPU_SET(cpu, cpu_seen);
}
/* Pin the calling thread to @cpu and sleep briefly so the probe fires
 * on that CPU (callers iterate this over every online CPU).  Returns 0
 * on success, the pthread_setaffinity_np() error code on failure.
 */
int trigger_on_cpu(int cpu)
{
cpu_set_t cpu_set;
int err;
CPU_ZERO(&cpu_set);
CPU_SET(cpu, &cpu_set);
err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
/* the leading "err &&" short-circuits CHECK's logging when err == 0 */
if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n", cpu, err))
return err;
usleep(1);
return 0;
}
void test_perf_buffer(void)
{
int err, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0;
int err, on_len, nr_on_cpus = 0, nr_cpus, i;
struct perf_buffer_opts pb_opts = {};
struct test_perf_buffer *skel;
cpu_set_t cpu_set, cpu_seen;
cpu_set_t cpu_seen;
struct perf_buffer *pb;
int last_fd = -1, fd;
bool *online;
nr_cpus = libbpf_num_possible_cpus();
@@ -63,6 +83,9 @@ void test_perf_buffer(void)
if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
goto out_close;
CHECK(perf_buffer__epoll_fd(pb) < 0, "epoll_fd",
"bad fd: %d\n", perf_buffer__epoll_fd(pb));
/* trigger kprobe on every CPU */
CPU_ZERO(&cpu_seen);
for (i = 0; i < nr_cpus; i++) {
@@ -71,16 +94,8 @@ void test_perf_buffer(void)
continue;
}
CPU_ZERO(&cpu_set);
CPU_SET(i, &cpu_set);
err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set),
&cpu_set);
if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n",
i, err))
if (trigger_on_cpu(i))
goto out_close;
usleep(1);
}
/* read perf buffer */
@@ -92,6 +107,34 @@ void test_perf_buffer(void)
"expect %d, seen %d\n", nr_on_cpus, CPU_COUNT(&cpu_seen)))
goto out_free_pb;
if (CHECK(perf_buffer__buffer_cnt(pb) != nr_cpus, "buf_cnt",
"got %zu, expected %d\n", perf_buffer__buffer_cnt(pb), nr_cpus))
goto out_close;
for (i = 0; i < nr_cpus; i++) {
if (i >= on_len || !online[i])
continue;
fd = perf_buffer__buffer_fd(pb, i);
CHECK(fd < 0 || last_fd == fd, "fd_check", "last fd %d == fd %d\n", last_fd, fd);
last_fd = fd;
err = perf_buffer__consume_buffer(pb, i);
if (CHECK(err, "drain_buf", "cpu %d, err %d\n", i, err))
goto out_close;
CPU_CLR(i, &cpu_seen);
if (trigger_on_cpu(i))
goto out_close;
err = perf_buffer__consume_buffer(pb, i);
if (CHECK(err, "consume_buf", "cpu %d, err %d\n", i, err))
goto out_close;
if (CHECK(!CPU_ISSET(i, &cpu_seen), "cpu_seen", "cpu %d not seen\n", i))
goto out_close;
}
out_free_pb:
perf_buffer__free(pb);
out_close:

View File

@@ -47,6 +47,15 @@ BTF_ID(struct, S)
BTF_ID(union, U)
BTF_ID(func, func)
BTF_SET_START(test_set)
BTF_ID(typedef, S)
BTF_ID(typedef, T)
BTF_ID(typedef, U)
BTF_ID(struct, S)
BTF_ID(union, U)
BTF_ID(func, func)
BTF_SET_END(test_set)
static int
__resolve_symbol(struct btf *btf, int type_id)
{
@@ -116,12 +125,40 @@ int test_resolve_btfids(void)
*/
for (j = 0; j < ARRAY_SIZE(test_lists); j++) {
test_list = test_lists[j];
for (i = 0; i < ARRAY_SIZE(test_symbols) && !ret; i++) {
for (i = 0; i < ARRAY_SIZE(test_symbols); i++) {
ret = CHECK(test_list[i] != test_symbols[i].id,
"id_check",
"wrong ID for %s (%d != %d)\n",
test_symbols[i].name,
test_list[i], test_symbols[i].id);
if (ret)
return ret;
}
}
/* Check BTF_SET_START(test_set) IDs */
for (i = 0; i < test_set.cnt; i++) {
bool found = false;
for (j = 0; j < ARRAY_SIZE(test_symbols); j++) {
if (test_symbols[j].id != test_set.ids[i])
continue;
found = true;
break;
}
ret = CHECK(!found, "id_check",
"ID %d not found in test_symbols\n",
test_set.ids[i]);
if (ret)
break;
if (i > 0) {
ret = CHECK(test_set.ids[i - 1] > test_set.ids[i],
"sort_check",
"test_set is not sorted\n");
if (ret)
break;
}
}

View File

@@ -49,7 +49,7 @@ configure_stack(void)
sprintf(tc_cmd, "%s %s %s %s", "tc filter add dev lo ingress bpf",
"direct-action object-file ./test_sk_assign.o",
"section classifier/sk_assign_test",
(env.verbosity < VERBOSE_VERY) ? " 2>/dev/null" : "");
(env.verbosity < VERBOSE_VERY) ? " 2>/dev/null" : "verbose");
if (CHECK(system(tc_cmd), "BPF load failed;",
"run with -vv for more info\n"))
return false;
@@ -268,6 +268,7 @@ void test_sk_assign(void)
int server = -1;
int server_map;
int self_net;
int i;
self_net = open(NS_SELF, O_RDONLY);
if (CHECK_FAIL(self_net < 0)) {
@@ -286,7 +287,7 @@ void test_sk_assign(void)
goto cleanup;
}
for (int i = 0; i < ARRAY_SIZE(tests) && !READ_ONCE(stop); i++) {
for (i = 0; i < ARRAY_SIZE(tests) && !READ_ONCE(stop); i++) {
struct test_sk_cfg *test = &tests[i];
const struct sockaddr *addr;
const int zero = 0;

View File

@@ -4,6 +4,8 @@
#include "test_progs.h"
#include "test_skmsg_load_helpers.skel.h"
#include "test_sockmap_update.skel.h"
#include "test_sockmap_invalid_update.skel.h"
#define TCP_REPAIR 19 /* TCP sock is under repair right now */
@@ -101,6 +103,74 @@ out:
test_skmsg_load_helpers__destroy(skel);
}
/* Verify that a BPF program can copy a socket reference from one map to
 * another: insert a connected TCP socket into the "src" sockmap, run the
 * copy_sock_map program via BPF_PROG_TEST_RUN, then check that the socket
 * cookie read back from the destination map (sockmap or sockhash,
 * depending on map_type) matches the source cookie.
 */
static void test_sockmap_update(enum bpf_map_type map_type)
{
struct bpf_prog_test_run_attr tattr;
int err, prog, src, dst, duration = 0;
struct test_sockmap_update *skel;
__u64 src_cookie, dst_cookie;
const __u32 zero = 0;
char dummy[14] = {0};
__s64 sk;
sk = connected_socket_v4();
if (CHECK(sk == -1, "connected_socket_v4", "cannot connect\n"))
return;
skel = test_sockmap_update__open_and_load();
if (CHECK(!skel, "open_and_load", "cannot load skeleton\n"))
goto close_sk;
prog = bpf_program__fd(skel->progs.copy_sock_map);
src = bpf_map__fd(skel->maps.src);
/* destination is either a sockmap or a sockhash, same test otherwise */
if (map_type == BPF_MAP_TYPE_SOCKMAP)
dst = bpf_map__fd(skel->maps.dst_sock_map);
else
dst = bpf_map__fd(skel->maps.dst_sock_hash);
err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST);
if (CHECK(err, "update_elem(src)", "errno=%u\n", errno))
goto out;
/* sockmap lookups from userspace return the socket cookie */
err = bpf_map_lookup_elem(src, &zero, &src_cookie);
if (CHECK(err, "lookup_elem(src, cookie)", "errno=%u\n", errno))
goto out;
/* dummy packet payload: test_run just needs valid data to run the prog */
tattr = (struct bpf_prog_test_run_attr){
.prog_fd = prog,
.repeat = 1,
.data_in = dummy,
.data_size_in = sizeof(dummy),
};
err = bpf_prog_test_run_xattr(&tattr);
if (CHECK_ATTR(err || !tattr.retval, "bpf_prog_test_run",
"errno=%u retval=%u\n", errno, tattr.retval))
goto out;
err = bpf_map_lookup_elem(dst, &zero, &dst_cookie);
if (CHECK(err, "lookup_elem(dst, cookie)", "errno=%u\n", errno))
goto out;
/* same cookie in dst proves the prog copied the same socket */
CHECK(dst_cookie != src_cookie, "cookie mismatch", "%llu != %llu\n",
dst_cookie, src_cookie);
out:
test_sockmap_update__destroy(skel);
close_sk:
close(sk);
}
/* Negative test: the test_sockmap_invalid_update skeleton performs a
 * map_update on a sockmap from an unsafe context; the verifier must
 * REJECT it, so open_and_load() succeeding (skel != NULL) is a failure.
 */
static void test_sockmap_invalid_update(void)
{
struct test_sockmap_invalid_update *skel;
int duration = 0;
skel = test_sockmap_invalid_update__open_and_load();
if (CHECK(skel, "open_and_load", "verifier accepted map_update\n"))
test_sockmap_invalid_update__destroy(skel);
}
void test_sockmap_basic(void)
{
if (test__start_subtest("sockmap create_update_free"))
@@ -111,4 +181,10 @@ void test_sockmap_basic(void)
test_skmsg_helpers(BPF_MAP_TYPE_SOCKMAP);
if (test__start_subtest("sockhash sk_msg load helpers"))
test_skmsg_helpers(BPF_MAP_TYPE_SOCKHASH);
if (test__start_subtest("sockmap update"))
test_sockmap_update(BPF_MAP_TYPE_SOCKMAP);
if (test__start_subtest("sockhash update"))
test_sockmap_update(BPF_MAP_TYPE_SOCKHASH);
if (test__start_subtest("sockmap update in unsafe context"))
test_sockmap_invalid_update();
}

View File

@@ -0,0 +1,622 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <linux/compiler.h>
#include "test_progs.h"
#include "cgroup_helpers.h"
#include "network_helpers.h"
#include "test_tcp_hdr_options.h"
#include "test_tcp_hdr_options.skel.h"
#include "test_misc_tcp_hdr_options.skel.h"
#define LO_ADDR6 "::eB9F"
#define CG_NAME "/tcpbpf-hdr-opt-test"
struct bpf_test_option exp_passive_estab_in;
struct bpf_test_option exp_active_estab_in;
struct bpf_test_option exp_passive_fin_in;
struct bpf_test_option exp_active_fin_in;
struct hdr_stg exp_passive_hdr_stg;
struct hdr_stg exp_active_hdr_stg = { .active = true, };
static struct test_misc_tcp_hdr_options *misc_skel;
static struct test_tcp_hdr_options *skel;
static int lport_linum_map_fd;
static int hdr_stg_map_fd;
static __u32 duration;
static int cg_fd;
struct sk_fds {
int srv_fd;
int passive_fd;
int active_fd;
int passive_lport;
int active_lport;
};
/* Add the test IPv6 address (LO_ADDR6) to loopback via the ip(8) tool.
 * Returns 0 on success, -1 on failure (command too long or ip failed).
 * Runs inside the test's private netns created by create_netns().
 */
static int add_lo_addr(void)
{
char ip_addr_cmd[256];
int cmdlen;
cmdlen = snprintf(ip_addr_cmd, sizeof(ip_addr_cmd),
"ip -6 addr add %s/128 dev lo scope host",
LO_ADDR6);
/* snprintf returns the would-be length; >= size means truncation */
if (CHECK(cmdlen >= sizeof(ip_addr_cmd), "compile ip cmd",
"failed to add host addr %s to lo. ip cmdlen is too long\n",
LO_ADDR6))
return -1;
if (CHECK(system(ip_addr_cmd), "run ip cmd",
"failed to add host addr %s to lo\n", LO_ADDR6))
return -1;
return 0;
}
/* Move the calling process into a fresh network namespace, bring lo up
 * and install the test address. Gives each subtest a pristine stack so
 * sysctl tweaks (syncookies, fastopen) don't leak between tests.
 * Returns 0 on success, -1 on any failure.
 */
static int create_netns(void)
{
if (CHECK(unshare(CLONE_NEWNET), "create netns",
"unshare(CLONE_NEWNET): %s (%d)",
strerror(errno), errno))
return -1;
if (CHECK(system("ip link set dev lo up"), "run ip cmd",
"failed to bring lo link up\n"))
return -1;
if (add_lo_addr())
return -1;
return 0;
}
/* Write @value to the sysctl file @sysctl (e.g. under /proc/sys/).
 * Returns 0 on success, -1 if the file cannot be opened or the write
 * is short/failing.
 */
static int write_sysctl(const char *sysctl, const char *value)
{
	int sysctl_fd, written, value_len;

	sysctl_fd = open(sysctl, O_WRONLY);
	if (CHECK(sysctl_fd == -1, "open sysctl", "open(%s): %s (%d)\n",
		  sysctl, strerror(errno), errno))
		return -1;

	value_len = strlen(value);
	written = write(sysctl_fd, value, value_len);
	close(sysctl_fd);

	/* a short write to procfs is as fatal as an error */
	if (CHECK(written != value_len, "write sysctl",
		  "write(%s, %s): err:%d %s (%d)\n",
		  sysctl, value, written, strerror(errno), errno))
		return -1;

	return 0;
}
/* Dump one hdr_stg to stderr, optionally preceded by @prefix. */
static void print_hdr_stg(const struct hdr_stg *hdr_stg, const char *prefix)
{
	const char *pfx = prefix ? prefix : "";

	fprintf(stderr, "%s{active:%u, resend_syn:%u, syncookie:%u, fastopen:%u}\n",
		pfx, hdr_stg->active, hdr_stg->resend_syn,
		hdr_stg->syncookie, hdr_stg->fastopen);
}
/* Dump one bpf_test_option to stderr, optionally preceded by @prefix. */
static void print_option(const struct bpf_test_option *opt, const char *prefix)
{
	const char *pfx = prefix ? prefix : "";

	fprintf(stderr, "%s{flags:0x%x, max_delack_ms:%u, rand:0x%x}\n",
		pfx, opt->flags, opt->max_delack_ms, opt->rand);
}
/* Close all three sockets of a connected sk_fds triple. */
static void sk_fds_close(struct sk_fds *sk_fds)
{
	int fds[] = { sk_fds->srv_fd, sk_fds->passive_fd, sk_fds->active_fd };
	unsigned int i;

	for (i = 0; i < sizeof(fds) / sizeof(fds[0]); i++)
		close(fds[i]);
}
/* Gracefully shut down both directions of the connection so that FIN
 * packets (which carry the tested header options) are exchanged in a
 * deterministic order: active closes first, passive observes EOF, then
 * passive closes and active observes EOF.
 * Returns 0 on success, -1 if either side fails to see a clean EOF.
 */
static int sk_fds_shutdown(struct sk_fds *sk_fds)
{
int ret, abyte;
shutdown(sk_fds->active_fd, SHUT_WR);
/* read() == 0 means the peer's FIN arrived (clean EOF) */
ret = read(sk_fds->passive_fd, &abyte, sizeof(abyte));
if (CHECK(ret != 0, "read-after-shutdown(passive_fd):",
"ret:%d %s (%d)\n",
ret, strerror(errno), errno))
return -1;
shutdown(sk_fds->passive_fd, SHUT_WR);
ret = read(sk_fds->active_fd, &abyte, sizeof(abyte));
if (CHECK(ret != 0, "read-after-shutdown(active_fd):",
"ret:%d %s (%d)\n",
ret, strerror(errno), errno))
return -1;
return 0;
}
/* Establish a loopback TCP connection and fill @sk_fds with the server
 * listen fd, the accepted (passive) fd and the connecting (active) fd,
 * plus both local ports (used later as keys into lport_linum_map).
 * If @fast_open, connect with TFO and send the "FAST!!!" payload in the
 * SYN; the payload is then drained on the passive side so later reads
 * only see test traffic.
 * On failure all opened fds are closed and *sk_fds is poisoned with -1.
 * Returns 0 on success, -1 on failure.
 */
static int sk_fds_connect(struct sk_fds *sk_fds, bool fast_open)
{
const char fast[] = "FAST!!!";
struct sockaddr_in6 addr6;
socklen_t len;
sk_fds->srv_fd = start_server(AF_INET6, SOCK_STREAM, LO_ADDR6, 0, 0);
if (CHECK(sk_fds->srv_fd == -1, "start_server", "%s (%d)\n",
strerror(errno), errno))
goto error;
if (fast_open)
sk_fds->active_fd = fastopen_connect(sk_fds->srv_fd, fast,
sizeof(fast), 0);
else
sk_fds->active_fd = connect_to_fd(sk_fds->srv_fd, 0);
if (CHECK_FAIL(sk_fds->active_fd == -1)) {
close(sk_fds->srv_fd);
goto error;
}
/* record the server's (passive side's) local port */
len = sizeof(addr6);
if (CHECK(getsockname(sk_fds->srv_fd, (struct sockaddr *)&addr6,
&len), "getsockname(srv_fd)", "%s (%d)\n",
strerror(errno), errno))
goto error_close;
sk_fds->passive_lport = ntohs(addr6.sin6_port);
/* record the client's (active side's) local port */
len = sizeof(addr6);
if (CHECK(getsockname(sk_fds->active_fd, (struct sockaddr *)&addr6,
&len), "getsockname(active_fd)", "%s (%d)\n",
strerror(errno), errno))
goto error_close;
sk_fds->active_lport = ntohs(addr6.sin6_port);
sk_fds->passive_fd = accept(sk_fds->srv_fd, NULL, 0);
if (CHECK(sk_fds->passive_fd == -1, "accept(srv_fd)", "%s (%d)\n",
strerror(errno), errno))
goto error_close;
if (fast_open) {
/* drain the TFO SYN payload so later reads start clean */
char bytes_in[sizeof(fast)];
int ret;
ret = read(sk_fds->passive_fd, bytes_in, sizeof(bytes_in));
if (CHECK(ret != sizeof(fast), "read fastopen syn data",
"expected=%lu actual=%d\n", sizeof(fast), ret)) {
close(sk_fds->passive_fd);
goto error_close;
}
}
return 0;
error_close:
close(sk_fds->active_fd);
close(sk_fds->srv_fd);
error:
/* poison every fd/port so a later close()/lookup fails loudly */
memset(sk_fds, -1, sizeof(*sk_fds));
return -1;
}
/* Compare an expected vs actually-received bpf_test_option; on mismatch
 * print both and return -1, else 0. @hdr_desc names the option in logs.
 */
static int check_hdr_opt(const struct bpf_test_option *exp,
const struct bpf_test_option *act,
const char *hdr_desc)
{
if (CHECK(memcmp(exp, act, sizeof(*exp)),
"expected-vs-actual", "unexpected %s\n", hdr_desc)) {
print_option(exp, "expected: ");
print_option(act, " actual: ");
return -1;
}
return 0;
}
/* Look up the hdr_stg the BPF prog stored for socket @fd in
 * hdr_stg_map_fd and compare it with *exp; print both on mismatch.
 * Returns 0 on match, -1 on lookup failure or mismatch.
 */
static int check_hdr_stg(const struct hdr_stg *exp, int fd,
const char *stg_desc)
{
struct hdr_stg act;
if (CHECK(bpf_map_lookup_elem(hdr_stg_map_fd, &fd, &act),
"map_lookup(hdr_stg_map_fd)", "%s %s (%d)\n",
stg_desc, strerror(errno), errno))
return -1;
if (CHECK(memcmp(exp, &act, sizeof(*exp)),
"expected-vs-actual", "unexpected %s\n", stg_desc)) {
print_hdr_stg(exp, "expected: ");
print_hdr_stg(&act, " actual: ");
return -1;
}
return 0;
}
/* The BPF progs record (line number, error) keyed by local port into
 * lport_linum_map whenever they hit an error. Report any entry found
 * for either side of the connection.
 * Returns the number of errors found (0 == clean run).
 */
static int check_error_linum(const struct sk_fds *sk_fds)
{
unsigned int nr_errors = 0;
struct linum_err linum_err;
int lport;
lport = sk_fds->passive_lport;
if (!bpf_map_lookup_elem(lport_linum_map_fd, &lport, &linum_err)) {
fprintf(stderr,
"bpf prog error out at lport:passive(%d), linum:%u err:%d\n",
lport, linum_err.linum, linum_err.err);
nr_errors++;
}
lport = sk_fds->active_lport;
if (!bpf_map_lookup_elem(lport_linum_map_fd, &lport, &linum_err)) {
fprintf(stderr,
"bpf prog error out at lport:active(%d), linum:%u err:%d\n",
lport, linum_err.linum, linum_err.err);
nr_errors++;
}
return nr_errors;
}
/* Shut the connection down (exchanging FINs), then verify every
 * expectation in order: socket-storage state for both sides, options
 * received during establishment, and options received on the FINs.
 * Any failure short-circuits to the BPF-side error check; the fds are
 * always closed.
 */
static void check_hdr_and_close_fds(struct sk_fds *sk_fds)
{
if (sk_fds_shutdown(sk_fds))
goto check_linum;
if (check_hdr_stg(&exp_passive_hdr_stg, sk_fds->passive_fd,
"passive_hdr_stg"))
goto check_linum;
if (check_hdr_stg(&exp_active_hdr_stg, sk_fds->active_fd,
"active_hdr_stg"))
goto check_linum;
if (check_hdr_opt(&exp_passive_estab_in, &skel->bss->passive_estab_in,
"passive_estab_in"))
goto check_linum;
if (check_hdr_opt(&exp_active_estab_in, &skel->bss->active_estab_in,
"active_estab_in"))
goto check_linum;
if (check_hdr_opt(&exp_passive_fin_in, &skel->bss->passive_fin_in,
"passive_fin_in"))
goto check_linum;
check_hdr_opt(&exp_active_fin_in, &skel->bss->active_fin_in,
"active_fin_in");
check_linum:
CHECK_FAIL(check_error_linum(sk_fds));
sk_fds_close(sk_fds);
}
/* Wire the expectations into the BPF side: whatever one peer sends out
 * is exactly what the other peer is expected to receive, so the *_out
 * variables in the skeleton are seeded from the exp_*_in globals.
 */
static void prepare_out(void)
{
skel->bss->active_syn_out = exp_passive_estab_in;
skel->bss->passive_synack_out = exp_active_estab_in;
skel->bss->active_fin_out = exp_passive_fin_in;
skel->bss->passive_fin_out = exp_active_fin_in;
}
/* Reset all mutable state between subtests: zero the skeleton's in/out
 * option buffers, restore the default experimental option kind/magic,
 * zero the userspace expectation globals (re-seeding the one non-zero
 * default, exp_active_hdr_stg.active), and flush every entry from the
 * per-port error map.
 */
static void reset_test(void)
{
size_t optsize = sizeof(struct bpf_test_option);
int lport, err;
memset(&skel->bss->passive_synack_out, 0, optsize);
memset(&skel->bss->passive_fin_out, 0, optsize);
memset(&skel->bss->passive_estab_in, 0, optsize);
memset(&skel->bss->passive_fin_in, 0, optsize);
memset(&skel->bss->active_syn_out, 0, optsize);
memset(&skel->bss->active_fin_out, 0, optsize);
memset(&skel->bss->active_estab_in, 0, optsize);
memset(&skel->bss->active_fin_in, 0, optsize);
skel->data->test_kind = TCPOPT_EXP;
skel->data->test_magic = 0xeB9F;
memset(&exp_passive_estab_in, 0, optsize);
memset(&exp_active_estab_in, 0, optsize);
memset(&exp_passive_fin_in, 0, optsize);
memset(&exp_active_fin_in, 0, optsize);
memset(&exp_passive_hdr_stg, 0, sizeof(exp_passive_hdr_stg));
memset(&exp_active_hdr_stg, 0, sizeof(exp_active_hdr_stg));
exp_active_hdr_stg.active = true;
/* drain the error map: delete-while-iterating via get_next_key */
err = bpf_map_get_next_key(lport_linum_map_fd, NULL, &lport);
while (!err) {
bpf_map_delete_elem(lport_linum_map_fd, &lport);
err = bpf_map_get_next_key(lport_linum_map_fd, &lport, &lport);
}
}
/* Subtest: header options must survive a TCP Fast Open handshake.
 * Expects both sides to receive RAND and MAX_DELACK_MS options during
 * establishment and the passive side's storage to record fastopen.
 */
static void fastopen_estab(void)
{
struct bpf_link *link;
struct sk_fds sk_fds;
hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map);
lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map);
exp_passive_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
exp_passive_estab_in.rand = 0xfa;
exp_passive_estab_in.max_delack_ms = 11;
exp_active_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
exp_active_estab_in.rand = 0xce;
exp_active_estab_in.max_delack_ms = 22;
exp_passive_hdr_stg.fastopen = true;
prepare_out();
/* Allow fastopen without fastopen cookie */
if (write_sysctl("/proc/sys/net/ipv4/tcp_fastopen", "1543"))
return;
link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
PTR_ERR(link)))
return;
if (sk_fds_connect(&sk_fds, true)) {
bpf_link__destroy(link);
return;
}
check_hdr_and_close_fds(&sk_fds);
bpf_link__destroy(link);
}
/* Subtest: header options under forced syncookie mode. The listener
 * replies with a syncookie SYNACK (no option state kept), so the BPF
 * prog must detect want_cookie itself, set OPTION_F_RESEND, and the
 * active side is expected to resend its SYN options (resend_syn).
 */
static void syncookie_estab(void)
{
	struct bpf_link *link;
	struct sk_fds sk_fds;

	hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map);
	lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map);

	exp_passive_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
	exp_passive_estab_in.rand = 0xfa;
	exp_passive_estab_in.max_delack_ms = 11;
	exp_active_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS |
		OPTION_F_RESEND;
	exp_active_estab_in.rand = 0xce;
	exp_active_estab_in.max_delack_ms = 22;
	exp_passive_hdr_stg.syncookie = true;
	/* was "= true," — a stray comma operator; fixed to a statement */
	exp_active_hdr_stg.resend_syn = true;
	prepare_out();

	/* Clear the RESEND to ensure the bpf prog can learn
	 * want_cookie and set the RESEND by itself.
	 */
	skel->bss->passive_synack_out.flags &= ~OPTION_F_RESEND;

	/* Enforce syncookie mode */
	if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "2"))
		return;

	link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
	if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
		  PTR_ERR(link)))
		return;

	if (sk_fds_connect(&sk_fds, false)) {
		bpf_link__destroy(link);
		return;
	}

	check_hdr_and_close_fds(&sk_fds);
	bpf_link__destroy(link);
}
/* Subtest: header options carried on FIN packets. Only the RAND option
 * is expected on each side's incoming FIN; establishment expectations
 * stay zeroed (set by reset_test()).
 */
static void fin(void)
{
struct bpf_link *link;
struct sk_fds sk_fds;
hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map);
lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map);
exp_passive_fin_in.flags = OPTION_F_RAND;
exp_passive_fin_in.rand = 0xfa;
exp_active_fin_in.flags = OPTION_F_RAND;
exp_active_fin_in.rand = 0xce;
prepare_out();
/* normal (non-forced) syncookie setting for a plain handshake */
if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1"))
return;
link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
PTR_ERR(link)))
return;
if (sk_fds_connect(&sk_fds, false)) {
bpf_link__destroy(link);
return;
}
check_hdr_and_close_fds(&sk_fds);
bpf_link__destroy(link);
}
/* Common body for the simple-establishment subtests. @exprm selects
 * whether the option is written as a RFC6994 experimental option
 * (kind TCPOPT_EXP + magic) or as a raw private kind 0xB9 with no
 * magic; expectations are identical either way.
 */
static void __simple_estab(bool exprm)
{
struct bpf_link *link;
struct sk_fds sk_fds;
hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map);
lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map);
exp_passive_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
exp_passive_estab_in.rand = 0xfa;
exp_passive_estab_in.max_delack_ms = 11;
exp_active_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
exp_active_estab_in.rand = 0xce;
exp_active_estab_in.max_delack_ms = 22;
prepare_out();
if (!exprm) {
/* use a non-experimental, unregistered kind with no magic */
skel->data->test_kind = 0xB9;
skel->data->test_magic = 0;
}
if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1"))
return;
link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
PTR_ERR(link)))
return;
if (sk_fds_connect(&sk_fds, false)) {
bpf_link__destroy(link);
return;
}
check_hdr_and_close_fds(&sk_fds);
bpf_link__destroy(link);
}
/* Subtest: simple establishment using a raw (non-experimental) option kind. */
static void no_exprm_estab(void)
{
__simple_estab(false);
}
/* Subtest: simple establishment using the experimental option (kind+magic). */
static void simple_estab(void)
{
__simple_estab(true);
}
/* Subtest: the misc_estab prog counts packet types (SYN, data, pure
 * ACK, FIN) as it parses headers. Send nr_data small messages with
 * MSG_EOR so each becomes its own skb, then verify the counters.
 */
static void misc(void)
{
const char send_msg[] = "MISC!!!";
char recv_msg[sizeof(send_msg)];
const unsigned int nr_data = 2;
struct bpf_link *link;
struct sk_fds sk_fds;
int i, ret;
lport_linum_map_fd = bpf_map__fd(misc_skel->maps.lport_linum_map);
if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1"))
return;
link = bpf_program__attach_cgroup(misc_skel->progs.misc_estab, cg_fd);
if (CHECK(IS_ERR(link), "attach_cgroup(misc_estab)", "err: %ld\n",
PTR_ERR(link)))
return;
if (sk_fds_connect(&sk_fds, false)) {
bpf_link__destroy(link);
return;
}
for (i = 0; i < nr_data; i++) {
/* MSG_EOR to ensure skb will not be combined */
ret = send(sk_fds.active_fd, send_msg, sizeof(send_msg),
MSG_EOR);
if (CHECK(ret != sizeof(send_msg), "send(msg)", "ret:%d\n",
ret))
goto check_linum;
ret = read(sk_fds.passive_fd, recv_msg, sizeof(recv_msg));
if (CHECK(ret != sizeof(send_msg), "read(msg)", "ret:%d\n",
ret))
goto check_linum;
}
if (sk_fds_shutdown(&sk_fds))
goto check_linum;
CHECK(misc_skel->bss->nr_syn != 1, "unexpected nr_syn",
"expected (1) != actual (%u)\n",
misc_skel->bss->nr_syn);
CHECK(misc_skel->bss->nr_data != nr_data, "unexpected nr_data",
"expected (%u) != actual (%u)\n",
nr_data, misc_skel->bss->nr_data);
/* The last ACK may have been delayed, so it is either 1 or 2. */
CHECK(misc_skel->bss->nr_pure_ack != 1 &&
misc_skel->bss->nr_pure_ack != 2,
"unexpected nr_pure_ack",
"expected (1 or 2) != actual (%u)\n",
misc_skel->bss->nr_pure_ack);
CHECK(misc_skel->bss->nr_fin != 1, "unexpected nr_fin",
"expected (1) != actual (%u)\n",
misc_skel->bss->nr_fin);
check_linum:
CHECK_FAIL(check_error_linum(&sk_fds));
sk_fds_close(&sk_fds);
bpf_link__destroy(link);
}
struct test {
const char *desc;
void (*run)(void);
};
#define DEF_TEST(name) { #name, name }
static struct test tests[] = {
DEF_TEST(simple_estab),
DEF_TEST(no_exprm_estab),
DEF_TEST(syncookie_estab),
DEF_TEST(fastopen_estab),
DEF_TEST(fin),
DEF_TEST(misc),
};
/* Test entry point: load both skeletons, join the test cgroup, then run
 * each registered subtest in a fresh network namespace, resetting all
 * shared state in between.
 */
void test_tcp_hdr_options(void)
{
	int i;

	skel = test_tcp_hdr_options__open_and_load();
	/* error messages gained the trailing '\n' the rest of the file uses */
	if (CHECK(!skel, "open and load skel", "failed\n"))
		return;

	misc_skel = test_misc_tcp_hdr_options__open_and_load();
	if (CHECK(!misc_skel, "open and load misc test skel", "failed\n"))
		goto skel_destroy;

	cg_fd = test__join_cgroup(CG_NAME);
	if (CHECK_FAIL(cg_fd < 0))
		goto skel_destroy;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		if (!test__start_subtest(tests[i].desc))
			continue;
		/* a fresh netns isolates the sysctl tweaks per subtest */
		if (create_netns())
			break;
		tests[i].run();
		reset_test();
	}

	close(cg_fd);
skel_destroy:
	test_misc_tcp_hdr_options__destroy(misc_skel);
	test_tcp_hdr_options__destroy(skel);
}

View File

@@ -0,0 +1,94 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define _GNU_SOURCE
#include <sched.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <test_progs.h>
#define TDIR "/sys/kernel/debug"
/* Scan @file for the substring "iter".
 * Returns 0 if found, -1 if the file cannot be opened or the substring
 * never appears. Note: a match split across two read() chunks can be
 * missed; the large buffer makes a contiguous match overwhelmingly
 * likely for the generated bpffs iterator files this scans.
 */
static int read_iter(char *file)
{
	/* 1024 should be enough to get contiguous 4 "iter" letters at some point */
	char buf[1024];
	int fd, len;

	fd = open(file, 0);
	if (fd < 0)
		return -1;
	/* reserve one byte: read() does not NUL-terminate, strstr() needs it */
	while ((len = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[len] = '\0';
		if (strstr(buf, "iter")) {
			close(fd);
			return 0;
		}
	}
	close(fd);
	return -1;
}
/* Child-process body for test_test_bpffs: in a private mount namespace,
 * replace TDIR with a tmpfs, mount two independent bpffs instances and
 * verify each exposes readable maps.debug / progs.debug iterator files.
 * Never returns: always exit()s with the last err (0 on full success;
 * note exit(-1) reaches the parent as WEXITSTATUS 255, still non-zero).
 */
static int fn(void)
{
int err, duration = 0;
err = unshare(CLONE_NEWNS);
if (CHECK(err, "unshare", "failed: %d\n", errno))
goto out;
/* make mounts private so nothing leaks to the parent namespace */
err = mount("", "/", "", MS_REC | MS_PRIVATE, NULL);
if (CHECK(err, "mount /", "failed: %d\n", errno))
goto out;
err = umount(TDIR);
if (CHECK(err, "umount " TDIR, "failed: %d\n", errno))
goto out;
err = mount("none", TDIR, "tmpfs", 0, NULL);
if (CHECK(err, "mount", "mount root failed: %d\n", errno))
goto out;
err = mkdir(TDIR "/fs1", 0777);
if (CHECK(err, "mkdir "TDIR"/fs1", "failed: %d\n", errno))
goto out;
err = mkdir(TDIR "/fs2", 0777);
if (CHECK(err, "mkdir "TDIR"/fs2", "failed: %d\n", errno))
goto out;
err = mount("bpf", TDIR "/fs1", "bpf", 0, NULL);
if (CHECK(err, "mount bpffs "TDIR"/fs1", "failed: %d\n", errno))
goto out;
err = mount("bpf", TDIR "/fs2", "bpf", 0, NULL);
if (CHECK(err, "mount bpffs " TDIR "/fs2", "failed: %d\n", errno))
goto out;
err = read_iter(TDIR "/fs1/maps.debug");
if (CHECK(err, "reading " TDIR "/fs1/maps.debug", "failed\n"))
goto out;
err = read_iter(TDIR "/fs2/progs.debug");
if (CHECK(err, "reading " TDIR "/fs2/progs.debug", "failed\n"))
goto out;
out:
/* best-effort cleanup; failures here don't affect the verdict */
umount(TDIR "/fs1");
umount(TDIR "/fs2");
rmdir(TDIR "/fs1");
rmdir(TDIR "/fs2");
umount(TDIR);
exit(err);
}
/* Fork a child that runs the bpffs checks (fn() exits, never returns)
 * and verify it exited with status 0.
 * NOTE(review): the waitpid CHECK tolerates ECHILD, and the CHECK
 * messages here lack a trailing '\n' — looks like oversights; confirm
 * against the test framework's log conventions.
 */
void test_test_bpffs(void)
{
int err, duration = 0, status = 0;
pid_t pid;
pid = fork();
if (CHECK(pid == -1, "clone", "clone failed %d", errno))
return;
if (pid == 0)
fn();
err = waitpid(pid, &status, 0);
if (CHECK(err == -1 && errno != ECHILD, "waitpid", "failed %d", errno))
return;
if (CHECK(WEXITSTATUS(status), "bpffs test ", "failed %d", WEXITSTATUS(status)))
return;
}

View File

@@ -0,0 +1,60 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Google LLC.
*/
#include <test_progs.h>
#include <linux/limits.h>
#include "local_storage.skel.h"
#include "network_helpers.h"
/* Create a unique temp file and immediately delete it, to exercise the
 * LSM inode hooks under test.
 * Returns 0 on success, a negative value on failure (mkstemp error fd,
 * or unlink()'s -1 — previously the unlink result was silently dropped).
 */
int create_and_unlink_file(void)
{
	char fname[PATH_MAX] = "/tmp/fileXXXXXX";
	int fd;

	fd = mkstemp(fname);
	if (fd < 0)
		return fd;

	close(fd);
	return unlink(fname);
}
/* Attach the local_storage LSM skeleton, then trigger its hooks from
 * this process (monitored_pid): a temp-file create/unlink exercises
 * inode local storage, a listening socket exercises sk local storage.
 * The skeleton's result variables must be flipped to 0 by the progs.
 */
void test_test_local_storage(void)
{
struct local_storage *skel = NULL;
int err, duration = 0, serv_sk = -1;
skel = local_storage__open_and_load();
if (CHECK(!skel, "skel_load", "lsm skeleton failed\n"))
goto close_prog;
err = local_storage__attach(skel);
if (CHECK(err, "attach", "lsm attach failed: %d\n", err))
goto close_prog;
/* progs only act on events from this pid */
skel->bss->monitored_pid = getpid();
err = create_and_unlink_file();
if (CHECK(err < 0, "exec_cmd", "err %d errno %d\n", err, errno))
goto close_prog;
CHECK(skel->data->inode_storage_result != 0, "inode_storage_result",
"inode_local_storage not set\n");
serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
if (CHECK(serv_sk < 0, "start_server", "failed to start server\n"))
goto close_prog;
CHECK(skel->data->sk_storage_result != 0, "sk_storage_result",
"sk_local_storage not set\n");
close(serv_sk);
close_prog:
local_storage__destroy(skel);
}

View File

@@ -10,6 +10,7 @@
#include <unistd.h>
#include <malloc.h>
#include <stdlib.h>
#include <unistd.h>
#include "lsm.skel.h"
@@ -55,6 +56,7 @@ void test_test_lsm(void)
{
struct lsm *skel = NULL;
int err, duration = 0;
int buf = 1234;
skel = lsm__open_and_load();
if (CHECK(!skel, "skel_load", "lsm skeleton failed\n"))
@@ -81,6 +83,13 @@ void test_test_lsm(void)
CHECK(skel->bss->mprotect_count != 1, "mprotect_count",
"mprotect_count = %d\n", skel->bss->mprotect_count);
syscall(__NR_setdomainname, &buf, -2L);
syscall(__NR_setdomainname, 0, -3L);
syscall(__NR_setdomainname, ~0L, -4L);
CHECK(skel->bss->copy_test != 3, "copy_test",
"copy_test = %d\n", skel->bss->copy_test);
close_prog:
lsm__destroy(skel);
}

View File

@@ -0,0 +1,3 @@
#include "core_reloc_types.h"
void f(struct core_reloc_enumval x) {}

View File

@@ -0,0 +1,3 @@
#include "core_reloc_types.h"
void f(struct core_reloc_enumval___diff x) {}

View File

@@ -0,0 +1,3 @@
#include "core_reloc_types.h"
void f(struct core_reloc_enumval___err_missing x) {}

View File

@@ -0,0 +1,3 @@
#include "core_reloc_types.h"
void f(struct core_reloc_enumval___val3_missing x) {}

View File

@@ -0,0 +1,4 @@
#include "core_reloc_types.h"
void f(struct core_reloc_size___err_ambiguous1 x,
struct core_reloc_size___err_ambiguous2 y) {}

View File

@@ -0,0 +1,3 @@
#include "core_reloc_types.h"
void f(struct core_reloc_type_based x) {}

View File

@@ -0,0 +1,3 @@
#include "core_reloc_types.h"
void f(struct core_reloc_type_based___all_missing x) {}

View File

@@ -0,0 +1,3 @@
#include "core_reloc_types.h"
void f(struct core_reloc_type_based___diff_sz x) {}

View File

@@ -0,0 +1,3 @@
#include "core_reloc_types.h"
void f(struct core_reloc_type_based___fn_wrong_args x) {}

View File

@@ -0,0 +1,3 @@
#include "core_reloc_types.h"
void f(struct core_reloc_type_based___incompat x) {}

View File

@@ -0,0 +1,3 @@
#include "core_reloc_types.h"
void f(struct core_reloc_type_id x) {}

View File

@@ -0,0 +1,3 @@
#include "core_reloc_types.h"
void f(struct core_reloc_type_id___missing_targets x) {}

View File

@@ -652,7 +652,7 @@ struct core_reloc_misc_extensible {
};
/*
* EXISTENCE
* FIELD EXISTENCE
*/
struct core_reloc_existence_output {
int a_exists;
@@ -809,3 +809,353 @@ struct core_reloc_size___diff_sz {
void *ptr_field;
enum { OTHER_VALUE = 0xFFFFFFFFFFFFFFFF } enum_field;
};
/* Error case of two candidates with the fields (int_field) at the same
* offset, but with differing final relocation values: size 4 vs size 1
*/
struct core_reloc_size___err_ambiguous1 {
/* int at offset 0 */
int int_field;
struct { int x; } struct_field;
union { int x; } union_field;
int arr_field[4];
void *ptr_field;
enum { VALUE___1 = 123 } enum_field;
};
struct core_reloc_size___err_ambiguous2 {
/* char at offset 0 */
char int_field;
struct { int x; } struct_field;
union { int x; } union_field;
int arr_field[4];
void *ptr_field;
enum { VALUE___2 = 123 } enum_field;
};
/*
* TYPE EXISTENCE & SIZE
*/
struct core_reloc_type_based_output {
bool struct_exists;
bool union_exists;
bool enum_exists;
bool typedef_named_struct_exists;
bool typedef_anon_struct_exists;
bool typedef_struct_ptr_exists;
bool typedef_int_exists;
bool typedef_enum_exists;
bool typedef_void_ptr_exists;
bool typedef_func_proto_exists;
bool typedef_arr_exists;
int struct_sz;
int union_sz;
int enum_sz;
int typedef_named_struct_sz;
int typedef_anon_struct_sz;
int typedef_struct_ptr_sz;
int typedef_int_sz;
int typedef_enum_sz;
int typedef_void_ptr_sz;
int typedef_func_proto_sz;
int typedef_arr_sz;
};
struct a_struct {
int x;
};
union a_union {
int y;
int z;
};
typedef struct a_struct named_struct_typedef;
typedef struct { int x, y, z; } anon_struct_typedef;
typedef struct {
int a, b, c;
} *struct_ptr_typedef;
enum an_enum {
AN_ENUM_VAL1 = 1,
AN_ENUM_VAL2 = 2,
AN_ENUM_VAL3 = 3,
};
typedef int int_typedef;
typedef enum { TYPEDEF_ENUM_VAL1, TYPEDEF_ENUM_VAL2 } enum_typedef;
typedef void *void_ptr_typedef;
typedef int (*func_proto_typedef)(long);
typedef char arr_typedef[20];
struct core_reloc_type_based {
struct a_struct f1;
union a_union f2;
enum an_enum f3;
named_struct_typedef f4;
anon_struct_typedef f5;
struct_ptr_typedef f6;
int_typedef f7;
enum_typedef f8;
void_ptr_typedef f9;
func_proto_typedef f10;
arr_typedef f11;
};
/* no types in target */
struct core_reloc_type_based___all_missing {
};
/* different type sizes, extra modifiers, anon vs named enums, etc */
struct a_struct___diff_sz {
long x;
int y;
char z;
};
union a_union___diff_sz {
char yy;
char zz;
};
typedef struct a_struct___diff_sz named_struct_typedef___diff_sz;
typedef struct { long xx, yy, zzz; } anon_struct_typedef___diff_sz;
typedef struct {
char aa[1], bb[2], cc[3];
} *struct_ptr_typedef___diff_sz;
enum an_enum___diff_sz {
AN_ENUM_VAL1___diff_sz = 0x123412341234,
AN_ENUM_VAL2___diff_sz = 2,
};
typedef unsigned long int_typedef___diff_sz;
typedef enum an_enum___diff_sz enum_typedef___diff_sz;
typedef const void * const void_ptr_typedef___diff_sz;
typedef int_typedef___diff_sz (*func_proto_typedef___diff_sz)(char);
typedef int arr_typedef___diff_sz[2];
struct core_reloc_type_based___diff_sz {
struct a_struct___diff_sz f1;
union a_union___diff_sz f2;
enum an_enum___diff_sz f3;
named_struct_typedef___diff_sz f4;
anon_struct_typedef___diff_sz f5;
struct_ptr_typedef___diff_sz f6;
int_typedef___diff_sz f7;
enum_typedef___diff_sz f8;
void_ptr_typedef___diff_sz f9;
func_proto_typedef___diff_sz f10;
arr_typedef___diff_sz f11;
};
/* incompatibilities between target and local types */
union a_struct___incompat { /* union instead of struct */
int x;
};
struct a_union___incompat { /* struct instead of union */
int y;
int z;
};
/* typedef to union, not to struct */
typedef union a_struct___incompat named_struct_typedef___incompat;
/* typedef to void pointer, instead of struct */
typedef void *anon_struct_typedef___incompat;
/* extra pointer indirection */
typedef struct {
int a, b, c;
} **struct_ptr_typedef___incompat;
/* typedef of a struct with int, instead of int */
typedef struct { int x; } int_typedef___incompat;
/* typedef to func_proto, instead of enum */
typedef int (*enum_typedef___incompat)(void);
/* pointer to char instead of void */
typedef char *void_ptr_typedef___incompat;
/* void return type instead of int */
typedef void (*func_proto_typedef___incompat)(long);
/* multi-dimensional array instead of a single-dimensional */
typedef int arr_typedef___incompat[20][2];
struct core_reloc_type_based___incompat {
union a_struct___incompat f1;
struct a_union___incompat f2;
/* the only valid one is enum, to check that something still succeeds */
enum an_enum f3;
named_struct_typedef___incompat f4;
anon_struct_typedef___incompat f5;
struct_ptr_typedef___incompat f6;
int_typedef___incompat f7;
enum_typedef___incompat f8;
void_ptr_typedef___incompat f9;
func_proto_typedef___incompat f10;
arr_typedef___incompat f11;
};
/* func_proto with incompatible signature */
typedef void (*func_proto_typedef___fn_wrong_ret1)(long);
typedef int * (*func_proto_typedef___fn_wrong_ret2)(long);
typedef struct { int x; } int_struct_typedef;
typedef int_struct_typedef (*func_proto_typedef___fn_wrong_ret3)(long);
typedef int (*func_proto_typedef___fn_wrong_arg)(void *);
typedef int (*func_proto_typedef___fn_wrong_arg_cnt1)(long, long);
typedef int (*func_proto_typedef___fn_wrong_arg_cnt2)(void);
struct core_reloc_type_based___fn_wrong_args {
/* one valid type to make sure relos still work */
struct a_struct f1;
func_proto_typedef___fn_wrong_ret1 f2;
func_proto_typedef___fn_wrong_ret2 f3;
func_proto_typedef___fn_wrong_ret3 f4;
func_proto_typedef___fn_wrong_arg f5;
func_proto_typedef___fn_wrong_arg_cnt1 f6;
func_proto_typedef___fn_wrong_arg_cnt2 f7;
};
/*
* TYPE ID MAPPING (LOCAL AND TARGET)
*/
struct core_reloc_type_id_output {
int local_anon_struct;
int local_anon_union;
int local_anon_enum;
int local_anon_func_proto_ptr;
int local_anon_void_ptr;
int local_anon_arr;
int local_struct;
int local_union;
int local_enum;
int local_int;
int local_struct_typedef;
int local_func_proto_typedef;
int local_arr_typedef;
int targ_struct;
int targ_union;
int targ_enum;
int targ_int;
int targ_struct_typedef;
int targ_func_proto_typedef;
int targ_arr_typedef;
};
struct core_reloc_type_id {
struct a_struct f1;
union a_union f2;
enum an_enum f3;
named_struct_typedef f4;
func_proto_typedef f5;
arr_typedef f6;
};
struct core_reloc_type_id___missing_targets {
/* nothing */
};
/*
* ENUMERATOR VALUE EXISTENCE AND VALUE RELOCATION
*/
struct core_reloc_enumval_output {
bool named_val1_exists;
bool named_val2_exists;
bool named_val3_exists;
bool anon_val1_exists;
bool anon_val2_exists;
bool anon_val3_exists;
int named_val1;
int named_val2;
int anon_val1;
int anon_val2;
};
enum named_enum {
NAMED_ENUM_VAL1 = 1,
NAMED_ENUM_VAL2 = 2,
NAMED_ENUM_VAL3 = 3,
};
typedef enum {
ANON_ENUM_VAL1 = 0x10,
ANON_ENUM_VAL2 = 0x20,
ANON_ENUM_VAL3 = 0x30,
} anon_enum;
struct core_reloc_enumval {
enum named_enum f1;
anon_enum f2;
};
/* differing enumerator values */
enum named_enum___diff {
NAMED_ENUM_VAL1___diff = 101,
NAMED_ENUM_VAL2___diff = 202,
NAMED_ENUM_VAL3___diff = 303,
};
typedef enum {
ANON_ENUM_VAL1___diff = 0x11,
ANON_ENUM_VAL2___diff = 0x22,
ANON_ENUM_VAL3___diff = 0x33,
} anon_enum___diff;
struct core_reloc_enumval___diff {
enum named_enum___diff f1;
anon_enum___diff f2;
};
/* missing (optional) third enum value */
enum named_enum___val3_missing {
NAMED_ENUM_VAL1___val3_missing = 111,
NAMED_ENUM_VAL2___val3_missing = 222,
};
typedef enum {
ANON_ENUM_VAL1___val3_missing = 0x111,
ANON_ENUM_VAL2___val3_missing = 0x222,
} anon_enum___val3_missing;
struct core_reloc_enumval___val3_missing {
enum named_enum___val3_missing f1;
anon_enum___val3_missing f2;
};
/* missing (mandatory) second enum value, should fail */
enum named_enum___err_missing {
NAMED_ENUM_VAL1___err_missing = 1,
NAMED_ENUM_VAL3___err_missing = 3,
};
typedef enum {
ANON_ENUM_VAL1___err_missing = 0x111,
ANON_ENUM_VAL3___err_missing = 0x222,
} anon_enum___err_missing;
struct core_reloc_enumval___err_missing {
enum named_enum___err_missing f1;
anon_enum___err_missing f2;
};

View File

@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/ipv6.h>
#include <linux/bpf.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_tracing.h>
@@ -151,4 +153,29 @@ int new_get_constant(long val)
test_get_constant = 1;
return test_get_constant; /* original get_constant() returns val - 122 */
}
__u64 test_pkt_write_access_subprog = 0;
/* freplace target: replaces test_pkt_write_access_subprog to prove an
 * freplace prog retains packet-write access. Bounds checks are written
 * exactly as the verifier requires (data/data_end comparison before any
 * packet access); sets the global flag so userspace can confirm it ran.
 */
SEC("freplace/test_pkt_write_access_subprog")
int new_test_pkt_write_access_subprog(struct __sk_buff *skb, __u32 off)
{
void *data = (void *)(long)skb->data;
void *data_end = (void *)(long)skb->data_end;
struct tcphdr *tcp;
/* off must point at the TCP header (after eth + ipv6 headers) */
if (off > sizeof(struct ethhdr) + sizeof(struct ipv6hdr))
return -1;
tcp = data + off;
/* verifier-mandated bounds check before touching the header */
if (tcp + 1 > data_end)
return -1;
/* make modifications to the packet data */
tcp->check++;
tcp->syn = 0;
test_pkt_write_access_subprog = 1;
return 0;
}
char _license[] SEC("license") = "GPL";

View File

@@ -0,0 +1,40 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#define VAR_NUM 2
struct hmap_elem {
struct bpf_spin_lock lock;
int var[VAR_NUM];
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, struct hmap_elem);
} hash_map SEC(".maps");
/* freplace replacement for handle_kprobe: takes the map element's
 * bpf_spin_lock while writing var[0], exercising bpf_spin_lock() from an
 * extension (freplace) program.
 *
 * Returns 1 if the element at key 0 is missing, 0 otherwise.
 */
SEC("freplace/handle_kprobe")
int new_handle_kprobe(struct pt_regs *ctx)
{
	/* dropped unused `struct hmap_elem zero = {}` local */
	struct hmap_elem *val;
	int key = 0;

	val = bpf_map_lookup_elem(&hash_map, &key);
	if (!val)
		return 1;
	/* spin_lock in hash map */
	bpf_spin_lock(&val->lock);
	val->var[0] = 99;
	bpf_spin_unlock(&val->lock);

	return 0;
}
char _license[] SEC("license") = "GPL";

View File

@@ -0,0 +1,34 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
struct bpf_map_def SEC("maps") sock_map = {
.type = BPF_MAP_TYPE_SOCKMAP,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = 2,
};
/* freplace replacement for cls_redirect: look up the socket stored at
 * index 0 of sock_map and re-insert it, exercising map updates with a
 * referenced socket from an extension program.
 */
SEC("freplace/cls_redirect")
int freplace_cls_redirect_test(struct __sk_buff *skb)
{
	int ret = 0;
	const int zero = 0;
	struct bpf_sock *sk;

	sk = bpf_map_lookup_elem(&sock_map, &zero);
	if (!sk)
		return TC_ACT_SHOT;

	ret = bpf_map_update_elem(&sock_map, &zero, sk, 0);
	/* lookup acquired a reference on sk; release it on every path */
	bpf_sk_release(sk);

	return ret == 0 ? TC_ACT_OK : TC_ACT_SHOT;
}
char _license[] SEC("license") = "GPL";

View File

@@ -0,0 +1,19 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/stddef.h>
#include <linux/ipv6.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
/* freplace replacement for connect_v4_prog that deliberately misbehaves;
 * the test expects the kernel to handle the out-of-range return value.
 */
SEC("freplace/connect_v4_prog")
int new_connect_v4_prog(struct bpf_sock_addr *ctx)
{
	// return value that's in an invalid range
	return 255;
}
char _license[] SEC("license") = "GPL";

View File

@@ -0,0 +1,140 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020 Google LLC.
*/
#include <errno.h>
#include <linux/bpf.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
#define DUMMY_STORAGE_VALUE 0xdeadbeef
int monitored_pid = 0;
int inode_storage_result = -1;
int sk_storage_result = -1;
struct dummy_storage {
__u32 value;
};
struct {
__uint(type, BPF_MAP_TYPE_INODE_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct dummy_storage);
} inode_storage_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
__type(key, int);
__type(value, struct dummy_storage);
} sk_storage_map SEC(".maps");
/* TODO Use vmlinux.h once BTF pruning for embedded types is fixed.
*/
struct sock {} __attribute__((preserve_access_index));
struct sockaddr {} __attribute__((preserve_access_index));
struct socket {
struct sock *sk;
} __attribute__((preserve_access_index));
struct inode {} __attribute__((preserve_access_index));
struct dentry {
struct inode *d_inode;
} __attribute__((preserve_access_index));
struct file {
struct inode *f_inode;
} __attribute__((preserve_access_index));
/* LSM hook on inode_unlink: for the monitored pid, get-or-create the
 * inode-local storage of the victim dentry's inode, then delete it,
 * recording the delete result in inode_storage_result for userspace.
 */
SEC("lsm/inode_unlink")
int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
{
	__u32 pid = bpf_get_current_pid_tgid() >> 32;
	struct dummy_storage *storage;

	if (pid != monitored_pid)
		return 0;

	/* Use the generic local-storage flag name with the inode-storage
	 * API, matching file_open() in this file (the sk-storage spelling
	 * BPF_SK_STORAGE_GET_F_CREATE is an alias with the same value).
	 */
	storage = bpf_inode_storage_get(&inode_storage_map, victim->d_inode, 0,
					BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (!storage)
		return 0;

	/* NOTE(review): this conditional store is unconditionally
	 * overwritten just below; it looks like it was meant to flag an
	 * unexpected stored value. Kept as-is to preserve behavior —
	 * confirm intent with the test's userspace side.
	 */
	if (storage->value == DUMMY_STORAGE_VALUE)
		inode_storage_result = -1;

	inode_storage_result =
		bpf_inode_storage_delete(&inode_storage_map, victim->d_inode);
	return 0;
}
/* LSM hook on socket_bind: for the monitored pid, get-or-create the
 * socket's sk-local storage and delete it, recording the delete result
 * in sk_storage_result for userspace to check.
 */
SEC("lsm/socket_bind")
int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
	     int addrlen)
{
	__u32 pid = bpf_get_current_pid_tgid() >> 32;
	struct dummy_storage *storage;

	if (pid != monitored_pid)
		return 0;

	storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0,
				     BPF_SK_STORAGE_GET_F_CREATE);
	if (!storage)
		return 0;

	/* NOTE(review): this store is unconditionally overwritten below;
	 * presumably meant to flag a value mismatch — confirm intent.
	 */
	if (storage->value == DUMMY_STORAGE_VALUE)
		sk_storage_result = -1;

	sk_storage_result = bpf_sk_storage_delete(&sk_storage_map, sock->sk);
	return 0;
}
/* LSM hook on socket_post_create: seed the new socket's sk-local storage
 * with DUMMY_STORAGE_VALUE so later hooks (socket_bind) can observe it.
 */
SEC("lsm/socket_post_create")
int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
	     int protocol, int kern)
{
	__u32 pid = bpf_get_current_pid_tgid() >> 32;
	struct dummy_storage *storage;

	if (pid != monitored_pid)
		return 0;

	storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0,
				     BPF_SK_STORAGE_GET_F_CREATE);
	if (!storage)
		return 0;

	storage->value = DUMMY_STORAGE_VALUE;
	return 0;
}
/* LSM hook on file_open: for the monitored pid, seed the opened file's
 * inode-local storage with DUMMY_STORAGE_VALUE so unlink_hook can find
 * and delete it later.
 */
SEC("lsm/file_open")
int BPF_PROG(file_open, struct file *file)
{
	__u32 pid = bpf_get_current_pid_tgid() >> 32;
	struct dummy_storage *storage;

	if (pid != monitored_pid)
		return 0;
	/* f_inode may be NULL; bail out rather than pass it to the helper */
	if (!file->f_inode)
		return 0;

	storage = bpf_inode_storage_get(&inode_storage_map, file->f_inode, 0,
					BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (!storage)
		return 0;

	storage->value = DUMMY_STORAGE_VALUE;
	return 0;
}

View File

@@ -9,6 +9,27 @@
#include <bpf/bpf_tracing.h>
#include <errno.h>
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} array SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} hash SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} lru_hash SEC(".maps");
char _license[] SEC("license") = "GPL";
int monitored_pid = 0;
@@ -36,13 +57,54 @@ int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
return ret;
}
SEC("lsm/bprm_committed_creds")
SEC("lsm.s/bprm_committed_creds")
int BPF_PROG(test_void_hook, struct linux_binprm *bprm)
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
char args[64];
__u32 key = 0;
__u64 *value;
if (monitored_pid == pid)
bprm_count++;
bpf_copy_from_user(args, sizeof(args), (void *)bprm->vma->vm_mm->arg_start);
bpf_copy_from_user(args, sizeof(args), (void *)bprm->mm->arg_start);
value = bpf_map_lookup_elem(&array, &key);
if (value)
*value = 0;
value = bpf_map_lookup_elem(&hash, &key);
if (value)
*value = 0;
value = bpf_map_lookup_elem(&lru_hash, &key);
if (value)
*value = 0;
return 0;
}
SEC("lsm/task_free") /* lsm/ is ok, lsm.s/ fails */
int BPF_PROG(test_task_free, struct task_struct *task)
{
return 0;
}
int copy_test = 0;
SEC("fentry.s/__x64_sys_setdomainname")
int BPF_PROG(test_sys_setdomainname, struct pt_regs *regs)
{
void *ptr = (void *)PT_REGS_PARM1(regs);
int len = PT_REGS_PARM2(regs);
int buf = 0;
long ret;
ret = bpf_copy_from_user(&buf, sizeof(buf), ptr);
if (len == -2 && ret == 0 && buf == 1234)
copy_test++;
if (len == -3 && ret == -EFAULT)
copy_test++;
if (len == -4 && ret == -EFAULT)
copy_test++;
return 0;
}

View File

@@ -589,7 +589,7 @@ static inline int check_stack(void)
return 1;
}
struct bpf_sk_storage_map {
struct bpf_local_storage_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
@@ -602,8 +602,8 @@ struct {
static inline int check_sk_storage(void)
{
struct bpf_sk_storage_map *sk_storage =
(struct bpf_sk_storage_map *)&m_sk_storage;
struct bpf_local_storage_map *sk_storage =
(struct bpf_local_storage_map *)&m_sk_storage;
struct bpf_map *map = (struct bpf_map *)&m_sk_storage;
VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));

View File

@@ -11,6 +11,13 @@ struct inner_map {
} inner_map1 SEC(".maps"),
inner_map2 SEC(".maps");
struct inner_map_sz2 {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 2);
__type(key, int);
__type(value, int);
} inner_map_sz2 SEC(".maps");
struct outer_arr {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 3);
@@ -50,6 +57,30 @@ struct outer_hash {
},
};
struct sockarr_sz1 {
__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} sockarr_sz1 SEC(".maps");
struct sockarr_sz2 {
__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
__uint(max_entries, 2);
__type(key, int);
__type(value, int);
} sockarr_sz2 SEC(".maps");
struct outer_sockarr_sz1 {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 1);
__uint(key_size, sizeof(int));
__uint(value_size, sizeof(int));
__array(values, struct sockarr_sz1);
} outer_sockarr SEC(".maps") = {
.values = { (void *)&sockarr_sz1 },
};
int input = 0;
SEC("raw_tp/sys_enter")

View File

@@ -0,0 +1,72 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
bool skip;
} data = {};
enum named_enum {
NAMED_ENUM_VAL1 = 1,
NAMED_ENUM_VAL2 = 2,
NAMED_ENUM_VAL3 = 3,
};
typedef enum {
ANON_ENUM_VAL1 = 0x10,
ANON_ENUM_VAL2 = 0x20,
ANON_ENUM_VAL3 = 0x30,
} anon_enum;
struct core_reloc_enumval_output {
bool named_val1_exists;
bool named_val2_exists;
bool named_val3_exists;
bool anon_val1_exists;
bool anon_val2_exists;
bool anon_val3_exists;
int named_val1;
int named_val2;
int anon_val1;
int anon_val2;
};
/* CO-RE enum-value relocation test: probe existence and target values of
 * named and anonymous enumerators, writing results into data.out for the
 * userspace runner. Both accepted spellings of the CO-RE macros are
 * exercised: passing an enum-typed variable and passing the type name.
 */
SEC("raw_tracepoint/sys_enter")
int test_core_enumval(void *ctx)
{
#if __has_builtin(__builtin_preserve_enum_value)
	struct core_reloc_enumval_output *out = (void *)&data.out;
	enum named_enum named = 0;
	anon_enum anon = 0;

	/* first call uses a variable, the next two use the type name */
	out->named_val1_exists = bpf_core_enum_value_exists(named, NAMED_ENUM_VAL1);
	out->named_val2_exists = bpf_core_enum_value_exists(enum named_enum, NAMED_ENUM_VAL2);
	out->named_val3_exists = bpf_core_enum_value_exists(enum named_enum, NAMED_ENUM_VAL3);

	out->anon_val1_exists = bpf_core_enum_value_exists(anon, ANON_ENUM_VAL1);
	out->anon_val2_exists = bpf_core_enum_value_exists(anon_enum, ANON_ENUM_VAL2);
	out->anon_val3_exists = bpf_core_enum_value_exists(anon_enum, ANON_ENUM_VAL3);

	out->named_val1 = bpf_core_enum_value(named, NAMED_ENUM_VAL1);
	out->named_val2 = bpf_core_enum_value(named, NAMED_ENUM_VAL2);
	/* NAMED_ENUM_VAL3 value is optional */

	out->anon_val1 = bpf_core_enum_value(anon, ANON_ENUM_VAL1);
	out->anon_val2 = bpf_core_enum_value(anon, ANON_ENUM_VAL2);
	/* ANON_ENUM_VAL3 value is optional */
#else
	/* compiler lacks __builtin_preserve_enum_value; tell runner to skip */
	data.skip = true;
#endif

	return 0;
}

View File

@@ -3,6 +3,7 @@
#include <linux/bpf.h>
#include <stdint.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
@@ -11,6 +12,7 @@ char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
bool skip;
uint64_t my_pid_tgid;
} data = {};

View File

@@ -0,0 +1,110 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
bool skip;
} data = {};
struct a_struct {
int x;
};
union a_union {
int y;
int z;
};
typedef struct a_struct named_struct_typedef;
typedef struct { int x, y, z; } anon_struct_typedef;
typedef struct {
int a, b, c;
} *struct_ptr_typedef;
enum an_enum {
AN_ENUM_VAL1 = 1,
AN_ENUM_VAL2 = 2,
AN_ENUM_VAL3 = 3,
};
typedef int int_typedef;
typedef enum { TYPEDEF_ENUM_VAL1, TYPEDEF_ENUM_VAL2 } enum_typedef;
typedef void *void_ptr_typedef;
typedef int (*func_proto_typedef)(long);
typedef char arr_typedef[20];
struct core_reloc_type_based_output {
bool struct_exists;
bool union_exists;
bool enum_exists;
bool typedef_named_struct_exists;
bool typedef_anon_struct_exists;
bool typedef_struct_ptr_exists;
bool typedef_int_exists;
bool typedef_enum_exists;
bool typedef_void_ptr_exists;
bool typedef_func_proto_exists;
bool typedef_arr_exists;
int struct_sz;
int union_sz;
int enum_sz;
int typedef_named_struct_sz;
int typedef_anon_struct_sz;
int typedef_struct_ptr_sz;
int typedef_int_sz;
int typedef_enum_sz;
int typedef_void_ptr_sz;
int typedef_func_proto_sz;
int typedef_arr_sz;
};
/* CO-RE type-based relocation test: for each local type, record whether a
 * matching type exists in the target BTF and what its size is, writing
 * results into data.out for the userspace runner to validate.
 */
SEC("raw_tracepoint/sys_enter")
int test_core_type_based(void *ctx)
{
#if __has_builtin(__builtin_preserve_type_info)
	struct core_reloc_type_based_output *out = (void *)&data.out;

	out->struct_exists = bpf_core_type_exists(struct a_struct);
	out->union_exists = bpf_core_type_exists(union a_union);
	out->enum_exists = bpf_core_type_exists(enum an_enum);
	out->typedef_named_struct_exists = bpf_core_type_exists(named_struct_typedef);
	out->typedef_anon_struct_exists = bpf_core_type_exists(anon_struct_typedef);
	out->typedef_struct_ptr_exists = bpf_core_type_exists(struct_ptr_typedef);
	out->typedef_int_exists = bpf_core_type_exists(int_typedef);
	out->typedef_enum_exists = bpf_core_type_exists(enum_typedef);
	out->typedef_void_ptr_exists = bpf_core_type_exists(void_ptr_typedef);
	out->typedef_func_proto_exists = bpf_core_type_exists(func_proto_typedef);
	out->typedef_arr_exists = bpf_core_type_exists(arr_typedef);

	out->struct_sz = bpf_core_type_size(struct a_struct);
	out->union_sz = bpf_core_type_size(union a_union);
	out->enum_sz = bpf_core_type_size(enum an_enum);
	out->typedef_named_struct_sz = bpf_core_type_size(named_struct_typedef);
	out->typedef_anon_struct_sz = bpf_core_type_size(anon_struct_typedef);
	out->typedef_struct_ptr_sz = bpf_core_type_size(struct_ptr_typedef);
	out->typedef_int_sz = bpf_core_type_size(int_typedef);
	out->typedef_enum_sz = bpf_core_type_size(enum_typedef);
	out->typedef_void_ptr_sz = bpf_core_type_size(void_ptr_typedef);
	out->typedef_func_proto_sz = bpf_core_type_size(func_proto_typedef);
	out->typedef_arr_sz = bpf_core_type_size(arr_typedef);
#else
	/* compiler lacks __builtin_preserve_type_info; tell runner to skip */
	data.skip = true;
#endif

	return 0;
}

View File

@@ -0,0 +1,115 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
bool skip;
} data = {};
/* some types are shared with test_core_reloc_type_based.c */
struct a_struct {
int x;
};
union a_union {
int y;
int z;
};
enum an_enum {
AN_ENUM_VAL1 = 1,
AN_ENUM_VAL2 = 2,
AN_ENUM_VAL3 = 3,
};
typedef struct a_struct named_struct_typedef;
typedef int (*func_proto_typedef)(long);
typedef char arr_typedef[20];
struct core_reloc_type_id_output {
int local_anon_struct;
int local_anon_union;
int local_anon_enum;
int local_anon_func_proto_ptr;
int local_anon_void_ptr;
int local_anon_arr;
int local_struct;
int local_union;
int local_enum;
int local_int;
int local_struct_typedef;
int local_func_proto_typedef;
int local_arr_typedef;
int targ_struct;
int targ_union;
int targ_enum;
int targ_int;
int targ_struct_typedef;
int targ_func_proto_typedef;
int targ_arr_typedef;
};
/* preserve types even if Clang doesn't support built-in */
struct a_struct t1 = {};
union a_union t2 = {};
enum an_enum t3 = 0;
named_struct_typedef t4 = {};
func_proto_typedef t5 = 0;
arr_typedef t6 = {};
/* CO-RE type-id relocation test: capture local BTF ids of anonymous and
 * named types plus the kernel (target) BTF ids of the named ones,
 * writing all results into data.out for the userspace runner.
 */
SEC("raw_tracepoint/sys_enter")
int test_core_type_id(void *ctx)
{
	/* We use __builtin_btf_type_id() in this tests, but up until the time
	 * __builtin_preserve_type_info() was added it contained a bug that
	 * would make this test fail. The bug was fixed ([0]) with addition of
	 * __builtin_preserve_type_info(), though, so that's what we are using
	 * to detect whether this test has to be executed, however strange
	 * that might look like.
	 *
	 * [0] https://reviews.llvm.org/D85174
	 */
#if __has_builtin(__builtin_preserve_type_info)
	struct core_reloc_type_id_output *out = (void *)&data.out;

	out->local_anon_struct = bpf_core_type_id_local(struct { int marker_field; });
	out->local_anon_union = bpf_core_type_id_local(union { int marker_field; });
	out->local_anon_enum = bpf_core_type_id_local(enum { MARKER_ENUM_VAL = 123 });
	out->local_anon_func_proto_ptr = bpf_core_type_id_local(_Bool(*)(int));
	out->local_anon_void_ptr = bpf_core_type_id_local(void *);
	out->local_anon_arr = bpf_core_type_id_local(_Bool[47]);

	out->local_struct = bpf_core_type_id_local(struct a_struct);
	out->local_union = bpf_core_type_id_local(union a_union);
	out->local_enum = bpf_core_type_id_local(enum an_enum);
	out->local_int = bpf_core_type_id_local(int);
	out->local_struct_typedef = bpf_core_type_id_local(named_struct_typedef);
	out->local_func_proto_typedef = bpf_core_type_id_local(func_proto_typedef);
	out->local_arr_typedef = bpf_core_type_id_local(arr_typedef);

	out->targ_struct = bpf_core_type_id_kernel(struct a_struct);
	out->targ_union = bpf_core_type_id_kernel(union a_union);
	out->targ_enum = bpf_core_type_id_kernel(enum an_enum);
	out->targ_int = bpf_core_type_id_kernel(int);
	out->targ_struct_typedef = bpf_core_type_id_kernel(named_struct_typedef);
	out->targ_func_proto_typedef = bpf_core_type_id_kernel(func_proto_typedef);
	out->targ_arr_typedef = bpf_core_type_id_kernel(arr_typedef);
#else
	/* old Clang: skip instead of tripping the builtin's former bug */
	data.skip = true;
#endif

	return 0;
}

View File

@@ -0,0 +1,58 @@
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#define MAX_PATH_LEN 128
#define MAX_FILES 7
pid_t my_pid = 0;
__u32 cnt_stat = 0;
__u32 cnt_close = 0;
char paths_stat[MAX_FILES][MAX_PATH_LEN] = {};
char paths_close[MAX_FILES][MAX_PATH_LEN] = {};
int rets_stat[MAX_FILES] = {};
int rets_close[MAX_FILES] = {};
/* fentry on vfs_getattr: for the test's own pid, resolve the path with
 * bpf_d_path() into paths_stat[cnt] and record the helper's return code
 * in rets_stat[cnt], for up to MAX_FILES calls.
 */
SEC("fentry/vfs_getattr")
int BPF_PROG(prog_stat, struct path *path, struct kstat *stat,
	     __u32 request_mask, unsigned int query_flags)
{
	pid_t pid = bpf_get_current_pid_tgid() >> 32;
	__u32 cnt = cnt_stat;
	int ret;

	if (pid != my_pid)
		return 0;
	/* stop recording once the fixed-size result arrays are full */
	if (cnt >= MAX_FILES)
		return 0;

	ret = bpf_d_path(path, paths_stat[cnt], MAX_PATH_LEN);
	rets_stat[cnt] = ret;

	cnt_stat++;
	return 0;
}
/* fentry on filp_close: for the test's own pid, resolve the closed
 * file's f_path with bpf_d_path() into paths_close[cnt] and record the
 * return code in rets_close[cnt], for up to MAX_FILES calls.
 */
SEC("fentry/filp_close")
int BPF_PROG(prog_close, struct file *file, void *id)
{
	pid_t pid = bpf_get_current_pid_tgid() >> 32;
	__u32 cnt = cnt_close;
	int ret;

	if (pid != my_pid)
		return 0;
	/* stop recording once the fixed-size result arrays are full */
	if (cnt >= MAX_FILES)
		return 0;

	ret = bpf_d_path(&file->f_path,
			 paths_close[cnt], MAX_PATH_LEN);
	rets_close[cnt] = ret;

	cnt_close++;
	return 0;
}
char _license[] SEC("license") = "GPL";

View File

@@ -0,0 +1,325 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/socket.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define BPF_PROG_TEST_TCP_HDR_OPTIONS
#include "test_tcp_hdr_options.h"
__u16 last_addr16_n = __bpf_htons(0xeB9F);
__u16 active_lport_n = 0;
__u16 active_lport_h = 0;
__u16 passive_lport_n = 0;
__u16 passive_lport_h = 0;
/* options received at passive side */
unsigned int nr_pure_ack = 0;
unsigned int nr_data = 0;
unsigned int nr_syn = 0;
unsigned int nr_fin = 0;
/* Check the header received from the active side */
/* Validate the header options written by the active side, exercising the
 * error paths of bpf_load_hdr_opt() (short buffer, bad kind-length, zero
 * magic) before the successful lookups. When check_syn is set, the
 * options are loaded from the saved SYN and the SYN's IPv6/TCP headers
 * are cross-checked against the globals recorded at connect time.
 * Returns CG_OK on success; RET_CG_ERR() records the error and bails.
 */
static int __check_active_hdr_in(struct bpf_sock_ops *skops, bool check_syn)
{
	union {
		struct tcphdr th;
		struct ipv6hdr ip6;
		struct tcp_exprm_opt exprm_opt;
		struct tcp_opt reg_opt;
		__u8 data[100]; /* IPv6 (40) + Max TCP hdr (60) */
	} hdr = {};
	__u64 load_flags = check_syn ? BPF_LOAD_HDR_OPT_TCP_SYN : 0;
	struct tcphdr *pth;
	int ret;

	hdr.reg_opt.kind = 0xB9;

	/* The option is 4 bytes long instead of 2 bytes */
	ret = bpf_load_hdr_opt(skops, &hdr.reg_opt, 2, load_flags);
	if (ret != -ENOSPC)
		RET_CG_ERR(ret);

	/* Test searching magic with regular kind */
	hdr.reg_opt.len = 4;
	ret = bpf_load_hdr_opt(skops, &hdr.reg_opt, sizeof(hdr.reg_opt),
			       load_flags);
	if (ret != -EINVAL)
		RET_CG_ERR(ret);

	/* len == 0 means "search by kind only"; expect the full option */
	hdr.reg_opt.len = 0;
	ret = bpf_load_hdr_opt(skops, &hdr.reg_opt, sizeof(hdr.reg_opt),
			       load_flags);
	if (ret != 4 || hdr.reg_opt.len != 4 || hdr.reg_opt.kind != 0xB9 ||
	    hdr.reg_opt.data[0] != 0xfa || hdr.reg_opt.data[1] != 0xce)
		RET_CG_ERR(ret);

	/* Test searching experimental option with invalid kind length */
	hdr.exprm_opt.kind = TCPOPT_EXP;
	hdr.exprm_opt.len = 5;
	hdr.exprm_opt.magic = 0;
	ret = bpf_load_hdr_opt(skops, &hdr.exprm_opt, sizeof(hdr.exprm_opt),
			       load_flags);
	if (ret != -EINVAL)
		RET_CG_ERR(ret);

	/* Test searching experimental option with 0 magic value */
	hdr.exprm_opt.len = 4;
	ret = bpf_load_hdr_opt(skops, &hdr.exprm_opt, sizeof(hdr.exprm_opt),
			       load_flags);
	if (ret != -ENOMSG)
		RET_CG_ERR(ret);

	hdr.exprm_opt.magic = __bpf_htons(0xeB9F);
	ret = bpf_load_hdr_opt(skops, &hdr.exprm_opt, sizeof(hdr.exprm_opt),
			       load_flags);
	if (ret != 4 || hdr.exprm_opt.len != 4 ||
	    hdr.exprm_opt.kind != TCPOPT_EXP ||
	    hdr.exprm_opt.magic != __bpf_htons(0xeB9F))
		RET_CG_ERR(ret);

	if (!check_syn)
		return CG_OK;

	/* Test loading from skops->syn_skb if sk_state == TCP_NEW_SYN_RECV
	 *
	 * Test loading from tp->saved_syn for other sk_state.
	 */
	ret = bpf_getsockopt(skops, SOL_TCP, TCP_BPF_SYN_IP, &hdr.ip6,
			     sizeof(hdr.ip6));
	/* buffer only fits the IPv6 header, so -ENOSPC is the pass case */
	if (ret != -ENOSPC)
		RET_CG_ERR(ret);

	if (hdr.ip6.saddr.s6_addr16[7] != last_addr16_n ||
	    hdr.ip6.daddr.s6_addr16[7] != last_addr16_n)
		RET_CG_ERR(0);

	ret = bpf_getsockopt(skops, SOL_TCP, TCP_BPF_SYN_IP, &hdr, sizeof(hdr));
	if (ret < 0)
		RET_CG_ERR(ret);

	/* TCP header follows the IPv6 header in the saved SYN */
	pth = (struct tcphdr *)(&hdr.ip6 + 1);
	if (pth->dest != passive_lport_n || pth->source != active_lport_n)
		RET_CG_ERR(0);

	ret = bpf_getsockopt(skops, SOL_TCP, TCP_BPF_SYN, &hdr, sizeof(hdr));
	if (ret < 0)
		RET_CG_ERR(ret);

	if (hdr.th.dest != passive_lport_n || hdr.th.source != active_lport_n)
		RET_CG_ERR(0);

	return CG_OK;
}
/* Re-run the full option checks against the saved SYN */
static int check_active_syn_in(struct bpf_sock_ops *skops)
{
	return __check_active_hdr_in(skops, true);
}

/* Validate options on a non-SYN skb and classify the packet into the
 * nr_data / nr_fin / nr_pure_ack counters checked by userspace.
 */
static int check_active_hdr_in(struct bpf_sock_ops *skops)
{
	struct tcphdr *th;

	if (__check_active_hdr_in(skops, false) == CG_ERR)
		return CG_ERR;

	th = skops->skb_data;
	/* verifier-required bounds check on the direct skb access */
	if (th + 1 > skops->skb_data_end)
		RET_CG_ERR(0);

	/* payload present when the header is shorter than the skb */
	if (tcp_hdrlen(th) < skops->skb_len)
		nr_data++;

	if (th->fin)
		nr_fin++;

	if (th->ack && !th->fin && tcp_hdrlen(th) == skops->skb_len)
		nr_pure_ack++;

	return CG_OK;
}
/* HDR_OPT_LEN_CB handler for the active side: reserve header space for
 * the options that write_active_opt() will store.
 */
static int active_opt_len(struct bpf_sock_ops *skops)
{
	int err;

	/* Reserve more than enough to allow the -EEXIST test in
	 * the write_active_opt().
	 */
	err = bpf_reserve_hdr_opt(skops, 12, 0);
	if (err)
		RET_CG_ERR(err);

	return CG_OK;
}

/* WRITE_HDR_OPT_CB handler for the active side: store one experimental
 * and one regular option, verify duplicate stores fail with -EEXIST,
 * re-load both options to confirm they were written, and on the SYN
 * additionally check the kernel-written window-scale option.
 */
static int write_active_opt(struct bpf_sock_ops *skops)
{
	struct tcp_exprm_opt exprm_opt = {};
	struct tcp_opt win_scale_opt = {};
	struct tcp_opt reg_opt = {};
	struct tcphdr *th;
	int err, ret;

	exprm_opt.kind = TCPOPT_EXP;
	exprm_opt.len = 4;
	exprm_opt.magic = __bpf_htons(0xeB9F);

	reg_opt.kind = 0xB9;
	reg_opt.len = 4;
	reg_opt.data[0] = 0xfa;
	reg_opt.data[1] = 0xce;

	win_scale_opt.kind = TCPOPT_WINDOW;

	err = bpf_store_hdr_opt(skops, &exprm_opt, sizeof(exprm_opt), 0);
	if (err)
		RET_CG_ERR(err);

	/* Store the same exprm option */
	err = bpf_store_hdr_opt(skops, &exprm_opt, sizeof(exprm_opt), 0);
	if (err != -EEXIST)
		RET_CG_ERR(err);

	err = bpf_store_hdr_opt(skops, &reg_opt, sizeof(reg_opt), 0);
	if (err)
		RET_CG_ERR(err);
	err = bpf_store_hdr_opt(skops, &reg_opt, sizeof(reg_opt), 0);
	if (err != -EEXIST)
		RET_CG_ERR(err);

	/* Check the option has been written and can be searched */
	ret = bpf_load_hdr_opt(skops, &exprm_opt, sizeof(exprm_opt), 0);
	if (ret != 4 || exprm_opt.len != 4 || exprm_opt.kind != TCPOPT_EXP ||
	    exprm_opt.magic != __bpf_htons(0xeB9F))
		RET_CG_ERR(ret);

	reg_opt.len = 0;
	ret = bpf_load_hdr_opt(skops, &reg_opt, sizeof(reg_opt), 0);
	if (ret != 4 || reg_opt.len != 4 || reg_opt.kind != 0xB9 ||
	    reg_opt.data[0] != 0xfa || reg_opt.data[1] != 0xce)
		RET_CG_ERR(ret);

	th = skops->skb_data;
	/* verifier-required bounds check on the direct skb access */
	if (th + 1 > skops->skb_data_end)
		RET_CG_ERR(0);

	if (th->syn) {
		active_lport_h = skops->local_port;
		active_lport_n = th->source;

		/* Search the win scale option written by kernel
		 * in the SYN packet.
		 */
		ret = bpf_load_hdr_opt(skops, &win_scale_opt,
				       sizeof(win_scale_opt), 0);
		if (ret != 3 || win_scale_opt.len != 3 ||
		    win_scale_opt.kind != TCPOPT_WINDOW)
			RET_CG_ERR(ret);

		/* Write the win scale option that kernel
		 * has already written.
		 */
		err = bpf_store_hdr_opt(skops, &win_scale_opt,
					sizeof(win_scale_opt), 0);
		if (err != -EEXIST)
			RET_CG_ERR(err);
	}

	return CG_OK;
}
/* HDR_OPT_LEN_CB dispatcher: a SYNACK means we are the passive side
 * answering the active SYN (check it); otherwise only the active side
 * should still have the write-hdr callback enabled.
 */
static int handle_hdr_opt_len(struct bpf_sock_ops *skops)
{
	__u8 tcp_flags = skops_tcp_flags(skops);

	if ((tcp_flags & TCPHDR_SYNACK) == TCPHDR_SYNACK)
		/* Check the SYN from bpf_sock_ops_kern->syn_skb */
		return check_active_syn_in(skops);

	/* Passive side should have cleared the write hdr cb by now */
	if (skops->local_port == passive_lport_h)
		RET_CG_ERR(0);

	return active_opt_len(skops);
}

/* WRITE_HDR_OPT_CB: only the active side may still write options */
static int handle_write_hdr_opt(struct bpf_sock_ops *skops)
{
	if (skops->local_port == passive_lport_h)
		RET_CG_ERR(0);

	return write_active_opt(skops);
}

/* PARSE_HDR_OPT_CB: only the passive side receives unknown options */
static int handle_parse_hdr(struct bpf_sock_ops *skops)
{
	/* Passive side is not writing any non-standard/unknown
	 * option, so the active side should never be called.
	 */
	if (skops->local_port == active_lport_h)
		RET_CG_ERR(0);

	return check_active_hdr_in(skops);
}

/* PASSIVE_ESTABLISHED_CB: stop writing options, re-verify the saved SYN
 * (now via tp->saved_syn), and check the final ACK's options.
 */
static int handle_passive_estab(struct bpf_sock_ops *skops)
{
	int err;

	/* No more write hdr cb */
	bpf_sock_ops_cb_flags_set(skops,
				  skops->bpf_sock_ops_cb_flags &
				  ~BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG);

	/* Recheck the SYN but check the tp->saved_syn this time */
	err = check_active_syn_in(skops);
	if (err == CG_ERR)
		return err;

	nr_syn++;

	/* The ack has header option written by the active side also */
	return check_active_hdr_in(skops);
}
/* sockops entry point: set up SYN saving and header callbacks on listen/
 * connect, then dispatch the header-option callbacks to their handlers.
 */
SEC("sockops/misc_estab")
int misc_estab(struct bpf_sock_ops *skops)
{
	int true_val = 1;

	switch (skops->op) {
	case BPF_SOCK_OPS_TCP_LISTEN_CB:
		passive_lport_h = skops->local_port;
		passive_lport_n = __bpf_htons(passive_lport_h);
		/* keep the SYN so PASSIVE_ESTABLISHED_CB can re-check it */
		bpf_setsockopt(skops, SOL_TCP, TCP_SAVE_SYN,
			       &true_val, sizeof(true_val));
		set_hdr_cb_flags(skops);
		break;
	case BPF_SOCK_OPS_TCP_CONNECT_CB:
		set_hdr_cb_flags(skops);
		break;
	case BPF_SOCK_OPS_PARSE_HDR_OPT_CB:
		return handle_parse_hdr(skops);
	case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
		return handle_hdr_opt_len(skops);
	case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
		return handle_write_hdr_opt(skops);
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		return handle_passive_estab(skops);
	}

	return CG_OK;
}
char _license[] SEC("license") = "GPL";

View File

@@ -79,6 +79,24 @@ int get_skb_ifindex(int val, struct __sk_buff *skb, int var)
return skb->ifindex * val * var;
}
/* Global subprog (freplace target): bounds-check off, then bump the TCP
 * checksum field to exercise a packet-data write from a subprog.
 * noinline keeps it a distinct subprog so freplace can attach to it.
 */
__attribute__ ((noinline))
int test_pkt_write_access_subprog(struct __sk_buff *skb, __u32 off)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	struct tcphdr *tcp = NULL;

	/* bound off before use so the verifier accepts the access below */
	if (off > sizeof(struct ethhdr) + sizeof(struct ipv6hdr))
		return -1;

	tcp = data + off;
	/* verifier-required bounds check before the write below */
	if (tcp + 1 > data_end)
		return -1;

	/* make modification to the packet data */
	tcp->check++;
	return 0;
}
SEC("classifier/test_pkt_access")
int test_pkt_access(struct __sk_buff *skb)
{
@@ -117,6 +135,8 @@ int test_pkt_access(struct __sk_buff *skb)
if (test_pkt_access_subprog3(3, skb) != skb->len * 3 * skb->ifindex)
return TC_ACT_SHOT;
if (tcp) {
if (test_pkt_write_access_subprog(skb, (void *)tcp - data))
return TC_ACT_SHOT;
if (((void *)(tcp) + 20) > data_end || proto != 6)
return TC_ACT_SHOT;
barrier(); /* to force ordering of checks */

View File

@@ -0,0 +1,23 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} map SEC(".maps");
/* sockops program: stash the current socket at index 0 of "map" so the
 * userspace side of the test can operate on it.
 */
SEC("sockops")
int bpf_sockmap(struct bpf_sock_ops *skops)
{
	__u32 idx = 0;

	if (!skops->sk)
		return 0;

	bpf_map_update_elem(&map, &idx, skops->sk, 0);
	return 0;
}
char _license[] SEC("license") = "GPL";

View File

@@ -0,0 +1,48 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} src SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} dst_sock_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKHASH);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} dst_sock_hash SEC(".maps");
/* Copy the socket at index 0 of "src" into both dst_sock_map and
 * dst_sock_hash, exercising sockmap/sockhash updates from a classifier.
 */
SEC("classifier/copy_sock_map")
int copy_sock_map(void *ctx)
{
	struct bpf_sock *sk;
	bool failed = false;
	__u32 key = 0;

	sk = bpf_map_lookup_elem(&src, &key);
	if (!sk)
		return SK_DROP;

	if (bpf_map_update_elem(&dst_sock_map, &key, sk, 0))
		failed = true;

	if (bpf_map_update_elem(&dst_sock_hash, &key, sk, 0))
		failed = true;

	/* lookup acquired a reference on sk; must release before return */
	bpf_sk_release(sk);
	return failed ? SK_DROP : SK_PASS;
}
char _license[] SEC("license") = "GPL";

View File

@@ -0,0 +1,623 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/tcp.h>
#include <linux/socket.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define BPF_PROG_TEST_TCP_HDR_OPTIONS
#include "test_tcp_hdr_options.h"
#ifndef sizeof_field
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#endif
__u8 test_kind = TCPOPT_EXP;
__u16 test_magic = 0xeB9F;
struct bpf_test_option passive_synack_out = {};
struct bpf_test_option passive_fin_out = {};
struct bpf_test_option passive_estab_in = {};
struct bpf_test_option passive_fin_in = {};
struct bpf_test_option active_syn_out = {};
struct bpf_test_option active_fin_out = {};
struct bpf_test_option active_estab_in = {};
struct bpf_test_option active_fin_in = {};
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct hdr_stg);
} hdr_stg_map SEC(".maps");
static bool skops_want_cookie(const struct bpf_sock_ops *skops)
{
return skops->args[0] == BPF_WRITE_HDR_TCP_SYNACK_COOKIE;
}
static bool skops_current_mss(const struct bpf_sock_ops *skops)
{
return skops->args[0] == BPF_WRITE_HDR_TCP_CURRENT_MSS;
}
/* Total on-wire length of the test option for the given flags byte:
 * per-flag payload bytes plus the kind/len (and magic, for TCPOPT_EXP)
 * framing. Returns 0 when no flags are set (no option written).
 */
static __u8 option_total_len(__u8 flags)
{
	__u8 i, len = 1; /* +1 for flags */

	if (!flags)
		return 0;

	/* RESEND bit does not use a byte */
	for (i = OPTION_RESEND + 1; i < __NR_OPTION_FLAGS; i++)
		len += !!TEST_OPTION_FLAGS(flags, i);

	if (test_kind == TCPOPT_EXP)
		return len + TCP_BPF_EXPOPT_BASE_LEN;
	else
		return len + 2; /* +1 kind, +1 kind-len */
}

/* Serialize a bpf_test_option into data: flags byte first, then one
 * byte per flag that carries a payload, in fixed order.
 */
static void write_test_option(const struct bpf_test_option *test_opt,
			      __u8 *data)
{
	__u8 offset = 0;

	data[offset++] = test_opt->flags;
	if (TEST_OPTION_FLAGS(test_opt->flags, OPTION_MAX_DELACK_MS))
		data[offset++] = test_opt->max_delack_ms;
	if (TEST_OPTION_FLAGS(test_opt->flags, OPTION_RAND))
		data[offset++] = test_opt->rand;
}
/* Write the test option into the outgoing header via bpf_store_hdr_opt(),
 * framed either as a TCPOPT_EXP option (with magic) or as a plain option
 * of kind test_kind. Returns CG_OK / CG_ERR.
 */
static int store_option(struct bpf_sock_ops *skops,
			const struct bpf_test_option *test_opt)
{
	union {
		struct tcp_exprm_opt exprm;
		struct tcp_opt regular;
	} write_opt;
	int err;

	if (test_kind == TCPOPT_EXP) {
		write_opt.exprm.kind = TCPOPT_EXP;
		write_opt.exprm.len = option_total_len(test_opt->flags);
		write_opt.exprm.magic = __bpf_htons(test_magic);
		write_opt.exprm.data32 = 0;
		write_test_option(test_opt, write_opt.exprm.data);
		err = bpf_store_hdr_opt(skops, &write_opt.exprm,
					sizeof(write_opt.exprm), 0);
	} else {
		write_opt.regular.kind = test_kind;
		write_opt.regular.len = option_total_len(test_opt->flags);
		write_opt.regular.data32 = 0;
		write_test_option(test_opt, write_opt.regular.data);
		err = bpf_store_hdr_opt(skops, &write_opt.regular,
					sizeof(write_opt.regular), 0);
	}

	if (err)
		RET_CG_ERR(err);

	return CG_OK;
}

/* Inverse of write_test_option(): decode flags then the per-flag
 * payload bytes from start into opt. Always returns 0.
 */
static int parse_test_option(struct bpf_test_option *opt, const __u8 *start)
{
	opt->flags = *start++;

	if (TEST_OPTION_FLAGS(opt->flags, OPTION_MAX_DELACK_MS))
		opt->max_delack_ms = *start++;

	if (TEST_OPTION_FLAGS(opt->flags, OPTION_RAND))
		opt->rand = *start++;

	return 0;
}

/* Search the (saved-SYN when from_syn) header for the test option and
 * decode it into test_opt. Returns bpf_load_hdr_opt()'s negative errno
 * on failure, otherwise parse_test_option()'s result.
 */
static int load_option(struct bpf_sock_ops *skops,
		       struct bpf_test_option *test_opt, bool from_syn)
{
	union {
		struct tcp_exprm_opt exprm;
		struct tcp_opt regular;
	} search_opt;
	int ret, load_flags = from_syn ? BPF_LOAD_HDR_OPT_TCP_SYN : 0;

	if (test_kind == TCPOPT_EXP) {
		search_opt.exprm.kind = TCPOPT_EXP;
		search_opt.exprm.len = 4;
		search_opt.exprm.magic = __bpf_htons(test_magic);
		search_opt.exprm.data32 = 0;
		ret = bpf_load_hdr_opt(skops, &search_opt.exprm,
				       sizeof(search_opt.exprm), load_flags);
		if (ret < 0)
			return ret;
		return parse_test_option(test_opt, search_opt.exprm.data);
	} else {
		search_opt.regular.kind = test_kind;
		search_opt.regular.len = 0;
		search_opt.regular.data32 = 0;
		ret = bpf_load_hdr_opt(skops, &search_opt.regular,
				       sizeof(search_opt.regular), load_flags);
		if (ret < 0)
			return ret;
		return parse_test_option(test_opt, search_opt.regular.data);
	}
}
/* HDR_OPT_LEN callback for a SYNACK.  Reserve option space only when
 * this side is configured to send an option (passive_synack_out.flags)
 * AND the peer's SYN actually carried the test option.
 */
static int synack_opt_len(struct bpf_sock_ops *skops)
{
struct bpf_test_option test_opt = {};
__u8 optlen;
int err;
if (!passive_synack_out.flags)
return CG_OK;
/* Look for the test option in the saved SYN */
err = load_option(skops, &test_opt, true);
/* bpf_test_option is not found */
if (err == -ENOMSG)
return CG_OK;
if (err)
RET_CG_ERR(err);
optlen = option_total_len(passive_synack_out.flags);
if (optlen) {
err = bpf_reserve_hdr_opt(skops, optlen, 0);
if (err)
RET_CG_ERR(err);
}
return CG_OK;
}
/* WRITE_HDR_OPT callback for a SYNACK: write passive_synack_out.
 * When the kernel wants to send a syncookie, additionally set
 * OPTION_RESEND to ask the active side to resend the option in its
 * ACK (the saved SYN is not available in syncookie mode).
 */
static int write_synack_opt(struct bpf_sock_ops *skops)
{
struct bpf_test_option opt;
if (!passive_synack_out.flags)
/* We should not even be called since no header
* space has been reserved.
*/
RET_CG_ERR(0);
opt = passive_synack_out;
if (skops_want_cookie(skops))
SET_OPTION_FLAGS(opt.flags, OPTION_RESEND);
return store_option(skops, &opt);
}
/* HDR_OPT_LEN callback for an active-open SYN: reserve space for
 * active_syn_out when configured.
 */
static int syn_opt_len(struct bpf_sock_ops *skops)
{
__u8 optlen;
int err;
if (!active_syn_out.flags)
return CG_OK;
optlen = option_total_len(active_syn_out.flags);
if (optlen) {
err = bpf_reserve_hdr_opt(skops, optlen, 0);
if (err)
RET_CG_ERR(err);
}
return CG_OK;
}
/* WRITE_HDR_OPT callback for a SYN.  Also reused by write_nodata_opt()
 * to resend the SYN option in the ACK after syncookie.
 */
static int write_syn_opt(struct bpf_sock_ops *skops)
{
if (!active_syn_out.flags)
/* No space was reserved, so we should not have been called */
RET_CG_ERR(0);
return store_option(skops, &active_syn_out);
}
/* HDR_OPT_LEN callback for a FIN: pick active_fin_out or
 * passive_fin_out depending on which side this socket is (recorded in
 * the hdr_stg sk_storage at establish time) and reserve space if the
 * chosen option has any flags set.
 */
static int fin_opt_len(struct bpf_sock_ops *skops)
{
struct bpf_test_option *opt;
struct hdr_stg *hdr_stg;
__u8 optlen;
int err;
if (!skops->sk)
RET_CG_ERR(0);
hdr_stg = bpf_sk_storage_get(&hdr_stg_map, skops->sk, NULL, 0);
if (!hdr_stg)
RET_CG_ERR(0);
if (hdr_stg->active)
opt = &active_fin_out;
else
opt = &passive_fin_out;
optlen = option_total_len(opt->flags);
if (optlen) {
err = bpf_reserve_hdr_opt(skops, optlen, 0);
if (err)
RET_CG_ERR(err);
}
return CG_OK;
}
/* WRITE_HDR_OPT callback for a FIN: write the side-specific FIN
 * option selected the same way as in fin_opt_len().
 */
static int write_fin_opt(struct bpf_sock_ops *skops)
{
struct bpf_test_option *opt;
struct hdr_stg *hdr_stg;
if (!skops->sk)
RET_CG_ERR(0);
hdr_stg = bpf_sk_storage_get(&hdr_stg_map, skops->sk, NULL, 0);
if (!hdr_stg)
RET_CG_ERR(0);
if (hdr_stg->active)
opt = &active_fin_out;
else
opt = &passive_fin_out;
if (!opt->flags)
/* No space was reserved for this skb */
RET_CG_ERR(0);
return store_option(skops, opt);
}
/* Whether this socket still needs to resend the SYN option in its
 * ACKs (hdr_stg->resend_syn, set on the active side when the peer
 * asked for a resend).  Returns 1/0, or -1 when the sk or its
 * sk_storage is unavailable.
 */
static int resend_in_ack(struct bpf_sock_ops *skops)
{
struct hdr_stg *hdr_stg;
if (!skops->sk)
return -1;
hdr_stg = bpf_sk_storage_get(&hdr_stg_map, skops->sk, NULL, 0);
if (!hdr_stg)
return -1;
return !!hdr_stg->resend_syn;
}
/* HDR_OPT_LEN for a no-payload skb (pure ACK): reserve option space
 * only when the SYN option still has to be resent (syncookie mode).
 */
static int nodata_opt_len(struct bpf_sock_ops *skops)
{
	int need_resend = resend_in_ack(skops);

	if (need_resend < 0)
		RET_CG_ERR(0);

	return need_resend ? syn_opt_len(skops) : CG_OK;
}

/* WRITE_HDR_OPT counterpart of nodata_opt_len(). */
static int write_nodata_opt(struct bpf_sock_ops *skops)
{
	int need_resend = resend_in_ack(skops);

	if (need_resend < 0)
		RET_CG_ERR(0);

	return need_resend ? write_syn_opt(skops) : CG_OK;
}
/* Data segments carry the same option as pure ACKs.  These exist
 * mostly to show an example usage of skops->skb_len in the dispatch.
 */
static int data_opt_len(struct bpf_sock_ops *skops)
{
	return nodata_opt_len(skops);
}

static int write_data_opt(struct bpf_sock_ops *skops)
{
	return write_nodata_opt(skops);
}
/* HDR_OPT_LEN callback while the kernel is calculating the MSS:
 * reserve the largest option size this program may ever write
 * (OPTION_MASK = all flags set) so the MSS accounts for it.
 */
static int current_mss_opt_len(struct bpf_sock_ops *skops)
{
/* Reserve maximum that may be needed */
int err;
err = bpf_reserve_hdr_opt(skops, option_total_len(OPTION_MASK), 0);
if (err)
RET_CG_ERR(err);
return CG_OK;
}
/* Dispatch BPF_SOCK_OPS_HDR_OPT_LEN_CB by skb type: SYNACK, SYN, FIN,
 * MSS calculation, data segment, or pure ACK.  SYNACK must be tested
 * before SYN because it carries both flag bits.
 */
static int handle_hdr_opt_len(struct bpf_sock_ops *skops)
{
__u8 tcp_flags = skops_tcp_flags(skops);
if ((tcp_flags & TCPHDR_SYNACK) == TCPHDR_SYNACK)
return synack_opt_len(skops);
if (tcp_flags & TCPHDR_SYN)
return syn_opt_len(skops);
if (tcp_flags & TCPHDR_FIN)
return fin_opt_len(skops);
if (skops_current_mss(skops))
/* The kernel is calculating the MSS */
return current_mss_opt_len(skops);
if (skops->skb_len)
return data_opt_len(skops);
return nodata_opt_len(skops);
}
/* Dispatch BPF_SOCK_OPS_WRITE_HDR_OPT_CB by skb type.  For non-SYN/FIN
 * skbs, a data segment is distinguished from a pure ACK by comparing
 * skb_len against the TCP header length.
 */
static int handle_write_hdr_opt(struct bpf_sock_ops *skops)
{
__u8 tcp_flags = skops_tcp_flags(skops);
struct tcphdr *th;
if ((tcp_flags & TCPHDR_SYNACK) == TCPHDR_SYNACK)
return write_synack_opt(skops);
if (tcp_flags & TCPHDR_SYN)
return write_syn_opt(skops);
if (tcp_flags & TCPHDR_FIN)
return write_fin_opt(skops);
th = skops->skb_data;
/* Bounds check required by the BPF verifier before reading *th */
if (th + 1 > skops->skb_data_end)
RET_CG_ERR(0);
if (skops->skb_len > tcp_hdrlen(th))
return write_data_opt(skops);
return write_nodata_opt(skops);
}
/* Set this socket's TCP_BPF_DELACK_MAX (max delayed-ACK time) to
 * max_delack_ms, converted to microseconds for bpf_setsockopt().
 */
static int set_delack_max(struct bpf_sock_ops *skops, __u8 max_delack_ms)
{
__u32 max_delack_us = max_delack_ms * 1000;
return bpf_setsockopt(skops, SOL_TCP, TCP_BPF_DELACK_MAX,
&max_delack_us, sizeof(max_delack_us));
}
/* Set this socket's TCP_BPF_RTO_MIN from the peer's advertised max
 * delayed-ACK time (milliseconds, converted to microseconds).
 */
static int set_rto_min(struct bpf_sock_ops *skops, __u8 peer_max_delack_ms)
{
__u32 min_rto_us = peer_max_delack_ms * 1000;
return bpf_setsockopt(skops, SOL_TCP, TCP_BPF_RTO_MIN, &min_rto_us,
sizeof(min_rto_us));
}
/* BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: parse the peer's option from the
 * received skb into active_estab_in, create the per-socket hdr_stg
 * (active side), decide which header callbacks remain enabled, and
 * apply the negotiated delayed-ACK / RTO settings.
 */
static int handle_active_estab(struct bpf_sock_ops *skops)
{
struct hdr_stg init_stg = {
.active = true,
};
int err;
err = load_option(skops, &active_estab_in, false);
if (err && err != -ENOMSG)
RET_CG_ERR(err);
/* Peer asked us to resend the option in the ACK (syncookie mode) */
init_stg.resend_syn = TEST_OPTION_FLAGS(active_estab_in.flags,
OPTION_RESEND);
if (!skops->sk || !bpf_sk_storage_get(&hdr_stg_map, skops->sk,
&init_stg,
BPF_SK_STORAGE_GET_F_CREATE))
RET_CG_ERR(0);
if (init_stg.resend_syn)
/* Don't clear the write_hdr cb now because
* the ACK may get lost and retransmit may
* be needed.
*
* PARSE_ALL_HDR cb flag is set to learn if this
* resend_syn option has been received by the peer.
*
* The header option will be resent until a valid
* packet is received at handle_parse_hdr()
* and all hdr cb flags will be cleared in
* handle_parse_hdr().
*/
set_parse_all_hdr_cb_flags(skops);
else if (!active_fin_out.flags)
/* No options will be written from now */
clear_hdr_cb_flags(skops);
if (active_syn_out.max_delack_ms) {
err = set_delack_max(skops, active_syn_out.max_delack_ms);
if (err)
RET_CG_ERR(err);
}
if (active_estab_in.max_delack_ms) {
err = set_rto_min(skops, active_estab_in.max_delack_ms);
if (err)
RET_CG_ERR(err);
}
return CG_OK;
}
/* BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: parse the peer's option
 * (normally from the saved SYN; from the ACK when in syncookie mode)
 * into passive_estab_in, create the per-socket hdr_stg (passive side,
 * noting syncookie/fastopen), decide which header callbacks remain
 * enabled, and apply the negotiated delayed-ACK / RTO settings.
 */
static int handle_passive_estab(struct bpf_sock_ops *skops)
{
struct hdr_stg init_stg = {};
struct tcphdr *th;
int err;
err = load_option(skops, &passive_estab_in, true);
if (err == -ENOENT) {
/* saved_syn is not found. It was in syncookie mode.
* We have asked the active side to resend the options
* in ACK, so try to find the bpf_test_option from ACK now.
*/
err = load_option(skops, &passive_estab_in, false);
init_stg.syncookie = true;
}
/* ENOMSG: The bpf_test_option is not found which is fine.
* Bail out now for all other errors.
*/
if (err && err != -ENOMSG)
RET_CG_ERR(err);
th = skops->skb_data;
/* Bounds check required by the BPF verifier before reading *th */
if (th + 1 > skops->skb_data_end)
RET_CG_ERR(0);
if (th->syn) {
/* Fastopen */
/* Cannot clear cb_flags to stop write_hdr cb.
* synack is not sent yet for fast open.
* Even it was, the synack may need to be retransmitted.
*
* PARSE_ALL_HDR cb flag is set to learn
* if synack has reached the peer.
* All cb_flags will be cleared in handle_parse_hdr().
*/
set_parse_all_hdr_cb_flags(skops);
init_stg.fastopen = true;
} else if (!passive_fin_out.flags) {
/* No options will be written from now */
clear_hdr_cb_flags(skops);
}
if (!skops->sk ||
!bpf_sk_storage_get(&hdr_stg_map, skops->sk, &init_stg,
BPF_SK_STORAGE_GET_F_CREATE))
RET_CG_ERR(0);
if (passive_synack_out.max_delack_ms) {
err = set_delack_max(skops, passive_synack_out.max_delack_ms);
if (err)
RET_CG_ERR(err);
}
if (passive_estab_in.max_delack_ms) {
err = set_rto_min(skops, passive_estab_in.max_delack_ms);
if (err)
RET_CG_ERR(err);
}
return CG_OK;
}
/* BPF_SOCK_OPS_PARSE_HDR_OPT_CB: a valid incoming packet after the
 * 3WHS.  Used to (a) confirm the resend_syn / fastopen options reached
 * the peer and turn the extra callbacks back off, and (b) capture the
 * peer's FIN option into active_fin_in / passive_fin_in.
 */
static int handle_parse_hdr(struct bpf_sock_ops *skops)
{
struct hdr_stg *hdr_stg;
struct tcphdr *th;
if (!skops->sk)
RET_CG_ERR(0);
th = skops->skb_data;
/* Bounds check required by the BPF verifier before reading *th */
if (th + 1 > skops->skb_data_end)
RET_CG_ERR(0);
hdr_stg = bpf_sk_storage_get(&hdr_stg_map, skops->sk, NULL, 0);
if (!hdr_stg)
RET_CG_ERR(0);
if (hdr_stg->resend_syn || hdr_stg->fastopen)
/* The PARSE_ALL_HDR cb flag was turned on
* to ensure that the previously written
* options have reached the peer.
* Those previously written option includes:
* - Active side: resend_syn in ACK during syncookie
* or
* - Passive side: SYNACK during fastopen
*
* A valid packet has been received here after
* the 3WHS, so the PARSE_ALL_HDR cb flag
* can be cleared now.
*/
clear_parse_all_hdr_cb_flags(skops);
if (hdr_stg->resend_syn && !active_fin_out.flags)
/* Active side resent the syn option in ACK
* because the server was in syncookie mode.
* A valid packet has been received, so
* clear header cb flags if there is no
* more option to send.
*/
clear_hdr_cb_flags(skops);
if (hdr_stg->fastopen && !passive_fin_out.flags)
/* Passive side was in fastopen.
* A valid packet has been received, so
* the SYNACK has reached the peer.
* Clear header cb flags if there is no more
* option to send.
*/
clear_hdr_cb_flags(skops);
if (th->fin) {
struct bpf_test_option *fin_opt;
int err;
if (hdr_stg->active)
fin_opt = &active_fin_in;
else
fin_opt = &passive_fin_in;
err = load_option(skops, fin_opt, false);
if (err && err != -ENOMSG)
RET_CG_ERR(err);
}
return CG_OK;
}
/* Program entry point.  On listen/connect it enables the header-option
 * callbacks (and TCP_SAVE_SYN on the listener so the SYN options can be
 * read later via BPF_LOAD_HDR_OPT_TCP_SYN); afterwards it dispatches
 * each sock_ops callback to its handler above.
 */
SEC("sockops/estab")
int estab(struct bpf_sock_ops *skops)
{
int true_val = 1;
switch (skops->op) {
case BPF_SOCK_OPS_TCP_LISTEN_CB:
bpf_setsockopt(skops, SOL_TCP, TCP_SAVE_SYN,
&true_val, sizeof(true_val));
set_hdr_cb_flags(skops);
break;
case BPF_SOCK_OPS_TCP_CONNECT_CB:
set_hdr_cb_flags(skops);
break;
case BPF_SOCK_OPS_PARSE_HDR_OPT_CB:
return handle_parse_hdr(skops);
case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
return handle_hdr_opt_len(skops);
case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
return handle_write_hdr_opt(skops);
case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
return handle_passive_estab(skops);
case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
return handle_active_estab(skops);
}
return CG_OK;
}
char _license[] SEC("license") = "GPL";

View File

@@ -19,12 +19,14 @@ SEC("tp/syscalls/sys_enter_nanosleep")
int handle__tp(struct trace_event_raw_sys_enter *args)
{
struct __kernel_timespec *ts;
long tv_nsec;
if (args->id != __NR_nanosleep)
return 0;
ts = (void *)args->args[0];
if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
tv_nsec != MY_TV_NSEC)
return 0;
tp_called = true;
@@ -35,12 +37,14 @@ SEC("raw_tp/sys_enter")
int BPF_PROG(handle__raw_tp, struct pt_regs *regs, long id)
{
struct __kernel_timespec *ts;
long tv_nsec;
if (id != __NR_nanosleep)
return 0;
ts = (void *)PT_REGS_PARM1_CORE(regs);
if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
tv_nsec != MY_TV_NSEC)
return 0;
raw_tp_called = true;
@@ -51,12 +55,14 @@ SEC("tp_btf/sys_enter")
int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id)
{
struct __kernel_timespec *ts;
long tv_nsec;
if (id != __NR_nanosleep)
return 0;
ts = (void *)PT_REGS_PARM1_CORE(regs);
if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
tv_nsec != MY_TV_NSEC)
return 0;
tp_btf_called = true;

View File

@@ -39,6 +39,13 @@ int bench_trigger_fentry(void *ctx)
return 0;
}
SEC("fentry.s/__x64_sys_getpgid")
int bench_trigger_fentry_sleep(void *ctx)
{
__sync_add_and_fetch(&hits, 1);
return 0;
}
SEC("fmod_ret/__x64_sys_getpgid")
int bench_trigger_fmodret(void *ctx)
{

View File

@@ -156,4 +156,5 @@ cleanup:
bpf_object__close(obj);
}
}
return 0;
}

View File

@@ -0,0 +1,151 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020 Facebook */
#ifndef _TEST_TCP_HDR_OPTIONS_H
#define _TEST_TCP_HDR_OPTIONS_H
/* Test option payload exchanged between the two sides.  Serialized as:
 * flags byte first, then one byte per data-carrying flag that is set.
 */
struct bpf_test_option {
__u8 flags;
__u8 max_delack_ms;
__u8 rand;
} __attribute__((packed));
/* Bit positions within bpf_test_option.flags */
enum {
OPTION_RESEND,
OPTION_MAX_DELACK_MS,
OPTION_RAND,
__NR_OPTION_FLAGS,
};
#define OPTION_F_RESEND (1 << OPTION_RESEND)
#define OPTION_F_MAX_DELACK_MS (1 << OPTION_MAX_DELACK_MS)
#define OPTION_F_RAND (1 << OPTION_RAND)
#define OPTION_MASK ((1 << __NR_OPTION_FLAGS) - 1)
/* Test / set a single flag bit by its OPTION_* position */
#define TEST_OPTION_FLAGS(flags, option) (1 & ((flags) >> (option)))
#define SET_OPTION_FLAGS(flags, option) ((flags) |= (1 << (option)))
/* Store in bpf_sk_storage */
struct hdr_stg {
bool active;
bool resend_syn; /* active side only */
bool syncookie; /* passive side only */
bool fastopen; /* passive side only */
};
/* Failure report: source line number and error, keyed by local port */
struct linum_err {
unsigned int linum;
int err;
};
/* TCP header flag bits (wire-format values) */
#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80
#define TCPHDR_SYNACK (TCPHDR_SYN | TCPHDR_ACK)
/* TCP option kinds */
#define TCPOPT_EOL 0
#define TCPOPT_NOP 1
#define TCPOPT_WINDOW 3
#define TCPOPT_EXP 254 /* experimental option kind */
#define TCP_BPF_EXPOPT_BASE_LEN 4 /* kind + len + 16-bit magic */
#define MAX_TCP_HDR_LEN 60
#define MAX_TCP_OPTION_SPACE 40
#ifdef BPF_PROG_TEST_TCP_HDR_OPTIONS
/* Return codes for the sock_ops (cgroup) program */
#define CG_OK 1
#define CG_ERR 0
#ifndef SOL_TCP
#define SOL_TCP 6
#endif
/* Experimental TCP option layout: kind (254) + len + 16-bit magic + data */
struct tcp_exprm_opt {
__u8 kind;
__u8 len;
__u16 magic;
union {
__u8 data[4]; /* serialized bpf_test_option */
__u32 data32; /* lets data[] be zeroed in one store */
};
} __attribute__((packed));
/* Regular TCP option layout: kind + len + data */
struct tcp_opt {
__u8 kind;
__u8 len;
union {
__u8 data[4];
__u32 data32;
};
} __attribute__((packed));
/* local_port -> first failure (BPF_NOEXIST update keeps the first one) */
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 2);
__type(key, int);
__type(value, struct linum_err);
} lport_linum_map SEC(".maps");
/* TCP header length in bytes: doff counts 32-bit words. */
static inline unsigned int tcp_hdrlen(const struct tcphdr *th)
{
	return th->doff * 4;
}
/* TCP flag bits (TCPHDR_*) of the skb currently being processed */
static inline __u8 skops_tcp_flags(const struct bpf_sock_ops *skops)
{
return skops->skb_tcp_flags;
}
/* Stop the PARSE_UNKNOWN_HDR_OPT and WRITE_HDR_OPT callbacks */
static inline void clear_hdr_cb_flags(struct bpf_sock_ops *skops)
{
bpf_sock_ops_cb_flags_set(skops,
skops->bpf_sock_ops_cb_flags &
~(BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG |
BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG));
}
/* Request the PARSE_UNKNOWN_HDR_OPT and WRITE_HDR_OPT callbacks */
static inline void set_hdr_cb_flags(struct bpf_sock_ops *skops)
{
bpf_sock_ops_cb_flags_set(skops,
skops->bpf_sock_ops_cb_flags |
BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG |
BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG);
}
/* Stop the PARSE_ALL_HDR_OPT callback */
static inline void
clear_parse_all_hdr_cb_flags(struct bpf_sock_ops *skops)
{
bpf_sock_ops_cb_flags_set(skops,
skops->bpf_sock_ops_cb_flags &
~BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG);
}
/* Request the PARSE_ALL_HDR_OPT callback */
static inline void
set_parse_all_hdr_cb_flags(struct bpf_sock_ops *skops)
{
bpf_sock_ops_cb_flags_set(skops,
skops->bpf_sock_ops_cb_flags |
BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG);
}
/* Record the failure (__LINE__ + __err, keyed by local port; only the
 * first failure per port is kept via BPF_NOEXIST), turn off all
 * further header-option callbacks, and return CG_ERR from the
 * enclosing function.  Expects a `skops` variable in scope.
 */
#define RET_CG_ERR(__err) ({ \
struct linum_err __linum_err; \
int __lport; \
\
__linum_err.linum = __LINE__; \
__linum_err.err = __err; \
__lport = skops->local_port; \
bpf_map_update_elem(&lport_linum_map, &__lport, &__linum_err, BPF_NOEXIST); \
clear_hdr_cb_flags(skops); \
clear_parse_all_hdr_cb_flags(skops); \
return CG_ERR; \
})
#endif /* BPF_PROG_TEST_TCP_HDR_OPTIONS */
#endif /* _TEST_TCP_HDR_OPTIONS_H */

View File

@@ -114,6 +114,7 @@ struct bpf_test {
bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
};
enum bpf_attach_type expected_attach_type;
const char *kfunc;
};
/* Note we want this to be 64 bit aligned so that the end of our array is
@@ -984,8 +985,24 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
attr.log_level = 4;
attr.prog_flags = pflags;
if (prog_type == BPF_PROG_TYPE_TRACING && test->kfunc) {
attr.attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc,
attr.expected_attach_type);
if (attr.attach_btf_id < 0) {
printf("FAIL\nFailed to find BTF ID for '%s'!\n",
test->kfunc);
(*errors)++;
return;
}
}
fd_prog = bpf_load_program_xattr(&attr, bpf_vlog, sizeof(bpf_vlog));
if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
/* BPF_PROG_TYPE_TRACING requires more setup and
* bpf_probe_prog_type won't give correct answer
*/
if (fd_prog < 0 && prog_type != BPF_PROG_TYPE_TRACING &&
!bpf_probe_prog_type(prog_type, 0)) {
printf("SKIP (unsupported program type %d)\n", prog_type);
skips++;
goto close_fds;

View File

@@ -557,3 +557,149 @@
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"bounds check for reg = 0, reg xor 1",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
{
"bounds check for reg32 = 0, reg32 xor 1",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 1),
BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
{
"bounds check for reg = 2, reg xor 3",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3),
BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
{
"bounds check for reg = any, reg xor 3",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
.result = REJECT,
.errstr = "invalid access to map value",
.errstr_unpriv = "invalid access to map value",
},
{
"bounds check for reg32 = any, reg32 xor 3",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 3),
BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
.result = REJECT,
.errstr = "invalid access to map value",
.errstr_unpriv = "invalid access to map value",
},
{
"bounds check for reg > 0, reg xor 3",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JLE, BPF_REG_1, 0, 3),
BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
{
"bounds check for reg32 > 0, reg32 xor 3",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP32_IMM(BPF_JLE, BPF_REG_1, 0, 3),
BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 3),
BPF_JMP32_IMM(BPF_JGE, BPF_REG_1, 0, 1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},

View File

@@ -0,0 +1,37 @@
{
"d_path accept",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_MOV64_IMM(BPF_REG_6, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
BPF_LD_IMM64(BPF_REG_3, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_d_path),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACING,
.expected_attach_type = BPF_TRACE_FENTRY,
.kfunc = "dentry_open",
},
{
"d_path reject",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_MOV64_IMM(BPF_REG_6, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
BPF_LD_IMM64(BPF_REG_3, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_d_path),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "helper call is not allowed in probe",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACING,
.expected_attach_type = BPF_TRACE_FENTRY,
.kfunc = "d_path",
},