2019-04-02 04:27:48 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
// Copyright (c) 2019 Facebook
|
|
|
|
#include <test_progs.h>
|
|
|
|
static int libbpf_debug_print(enum libbpf_print_level level,
|
|
|
|
const char *format, va_list args)
|
|
|
|
{
|
2019-07-28 03:25:28 +00:00
|
|
|
if (level != LIBBPF_DEBUG) {
|
2019-08-06 17:45:28 +00:00
|
|
|
vprintf(format, args);
|
2019-07-28 03:25:28 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2019-04-02 04:27:48 +00:00
|
|
|
|
|
|
|
if (!strstr(format, "verifier log"))
|
|
|
|
return 0;
|
2019-08-06 17:45:28 +00:00
|
|
|
vprintf("%s", args);
|
2019-07-28 03:25:28 +00:00
|
|
|
return 0;
|
2019-04-02 04:27:48 +00:00
|
|
|
}
|
|
|
|
|
2019-11-20 00:35:48 +00:00
|
|
|
extern int extra_prog_load_log_flags;
|
|
|
|
|
2019-05-22 03:14:21 +00:00
|
|
|
static int check_load(const char *file, enum bpf_prog_type type)
|
2019-04-02 04:27:48 +00:00
|
|
|
{
|
|
|
|
struct bpf_prog_load_attr attr;
|
2019-05-08 16:49:32 +00:00
|
|
|
struct bpf_object *obj = NULL;
|
2019-04-02 04:27:48 +00:00
|
|
|
int err, prog_fd;
|
|
|
|
|
|
|
|
memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
|
|
|
|
attr.file = file;
|
2019-05-22 03:14:21 +00:00
|
|
|
attr.prog_type = type;
|
2019-11-20 00:35:48 +00:00
|
|
|
attr.log_level = 4 | extra_prog_load_log_flags;
|
selftests: bpf: enable hi32 randomization for all tests
The previous libbpf patch allows user to specify "prog_flags" to bpf
program load APIs. To enable high 32-bit randomization for a test, we need
to set BPF_F_TEST_RND_HI32 in "prog_flags".
To enable such randomization for all tests, we need to make sure all places
are passing BPF_F_TEST_RND_HI32. Changing them one by one is not
convenient, also, it would be better if a test could be switched to
"normal" running mode without code change.
Given the program load APIs used across bpf selftests are mostly:
bpf_prog_load: load from file
bpf_load_program: load from raw insns
A test_stub.c is implemented for bpf seltests, it offers two functions for
testing purpose:
bpf_prog_test_load
bpf_test_load_program
The are the same as "bpf_prog_load" and "bpf_load_program", except they
also set BPF_F_TEST_RND_HI32. Given *_xattr functions are the APIs to
customize any "prog_flags", it makes little sense to put these two
functions into libbpf.
Then, the following CFLAGS are passed to compilations for host programs:
-Dbpf_prog_load=bpf_prog_test_load
-Dbpf_load_program=bpf_test_load_program
They migrate the used load APIs to the test version, hence enable high
32-bit randomization for these tests without changing source code.
Besides all these, there are several testcases are using
"bpf_prog_load_attr" directly, their call sites are updated to pass
BPF_F_TEST_RND_HI32.
Signed-off-by: Jiong Wang <jiong.wang@netronome.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2019-05-24 22:25:21 +00:00
|
|
|
attr.prog_flags = BPF_F_TEST_RND_HI32;
|
2019-04-02 04:27:48 +00:00
|
|
|
err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
|
|
|
|
bpf_object__close(obj);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2019-07-28 03:25:30 +00:00
|
|
|
/* Descriptor for one verifier-scale test case: the BPF object file to
 * load, the program type to load it as, and whether the verifier is
 * expected to reject it.
 * NOTE(review): not referenced anywhere in the visible part of this
 * file — looks like a leftover from a table-driven runner; confirm
 * before removing.
 */
struct scale_test_def {
	const char *file;
	enum bpf_prog_type attach_type;
	bool fails;	/* true when the verifier is expected to fail the load */
};
|
|
|
|
|
selftests/bpf: Split out bpf_verif_scale selftests into multiple tests
Instead of using subtests in bpf_verif_scale selftest, turn each scale
sub-test into its own test. Each subtest is compltely independent and
just reuses a bit of common test running logic, so the conversion is
trivial. For convenience, keep all of BPF verifier scale tests in one
file.
This conversion shaves off a significant amount of time when running
test_progs in parallel mode. E.g., just running scale tests (-t verif_scale):
BEFORE
======
Summary: 24/0 PASSED, 0 SKIPPED, 0 FAILED
real 0m22.894s
user 0m0.012s
sys 0m22.797s
AFTER
=====
Summary: 24/0 PASSED, 0 SKIPPED, 0 FAILED
real 0m12.044s
user 0m0.024s
sys 0m27.869s
Ten second saving right there. test_progs -j is not yet ready to be
turned on by default, unfortunately, and some tests fail almost every
time, but this is a good improvement nevertheless. Ignoring few
failures, here is sequential vs parallel run times when running all
tests now:
SEQUENTIAL
==========
Summary: 206/953 PASSED, 4 SKIPPED, 0 FAILED
real 1m5.625s
user 0m4.211s
sys 0m31.650s
PARALLEL
========
Summary: 204/952 PASSED, 4 SKIPPED, 2 FAILED
real 0m35.550s
user 0m4.998s
sys 0m39.890s
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20211022223228.99920-5-andrii@kernel.org
2021-10-22 22:32:28 +00:00
|
|
|
static void scale_test(const char *file,
|
|
|
|
enum bpf_prog_type attach_type,
|
|
|
|
bool should_fail)
|
|
|
|
{
|
2019-07-28 03:25:27 +00:00
|
|
|
libbpf_print_fn_t old_print_fn = NULL;
|
selftests/bpf: Split out bpf_verif_scale selftests into multiple tests
Instead of using subtests in bpf_verif_scale selftest, turn each scale
sub-test into its own test. Each subtest is compltely independent and
just reuses a bit of common test running logic, so the conversion is
trivial. For convenience, keep all of BPF verifier scale tests in one
file.
This conversion shaves off a significant amount of time when running
test_progs in parallel mode. E.g., just running scale tests (-t verif_scale):
BEFORE
======
Summary: 24/0 PASSED, 0 SKIPPED, 0 FAILED
real 0m22.894s
user 0m0.012s
sys 0m22.797s
AFTER
=====
Summary: 24/0 PASSED, 0 SKIPPED, 0 FAILED
real 0m12.044s
user 0m0.024s
sys 0m27.869s
Ten second saving right there. test_progs -j is not yet ready to be
turned on by default, unfortunately, and some tests fail almost every
time, but this is a good improvement nevertheless. Ignoring few
failures, here is sequential vs parallel run times when running all
tests now:
SEQUENTIAL
==========
Summary: 206/953 PASSED, 4 SKIPPED, 0 FAILED
real 1m5.625s
user 0m4.211s
sys 0m31.650s
PARALLEL
========
Summary: 204/952 PASSED, 4 SKIPPED, 2 FAILED
real 0m35.550s
user 0m4.998s
sys 0m39.890s
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20211022223228.99920-5-andrii@kernel.org
2021-10-22 22:32:28 +00:00
|
|
|
int err;
|
2019-04-02 04:27:48 +00:00
|
|
|
|
2019-07-28 03:25:28 +00:00
|
|
|
if (env.verifier_stats) {
|
|
|
|
test__force_log();
|
2019-07-28 03:25:27 +00:00
|
|
|
old_print_fn = libbpf_set_print(libbpf_debug_print);
|
2019-07-28 03:25:28 +00:00
|
|
|
}
|
2019-04-02 04:27:48 +00:00
|
|
|
|
selftests/bpf: Split out bpf_verif_scale selftests into multiple tests
Instead of using subtests in bpf_verif_scale selftest, turn each scale
sub-test into its own test. Each subtest is compltely independent and
just reuses a bit of common test running logic, so the conversion is
trivial. For convenience, keep all of BPF verifier scale tests in one
file.
This conversion shaves off a significant amount of time when running
test_progs in parallel mode. E.g., just running scale tests (-t verif_scale):
BEFORE
======
Summary: 24/0 PASSED, 0 SKIPPED, 0 FAILED
real 0m22.894s
user 0m0.012s
sys 0m22.797s
AFTER
=====
Summary: 24/0 PASSED, 0 SKIPPED, 0 FAILED
real 0m12.044s
user 0m0.024s
sys 0m27.869s
Ten second saving right there. test_progs -j is not yet ready to be
turned on by default, unfortunately, and some tests fail almost every
time, but this is a good improvement nevertheless. Ignoring few
failures, here is sequential vs parallel run times when running all
tests now:
SEQUENTIAL
==========
Summary: 206/953 PASSED, 4 SKIPPED, 0 FAILED
real 1m5.625s
user 0m4.211s
sys 0m31.650s
PARALLEL
========
Summary: 204/952 PASSED, 4 SKIPPED, 2 FAILED
real 0m35.550s
user 0m4.998s
sys 0m39.890s
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20211022223228.99920-5-andrii@kernel.org
2021-10-22 22:32:28 +00:00
|
|
|
err = check_load(file, attach_type);
|
|
|
|
if (should_fail)
|
|
|
|
ASSERT_ERR(err, "expect_error");
|
|
|
|
else
|
|
|
|
ASSERT_OK(err, "expect_success");
|
2019-06-15 19:12:24 +00:00
|
|
|
|
2019-07-28 03:25:28 +00:00
|
|
|
if (env.verifier_stats)
|
2019-07-28 03:25:27 +00:00
|
|
|
libbpf_set_print(old_print_fn);
|
2019-04-02 04:27:48 +00:00
|
|
|
}
|
selftests/bpf: Split out bpf_verif_scale selftests into multiple tests
Instead of using subtests in bpf_verif_scale selftest, turn each scale
sub-test into its own test. Each subtest is compltely independent and
just reuses a bit of common test running logic, so the conversion is
trivial. For convenience, keep all of BPF verifier scale tests in one
file.
This conversion shaves off a significant amount of time when running
test_progs in parallel mode. E.g., just running scale tests (-t verif_scale):
BEFORE
======
Summary: 24/0 PASSED, 0 SKIPPED, 0 FAILED
real 0m22.894s
user 0m0.012s
sys 0m22.797s
AFTER
=====
Summary: 24/0 PASSED, 0 SKIPPED, 0 FAILED
real 0m12.044s
user 0m0.024s
sys 0m27.869s
Ten second saving right there. test_progs -j is not yet ready to be
turned on by default, unfortunately, and some tests fail almost every
time, but this is a good improvement nevertheless. Ignoring few
failures, here is sequential vs parallel run times when running all
tests now:
SEQUENTIAL
==========
Summary: 206/953 PASSED, 4 SKIPPED, 0 FAILED
real 1m5.625s
user 0m4.211s
sys 0m31.650s
PARALLEL
========
Summary: 204/952 PASSED, 4 SKIPPED, 2 FAILED
real 0m35.550s
user 0m4.998s
sys 0m39.890s
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20211022223228.99920-5-andrii@kernel.org
2021-10-22 22:32:28 +00:00
|
|
|
|
|
|
|
void test_verif_scale1()
|
|
|
|
{
|
|
|
|
scale_test("test_verif_scale1.o", BPF_PROG_TYPE_SCHED_CLS, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale2()
|
|
|
|
{
|
|
|
|
scale_test("test_verif_scale2.o", BPF_PROG_TYPE_SCHED_CLS, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale3()
|
|
|
|
{
|
|
|
|
scale_test("test_verif_scale3.o", BPF_PROG_TYPE_SCHED_CLS, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_pyperf_global()
|
|
|
|
{
|
|
|
|
scale_test("pyperf_global.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_pyperf_subprogs()
|
|
|
|
{
|
|
|
|
scale_test("pyperf_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_pyperf50()
|
|
|
|
{
|
|
|
|
/* full unroll by llvm */
|
|
|
|
scale_test("pyperf50.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_pyperf100()
|
|
|
|
{
|
|
|
|
/* full unroll by llvm */
|
|
|
|
scale_test("pyperf100.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_pyperf180()
|
|
|
|
{
|
|
|
|
/* full unroll by llvm */
|
|
|
|
scale_test("pyperf180.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_pyperf600()
|
|
|
|
{
|
|
|
|
/* partial unroll. llvm will unroll loop ~150 times.
|
|
|
|
* C loop count -> 600.
|
|
|
|
* Asm loop count -> 4.
|
|
|
|
* 16k insns in loop body.
|
|
|
|
* Total of 5 such loops. Total program size ~82k insns.
|
|
|
|
*/
|
|
|
|
scale_test("pyperf600.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_pyperf600_nounroll()
|
|
|
|
{
|
|
|
|
/* no unroll at all.
|
|
|
|
* C loop count -> 600.
|
|
|
|
* ASM loop count -> 600.
|
|
|
|
* ~110 insns in loop body.
|
|
|
|
* Total of 5 such loops. Total program size ~1500 insns.
|
|
|
|
*/
|
|
|
|
scale_test("pyperf600_nounroll.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_loop1()
|
|
|
|
{
|
|
|
|
scale_test("loop1.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_loop2()
|
|
|
|
{
|
|
|
|
scale_test("loop2.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_loop3_fail()
|
|
|
|
{
|
|
|
|
scale_test("loop3.o", BPF_PROG_TYPE_RAW_TRACEPOINT, true /* fails */);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_loop4()
|
|
|
|
{
|
|
|
|
scale_test("loop4.o", BPF_PROG_TYPE_SCHED_CLS, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_loop5()
|
|
|
|
{
|
|
|
|
scale_test("loop5.o", BPF_PROG_TYPE_SCHED_CLS, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_loop6()
|
|
|
|
{
|
|
|
|
scale_test("loop6.o", BPF_PROG_TYPE_KPROBE, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_strobemeta()
|
|
|
|
{
|
|
|
|
/* partial unroll. 19k insn in a loop.
|
|
|
|
* Total program size 20.8k insn.
|
|
|
|
* ~350k processed_insns
|
|
|
|
*/
|
|
|
|
scale_test("strobemeta.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_strobemeta_nounroll1()
|
|
|
|
{
|
|
|
|
/* no unroll, tiny loops */
|
|
|
|
scale_test("strobemeta_nounroll1.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_strobemeta_nounroll2()
|
|
|
|
{
|
|
|
|
/* no unroll, tiny loops */
|
|
|
|
scale_test("strobemeta_nounroll2.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_strobemeta_subprogs()
|
|
|
|
{
|
|
|
|
/* non-inlined subprogs */
|
|
|
|
scale_test("strobemeta_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_sysctl_loop1()
|
|
|
|
{
|
|
|
|
scale_test("test_sysctl_loop1.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_sysctl_loop2()
|
|
|
|
{
|
|
|
|
scale_test("test_sysctl_loop2.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_xdp_loop()
|
|
|
|
{
|
|
|
|
scale_test("test_xdp_loop.o", BPF_PROG_TYPE_XDP, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
void test_verif_scale_seg6_loop()
|
|
|
|
{
|
|
|
|
scale_test("test_seg6_loop.o", BPF_PROG_TYPE_LWT_SEG6LOCAL, false);
|
|
|
|
}
|