linux/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
Kumar Kartikeya Dwivedi e31eec77e4 bpf: selftests: Fix fd cleanup in get_branch_snapshot
Cleanup code uses while (cpu++ < cpu_cnt) for closing fds, which means
the loop starts iterating from index 1 and never checks the first fd.
If the first fd is -1, the loop skips over it and closes garbage fds
(typically zero) in the remaining array. This leads to failures in
later tests when they end up storing fd 0 (as the slot becomes free due
to close(0)) in ldimm64's BTF fd and then try to match a module BTF id
against vmlinux.

This was observed as spurious CI failures for the ksym_module_libbpf
and module_attach tests. The test ends up closing fd 0, breaking
libbpf's assumption that a module BTF fd is always > 0, which leads the
kernel to think we are pointing at a BTF ID in vmlinux BTF.

Fixes: 025bd7c753 ("selftests/bpf: Add test for bpf_get_branch_snapshot")
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/bpf/20210927145941.1383001-12-memxor@gmail.com
2021-09-29 13:25:09 -07:00
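
For illustration only, here is a minimal standalone sketch (hypothetical, not part of the selftest or the kernel tree) of why the old while (cpu++ < cpu_cnt) cleanup loop misbehaves: the post-increment happens before the body runs, so slot 0 is never visited and the final pass indexes one past the end of the array.

#include <stdio.h>

int main(void)
{
	int cpu_cnt = 4;	/* pretend there are 4 CPUs */
	int cpu;

	/* buggy pattern: the body sees cpu == 1..cpu_cnt, so index 0 is
	 * skipped and the last pass goes one past the end of the array
	 */
	cpu = 0;
	while (cpu++ < cpu_cnt)
		printf("buggy loop visits index %d\n", cpu);	/* 1 2 3 4 */

	/* fixed pattern from close_perf_events(): visits 0..cpu_cnt-1 */
	for (cpu = 0; cpu < cpu_cnt; cpu++)
		printf("fixed loop visits index %d\n", cpu);	/* 0 1 2 3 */

	return 0;
}

With the fixed for loop, a slot holding -1 (a failed perf_event_open()) is seen at index 0 and stops the walk before any uninitialized value can reach close().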

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include "get_branch_snapshot.skel.h"

static int *pfd_array;
static int cpu_cnt;

static int create_perf_events(void)
{
	struct perf_event_attr attr = {0};
	int cpu;

	/* create perf event */
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x1b00;
	attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL |
		PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;

	cpu_cnt = libbpf_num_possible_cpus();
	pfd_array = malloc(sizeof(int) * cpu_cnt);
	if (!pfd_array) {
		cpu_cnt = 0;
		return 1;
	}

	for (cpu = 0; cpu < cpu_cnt; cpu++) {
		pfd_array[cpu] = syscall(__NR_perf_event_open, &attr,
					 -1, cpu, -1, PERF_FLAG_FD_CLOEXEC);
		if (pfd_array[cpu] < 0)
			break;
	}

	return cpu == 0;
}

static void close_perf_events(void)
{
	int cpu, fd;

	for (cpu = 0; cpu < cpu_cnt; cpu++) {
		fd = pfd_array[cpu];
		if (fd < 0)
			break;
		close(fd);
	}
	free(pfd_array);
}

void test_get_branch_snapshot(void)
{
	struct get_branch_snapshot *skel = NULL;
	int err;

	if (create_perf_events()) {
		test__skip(); /* system doesn't support LBR */
		goto cleanup;
	}

	skel = get_branch_snapshot__open_and_load();
	if (!ASSERT_OK_PTR(skel, "get_branch_snapshot__open_and_load"))
		goto cleanup;

	err = kallsyms_find("bpf_testmod_loop_test", &skel->bss->address_low);
	if (!ASSERT_OK(err, "kallsyms_find"))
		goto cleanup;

	err = kallsyms_find_next("bpf_testmod_loop_test", &skel->bss->address_high);
	if (!ASSERT_OK(err, "kallsyms_find_next"))
		goto cleanup;

	err = get_branch_snapshot__attach(skel);
	if (!ASSERT_OK(err, "get_branch_snapshot__attach"))
		goto cleanup;

	trigger_module_test_read(100);

	if (skel->bss->total_entries < 16) {
		/* too few entries for the hit/waste test */
		test__skip();
		goto cleanup;
	}

	ASSERT_GT(skel->bss->test1_hits, 6, "find_looptest_in_lbr");

	/* Given we stop LBR in software, we will waste a few entries.
	 * But we should try to waste as few as possible entries. We are at
	 * about 7 on x86_64 systems.
	 * Add a check for < 10 so that we get heads-up when something
	 * changes and wastes too many entries.
	 */
	ASSERT_LT(skel->bss->wasted_entries, 10, "check_wasted_entries");

cleanup:
	get_branch_snapshot__destroy(skel);
	close_perf_events();
}