bpf-next-for-netdev

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQTFp0I1jqZrAX+hPRXbK58LschIgwUCZV0kjgAKCRDbK58LschI
 gy0EAP9XwncW2OhO72DpITluFzvWPgB0N97OANKBXjzKJrRAlQD/aUe9nlvBQuad
 WsbMKLeC4wvI2X/4PEIR4ukbuZ3ypAA=
 =LMVg
 -----END PGP SIGNATURE-----

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2023-11-21

We've added 85 non-merge commits during the last 12 day(s) which contain
a total of 63 files changed, 4464 insertions(+), 1484 deletions(-).

The main changes are:

1) Huge batch of verifier changes to improve BPF register bounds logic
   and range support along with a large test suite, and verifier log
   improvements, all from Andrii Nakryiko.

2) Add a new kfunc which acquires the associated cgroup of a task within
   a specific cgroup v1 hierarchy where the latter is identified by its id,
   from Yafang Shao.

3) Extend verifier to allow bpf_refcount_acquire() of a map value field
   obtained via direct load which is a use-case needed in sched_ext,
   from Dave Marchevsky.

4) Fix the bpf_get_task_stack() helper to add the correct crosstask check
   for get_perf_callchain(), from Jordan Rome.

5) Fix BPF task_iter internals where lockless usage of next_thread()
   was wrong. The rework also simplifies the code, from Oleg Nesterov.

6) Fix uninitialized tail padding via LIBBPF_OPTS_RESET, and convert
   certain BPF UAPI structs to named fields to resolve verifier failures
   seen in bpf_dynptr usage, from Yonghong Song.

7) Add BPF selftest fixes for map_percpu_stats flakes caused by the per-CPU
   BPF memory allocator failing to allocate a per-CPU pointer,
   from Hou Tao.

8) Add prep work around dynptr and string handling for kfuncs which
   is later going to be used by file verification via BPF LSM and fsverity,
   from Song Liu.

9) Improve BPF selftests by converting multiple prog_tests to use ASSERT_*
   macros, from Yuran Pereira.

10) Optimize LPM trie lookup to check prefixlen before walking the trie,
    from Florian Lehner.

11) Consolidate virtio/9p configs from BPF selftests in config.vm file
    given they are needed consistently across archs, from Manu Bretelle.

12) Small BPF verifier refactor to remove register_is_const(),
    from Shung-Hsi Yu.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (85 commits)
  selftests/bpf: Replaces the usage of CHECK calls for ASSERTs in vmlinux
  selftests/bpf: Replaces the usage of CHECK calls for ASSERTs in bpf_obj_id
  selftests/bpf: Replaces the usage of CHECK calls for ASSERTs in bind_perm
  selftests/bpf: Replaces the usage of CHECK calls for ASSERTs in bpf_tcp_ca
  selftests/bpf: reduce verboseness of reg_bounds selftest logs
  bpf: bpf_iter_task_next: use next_task(kit->task) rather than next_task(kit->pos)
  bpf: bpf_iter_task_next: use __next_thread() rather than next_thread()
  bpf: task_group_seq_get_next: use __next_thread() rather than next_thread()
  bpf: emit frameno for PTR_TO_STACK regs if it differs from current one
  bpf: smarter verifier log number printing logic
  bpf: omit default off=0 and imm=0 in register state log
  bpf: emit map name in register state if applicable and available
  bpf: print spilled register state in stack slot
  bpf: extract register state printing
  bpf: move verifier state printing code to kernel/bpf/log.c
  bpf: move verbose_linfo() into kernel/bpf/log.c
  bpf: rename BPF_F_TEST_SANITY_STRICT to BPF_F_TEST_REG_INVARIANTS
  bpf: Remove test for MOVSX32 with offset=32
  selftests/bpf: add iter test requiring range x range logic
  veristat: add ability to set BPF_F_TEST_SANITY_STRICT flag with -r flag
  ...
====================

Link: https://lore.kernel.org/r/20231122000500.28126-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2023-11-21 17:52:49 -08:00
commit 53475287da
63 changed files with 4513 additions and 1533 deletions


@ -135,6 +135,30 @@ Either way, the returned buffer is either NULL, or of size buffer_szk. Without t
annotation, the verifier will reject the program if a null pointer is passed in with
a nonzero size.
2.2.5 __str Annotation
----------------------------
This annotation is used to indicate that the argument is a constant string.
An example is given below::
__bpf_kfunc bpf_get_file_xattr(..., const char *name__str, ...)
{
...
}
In this case, ``bpf_get_file_xattr()`` can be called as::
bpf_get_file_xattr(..., "xattr_name", ...);
Or::
const char name[] = "xattr_name"; /* This needs to be global */
int BPF_PROG(...)
{
...
bpf_get_file_xattr(..., name, ...);
...
}
.. _BPF_kfunc_nodef:


@ -186,8 +186,8 @@ enum btf_field_type {
BPF_LIST_NODE = (1 << 6),
BPF_RB_ROOT = (1 << 7),
BPF_RB_NODE = (1 << 8),
BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD |
BPF_RB_NODE | BPF_RB_ROOT,
BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE,
BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
BPF_REFCOUNT = (1 << 9),
};
@ -1226,6 +1226,8 @@ enum bpf_dynptr_type {
int bpf_dynptr_check_size(u32 size);
u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);


@ -602,6 +602,7 @@ struct bpf_verifier_env {
int stack_size; /* number of states to be processed */
bool strict_alignment; /* perform strict pointer alignment checks */
bool test_state_freq; /* test verifier with different pruning frequency */
bool test_reg_invariants; /* fail verification on register invariants violations */
struct bpf_verifier_state *cur_state; /* current verifier state */
struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
struct bpf_verifier_state_list *free_list;
@ -679,6 +680,10 @@ int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);
__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
u32 insn_off,
const char *prefix_fmt, ...);
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *cur = env->cur_state;
@ -778,4 +783,76 @@ static inline bool bpf_type_has_unsafe_modifiers(u32 type)
return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
}
static inline bool type_is_ptr_alloc_obj(u32 type)
{
return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
}
static inline bool type_is_non_owning_ref(u32 type)
{
return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
}
static inline bool type_is_pkt_pointer(enum bpf_reg_type type)
{
type = base_type(type);
return type == PTR_TO_PACKET ||
type == PTR_TO_PACKET_META;
}
static inline bool type_is_sk_pointer(enum bpf_reg_type type)
{
return type == PTR_TO_SOCKET ||
type == PTR_TO_SOCK_COMMON ||
type == PTR_TO_TCP_SOCK ||
type == PTR_TO_XDP_SOCK;
}
static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
{
env->scratched_regs |= 1U << regno;
}
static inline void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
{
env->scratched_stack_slots |= 1ULL << spi;
}
static inline bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
{
return (env->scratched_regs >> regno) & 1;
}
static inline bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
{
return (env->scratched_stack_slots >> regno) & 1;
}
static inline bool verifier_state_scratched(const struct bpf_verifier_env *env)
{
return env->scratched_regs || env->scratched_stack_slots;
}
static inline void mark_verifier_state_clean(struct bpf_verifier_env *env)
{
env->scratched_regs = 0U;
env->scratched_stack_slots = 0ULL;
}
/* Used for printing the entire verifier state. */
static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env)
{
env->scratched_regs = ~0U;
env->scratched_stack_slots = ~0ULL;
}
const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
const char *dynptr_type_str(enum bpf_dynptr_type type);
const char *iter_type_str(const struct btf *btf, u32 btf_id);
const char *iter_state_str(enum bpf_iter_state state);
void print_verifier_state(struct bpf_verifier_env *env,
const struct bpf_func_state *state, bool print_all);
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state);
#endif /* _LINUX_BPF_VERIFIER_H */


@ -563,6 +563,7 @@ struct cgroup_root {
/* A list running through the active hierarchies */
struct list_head root_list;
struct rcu_head rcu;
/* Hierarchy-specific flags */
unsigned int flags;


@ -69,6 +69,7 @@ struct css_task_iter {
extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
extern spinlock_t css_set_lock;
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
@ -386,7 +387,6 @@ static inline void cgroup_unlock(void)
* as locks used during the cgroup_subsys::attach() methods.
*/
#ifdef CONFIG_PROVE_RCU
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
rcu_dereference_check((task)->cgroups, \
rcu_read_lock_sched_held() || \
@ -853,4 +853,6 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
#endif /* CONFIG_CGROUP_BPF */
struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);
#endif /* _LINUX_CGROUP_H */


@ -136,7 +136,7 @@
#endif
#define __diag_ignore_all(option, comment) \
__diag_GCC(8, ignore, option)
__diag(__diag_GCC_ignore option)
/*
* Prior to 9.1, -Wno-alloc-size-larger-than (and therefore the "alloc_size"


@ -106,6 +106,10 @@ int tnum_sbin(char *str, size_t size, struct tnum a);
struct tnum tnum_subreg(struct tnum a);
/* Returns the tnum with the lower 32-bit subreg cleared */
struct tnum tnum_clear_subreg(struct tnum a);
/* Returns the tnum with the lower 32-bit subreg in *reg* set to the lower
* 32-bit subreg in *subreg*
*/
struct tnum tnum_with_subreg(struct tnum reg, struct tnum subreg);
/* Returns the tnum with the lower 32-bit subreg set to value */
struct tnum tnum_const_subreg(struct tnum a, u32 value);
/* Returns true if 32-bit subreg @a is a known constant*/


@ -1200,6 +1200,9 @@ enum bpf_perf_event_type {
*/
#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6)
/* The verifier internal test flag. Behavior is undefined */
#define BPF_F_TEST_REG_INVARIANTS (1U << 7)
/* link_create.kprobe_multi.flags used in LINK_CREATE command for
* BPF_TRACE_KPROBE_MULTI attach type to create return probe.
*/
@ -4517,6 +4520,8 @@ union bpf_attr {
* long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
* Note: the user stack will only be populated if the *task* is
* the current task; all other tasks will return -EOPNOTSUPP.
* To achieve this, the helper needs *task*, which is a valid
* pointer to **struct task_struct**. To store the stacktrace, the
* bpf program provides *buf* with a nonnegative *size*.
@ -4528,6 +4533,7 @@ union bpf_attr {
*
* **BPF_F_USER_STACK**
* Collect a user space stack instead of a kernel stack.
* The *task* must be the current task.
* **BPF_F_USER_BUILD_ID**
* Collect buildid+offset instead of ips for user stack,
* only valid if **BPF_F_USER_STACK** is also specified.
@ -7151,40 +7157,31 @@ struct bpf_spin_lock {
};
struct bpf_timer {
__u64 :64;
__u64 :64;
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_dynptr {
__u64 :64;
__u64 :64;
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_list_head {
__u64 :64;
__u64 :64;
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_list_node {
__u64 :64;
__u64 :64;
__u64 :64;
__u64 __opaque[3];
} __attribute__((aligned(8)));
struct bpf_rb_root {
__u64 :64;
__u64 :64;
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_rb_node {
__u64 :64;
__u64 :64;
__u64 :64;
__u64 :64;
__u64 __opaque[4];
} __attribute__((aligned(8)));
struct bpf_refcount {
__u32 :32;
__u32 __opaque[1];
} __attribute__((aligned(4)));
struct bpf_sysctl {
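
The named __opaque members above replace anonymous bitfields so these structs
can be reliably zero-initialized from BPF programs. A minimal, illustrative
sketch (not from this series; bpf_dynptr_from_mem() is the existing helper) of
the bpf_dynptr usage pattern behind the verifier failures mentioned in item 6
of the pull request summary:

	char payload[16] = {};
	struct bpf_dynptr dptr = {};	/* "= {}" now zeroes the whole object;
					 * unnamed bitfields were not guaranteed
					 * to be initialized */

	bpf_dynptr_from_mem(payload, sizeof(payload), 0, &dptr);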


@ -3840,9 +3840,6 @@ end:
return ERR_PTR(ret);
}
#define GRAPH_ROOT_MASK (BPF_LIST_HEAD | BPF_RB_ROOT)
#define GRAPH_NODE_MASK (BPF_LIST_NODE | BPF_RB_NODE)
int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
{
int i;
@ -3855,13 +3852,13 @@ int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
* Hence we only need to ensure that bpf_{list_head,rb_root} ownership
* does not form cycles.
*/
if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & GRAPH_ROOT_MASK))
if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & BPF_GRAPH_ROOT))
return 0;
for (i = 0; i < rec->cnt; i++) {
struct btf_struct_meta *meta;
u32 btf_id;
if (!(rec->fields[i].type & GRAPH_ROOT_MASK))
if (!(rec->fields[i].type & BPF_GRAPH_ROOT))
continue;
btf_id = rec->fields[i].graph_root.value_btf_id;
meta = btf_find_struct_meta(btf, btf_id);
@ -3873,7 +3870,7 @@ int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
* to check ownership cycle for a type unless it's also a
* node type.
*/
if (!(rec->field_mask & GRAPH_NODE_MASK))
if (!(rec->field_mask & BPF_GRAPH_NODE))
continue;
/* We need to ensure ownership acyclicity among all types. The
@ -3909,7 +3906,7 @@ int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
* - A is both an root and node.
* - B is only an node.
*/
if (meta->record->field_mask & GRAPH_ROOT_MASK)
if (meta->record->field_mask & BPF_GRAPH_ROOT)
return -ELOOP;
}
return 0;


@ -1937,10 +1937,7 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu)
ma = &bpf_global_percpu_ma;
else
ma = &bpf_global_ma;
if (rec && rec->refcount_off >= 0)
bpf_mem_free_rcu(ma, p);
else
bpf_mem_free(ma, p);
bpf_mem_free_rcu(ma, p);
}
__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
@ -2231,6 +2228,25 @@ __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
rcu_read_unlock();
return ret;
}
/**
* bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
* specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
* hierarchy ID.
* @task: The target task
* @hierarchy_id: The ID of a cgroup1 hierarchy
*
* On success, the cgroup is returned. On failure, NULL is returned.
*/
__bpf_kfunc struct cgroup *
bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id)
{
struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id);
if (IS_ERR(cgrp))
return NULL;
return cgrp;
}
#endif /* CONFIG_CGROUPS */
/**
@ -2520,7 +2536,7 @@ BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU)
BTF_ID_FLAGS(func, bpf_list_push_front_impl)
BTF_ID_FLAGS(func, bpf_list_push_back_impl)
BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
@ -2537,6 +2553,7 @@ BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
#endif
BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_throw)
@ -2618,3 +2635,22 @@ static int __init kfunc_init(void)
}
late_initcall(kfunc_init);
/* Get a pointer to dynptr data up to len bytes for read only access. If
* the dynptr doesn't have continuous data up to len bytes, return NULL.
*/
const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len)
{
return bpf_dynptr_slice(ptr, 0, NULL, len);
}
/* Get a pointer to dynptr data up to len bytes for read write access. If
* the dynptr doesn't have continuous data up to len bytes, or the dynptr
* is read only, return NULL.
*/
void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len)
{
if (__bpf_dynptr_is_rdonly(ptr))
return NULL;
return (void *)__bpf_dynptr_data(ptr, len);
}
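
For context, a minimal BPF-side sketch of calling the new kfunc (illustrative
only: the attach point and hierarchy ID are assumptions; the prototypes mirror
the kernel signatures added above, and the acquired cgroup must be released):

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
	void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

	SEC("tp_btf/sched_process_fork")	/* example attach point */
	int BPF_PROG(on_fork, struct task_struct *parent, struct task_struct *child)
	{
		struct cgroup *cgrp;

		/* hierarchy id 1 is a placeholder; see /proc/self/cgroup */
		cgrp = bpf_task_get_cgroup1(child, 1);
		if (!cgrp)			/* KF_RET_NULL: lookup may fail */
			return 0;
		bpf_printk("cgroup id %llu", cgrp->kn->id);
		bpf_cgroup_release(cgrp);	/* KF_ACQUIRE: release the reference */
		return 0;
	}

	char _license[] SEC("license") = "GPL";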


@ -10,6 +10,8 @@
#include <linux/bpf_verifier.h>
#include <linux/math64.h>
#define verbose(env, fmt, args...) bpf_verifier_log_write(env, fmt, ##args)
static bool bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
{
/* ubuf and len_total should both be specified (or not) together */
@ -325,3 +327,481 @@ __printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_log);
static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
const struct bpf_line_info *linfo;
const struct bpf_prog *prog;
u32 i, nr_linfo;
prog = env->prog;
nr_linfo = prog->aux->nr_linfo;
if (!nr_linfo || insn_off >= prog->len)
return NULL;
linfo = prog->aux->linfo;
for (i = 1; i < nr_linfo; i++)
if (insn_off < linfo[i].insn_off)
break;
return &linfo[i - 1];
}
static const char *ltrim(const char *s)
{
while (isspace(*s))
s++;
return s;
}
__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
u32 insn_off,
const char *prefix_fmt, ...)
{
const struct bpf_line_info *linfo;
if (!bpf_verifier_log_needed(&env->log))
return;
linfo = find_linfo(env, insn_off);
if (!linfo || linfo == env->prev_linfo)
return;
if (prefix_fmt) {
va_list args;
va_start(args, prefix_fmt);
bpf_verifier_vlog(&env->log, prefix_fmt, args);
va_end(args);
}
verbose(env, "%s\n",
ltrim(btf_name_by_offset(env->prog->aux->btf,
linfo->line_off)));
env->prev_linfo = linfo;
}
static const char *btf_type_name(const struct btf *btf, u32 id)
{
return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
}
/* string representation of 'enum bpf_reg_type'
*
* Note that reg_type_str() can not appear more than once in a single verbose()
* statement.
*/
const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type)
{
char postfix[16] = {0}, prefix[64] = {0};
static const char * const str[] = {
[NOT_INIT] = "?",
[SCALAR_VALUE] = "scalar",
[PTR_TO_CTX] = "ctx",
[CONST_PTR_TO_MAP] = "map_ptr",
[PTR_TO_MAP_VALUE] = "map_value",
[PTR_TO_STACK] = "fp",
[PTR_TO_PACKET] = "pkt",
[PTR_TO_PACKET_META] = "pkt_meta",
[PTR_TO_PACKET_END] = "pkt_end",
[PTR_TO_FLOW_KEYS] = "flow_keys",
[PTR_TO_SOCKET] = "sock",
[PTR_TO_SOCK_COMMON] = "sock_common",
[PTR_TO_TCP_SOCK] = "tcp_sock",
[PTR_TO_TP_BUFFER] = "tp_buffer",
[PTR_TO_XDP_SOCK] = "xdp_sock",
[PTR_TO_BTF_ID] = "ptr_",
[PTR_TO_MEM] = "mem",
[PTR_TO_BUF] = "buf",
[PTR_TO_FUNC] = "func",
[PTR_TO_MAP_KEY] = "map_key",
[CONST_PTR_TO_DYNPTR] = "dynptr_ptr",
};
if (type & PTR_MAYBE_NULL) {
if (base_type(type) == PTR_TO_BTF_ID)
strncpy(postfix, "or_null_", 16);
else
strncpy(postfix, "_or_null", 16);
}
snprintf(prefix, sizeof(prefix), "%s%s%s%s%s%s%s",
type & MEM_RDONLY ? "rdonly_" : "",
type & MEM_RINGBUF ? "ringbuf_" : "",
type & MEM_USER ? "user_" : "",
type & MEM_PERCPU ? "percpu_" : "",
type & MEM_RCU ? "rcu_" : "",
type & PTR_UNTRUSTED ? "untrusted_" : "",
type & PTR_TRUSTED ? "trusted_" : ""
);
snprintf(env->tmp_str_buf, TMP_STR_BUF_LEN, "%s%s%s",
prefix, str[base_type(type)], postfix);
return env->tmp_str_buf;
}
const char *dynptr_type_str(enum bpf_dynptr_type type)
{
switch (type) {
case BPF_DYNPTR_TYPE_LOCAL:
return "local";
case BPF_DYNPTR_TYPE_RINGBUF:
return "ringbuf";
case BPF_DYNPTR_TYPE_SKB:
return "skb";
case BPF_DYNPTR_TYPE_XDP:
return "xdp";
case BPF_DYNPTR_TYPE_INVALID:
return "<invalid>";
default:
WARN_ONCE(1, "unknown dynptr type %d\n", type);
return "<unknown>";
}
}
const char *iter_type_str(const struct btf *btf, u32 btf_id)
{
if (!btf || btf_id == 0)
return "<invalid>";
/* we already validated that type is valid and has conforming name */
return btf_type_name(btf, btf_id) + sizeof(ITER_PREFIX) - 1;
}
const char *iter_state_str(enum bpf_iter_state state)
{
switch (state) {
case BPF_ITER_STATE_ACTIVE:
return "active";
case BPF_ITER_STATE_DRAINED:
return "drained";
case BPF_ITER_STATE_INVALID:
return "<invalid>";
default:
WARN_ONCE(1, "unknown iter state %d\n", state);
return "<unknown>";
}
}
static char slot_type_char[] = {
[STACK_INVALID] = '?',
[STACK_SPILL] = 'r',
[STACK_MISC] = 'm',
[STACK_ZERO] = '0',
[STACK_DYNPTR] = 'd',
[STACK_ITER] = 'i',
};
static void print_liveness(struct bpf_verifier_env *env,
enum bpf_reg_liveness live)
{
if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
verbose(env, "_");
if (live & REG_LIVE_READ)
verbose(env, "r");
if (live & REG_LIVE_WRITTEN)
verbose(env, "w");
if (live & REG_LIVE_DONE)
verbose(env, "D");
}
#define UNUM_MAX_DECIMAL U16_MAX
#define SNUM_MAX_DECIMAL S16_MAX
#define SNUM_MIN_DECIMAL S16_MIN
static bool is_unum_decimal(u64 num)
{
return num <= UNUM_MAX_DECIMAL;
}
static bool is_snum_decimal(s64 num)
{
return num >= SNUM_MIN_DECIMAL && num <= SNUM_MAX_DECIMAL;
}
static void verbose_unum(struct bpf_verifier_env *env, u64 num)
{
if (is_unum_decimal(num))
verbose(env, "%llu", num);
else
verbose(env, "%#llx", num);
}
static void verbose_snum(struct bpf_verifier_env *env, s64 num)
{
if (is_snum_decimal(num))
verbose(env, "%lld", num);
else
verbose(env, "%#llx", num);
}
static void print_scalar_ranges(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
const char **sep)
{
/* For signed ranges, we want to unify 64-bit and 32-bit values in the
* output as much as possible, but there is a bit of a complication.
* If we choose to print values as decimals, this is natural to do,
* because negative 64-bit and 32-bit values >= -S32_MIN have the same
* representation due to sign extension. But if we choose to print
* them in hex format (see is_snum_decimal()), then sign extension is
* misleading.
* E.g., smin=-2 and smin32=-2 are exactly the same in decimal, but in
* hex they will be smin=0xfffffffffffffffe and smin32=0xfffffffe, two
* very different numbers.
* So we avoid sign extension if we choose to print values in hex.
*/
struct {
const char *name;
u64 val;
bool omit;
} minmaxs[] = {
{"smin", reg->smin_value, reg->smin_value == S64_MIN},
{"smax", reg->smax_value, reg->smax_value == S64_MAX},
{"umin", reg->umin_value, reg->umin_value == 0},
{"umax", reg->umax_value, reg->umax_value == U64_MAX},
{"smin32",
is_snum_decimal((s64)reg->s32_min_value)
? (s64)reg->s32_min_value
: (u32)reg->s32_min_value, reg->s32_min_value == S32_MIN},
{"smax32",
is_snum_decimal((s64)reg->s32_max_value)
? (s64)reg->s32_max_value
: (u32)reg->s32_max_value, reg->s32_max_value == S32_MAX},
{"umin32", reg->u32_min_value, reg->u32_min_value == 0},
{"umax32", reg->u32_max_value, reg->u32_max_value == U32_MAX},
}, *m1, *m2, *mend = &minmaxs[ARRAY_SIZE(minmaxs)];
bool neg1, neg2;
for (m1 = &minmaxs[0]; m1 < mend; m1++) {
if (m1->omit)
continue;
neg1 = m1->name[0] == 's' && (s64)m1->val < 0;
verbose(env, "%s%s=", *sep, m1->name);
*sep = ",";
for (m2 = m1 + 2; m2 < mend; m2 += 2) {
if (m2->omit || m2->val != m1->val)
continue;
/* don't mix negatives with positives */
neg2 = m2->name[0] == 's' && (s64)m2->val < 0;
if (neg2 != neg1)
continue;
m2->omit = true;
verbose(env, "%s=", m2->name);
}
if (m1->name[0] == 's')
verbose_snum(env, m1->val);
else
verbose_unum(env, m1->val);
}
}
static bool type_is_map_ptr(enum bpf_reg_type t) {
switch (base_type(t)) {
case CONST_PTR_TO_MAP:
case PTR_TO_MAP_KEY:
case PTR_TO_MAP_VALUE:
return true;
default:
return false;
}
}
static void print_reg_state(struct bpf_verifier_env *env,
const struct bpf_func_state *state,
const struct bpf_reg_state *reg)
{
enum bpf_reg_type t;
const char *sep = "";
t = reg->type;
if (t == SCALAR_VALUE && reg->precise)
verbose(env, "P");
if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) {
/* reg->off should be 0 for SCALAR_VALUE */
verbose_snum(env, reg->var_off.value + reg->off);
return;
}
/*
* _a stands for append, was shortened to avoid multiline statements below.
* This macro is used to output a comma separated list of attributes.
*/
#define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, ##__VA_ARGS__); sep = ","; })
verbose(env, "%s", reg_type_str(env, t));
if (t == PTR_TO_STACK) {
if (state->frameno != reg->frameno)
verbose(env, "[%d]", reg->frameno);
if (tnum_is_const(reg->var_off)) {
verbose_snum(env, reg->var_off.value + reg->off);
return;
}
}
if (base_type(t) == PTR_TO_BTF_ID)
verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id));
verbose(env, "(");
if (reg->id)
verbose_a("id=%d", reg->id);
if (reg->ref_obj_id)
verbose_a("ref_obj_id=%d", reg->ref_obj_id);
if (type_is_non_owning_ref(reg->type))
verbose_a("%s", "non_own_ref");
if (type_is_map_ptr(t)) {
if (reg->map_ptr->name[0])
verbose_a("map=%s", reg->map_ptr->name);
verbose_a("ks=%d,vs=%d",
reg->map_ptr->key_size,
reg->map_ptr->value_size);
}
if (t != SCALAR_VALUE && reg->off) {
verbose_a("off=");
verbose_snum(env, reg->off);
}
if (type_is_pkt_pointer(t)) {
verbose_a("r=");
verbose_unum(env, reg->range);
}
if (tnum_is_const(reg->var_off)) {
/* a pointer register with fixed offset */
if (reg->var_off.value) {
verbose_a("imm=");
verbose_snum(env, reg->var_off.value);
}
} else {
print_scalar_ranges(env, reg, &sep);
if (!tnum_is_unknown(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose_a("var_off=%s", tn_buf);
}
}
verbose(env, ")");
#undef verbose_a
}
void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state,
bool print_all)
{
const struct bpf_reg_state *reg;
int i;
if (state->frameno)
verbose(env, " frame%d:", state->frameno);
for (i = 0; i < MAX_BPF_REG; i++) {
reg = &state->regs[i];
if (reg->type == NOT_INIT)
continue;
if (!print_all && !reg_scratched(env, i))
continue;
verbose(env, " R%d", i);
print_liveness(env, reg->live);
verbose(env, "=");
print_reg_state(env, state, reg);
}
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
char types_buf[BPF_REG_SIZE + 1];
bool valid = false;
u8 slot_type;
int j;
if (!print_all && !stack_slot_scratched(env, i))
continue;
for (j = 0; j < BPF_REG_SIZE; j++) {
slot_type = state->stack[i].slot_type[j];
if (slot_type != STACK_INVALID)
valid = true;
types_buf[j] = slot_type_char[slot_type];
}
types_buf[BPF_REG_SIZE] = 0;
if (!valid)
continue;
reg = &state->stack[i].spilled_ptr;
switch (state->stack[i].slot_type[BPF_REG_SIZE - 1]) {
case STACK_SPILL:
/* print MISC/ZERO/INVALID slots above subreg spill */
for (j = 0; j < BPF_REG_SIZE; j++)
if (state->stack[i].slot_type[j] == STACK_SPILL)
break;
types_buf[j] = '\0';
verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
print_liveness(env, reg->live);
verbose(env, "=%s", types_buf);
print_reg_state(env, state, reg);
break;
case STACK_DYNPTR:
/* skip to main dynptr slot */
i += BPF_DYNPTR_NR_SLOTS - 1;
reg = &state->stack[i].spilled_ptr;
verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
print_liveness(env, reg->live);
verbose(env, "=dynptr_%s", dynptr_type_str(reg->dynptr.type));
if (reg->ref_obj_id)
verbose(env, "(ref_id=%d)", reg->ref_obj_id);
break;
case STACK_ITER:
/* only main slot has ref_obj_id set; skip others */
if (!reg->ref_obj_id)
continue;
verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
print_liveness(env, reg->live);
verbose(env, "=iter_%s(ref_id=%d,state=%s,depth=%u)",
iter_type_str(reg->iter.btf, reg->iter.btf_id),
reg->ref_obj_id, iter_state_str(reg->iter.state),
reg->iter.depth);
break;
case STACK_MISC:
case STACK_ZERO:
default:
verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
print_liveness(env, reg->live);
verbose(env, "=%s", types_buf);
break;
}
}
if (state->acquired_refs && state->refs[0].id) {
verbose(env, " refs=%d", state->refs[0].id);
for (i = 1; i < state->acquired_refs; i++)
if (state->refs[i].id)
verbose(env, ",%d", state->refs[i].id);
}
if (state->in_callback_fn)
verbose(env, " cb");
if (state->in_async_callback_fn)
verbose(env, " async_cb");
verbose(env, "\n");
if (!print_all)
mark_verifier_state_clean(env);
}
static inline u32 vlog_alignment(u32 pos)
{
return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
BPF_LOG_MIN_ALIGNMENT) - pos - 1;
}
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state)
{
if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) {
/* remove new line character */
bpf_vlog_reset(&env->log, env->prev_log_pos - 1);
verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_pos), ' ');
} else {
verbose(env, "%d:", env->insn_idx);
}
print_verifier_state(env, state, false);
}
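
As a worked example of the sign-extension note in print_scalar_ranges() above:
smin = -2 and smin32 = -2 share one decimal representation, but in hex the two
widths diverge, which is why hex-printed 32-bit bounds are not sign-extended:

	(u64)-2 = 0xfffffffffffffffe
	(u32)-2 = 0xfffffffe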


@ -231,6 +231,9 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
struct lpm_trie_node *node, *found = NULL;
struct bpf_lpm_trie_key *key = _key;
if (key->prefixlen > trie->max_prefixlen)
return NULL;
/* Start walking the trie from the root node ... */
for (node = rcu_dereference_check(trie->root, rcu_read_lock_bh_held());


@ -388,6 +388,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
{
u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
bool user_build_id = flags & BPF_F_USER_BUILD_ID;
bool crosstask = task && task != current;
u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
bool user = flags & BPF_F_USER_STACK;
struct perf_callchain_entry *trace;
@ -410,6 +411,14 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
if (task && user && !user_mode(regs))
goto err_fault;
/* get_perf_callchain does not support crosstask user stack walking
* but returns an empty stack instead of NULL.
*/
if (crosstask && user) {
err = -EOPNOTSUPP;
goto clear;
}
num_elem = size / elem_size;
max_depth = num_elem + skip;
if (sysctl_perf_event_max_stack < max_depth)
@ -421,7 +430,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
trace = get_callchain_entry_for_task(task, max_depth);
else
trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
false, false);
crosstask, false);
if (unlikely(!trace))
goto err_fault;
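
A hedged illustration of the new behaviour (the iterator program below is an
assumption; bpf_get_task_stack() and BPF_F_USER_STACK are the existing helper
and flag):

	SEC("iter/task")
	int dump_user_stack(struct bpf_iter__task *ctx)
	{
		struct task_struct *task = ctx->task;
		__u64 ips[32];
		long n;

		if (!task)
			return 0;
		n = bpf_get_task_stack(task, ips, sizeof(ips), BPF_F_USER_STACK);
		/* with the crosstask check above, n == -EOPNOTSUPP whenever
		 * task != current, instead of a silently empty stack */
		return 0;
	}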


@ -2573,7 +2573,8 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
BPF_F_SLEEPABLE |
BPF_F_TEST_RND_HI32 |
BPF_F_XDP_HAS_FRAGS |
BPF_F_XDP_DEV_BOUND_ONLY))
BPF_F_XDP_DEV_BOUND_ONLY |
BPF_F_TEST_REG_INVARIANTS))
return -EINVAL;
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&


@ -70,15 +70,13 @@ static struct task_struct *task_group_seq_get_next(struct bpf_iter_seq_task_comm
return NULL;
retry:
task = next_thread(task);
task = __next_thread(task);
if (!task)
return NULL;
next_tid = __task_pid_nr_ns(task, PIDTYPE_PID, common->ns);
if (!next_tid || next_tid == common->pid) {
/* Run out of tasks of a process. The tasks of a
* thread_group are linked as circular linked list.
*/
return NULL;
}
if (!next_tid)
goto retry;
if (skip_if_dup_files && task->files == task->group_leader->files)
goto retry;
@ -980,7 +978,6 @@ __bpf_kfunc int bpf_iter_task_new(struct bpf_iter_task *it,
BUILD_BUG_ON(__alignof__(struct bpf_iter_task_kern) !=
__alignof__(struct bpf_iter_task));
kit->task = kit->pos = NULL;
switch (flags) {
case BPF_TASK_ITER_ALL_THREADS:
case BPF_TASK_ITER_ALL_PROCS:
@ -1017,20 +1014,16 @@ __bpf_kfunc struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it)
if (flags == BPF_TASK_ITER_ALL_PROCS)
goto get_next_task;
kit->pos = next_thread(kit->pos);
if (kit->pos == kit->task) {
if (flags == BPF_TASK_ITER_PROC_THREADS) {
kit->pos = NULL;
return pos;
}
} else
kit->pos = __next_thread(kit->pos);
if (kit->pos || flags == BPF_TASK_ITER_PROC_THREADS)
return pos;
get_next_task:
kit->pos = next_task(kit->pos);
kit->task = kit->pos;
if (kit->pos == &init_task)
kit->task = next_task(kit->task);
if (kit->task == &init_task)
kit->pos = NULL;
else
kit->pos = kit->task;
return pos;
}
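
For reference, a sketch of the open-coded iterator these kfuncs back
(prototypes assumed to be declared __ksym as in the selftests; the return
value of bpf_iter_task_new() is ignored here for brevity):

	struct bpf_iter_task it;
	struct task_struct *p;

	bpf_rcu_read_lock();
	bpf_iter_task_new(&it, NULL, BPF_TASK_ITER_ALL_THREADS);
	while ((p = bpf_iter_task_next(&it))) {
		/* every thread is visited once; the __next_thread() rework
		 * above keeps the lockless walk safe and terminating */
	}
	bpf_iter_task_destroy(&it);
	bpf_rcu_read_unlock();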


@ -208,7 +208,12 @@ struct tnum tnum_clear_subreg(struct tnum a)
return tnum_lshift(tnum_rshift(a, 32), 32);
}
struct tnum tnum_with_subreg(struct tnum reg, struct tnum subreg)
{
return tnum_or(tnum_clear_subreg(reg), tnum_subreg(subreg));
}
struct tnum tnum_const_subreg(struct tnum a, u32 value)
{
return tnum_or(tnum_clear_subreg(a), tnum_const(value));
return tnum_with_subreg(a, tnum_const(value));
}
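
A worked example of the new helper, using constant tnums for clarity:

	struct tnum reg    = tnum_const(0x1122334400000000ULL);
	struct tnum subreg = tnum_const(0x00000000aabbccddULL);
	struct tnum res    = tnum_with_subreg(reg, subreg);
	/* res is the constant 0x11223344aabbccdd: upper 32 bits taken from
	 * 'reg', lower 32 bits from 'subreg' */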

File diff suppressed because it is too large


@ -164,13 +164,13 @@ struct cgroup_mgctx {
#define DEFINE_CGROUP_MGCTX(name) \
struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name)
extern spinlock_t css_set_lock;
extern struct cgroup_subsys *cgroup_subsys[];
extern struct list_head cgroup_roots;
/* iterate across the hierarchies */
#define for_each_root(root) \
list_for_each_entry((root), &cgroup_roots, root_list)
list_for_each_entry_rcu((root), &cgroup_roots, root_list, \
lockdep_is_held(&cgroup_mutex))
/**
* for_each_subsys - iterate all enabled cgroup subsystems


@ -1262,6 +1262,40 @@ int cgroup1_get_tree(struct fs_context *fc)
return ret;
}
/**
* task_get_cgroup1 - Acquires the associated cgroup of a task within a
* specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
* hierarchy ID.
* @tsk: The target task
* @hierarchy_id: The ID of a cgroup1 hierarchy
*
* On success, the cgroup is returned. On failure, ERR_PTR is returned.
* We limit it to cgroup1 only.
*/
struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id)
{
struct cgroup *cgrp = ERR_PTR(-ENOENT);
struct cgroup_root *root;
unsigned long flags;
rcu_read_lock();
for_each_root(root) {
/* cgroup1 only */
if (root == &cgrp_dfl_root)
continue;
if (root->hierarchy_id != hierarchy_id)
continue;
spin_lock_irqsave(&css_set_lock, flags);
cgrp = task_cgroup_from_root(tsk, root);
if (!cgrp || !cgroup_tryget(cgrp))
cgrp = ERR_PTR(-ENOENT);
spin_unlock_irqrestore(&css_set_lock, flags);
break;
}
rcu_read_unlock();
return cgrp;
}
static int __init cgroup1_wq_init(void)
{
/*


@ -1315,7 +1315,7 @@ static void cgroup_exit_root_id(struct cgroup_root *root)
void cgroup_free_root(struct cgroup_root *root)
{
kfree(root);
kfree_rcu(root, rcu);
}
static void cgroup_destroy_root(struct cgroup_root *root)
@ -1347,10 +1347,9 @@ static void cgroup_destroy_root(struct cgroup_root *root)
spin_unlock_irq(&css_set_lock);
if (!list_empty(&root->root_list)) {
list_del(&root->root_list);
cgroup_root_count--;
}
WARN_ON_ONCE(list_empty(&root->root_list));
list_del_rcu(&root->root_list);
cgroup_root_count--;
if (!have_favordynmods)
cgroup_favor_dynmods(root, false);
@ -1390,7 +1389,15 @@ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset,
}
}
BUG_ON(!res_cgroup);
/*
* If cgroup_mutex is not held, the cgrp_cset_link will be freed
* before we remove the cgroup root from the root_list. Consequently,
* when accessing a cgroup root, the cset_link may have already been
* freed, resulting in a NULL res_cgroup. However, by holding the
* cgroup_mutex, we ensure that res_cgroup can't be NULL.
* If we don't hold cgroup_mutex in the caller, we must do the NULL
* check.
*/
return res_cgroup;
}
@ -1413,6 +1420,11 @@ current_cgns_cgroup_from_root(struct cgroup_root *root)
rcu_read_unlock();
/*
* The namespace_sem is held by current, so the root cgroup can't
* be umounted. Therefore, we can ensure that the res is non-NULL.
*/
WARN_ON_ONCE(!res);
return res;
}
@ -1449,7 +1461,6 @@ static struct cgroup *current_cgns_cgroup_dfl(void)
static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
struct cgroup_root *root)
{
lockdep_assert_held(&cgroup_mutex);
lockdep_assert_held(&css_set_lock);
return __cset_cgroup_from_root(cset, root);
@ -1457,7 +1468,9 @@ static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
/*
* Return the cgroup for "task" from the given hierarchy. Must be
* called with cgroup_mutex and css_set_lock held.
* called with css_set_lock held to prevent task's groups from being modified.
* Must be called with either cgroup_mutex or rcu read lock to prevent the
* cgroup root from being destroyed.
*/
struct cgroup *task_cgroup_from_root(struct task_struct *task,
struct cgroup_root *root)
@ -2032,7 +2045,7 @@ void init_cgroup_root(struct cgroup_fs_context *ctx)
struct cgroup_root *root = ctx->root;
struct cgroup *cgrp = &root->cgrp;
INIT_LIST_HEAD(&root->root_list);
INIT_LIST_HEAD_RCU(&root->root_list);
atomic_set(&root->nr_cgrps, 1);
cgrp->root = root;
init_cgroup_housekeeping(cgrp);
@ -2115,7 +2128,7 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
* care of subsystems' refcounts, which are explicitly dropped in
* the failure exit path.
*/
list_add(&root->root_list, &cgroup_roots);
list_add_rcu(&root->root_list, &cgroup_roots);
cgroup_root_count++;
/*
@ -6277,7 +6290,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
if (!buf)
goto out;
cgroup_lock();
rcu_read_lock();
spin_lock_irq(&css_set_lock);
for_each_root(root) {
@ -6288,6 +6301,11 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
if (root == &cgrp_dfl_root && !READ_ONCE(cgrp_dfl_visible))
continue;
cgrp = task_cgroup_from_root(tsk, root);
/* The root has already been unmounted. */
if (!cgrp)
continue;
seq_printf(m, "%d:", root->hierarchy_id);
if (root != &cgrp_dfl_root)
for_each_subsys(ss, ssid)
@ -6298,9 +6316,6 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
seq_printf(m, "%sname=%s", count ? "," : "",
root->name);
seq_putc(m, ':');
cgrp = task_cgroup_from_root(tsk, root);
/*
* On traditional hierarchies, all zombie tasks show up as
* belonging to the root cgroup. On the default hierarchy,
@ -6332,7 +6347,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
retval = 0;
out_unlock:
spin_unlock_irq(&css_set_lock);
cgroup_unlock();
rcu_read_unlock();
kfree(buf);
out:
return retval;


@ -1376,6 +1376,8 @@ __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
struct bpf_dynptr_kern *sig_ptr,
struct bpf_key *trusted_keyring)
{
const void *data, *sig;
u32 data_len, sig_len;
int ret;
if (trusted_keyring->has_ref) {
@ -1392,10 +1394,12 @@ __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
return ret;
}
return verify_pkcs7_signature(data_ptr->data,
__bpf_dynptr_size(data_ptr),
sig_ptr->data,
__bpf_dynptr_size(sig_ptr),
data_len = __bpf_dynptr_size(data_ptr);
data = __bpf_dynptr_data(data_ptr, data_len);
sig_len = __bpf_dynptr_size(sig_ptr);
sig = __bpf_dynptr_data(sig_ptr, sig_len);
return verify_pkcs7_signature(data, data_len, sig, sig_len,
trusted_keyring->key,
VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
NULL);


@ -5144,22 +5144,6 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x1 } },
},
{
"ALU_MOVSX | BPF_W",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x00000000deadbeefLL),
BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
BPF_MOVSX32_REG(R1, R3, 32),
BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
/* MOVSX64 REG */
{
"ALU64_MOVSX | BPF_B",


@ -20,7 +20,7 @@ SYNOPSIS
**bpftool** **version**
*OBJECT* := { **map** | **program** | **link** | **cgroup** | **perf** | **net** | **feature** |
*OBJECT* := { **map** | **prog** | **link** | **cgroup** | **perf** | **net** | **feature** |
**btf** | **gen** | **struct_ops** | **iter** }
*OPTIONS* := { { **-V** | **--version** } | |COMMON_OPTIONS| }


@ -1200,6 +1200,9 @@ enum bpf_perf_event_type {
*/
#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6)
/* The verifier internal test flag. Behavior is undefined */
#define BPF_F_TEST_REG_INVARIANTS (1U << 7)
/* link_create.kprobe_multi.flags used in LINK_CREATE command for
* BPF_TRACE_KPROBE_MULTI attach type to create return probe.
*/
@ -4517,6 +4520,8 @@ union bpf_attr {
* long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
* Note: the user stack will only be populated if the *task* is
* the current task; all other tasks will return -EOPNOTSUPP.
* To achieve this, the helper needs *task*, which is a valid
* pointer to **struct task_struct**. To store the stacktrace, the
* bpf program provides *buf* with a nonnegative *size*.
@ -4528,6 +4533,7 @@ union bpf_attr {
*
* **BPF_F_USER_STACK**
* Collect a user space stack instead of a kernel stack.
* The *task* must be the current task.
* **BPF_F_USER_BUILD_ID**
* Collect buildid+offset instead of ips for user stack,
* only valid if **BPF_F_USER_STACK** is also specified.
@ -7151,40 +7157,31 @@ struct bpf_spin_lock {
};
struct bpf_timer {
__u64 :64;
__u64 :64;
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_dynptr {
__u64 :64;
__u64 :64;
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_list_head {
__u64 :64;
__u64 :64;
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_list_node {
__u64 :64;
__u64 :64;
__u64 :64;
__u64 __opaque[3];
} __attribute__((aligned(8)));
struct bpf_rb_root {
__u64 :64;
__u64 :64;
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_rb_node {
__u64 :64;
__u64 :64;
__u64 :64;
__u64 :64;
__u64 __opaque[4];
} __attribute__((aligned(8)));
struct bpf_refcount {
__u32 :32;
__u32 __opaque[1];
} __attribute__((aligned(4)));
struct bpf_sysctl {


@ -79,11 +79,14 @@
*/
#define LIBBPF_OPTS_RESET(NAME, ...) \
do { \
memset(&NAME, 0, sizeof(NAME)); \
NAME = (typeof(NAME)) { \
.sz = sizeof(NAME), \
__VA_ARGS__ \
}; \
typeof(NAME) ___##NAME = ({ \
memset(&___##NAME, 0, sizeof(NAME)); \
(typeof(NAME)) { \
.sz = sizeof(NAME), \
__VA_ARGS__ \
}; \
}); \
memcpy(&NAME, &___##NAME, sizeof(NAME)); \
} while (0)
#endif /* __LIBBPF_LIBBPF_COMMON_H */
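
A hedged usage sketch (bpf_test_run_opts is just one opts struct this works
with; prog_fd and pkt_v4 are assumed to exist in the caller). The point of the
rework is that the temporary is fully zeroed first, so compiler-inserted tail
padding can no longer leak stale bytes:

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
	);
	err = bpf_prog_test_run_opts(prog_fd, &topts);

	/* reuse the same variable with a different field set */
	LIBBPF_OPTS_RESET(topts,
		.repeat = 1000,
	);
	err = bpf_prog_test_run_opts(prog_fd, &topts);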


@ -45,9 +45,12 @@
#define format_parent_cgroup_path(buf, path) \
format_cgroup_path_pid(buf, path, getppid())
#define format_classid_path(buf) \
snprintf(buf, sizeof(buf), "%s%s", NETCLS_MOUNT_PATH, \
CGROUP_WORK_DIR)
#define format_classid_path_pid(buf, pid) \
snprintf(buf, sizeof(buf), "%s%s%d", NETCLS_MOUNT_PATH, \
CGROUP_WORK_DIR, pid)
#define format_classid_path(buf) \
format_classid_path_pid(buf, getpid())
static __thread bool cgroup_workdir_mounted;
@ -419,26 +422,23 @@ int create_and_get_cgroup(const char *relative_path)
}
/**
* get_cgroup_id() - Get cgroup id for a particular cgroup path
* @relative_path: The cgroup path, relative to the workdir, to join
* get_cgroup_id_from_path - Get cgroup id for a particular cgroup path
* @cgroup_workdir: The absolute cgroup path
*
* On success, it returns the cgroup id. On failure it returns 0,
* which is an invalid cgroup id.
* If there is a failure, it prints the error to stderr.
*/
unsigned long long get_cgroup_id(const char *relative_path)
unsigned long long get_cgroup_id_from_path(const char *cgroup_workdir)
{
int dirfd, err, flags, mount_id, fhsize;
union {
unsigned long long cgid;
unsigned char raw_bytes[8];
} id;
char cgroup_workdir[PATH_MAX + 1];
struct file_handle *fhp, *fhp2;
unsigned long long ret = 0;
format_cgroup_path(cgroup_workdir, relative_path);
dirfd = AT_FDCWD;
flags = 0;
fhsize = sizeof(*fhp);
@ -474,6 +474,14 @@ free_mem:
return ret;
}
unsigned long long get_cgroup_id(const char *relative_path)
{
char cgroup_workdir[PATH_MAX + 1];
format_cgroup_path(cgroup_workdir, relative_path);
return get_cgroup_id_from_path(cgroup_workdir);
}
int cgroup_setup_and_join(const char *path) {
int cg_fd;
@ -523,10 +531,20 @@ int setup_classid_environment(void)
return 1;
}
if (mount("net_cls", NETCLS_MOUNT_PATH, "cgroup", 0, "net_cls") &&
errno != EBUSY) {
log_err("mount cgroup net_cls");
return 1;
if (mount("net_cls", NETCLS_MOUNT_PATH, "cgroup", 0, "net_cls")) {
if (errno != EBUSY) {
log_err("mount cgroup net_cls");
return 1;
}
if (rmdir(NETCLS_MOUNT_PATH)) {
log_err("rmdir cgroup net_cls");
return 1;
}
if (umount(CGROUP_MOUNT_DFLT)) {
log_err("umount cgroup base");
return 1;
}
}
cleanup_classid_environment();
@ -541,15 +559,16 @@ int setup_classid_environment(void)
/**
* set_classid() - Set a cgroupv1 net_cls classid
* @id: the numeric classid
*
* Writes the passed classid into the cgroup work dir's net_cls.classid
* Writes the classid into the cgroup work dir's net_cls.classid
* file in order to later on trigger socket tagging.
*
* We leverage the current pid as the classid, ensuring unique identification.
*
* On success, it returns 0, otherwise on failure it returns 1. If there
* is a failure, it prints the error to stderr.
*/
int set_classid(unsigned int id)
int set_classid(void)
{
char cgroup_workdir[PATH_MAX - 42];
char cgroup_classid_path[PATH_MAX + 1];
@ -565,7 +584,7 @@ int set_classid(unsigned int id)
return 1;
}
if (dprintf(fd, "%u\n", id) < 0) {
if (dprintf(fd, "%u\n", getpid()) < 0) {
log_err("Setting cgroup classid");
rc = 1;
}
@ -607,3 +626,66 @@ void cleanup_classid_environment(void)
join_cgroup_from_top(NETCLS_MOUNT_PATH);
nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT);
}
/**
* get_classid_cgroup_id - Get the cgroup id of a net_cls cgroup
*/
unsigned long long get_classid_cgroup_id(void)
{
char cgroup_workdir[PATH_MAX + 1];
format_classid_path(cgroup_workdir);
return get_cgroup_id_from_path(cgroup_workdir);
}
/**
* get_cgroup1_hierarchy_id - Retrieves the ID of a cgroup1 hierarchy from the cgroup1 subsys name.
* @subsys_name: The cgroup1 subsys name, which can be retrieved from /proc/self/cgroup. It can be
* a named cgroup like "name=systemd", a controller name like "net_cls", or multiple controllers like
* "net_cls,net_prio".
*/
int get_cgroup1_hierarchy_id(const char *subsys_name)
{
char *c, *c2, *c3, *c4;
bool found = false;
char line[1024];
FILE *file;
int i, id;
if (!subsys_name)
return -1;
file = fopen("/proc/self/cgroup", "r");
if (!file) {
log_err("fopen /proc/self/cgroup");
return -1;
}
while (fgets(line, 1024, file)) {
i = 0;
for (c = strtok_r(line, ":", &c2); c && i < 2; c = strtok_r(NULL, ":", &c2)) {
if (i == 0) {
id = strtol(c, NULL, 10);
} else if (i == 1) {
if (!strcmp(c, subsys_name)) {
found = true;
break;
}
/* Multiple subsystems may share one single mount point */
for (c3 = strtok_r(c, ",", &c4); c3;
c3 = strtok_r(NULL, ",", &c4)) {
if (!strcmp(c3, subsys_name)) {
found = true;
break;
}
}
}
i++;
}
if (found)
break;
}
fclose(file);
return found ? id : -1;
}


@ -20,6 +20,7 @@ int get_root_cgroup(void);
int create_and_get_cgroup(const char *relative_path);
void remove_cgroup(const char *relative_path);
unsigned long long get_cgroup_id(const char *relative_path);
int get_cgroup1_hierarchy_id(const char *subsys_name);
int join_cgroup(const char *relative_path);
int join_root_cgroup(void);
@ -29,8 +30,9 @@ int setup_cgroup_environment(void);
void cleanup_cgroup_environment(void);
/* cgroupv1 related */
int set_classid(unsigned int id);
int set_classid(void);
int join_classid(void);
unsigned long long get_classid_cgroup_id(void);
int setup_classid_environment(void);
void cleanup_classid_environment(void);


@ -1,4 +1,3 @@
CONFIG_9P_FS=y
CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
CONFIG_ARM_SMMU_V3=y
@ -37,6 +36,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_DEBUG_INFO_REDUCED=n
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_LOCKDEP=y
CONFIG_DEBUG_NOTIFIERS=y
@ -46,7 +46,6 @@ CONFIG_DEBUG_SG=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DEVTMPFS=y
CONFIG_DRM_VIRTIO_GPU=y
CONFIG_DRM=y
CONFIG_DUMMY=y
CONFIG_EXPERT=y
@ -67,7 +66,6 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HEADERS_INSTALL=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_HUGETLBFS=y
CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_HW_RANDOM=y
CONFIG_HZ_100=y
CONFIG_IDLE_PAGE_TRACKING=y
@ -99,8 +97,6 @@ CONFIG_MEMCG=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_NAMESPACES=y
CONFIG_NET_9P_VIRTIO=y
CONFIG_NET_9P=y
CONFIG_NET_ACT_BPF=y
CONFIG_NET_ACT_GACT=y
CONFIG_NETDEVICES=y
@ -140,7 +136,6 @@ CONFIG_SCHED_TRACER=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_VIRTIO=y
CONFIG_SCSI=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
@ -167,16 +162,6 @@ CONFIG_UPROBES=y
CONFIG_USELIB=y
CONFIG_USER_NS=y
CONFIG_VETH=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_BLK=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_VIRTIO_FS=y
CONFIG_VIRTIO_INPUT=y
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_NET=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_VSOCKETS_COMMON=y
CONFIG_VLAN_8021Q=y
CONFIG_VSOCKETS=y
CONFIG_VSOCKETS_LOOPBACK=y


@ -1,4 +1,3 @@
CONFIG_9P_FS=y
CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
CONFIG_AUDIT=y
CONFIG_BLK_CGROUP=y
@ -84,8 +83,6 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_NAMESPACES=y
CONFIG_NET=y
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
CONFIG_NET_ACT_BPF=y
CONFIG_NET_ACT_GACT=y
CONFIG_NET_KEY=y
@ -114,7 +111,6 @@ CONFIG_SAMPLE_SECCOMP=y
CONFIG_SAMPLES=y
CONFIG_SCHED_TRACER=y
CONFIG_SCSI=y
CONFIG_SCSI_VIRTIO=y
CONFIG_SECURITY_NETWORK=y
CONFIG_STACK_TRACER=y
CONFIG_STATIC_KEYS_SELFTEST=y
@ -136,11 +132,6 @@ CONFIG_UPROBES=y
CONFIG_USELIB=y
CONFIG_USER_NS=y
CONFIG_VETH=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_BLK=y
CONFIG_VIRTIO_NET=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_VSOCKETS_COMMON=y
CONFIG_VLAN_8021Q=y
CONFIG_VSOCKETS=y
CONFIG_VSOCKETS_LOOPBACK=y


@ -0,0 +1,12 @@
CONFIG_9P_FS=y
CONFIG_9P_FS_POSIX_ACL=y
CONFIG_9P_FS_SECURITY=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_BLK=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_VIRTIO_NET=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_VSOCKETS_COMMON=y


@ -1,6 +1,3 @@
CONFIG_9P_FS=y
CONFIG_9P_FS_POSIX_ACL=y
CONFIG_9P_FS_SECURITY=y
CONFIG_AGP=y
CONFIG_AGP_AMD64=y
CONFIG_AGP_INTEL=y
@ -45,7 +42,6 @@ CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_CPUSETS=y
CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_BLAKE2B=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_CRYPTO_SEQIV=y
CONFIG_CRYPTO_XXHASH=y
CONFIG_DCB=y
@ -145,8 +141,6 @@ CONFIG_MEMORY_FAILURE=y
CONFIG_MINIX_SUBPARTITION=y
CONFIG_NAMESPACES=y
CONFIG_NET=y
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
CONFIG_NET_ACT_BPF=y
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_EMATCH=y
@ -228,12 +222,6 @@ CONFIG_USER_NS=y
CONFIG_VALIDATE_FS_PARSER=y
CONFIG_VETH=y
CONFIG_VIRT_DRIVERS=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_BLK=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_VIRTIO_NET=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_VSOCKETS_COMMON=y
CONFIG_VLAN_8021Q=y
CONFIG_VSOCKETS=y
CONFIG_VSOCKETS_LOOPBACK=y


@ -131,10 +131,17 @@ static bool is_lru(__u32 map_type)
map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}
static bool is_percpu(__u32 map_type)
{
return map_type == BPF_MAP_TYPE_PERCPU_HASH ||
map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}
struct upsert_opts {
__u32 map_type;
int map_fd;
__u32 n;
bool retry_for_nomem;
};
static int create_small_hash(void)
@ -148,19 +155,38 @@ static int create_small_hash(void)
return map_fd;
}
static bool retry_for_nomem_fn(int err)
{
return err == ENOMEM;
}
static void *patch_map_thread(void *arg)
{
/* 8KB is enough for 1024 CPUs. And it is shared between N_THREADS. */
static __u8 blob[8 << 10];
struct upsert_opts *opts = arg;
void *val_ptr;
int val;
int ret;
int i;
for (i = 0; i < opts->n; i++) {
if (opts->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
if (opts->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
val = create_small_hash();
else
val_ptr = &val;
} else if (is_percpu(opts->map_type)) {
val_ptr = blob;
} else {
val = rand();
ret = bpf_map_update_elem(opts->map_fd, &i, &val, 0);
val_ptr = &val;
}
/* 2 seconds may be enough ? */
if (opts->retry_for_nomem)
ret = map_update_retriable(opts->map_fd, &i, val_ptr, 0,
40, retry_for_nomem_fn);
else
ret = bpf_map_update_elem(opts->map_fd, &i, val_ptr, 0);
CHECK(ret < 0, "bpf_map_update_elem", "key=%d error: %s\n", i, strerror(errno));
if (opts->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
@ -281,6 +307,13 @@ static void __test(int map_fd)
else
opts.n /= 2;
/* per-cpu bpf memory allocator may not be able to allocate per-cpu
* pointer successfully and it can not refill free llist timely, and
* bpf_map_update_elem() will return -ENOMEM. so just retry to mitigate
* the problem temporarily.
*/
opts.retry_for_nomem = is_percpu(opts.map_type) && (info.map_flags & BPF_F_NO_PREALLOC);
/*
* Upsert keys [0, n) under some competition: with random values from
* N_THREADS threads. Check values, then delete all elements and check


@ -40,7 +40,7 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{0, "R1", "ctx(off=0,imm=0)"},
{0, "R1", "ctx()"},
{0, "R10", "fp0"},
{0, "R3_w", "2"},
{1, "R3_w", "4"},
@ -68,7 +68,7 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{0, "R1", "ctx(off=0,imm=0)"},
{0, "R1", "ctx()"},
{0, "R10", "fp0"},
{0, "R3_w", "1"},
{1, "R3_w", "2"},
@ -97,7 +97,7 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{0, "R1", "ctx(off=0,imm=0)"},
{0, "R1", "ctx()"},
{0, "R10", "fp0"},
{0, "R3_w", "4"},
{1, "R3_w", "8"},
@ -119,7 +119,7 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{0, "R1", "ctx(off=0,imm=0)"},
{0, "R1", "ctx()"},
{0, "R10", "fp0"},
{0, "R3_w", "7"},
{1, "R3_w", "7"},
@ -162,13 +162,13 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{6, "R0_w", "pkt(off=8,r=8,imm=0)"},
{6, "R0_w", "pkt(off=8,r=8)"},
{6, "R3_w", "var_off=(0x0; 0xff)"},
{7, "R3_w", "var_off=(0x0; 0x1fe)"},
{8, "R3_w", "var_off=(0x0; 0x3fc)"},
{9, "R3_w", "var_off=(0x0; 0x7f8)"},
{10, "R3_w", "var_off=(0x0; 0xff0)"},
{12, "R3_w", "pkt_end(off=0,imm=0)"},
{12, "R3_w", "pkt_end()"},
{17, "R4_w", "var_off=(0x0; 0xff)"},
{18, "R4_w", "var_off=(0x0; 0x1fe0)"},
{19, "R4_w", "var_off=(0x0; 0xff0)"},
@ -235,11 +235,11 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{2, "R5_w", "pkt(off=0,r=0,imm=0)"},
{4, "R5_w", "pkt(off=14,r=0,imm=0)"},
{5, "R4_w", "pkt(off=14,r=0,imm=0)"},
{9, "R2", "pkt(off=0,r=18,imm=0)"},
{10, "R5", "pkt(off=14,r=18,imm=0)"},
{2, "R5_w", "pkt(r=0)"},
{4, "R5_w", "pkt(off=14,r=0)"},
{5, "R4_w", "pkt(off=14,r=0)"},
{9, "R2", "pkt(r=18)"},
{10, "R5", "pkt(off=14,r=18)"},
{10, "R4_w", "var_off=(0x0; 0xff)"},
{13, "R4_w", "var_off=(0x0; 0xffff)"},
{14, "R4_w", "var_off=(0x0; 0xffff)"},
@ -299,7 +299,7 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
{6, "R2_w", "pkt(off=0,r=8,imm=0)"},
{6, "R2_w", "pkt(r=8)"},
{7, "R6_w", "var_off=(0x0; 0x3fc)"},
/* Offset is added to packet pointer R5, resulting in
* known fixed offset, and variable offset from R6.
@ -337,7 +337,7 @@ static struct bpf_align_test tests[] = {
/* Constant offset is added to R5 packet pointer,
* resulting in reg->off value of 14.
*/
{26, "R5_w", "pkt(off=14,r=8,"},
{26, "R5_w", "pkt(off=14,r=8)"},
/* Variable offset is added to R5, resulting in a
* variable offset of (4n). See comment for insn #18
* for R4 = R5 trick.
@ -397,7 +397,7 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
{6, "R2_w", "pkt(off=0,r=8,imm=0)"},
{6, "R2_w", "pkt(r=8)"},
{7, "R6_w", "var_off=(0x0; 0x3fc)"},
/* Adding 14 makes R6 be (4n+2) */
{8, "R6_w", "var_off=(0x2; 0x7fc)"},
@ -459,7 +459,7 @@ static struct bpf_align_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.matches = {
{3, "R5_w", "pkt_end(off=0,imm=0)"},
{3, "R5_w", "pkt_end()"},
/* (ptr - ptr) << 2 == unknown, (4n) */
{5, "R5_w", "var_off=(0x0; 0xfffffffffffffffc)"},
/* (4n) + 14 == (4n+2). We blow our bounds, because
@ -513,7 +513,7 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
{6, "R2_w", "pkt(off=0,r=8,imm=0)"},
{6, "R2_w", "pkt(r=8)"},
{8, "R6_w", "var_off=(0x0; 0x3fc)"},
/* Adding 14 makes R6 be (4n+2) */
{9, "R6_w", "var_off=(0x2; 0x7fc)"},
@ -566,7 +566,7 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
{6, "R2_w", "pkt(off=0,r=8,imm=0)"},
{6, "R2_w", "pkt(r=8)"},
{9, "R6_w", "var_off=(0x0; 0x3c)"},
/* Adding 14 makes R6 be (4n+2) */
{10, "R6_w", "var_off=(0x2; 0x7c)"},
@ -659,14 +659,14 @@ static int do_test_single(struct bpf_align_test *test)
/* Check the next line as well in case the previous line
* did not have a corresponding bpf insn. Example:
* func#0 @0
* 0: R1=ctx(off=0,imm=0) R10=fp0
* 0: R1=ctx() R10=fp0
* 0: (b7) r3 = 2 ; R3_w=2
*
* Sometimes it's actually two lines below, e.g. when
* searching for "6: R3_w=scalar(umax=255,var_off=(0x0; 0xff))":
* from 4 to 6: R0_w=pkt(off=8,r=8,imm=0) R1=ctx(off=0,imm=0) R2_w=pkt(off=0,r=8,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0
* 6: R0_w=pkt(off=8,r=8,imm=0) R1=ctx(off=0,imm=0) R2_w=pkt(off=0,r=8,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0
* 6: (71) r3 = *(u8 *)(r2 +0) ; R2_w=pkt(off=0,r=8,imm=0) R3_w=scalar(umax=255,var_off=(0x0; 0xff))
* from 4 to 6: R0_w=pkt(off=8,r=8) R1=ctx() R2_w=pkt(r=8) R3_w=pkt_end() R10=fp0
* 6: R0_w=pkt(off=8,r=8) R1=ctx() R2_w=pkt(r=8) R3_w=pkt_end() R10=fp0
* 6: (71) r3 = *(u8 *)(r2 +0) ; R2_w=pkt(r=8) R3_w=scalar(umax=255,var_off=(0x0; 0xff))
*/
while (!(p = strstr(line_ptr, m.reg)) || !strstr(p, m.match)) {
cur_line = -1;
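
The comment above explains that the expected register state can show up one or even two log lines after the instruction being checked, so the harness keeps scanning forward through the verifier log. Below is a rough, standalone sketch of that kind of scan, assuming a newline-separated log buffer and a (reg, match) pair like the entries in the .matches arrays above; it is illustrative only, not the actual harness loop.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Return true if some single line of 'log' contains the register name
 * followed, later on that same line, by the expected state substring.
 */
bool log_has_reg_state(const char *log, const char *reg, const char *match)
{
	const char *line = log;

	while (line && *line) {
		const char *end = strchr(line, '\n');
		size_t len = end ? (size_t)(end - line) : strlen(line);
		const char *p = strstr(line, reg);
		const char *m = p ? strstr(p, match) : NULL;

		/* both substrings must land within the current line */
		if (p && m && p < line + len && m < line + len)
			return true;
		line = end ? end + 1 : NULL;
	}
	return false;
}

int main(void)
{
	const char *log =
		"from 4 to 6: R0_w=pkt(off=8,r=8) R1=ctx() R2_w=pkt(r=8) R3_w=pkt_end() R10=fp0\n"
		"6: (71) r3 = *(u8 *)(r2 +0) ; R2_w=pkt(r=8) R3_w=scalar(umax=255,var_off=(0x0; 0xff))\n";

	printf("%d\n", log_has_reg_state(log, "R3_w", "var_off=(0x0; 0xff)"));
	return 0;
}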


@ -9,8 +9,6 @@
#include "cap_helpers.h"
#include "bind_perm.skel.h"
static int duration;
static int create_netns(void)
{
if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
@ -27,7 +25,7 @@ void try_bind(int family, int port, int expected_errno)
int fd = -1;
fd = socket(family, SOCK_STREAM, 0);
if (CHECK(fd < 0, "fd", "errno %d", errno))
if (!ASSERT_GE(fd, 0, "socket"))
goto close_socket;
if (family == AF_INET) {
@ -60,7 +58,7 @@ void test_bind_perm(void)
return;
cgroup_fd = test__join_cgroup("/bind_perm");
if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno))
if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup"))
return;
skel = bind_perm__open_and_load();
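
Nearly all of the selftest churn in this series follows the mechanical pattern visible in the hunks above: CHECK() evaluates to true on failure and takes a hand-written message, while the ASSERT_*() helpers encode the comparison, print their own diagnostics, and evaluate to true on success, so each call site inverts its condition. A minimal, self-contained sketch of the idea; the macro below is a simplified stand-in (using a GNU statement expression, like the real helpers), not the actual test_progs.h definition, which also records the failure in per-test state.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

/* Simplified stand-in for the selftest helper: true on success, logs on failure. */
#define ASSERT_GE(actual, expected, name) ({				\
	long long __a = (actual), __e = (expected);			\
	bool __ok = __a >= __e;						\
	if (!__ok)							\
		fprintf(stderr, "%s: %lld < %lld (errno %d)\n",		\
			(name), __a, __e, errno);			\
	__ok;								\
})

/* Old style:  if (CHECK(fd < 0, "fd", "errno %d", errno))  -- true on failure
 * New style:  if (!ASSERT_GE(fd, 0, "socket"))             -- true on success
 */
int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (!ASSERT_GE(fd, 0, "socket"))
		return 1;
	close(fd);
	return 0;
}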


@ -34,8 +34,6 @@
#include "bpf_iter_ksym.skel.h"
#include "bpf_iter_sockmap.skel.h"
static int duration;
static void test_btf_id_or_null(void)
{
struct bpf_iter_test_kern3 *skel;
@ -64,7 +62,7 @@ static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_
/* not check contents, but ensure read() ends without error */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));
ASSERT_GE(len, 0, "read");
close(iter_fd);
@ -334,6 +332,8 @@ static void test_task_stack(void)
do_dummy_read(skel->progs.dump_task_stack);
do_dummy_read(skel->progs.get_task_user_stacks);
ASSERT_EQ(skel->bss->num_user_stacks, 1, "num_user_stacks");
bpf_iter_task_stack__destroy(skel);
}
@ -413,7 +413,7 @@ static int do_btf_read(struct bpf_iter_task_btf *skel)
goto free_link;
}
if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno)))
if (!ASSERT_GE(err, 0, "read"))
goto free_link;
ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)",
@ -526,11 +526,11 @@ static int do_read_with_fd(int iter_fd, const char *expected,
start = 0;
while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
start += len;
if (CHECK(start >= 16, "read", "read len %d\n", len))
if (!ASSERT_LT(start, 16, "read"))
return -1;
read_buf_len = read_one_char ? 1 : 16 - start;
}
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
if (!ASSERT_GE(len, 0, "read"))
return -1;
if (!ASSERT_STREQ(buf, expected, "read"))
@ -571,8 +571,7 @@ static int do_read(const char *path, const char *expected)
int err, iter_fd;
iter_fd = open(path, O_RDONLY);
if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
path, strerror(errno)))
if (!ASSERT_GE(iter_fd, 0, "open"))
return -1;
err = do_read_with_fd(iter_fd, expected, false);
@ -600,7 +599,7 @@ static void test_file_iter(void)
unlink(path);
err = bpf_link__pin(link, path);
if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
if (!ASSERT_OK(err, "pin_iter"))
goto free_link;
err = do_read(path, "abcd");
@ -651,12 +650,10 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
* overflow and needs restart.
*/
map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
if (CHECK(map1_fd < 0, "bpf_map_create",
"map_creation failed: %s\n", strerror(errno)))
if (!ASSERT_GE(map1_fd, 0, "bpf_map_create"))
goto out;
map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
if (CHECK(map2_fd < 0, "bpf_map_create",
"map_creation failed: %s\n", strerror(errno)))
if (!ASSERT_GE(map2_fd, 0, "bpf_map_create"))
goto free_map1;
/* bpf_seq_printf kernel buffer is 8 pages, so one map
@ -685,14 +682,12 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
/* setup filtering map_id in bpf program */
map_info_len = sizeof(map_info);
err = bpf_map_get_info_by_fd(map1_fd, &map_info, &map_info_len);
if (CHECK(err, "get_map_info", "get map info failed: %s\n",
strerror(errno)))
if (!ASSERT_OK(err, "get_map_info"))
goto free_map2;
skel->bss->map1_id = map_info.id;
err = bpf_map_get_info_by_fd(map2_fd, &map_info, &map_info_len);
if (CHECK(err, "get_map_info", "get map info failed: %s\n",
strerror(errno)))
if (!ASSERT_OK(err, "get_map_info"))
goto free_map2;
skel->bss->map2_id = map_info.id;
@ -705,7 +700,7 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
goto free_link;
buf = malloc(expected_read_len);
if (!buf)
if (!ASSERT_OK_PTR(buf, "malloc"))
goto close_iter;
/* do read */
@ -714,16 +709,14 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
while ((len = read(iter_fd, buf, expected_read_len)) > 0)
total_read_len += len;
CHECK(len != -1 || errno != E2BIG, "read",
"expected ret -1, errno E2BIG, but get ret %d, error %s\n",
len, strerror(errno));
ASSERT_EQ(len, -1, "read");
ASSERT_EQ(errno, E2BIG, "read");
goto free_buf;
} else if (!ret1) {
while ((len = read(iter_fd, buf, expected_read_len)) > 0)
total_read_len += len;
if (CHECK(len < 0, "read", "read failed: %s\n",
strerror(errno)))
if (!ASSERT_GE(len, 0, "read"))
goto free_buf;
} else {
do {
@ -732,8 +725,7 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
total_read_len += len;
} while (len > 0 || len == -EAGAIN);
if (CHECK(len < 0, "read", "read failed: %s\n",
strerror(errno)))
if (!ASSERT_GE(len, 0, "read"))
goto free_buf;
}
@ -836,7 +828,7 @@ static void test_bpf_hash_map(void)
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
@ -878,6 +870,8 @@ static void test_bpf_percpu_hash_map(void)
skel->rodata->num_cpus = bpf_num_possible_cpus();
val = malloc(8 * bpf_num_possible_cpus());
if (!ASSERT_OK_PTR(val, "malloc"))
goto out;
err = bpf_iter_bpf_percpu_hash_map__load(skel);
if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__load"))
@ -917,7 +911,7 @@ static void test_bpf_percpu_hash_map(void)
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
@ -983,17 +977,14 @@ static void test_bpf_array_map(void)
start = 0;
while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
start += len;
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
res_first_key = *(__u32 *)buf;
res_first_val = *(__u64 *)(buf + sizeof(__u32));
if (CHECK(res_first_key != 0 || res_first_val != first_val,
"bpf_seq_write",
"seq_write failure: first key %u vs expected 0, "
" first value %llu vs expected %llu\n",
res_first_key, res_first_val, first_val))
if (!ASSERT_EQ(res_first_key, 0, "bpf_seq_write") ||
!ASSERT_EQ(res_first_val, first_val, "bpf_seq_write"))
goto close_iter;
if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
@ -1057,6 +1048,8 @@ static void test_bpf_percpu_array_map(void)
skel->rodata->num_cpus = bpf_num_possible_cpus();
val = malloc(8 * bpf_num_possible_cpus());
if (!ASSERT_OK_PTR(val, "malloc"))
goto out;
err = bpf_iter_bpf_percpu_array_map__load(skel);
if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__load"))
@ -1092,7 +1085,7 @@ static void test_bpf_percpu_array_map(void)
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
@ -1131,6 +1124,7 @@ static void test_bpf_sk_storage_delete(void)
sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
if (!ASSERT_GE(sock_fd, 0, "socket"))
goto out;
err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
if (!ASSERT_OK(err, "map_update"))
goto out;
@ -1151,14 +1145,19 @@ static void test_bpf_sk_storage_delete(void)
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
"map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
goto close_iter;
/* Note: the following assertions ensure the value was deleted. They do so
* by asserting that bpf_map_lookup_elem() has failed, which might seem
* counterintuitive at first.
*/
ASSERT_ERR(err, "bpf_map_lookup_elem");
ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem");
close_iter:
close(iter_fd);
@ -1203,17 +1202,15 @@ static void test_bpf_sk_storage_get(void)
do_dummy_read(skel->progs.fill_socket_owner);
err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
"map value wasn't set correctly (expected %d, got %d, err=%d)\n",
getpid(), val, err))
if (!ASSERT_OK(err, "bpf_map_lookup_elem") ||
!ASSERT_EQ(val, getpid(), "bpf_map_lookup_elem"))
goto close_socket;
do_dummy_read(skel->progs.negate_socket_local_storage);
err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
"map value wasn't set correctly (expected %d, got %d, err=%d)\n",
-getpid(), val, err);
ASSERT_OK(err, "bpf_map_lookup_elem");
ASSERT_EQ(val, -getpid(), "bpf_map_lookup_elem");
close_socket:
close(sock_fd);
@ -1290,7 +1287,7 @@ static void test_bpf_sk_storage_map(void)
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
if (!ASSERT_GE(len, 0, "read"))
goto close_iter;
/* test results */
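
The many read loops patched in this file all sit inside the same open-coded flow: attach the iterator program, create an iterator fd from the resulting link, read until EOF, and assert only that the read did not fail. A hedged, self-contained sketch of that flow using public libbpf calls; the function name is a placeholder and the program argument would come from a generated skeleton, neither is part of this series.

#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Placeholder helper: any bpf_iter program from a generated skeleton would do. */
int dummy_read_iter(struct bpf_program *prog)
{
	struct bpf_link *link;
	char buf[16];
	int iter_fd, len, err = 0;

	link = bpf_program__attach_iter(prog, NULL);
	if (!link)
		return -1;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0) {
		err = -1;
		goto destroy_link;
	}

	/* don't check contents, just make sure read() ends without error */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (len < 0)
		err = -1;

	close(iter_fd);
destroy_link:
	bpf_link__destroy(link);
	return err;
}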


@ -25,7 +25,7 @@ void serial_test_bpf_obj_id(void)
*/
__u32 map_ids[nr_iters + 1];
char jited_insns[128], xlated_insns[128], zeros[128], tp_name[128];
__u32 i, next_id, info_len, nr_id_found, duration = 0;
__u32 i, next_id, info_len, nr_id_found;
struct timespec real_time_ts, boot_time_ts;
int err = 0;
__u64 array_value;
@ -33,16 +33,16 @@ void serial_test_bpf_obj_id(void)
time_t now, load_time;
err = bpf_prog_get_fd_by_id(0);
CHECK(err >= 0 || errno != ENOENT,
"get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
ASSERT_LT(err, 0, "bpf_prog_get_fd_by_id");
ASSERT_EQ(errno, ENOENT, "bpf_prog_get_fd_by_id");
err = bpf_map_get_fd_by_id(0);
CHECK(err >= 0 || errno != ENOENT,
"get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
ASSERT_LT(err, 0, "bpf_map_get_fd_by_id");
ASSERT_EQ(errno, ENOENT, "bpf_map_get_fd_by_id");
err = bpf_link_get_fd_by_id(0);
CHECK(err >= 0 || errno != ENOENT,
"get-fd-by-notexist-link-id", "err %d errno %d\n", err, errno);
ASSERT_LT(err, 0, "bpf_map_get_fd_by_id");
ASSERT_EQ(errno, ENOENT, "bpf_map_get_fd_by_id");
/* Check bpf_map_get_info_by_fd() */
bzero(zeros, sizeof(zeros));
@ -53,25 +53,26 @@ void serial_test_bpf_obj_id(void)
/* test_obj_id.o is a dumb prog. It should never fail
* to load.
*/
if (CHECK_FAIL(err))
if (!ASSERT_OK(err, "bpf_prog_test_load"))
continue;
/* Insert a magic value to the map */
map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
if (CHECK_FAIL(map_fds[i] < 0))
goto done;
err = bpf_map_update_elem(map_fds[i], &array_key,
&array_magic_value, 0);
if (CHECK_FAIL(err))
if (!ASSERT_GE(map_fds[i], 0, "bpf_find_map"))
goto done;
prog = bpf_object__find_program_by_name(objs[i],
"test_obj_id");
if (CHECK_FAIL(!prog))
err = bpf_map_update_elem(map_fds[i], &array_key,
&array_magic_value, 0);
if (!ASSERT_OK(err, "bpf_map_update_elem"))
goto done;
prog = bpf_object__find_program_by_name(objs[i], "test_obj_id");
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto done;
links[i] = bpf_program__attach(prog);
err = libbpf_get_error(links[i]);
if (CHECK(err, "prog_attach", "prog #%d, err %d\n", i, err)) {
if (!ASSERT_OK(err, "bpf_program__attach")) {
links[i] = NULL;
goto done;
}
@ -81,24 +82,14 @@ void serial_test_bpf_obj_id(void)
bzero(&map_infos[i], info_len);
err = bpf_map_get_info_by_fd(map_fds[i], &map_infos[i],
&info_len);
if (CHECK(err ||
map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
map_infos[i].key_size != sizeof(__u32) ||
map_infos[i].value_size != sizeof(__u64) ||
map_infos[i].max_entries != 1 ||
map_infos[i].map_flags != 0 ||
info_len != sizeof(struct bpf_map_info) ||
strcmp((char *)map_infos[i].name, expected_map_name),
"get-map-info(fd)",
"err %d errno %d type %d(%d) info_len %u(%zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
err, errno,
map_infos[i].type, BPF_MAP_TYPE_ARRAY,
info_len, sizeof(struct bpf_map_info),
map_infos[i].key_size,
map_infos[i].value_size,
map_infos[i].max_entries,
map_infos[i].map_flags,
map_infos[i].name, expected_map_name))
if (!ASSERT_OK(err, "bpf_map_get_info_by_fd") ||
!ASSERT_EQ(map_infos[i].type, BPF_MAP_TYPE_ARRAY, "map_type") ||
!ASSERT_EQ(map_infos[i].key_size, sizeof(__u32), "key_size") ||
!ASSERT_EQ(map_infos[i].value_size, sizeof(__u64), "value_size") ||
!ASSERT_EQ(map_infos[i].max_entries, 1, "max_entries") ||
!ASSERT_EQ(map_infos[i].map_flags, 0, "map_flags") ||
!ASSERT_EQ(info_len, sizeof(struct bpf_map_info), "map_info_len") ||
!ASSERT_STREQ((char *)map_infos[i].name, expected_map_name, "map_name"))
goto done;
/* Check getting prog info */
@ -112,48 +103,34 @@ void serial_test_bpf_obj_id(void)
prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
prog_infos[i].nr_map_ids = 2;
err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
if (CHECK_FAIL(err))
if (!ASSERT_OK(err, "clock_gettime"))
goto done;
err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
if (CHECK_FAIL(err))
if (!ASSERT_OK(err, "clock_gettime"))
goto done;
err = bpf_prog_get_info_by_fd(prog_fds[i], &prog_infos[i],
&info_len);
load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
+ (prog_infos[i].load_time / nsec_per_sec);
if (CHECK(err ||
prog_infos[i].type != BPF_PROG_TYPE_RAW_TRACEPOINT ||
info_len != sizeof(struct bpf_prog_info) ||
(env.jit_enabled && !prog_infos[i].jited_prog_len) ||
(env.jit_enabled &&
!memcmp(jited_insns, zeros, sizeof(zeros))) ||
!prog_infos[i].xlated_prog_len ||
!memcmp(xlated_insns, zeros, sizeof(zeros)) ||
load_time < now - 60 || load_time > now + 60 ||
prog_infos[i].created_by_uid != my_uid ||
prog_infos[i].nr_map_ids != 1 ||
*(int *)(long)prog_infos[i].map_ids != map_infos[i].id ||
strcmp((char *)prog_infos[i].name, expected_prog_name),
"get-prog-info(fd)",
"err %d errno %d i %d type %d(%d) info_len %u(%zu) "
"jit_enabled %d jited_prog_len %u xlated_prog_len %u "
"jited_prog %d xlated_prog %d load_time %lu(%lu) "
"uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) "
"name %s(%s)\n",
err, errno, i,
prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
info_len, sizeof(struct bpf_prog_info),
env.jit_enabled,
prog_infos[i].jited_prog_len,
prog_infos[i].xlated_prog_len,
!!memcmp(jited_insns, zeros, sizeof(zeros)),
!!memcmp(xlated_insns, zeros, sizeof(zeros)),
load_time, now,
prog_infos[i].created_by_uid, my_uid,
prog_infos[i].nr_map_ids, 1,
*(int *)(long)prog_infos[i].map_ids, map_infos[i].id,
prog_infos[i].name, expected_prog_name))
if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd") ||
!ASSERT_EQ(prog_infos[i].type, BPF_PROG_TYPE_RAW_TRACEPOINT, "prog_type") ||
!ASSERT_EQ(info_len, sizeof(struct bpf_prog_info), "prog_info_len") ||
!ASSERT_FALSE((env.jit_enabled && !prog_infos[i].jited_prog_len), "jited_prog_len") ||
!ASSERT_FALSE((env.jit_enabled && !memcmp(jited_insns, zeros, sizeof(zeros))),
"jited_insns") ||
!ASSERT_NEQ(prog_infos[i].xlated_prog_len, 0, "xlated_prog_len") ||
!ASSERT_NEQ(memcmp(xlated_insns, zeros, sizeof(zeros)), 0, "xlated_insns") ||
!ASSERT_GE(load_time, (now - 60), "load_time") ||
!ASSERT_LE(load_time, (now + 60), "load_time") ||
!ASSERT_EQ(prog_infos[i].created_by_uid, my_uid, "created_by_uid") ||
!ASSERT_EQ(prog_infos[i].nr_map_ids, 1, "nr_map_ids") ||
!ASSERT_EQ(*(int *)(long)prog_infos[i].map_ids, map_infos[i].id, "map_ids") ||
!ASSERT_STREQ((char *)prog_infos[i].name, expected_prog_name, "prog_name"))
goto done;
/* Check getting link info */
@ -163,25 +140,12 @@ void serial_test_bpf_obj_id(void)
link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name);
err = bpf_link_get_info_by_fd(bpf_link__fd(links[i]),
&link_infos[i], &info_len);
if (CHECK(err ||
link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT ||
link_infos[i].prog_id != prog_infos[i].id ||
link_infos[i].raw_tracepoint.tp_name != ptr_to_u64(&tp_name) ||
strcmp(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
"sys_enter") ||
info_len != sizeof(struct bpf_link_info),
"get-link-info(fd)",
"err %d errno %d info_len %u(%zu) type %d(%d) id %d "
"prog_id %d (%d) tp_name %s(%s)\n",
err, errno,
info_len, sizeof(struct bpf_link_info),
link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT,
link_infos[i].id,
link_infos[i].prog_id, prog_infos[i].id,
(const char *)u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
"sys_enter"))
if (!ASSERT_OK(err, "bpf_link_get_info_by_fd") ||
!ASSERT_EQ(link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT, "link_type") ||
!ASSERT_EQ(link_infos[i].prog_id, prog_infos[i].id, "prog_id") ||
!ASSERT_EQ(link_infos[i].raw_tracepoint.tp_name, ptr_to_u64(&tp_name), "&tp_name") ||
!ASSERT_STREQ(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name), "sys_enter", "tp_name"))
goto done;
}
/* Check bpf_prog_get_next_id() */
@ -190,7 +154,7 @@ void serial_test_bpf_obj_id(void)
while (!bpf_prog_get_next_id(next_id, &next_id)) {
struct bpf_prog_info prog_info = {};
__u32 saved_map_id;
int prog_fd;
int prog_fd, cmp_res;
info_len = sizeof(prog_info);
@ -198,9 +162,7 @@ void serial_test_bpf_obj_id(void)
if (prog_fd < 0 && errno == ENOENT)
/* The bpf_prog is in the dead row */
continue;
if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
"prog_fd %d next_id %d errno %d\n",
prog_fd, next_id, errno))
if (!ASSERT_GE(prog_fd, 0, "bpf_prog_get_fd_by_id"))
break;
for (i = 0; i < nr_iters; i++)
@ -218,9 +180,8 @@ void serial_test_bpf_obj_id(void)
*/
prog_info.nr_map_ids = 1;
err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len);
if (CHECK(!err || errno != EFAULT,
"get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
err, errno, EFAULT))
if (!ASSERT_ERR(err, "bpf_prog_get_info_by_fd") ||
!ASSERT_EQ(errno, EFAULT, "bpf_prog_get_info_by_fd"))
break;
bzero(&prog_info, sizeof(prog_info));
info_len = sizeof(prog_info);
@ -231,27 +192,22 @@ void serial_test_bpf_obj_id(void)
err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len);
prog_infos[i].jited_prog_insns = 0;
prog_infos[i].xlated_prog_insns = 0;
CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
memcmp(&prog_info, &prog_infos[i], info_len) ||
*(int *)(long)prog_info.map_ids != saved_map_id,
"get-prog-info(next_id->fd)",
"err %d errno %d info_len %u(%zu) memcmp %d map_id %u(%u)\n",
err, errno, info_len, sizeof(struct bpf_prog_info),
memcmp(&prog_info, &prog_infos[i], info_len),
*(int *)(long)prog_info.map_ids, saved_map_id);
cmp_res = memcmp(&prog_info, &prog_infos[i], info_len);
ASSERT_OK(err, "bpf_prog_get_info_by_fd");
ASSERT_EQ(info_len, sizeof(struct bpf_prog_info), "prog_info_len");
ASSERT_OK(cmp_res, "memcmp");
ASSERT_EQ(*(int *)(long)prog_info.map_ids, saved_map_id, "map_id");
close(prog_fd);
}
CHECK(nr_id_found != nr_iters,
"check total prog id found by get_next_id",
"nr_id_found %u(%u)\n",
nr_id_found, nr_iters);
ASSERT_EQ(nr_id_found, nr_iters, "prog_nr_id_found");
/* Check bpf_map_get_next_id() */
nr_id_found = 0;
next_id = 0;
while (!bpf_map_get_next_id(next_id, &next_id)) {
struct bpf_map_info map_info = {};
int map_fd;
int map_fd, cmp_res;
info_len = sizeof(map_info);
@ -259,9 +215,7 @@ void serial_test_bpf_obj_id(void)
if (map_fd < 0 && errno == ENOENT)
/* The bpf_map is in the dead row */
continue;
if (CHECK(map_fd < 0, "get-map-fd(next_id)",
"map_fd %d next_id %u errno %d\n",
map_fd, next_id, errno))
if (!ASSERT_GE(map_fd, 0, "bpf_map_get_fd_by_id"))
break;
for (i = 0; i < nr_iters; i++)
@ -274,25 +228,19 @@ void serial_test_bpf_obj_id(void)
nr_id_found++;
err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
if (CHECK_FAIL(err))
if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
goto done;
err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len);
CHECK(err || info_len != sizeof(struct bpf_map_info) ||
memcmp(&map_info, &map_infos[i], info_len) ||
array_value != array_magic_value,
"check get-map-info(next_id->fd)",
"err %d errno %d info_len %u(%zu) memcmp %d array_value %llu(%llu)\n",
err, errno, info_len, sizeof(struct bpf_map_info),
memcmp(&map_info, &map_infos[i], info_len),
array_value, array_magic_value);
cmp_res = memcmp(&map_info, &map_infos[i], info_len);
ASSERT_OK(err, "bpf_map_get_info_by_fd");
ASSERT_EQ(info_len, sizeof(struct bpf_map_info), "info_len");
ASSERT_OK(cmp_res, "memcmp");
ASSERT_EQ(array_value, array_magic_value, "array_value");
close(map_fd);
}
CHECK(nr_id_found != nr_iters,
"check total map id found by get_next_id",
"nr_id_found %u(%u)\n",
nr_id_found, nr_iters);
ASSERT_EQ(nr_id_found, nr_iters, "map_nr_id_found");
/* Check bpf_link_get_next_id() */
nr_id_found = 0;
@ -308,9 +256,7 @@ void serial_test_bpf_obj_id(void)
if (link_fd < 0 && errno == ENOENT)
/* The bpf_link is in the dead row */
continue;
if (CHECK(link_fd < 0, "get-link-fd(next_id)",
"link_fd %d next_id %u errno %d\n",
link_fd, next_id, errno))
if (!ASSERT_GE(link_fd, 0, "bpf_link_get_fd_by_id"))
break;
for (i = 0; i < nr_iters; i++)
@ -325,17 +271,13 @@ void serial_test_bpf_obj_id(void)
err = bpf_link_get_info_by_fd(link_fd, &link_info, &info_len);
cmp_res = memcmp(&link_info, &link_infos[i],
offsetof(struct bpf_link_info, raw_tracepoint));
CHECK(err || info_len != sizeof(link_info) || cmp_res,
"check get-link-info(next_id->fd)",
"err %d errno %d info_len %u(%zu) memcmp %d\n",
err, errno, info_len, sizeof(struct bpf_link_info),
cmp_res);
ASSERT_OK(err, "bpf_link_get_info_by_fd");
ASSERT_EQ(info_len, sizeof(link_info), "info_len");
ASSERT_OK(cmp_res, "memcmp");
close(link_fd);
}
CHECK(nr_id_found != nr_iters,
"check total link id found by get_next_id",
"nr_id_found %u(%u)\n", nr_id_found, nr_iters);
ASSERT_EQ(nr_id_found, nr_iters, "link_nr_id_found");
done:
for (i = 0; i < nr_iters; i++) {


@ -20,15 +20,14 @@
static const unsigned int total_bytes = 10 * 1024 * 1024;
static int expected_stg = 0xeB9F;
static int stop, duration;
static int stop;
static int settcpca(int fd, const char *tcp_ca)
{
int err;
err = setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, tcp_ca, strlen(tcp_ca));
if (CHECK(err == -1, "setsockopt(fd, TCP_CONGESTION)", "errno:%d\n",
errno))
if (!ASSERT_NEQ(err, -1, "setsockopt"))
return -1;
return 0;
@ -65,8 +64,7 @@ static void *server(void *arg)
bytes += nr_sent;
}
CHECK(bytes != total_bytes, "send", "%zd != %u nr_sent:%zd errno:%d\n",
bytes, total_bytes, nr_sent, errno);
ASSERT_EQ(bytes, total_bytes, "send");
done:
if (fd >= 0)
@ -92,10 +90,11 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
WRITE_ONCE(stop, 0);
lfd = socket(AF_INET6, SOCK_STREAM, 0);
if (CHECK(lfd == -1, "socket", "errno:%d\n", errno))
if (!ASSERT_NEQ(lfd, -1, "socket"))
return;
fd = socket(AF_INET6, SOCK_STREAM, 0);
if (CHECK(fd == -1, "socket", "errno:%d\n", errno)) {
if (!ASSERT_NEQ(fd, -1, "socket")) {
close(lfd);
return;
}
@ -108,26 +107,27 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
sa6.sin6_family = AF_INET6;
sa6.sin6_addr = in6addr_loopback;
err = bind(lfd, (struct sockaddr *)&sa6, addrlen);
if (CHECK(err == -1, "bind", "errno:%d\n", errno))
if (!ASSERT_NEQ(err, -1, "bind"))
goto done;
err = getsockname(lfd, (struct sockaddr *)&sa6, &addrlen);
if (CHECK(err == -1, "getsockname", "errno:%d\n", errno))
if (!ASSERT_NEQ(err, -1, "getsockname"))
goto done;
err = listen(lfd, 1);
if (CHECK(err == -1, "listen", "errno:%d\n", errno))
if (!ASSERT_NEQ(err, -1, "listen"))
goto done;
if (sk_stg_map) {
err = bpf_map_update_elem(bpf_map__fd(sk_stg_map), &fd,
&expected_stg, BPF_NOEXIST);
if (CHECK(err, "bpf_map_update_elem(sk_stg_map)",
"err:%d errno:%d\n", err, errno))
if (!ASSERT_OK(err, "bpf_map_update_elem(sk_stg_map)"))
goto done;
}
/* connect to server */
err = connect(fd, (struct sockaddr *)&sa6, addrlen);
if (CHECK(err == -1, "connect", "errno:%d\n", errno))
if (!ASSERT_NEQ(err, -1, "connect"))
goto done;
if (sk_stg_map) {
@ -135,14 +135,13 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
err = bpf_map_lookup_elem(bpf_map__fd(sk_stg_map), &fd,
&tmp_stg);
if (CHECK(!err || errno != ENOENT,
"bpf_map_lookup_elem(sk_stg_map)",
"err:%d errno:%d\n", err, errno))
if (!ASSERT_ERR(err, "bpf_map_lookup_elem(sk_stg_map)") ||
!ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem(sk_stg_map)"))
goto done;
}
err = pthread_create(&srv_thread, NULL, server, (void *)(long)lfd);
if (CHECK(err != 0, "pthread_create", "err:%d errno:%d\n", err, errno))
if (!ASSERT_OK(err, "pthread_create"))
goto done;
/* recv total_bytes */
@ -156,13 +155,12 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
bytes += nr_recv;
}
CHECK(bytes != total_bytes, "recv", "%zd != %u nr_recv:%zd errno:%d\n",
bytes, total_bytes, nr_recv, errno);
ASSERT_EQ(bytes, total_bytes, "recv");
WRITE_ONCE(stop, 1);
pthread_join(srv_thread, &thread_ret);
CHECK(IS_ERR(thread_ret), "pthread_join", "thread_ret:%ld",
PTR_ERR(thread_ret));
ASSERT_OK(IS_ERR(thread_ret), "thread_ret");
done:
close(lfd);
close(fd);
@ -174,7 +172,7 @@ static void test_cubic(void)
struct bpf_link *link;
cubic_skel = bpf_cubic__open_and_load();
if (CHECK(!cubic_skel, "bpf_cubic__open_and_load", "failed\n"))
if (!ASSERT_OK_PTR(cubic_skel, "bpf_cubic__open_and_load"))
return;
link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic);
@ -197,7 +195,7 @@ static void test_dctcp(void)
struct bpf_link *link;
dctcp_skel = bpf_dctcp__open_and_load();
if (CHECK(!dctcp_skel, "bpf_dctcp__open_and_load", "failed\n"))
if (!ASSERT_OK_PTR(dctcp_skel, "bpf_dctcp__open_and_load"))
return;
link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp);
@ -207,9 +205,7 @@ static void test_dctcp(void)
}
do_test("bpf_dctcp", dctcp_skel->maps.sk_stg_map);
CHECK(dctcp_skel->bss->stg_result != expected_stg,
"Unexpected stg_result", "stg_result (%x) != expected_stg (%x)\n",
dctcp_skel->bss->stg_result, expected_stg);
ASSERT_EQ(dctcp_skel->bss->stg_result, expected_stg, "stg_result");
bpf_link__destroy(link);
bpf_dctcp__destroy(dctcp_skel);


@ -35,7 +35,7 @@ static int check_load(const char *file, enum bpf_prog_type type)
}
bpf_program__set_type(prog, type);
bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32);
bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS);
bpf_program__set_log_level(prog, 4 | extra_prog_load_log_flags);
err = bpf_object__load(obj);


@ -5265,6 +5265,7 @@ static size_t get_pprint_mapv_size(enum pprint_mapv_kind_t mapv_kind)
#endif
assert(0);
return 0;
}
static void set_pprint_mapv(enum pprint_mapv_kind_t mapv_kind,


@ -0,0 +1,158 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */
#include <sys/types.h>
#include <unistd.h>
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "test_cgroup1_hierarchy.skel.h"
static void bpf_cgroup1(struct test_cgroup1_hierarchy *skel)
{
struct bpf_link *lsm_link, *fentry_link;
int err;
/* Attach LSM prog first */
lsm_link = bpf_program__attach_lsm(skel->progs.lsm_run);
if (!ASSERT_OK_PTR(lsm_link, "lsm_attach"))
return;
/* LSM prog will be triggered when attaching fentry */
fentry_link = bpf_program__attach_trace(skel->progs.fentry_run);
ASSERT_NULL(fentry_link, "fentry_attach_fail");
err = bpf_link__destroy(lsm_link);
ASSERT_OK(err, "destroy_lsm");
}
static void bpf_cgroup1_sleepable(struct test_cgroup1_hierarchy *skel)
{
struct bpf_link *lsm_link, *fentry_link;
int err;
/* Attach LSM prog first */
lsm_link = bpf_program__attach_lsm(skel->progs.lsm_s_run);
if (!ASSERT_OK_PTR(lsm_link, "lsm_attach"))
return;
/* LSM prog will be triggered when attaching fentry */
fentry_link = bpf_program__attach_trace(skel->progs.fentry_run);
ASSERT_NULL(fentry_link, "fentry_attach_fail");
err = bpf_link__destroy(lsm_link);
ASSERT_OK(err, "destroy_lsm");
}
static void bpf_cgroup1_invalid_id(struct test_cgroup1_hierarchy *skel)
{
struct bpf_link *lsm_link, *fentry_link;
int err;
/* Attach LSM prog first */
lsm_link = bpf_program__attach_lsm(skel->progs.lsm_run);
if (!ASSERT_OK_PTR(lsm_link, "lsm_attach"))
return;
/* LSM prog will be triggered when attaching fentry */
fentry_link = bpf_program__attach_trace(skel->progs.fentry_run);
if (!ASSERT_OK_PTR(fentry_link, "fentry_attach_success"))
goto cleanup;
err = bpf_link__destroy(fentry_link);
ASSERT_OK(err, "destroy_lsm");
cleanup:
err = bpf_link__destroy(lsm_link);
ASSERT_OK(err, "destroy_fentry");
}
void test_cgroup1_hierarchy(void)
{
struct test_cgroup1_hierarchy *skel;
__u64 current_cgid;
int hid, err;
skel = test_cgroup1_hierarchy__open();
if (!ASSERT_OK_PTR(skel, "open"))
return;
skel->bss->target_pid = getpid();
err = bpf_program__set_attach_target(skel->progs.fentry_run, 0, "bpf_fentry_test1");
if (!ASSERT_OK(err, "fentry_set_target"))
goto destroy;
err = test_cgroup1_hierarchy__load(skel);
if (!ASSERT_OK(err, "load"))
goto destroy;
/* Setup cgroup1 hierarchy */
err = setup_classid_environment();
if (!ASSERT_OK(err, "setup_classid_environment"))
goto destroy;
err = join_classid();
if (!ASSERT_OK(err, "join_cgroup1"))
goto cleanup;
current_cgid = get_classid_cgroup_id();
if (!ASSERT_GE(current_cgid, 0, "cgroup1 id"))
goto cleanup;
hid = get_cgroup1_hierarchy_id("net_cls");
if (!ASSERT_GE(hid, 0, "cgroup1 id"))
goto cleanup;
skel->bss->target_hid = hid;
if (test__start_subtest("test_cgroup1_hierarchy")) {
skel->bss->target_ancestor_cgid = current_cgid;
bpf_cgroup1(skel);
}
if (test__start_subtest("test_root_cgid")) {
skel->bss->target_ancestor_cgid = 1;
skel->bss->target_ancestor_level = 0;
bpf_cgroup1(skel);
}
if (test__start_subtest("test_invalid_level")) {
skel->bss->target_ancestor_cgid = 1;
skel->bss->target_ancestor_level = 1;
bpf_cgroup1_invalid_id(skel);
}
if (test__start_subtest("test_invalid_cgid")) {
skel->bss->target_ancestor_cgid = 0;
bpf_cgroup1_invalid_id(skel);
}
if (test__start_subtest("test_invalid_hid")) {
skel->bss->target_ancestor_cgid = 1;
skel->bss->target_ancestor_level = 0;
skel->bss->target_hid = -1;
bpf_cgroup1_invalid_id(skel);
}
if (test__start_subtest("test_invalid_cgrp_name")) {
skel->bss->target_hid = get_cgroup1_hierarchy_id("net_cl");
skel->bss->target_ancestor_cgid = current_cgid;
bpf_cgroup1_invalid_id(skel);
}
if (test__start_subtest("test_invalid_cgrp_name2")) {
skel->bss->target_hid = get_cgroup1_hierarchy_id("net_cls,");
skel->bss->target_ancestor_cgid = current_cgid;
bpf_cgroup1_invalid_id(skel);
}
if (test__start_subtest("test_sleepable_prog")) {
skel->bss->target_hid = hid;
skel->bss->target_ancestor_cgid = current_cgid;
bpf_cgroup1_sleepable(skel);
}
cleanup:
cleanup_classid_environment();
destroy:
test_cgroup1_hierarchy__destroy(skel);
}


@ -71,7 +71,7 @@ void test_cgroup_v1v2(void)
}
ASSERT_OK(run_test(cgroup_fd, server_fd, false), "cgroup-v2-only");
setup_classid_environment();
set_classid(42);
set_classid();
ASSERT_OK(run_test(cgroup_fd, server_fd, true), "cgroup-v1v2");
cleanup_classid_environment();
close(server_fd);


@ -73,6 +73,37 @@ static void test_local_kptr_stash_unstash(void)
local_kptr_stash__destroy(skel);
}
static void test_refcount_acquire_without_unstash(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
struct local_kptr_stash *skel;
int ret;
skel = local_kptr_stash__open_and_load();
if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load"))
return;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.refcount_acquire_without_unstash),
&opts);
ASSERT_OK(ret, "refcount_acquire_without_unstash run");
ASSERT_EQ(opts.retval, 2, "refcount_acquire_without_unstash retval");
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_refcounted_node), &opts);
ASSERT_OK(ret, "stash_refcounted_node run");
ASSERT_OK(opts.retval, "stash_refcounted_node retval");
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.refcount_acquire_without_unstash),
&opts);
ASSERT_OK(ret, "refcount_acquire_without_unstash (2) run");
ASSERT_EQ(opts.retval, 42, "refcount_acquire_without_unstash (2) retval");
local_kptr_stash__destroy(skel);
}
static void test_local_kptr_stash_fail(void)
{
RUN_TESTS(local_kptr_stash_fail);
@ -86,6 +117,8 @@ void test_local_kptr_stash(void)
test_local_kptr_stash_plain();
if (test__start_subtest("local_kptr_stash_unstash"))
test_local_kptr_stash_unstash();
if (test__start_subtest("refcount_acquire_without_unstash"))
test_refcount_acquire_without_unstash();
if (test__start_subtest("local_kptr_stash_fail"))
test_local_kptr_stash_fail();
}


@ -78,7 +78,7 @@ static void obj_load_log_buf(void)
ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"),
"libbpf_log_not_empty");
ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty");
ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"),
ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx() R10=fp0"),
"good_log_verbose");
ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"),
"bad_log_not_empty");
@ -175,7 +175,7 @@ static void bpf_prog_load_log_buf(void)
opts.log_level = 2;
fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
good_prog_insns, good_prog_insn_cnt, &opts);
ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"), "good_log_2");
ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx() R10=fp0"), "good_log_2");
ASSERT_GE(fd, 0, "good_fd2");
if (fd >= 0)
close(fd);

File diff suppressed because it is too large.


@ -13,22 +13,22 @@ static struct {
const char *err_msg;
} spin_lock_fail_tests[] = {
{ "lock_id_kptr_preserve",
"5: (bf) r1 = r0 ; R0_w=ptr_foo(id=2,ref_obj_id=2,off=0,imm=0) "
"R1_w=ptr_foo(id=2,ref_obj_id=2,off=0,imm=0) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n"
"5: (bf) r1 = r0 ; R0_w=ptr_foo(id=2,ref_obj_id=2) "
"R1_w=ptr_foo(id=2,ref_obj_id=2) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n"
"R1 type=ptr_ expected=percpu_ptr_" },
{ "lock_id_global_zero",
"; R1_w=map_value(off=0,ks=4,vs=4,imm=0)\n2: (85) call bpf_this_cpu_ptr#154\n"
"; R1_w=map_value(map=.data.A,ks=4,vs=4)\n2: (85) call bpf_this_cpu_ptr#154\n"
"R1 type=map_value expected=percpu_ptr_" },
{ "lock_id_mapval_preserve",
"[0-9]\\+: (bf) r1 = r0 ;"
" R0_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)"
" R1_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)\n"
" R0_w=map_value(id=1,map=array_map,ks=4,vs=8)"
" R1_w=map_value(id=1,map=array_map,ks=4,vs=8)\n"
"[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n"
"R1 type=map_value expected=percpu_ptr_" },
{ "lock_id_innermapval_preserve",
"[0-9]\\+: (bf) r1 = r0 ;"
" R0=map_value(id=2,off=0,ks=4,vs=8,imm=0)"
" R1_w=map_value(id=2,off=0,ks=4,vs=8,imm=0)\n"
" R0=map_value(id=2,ks=4,vs=8)"
" R1_w=map_value(id=2,ks=4,vs=8)\n"
"[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n"
"R1 type=map_value expected=percpu_ptr_" },
{ "lock_id_mismatch_kptr_kptr", "bpf_spin_unlock of different lock" },


@ -2387,12 +2387,9 @@ static int generate_dummy_prog(void)
const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn);
LIBBPF_OPTS(bpf_prog_load_opts, opts);
const size_t log_buf_sz = 256;
char *log_buf;
char log_buf[log_buf_sz];
int fd = -1;
log_buf = malloc(log_buf_sz);
if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc"))
return fd;
opts.log_buf = log_buf;
opts.log_size = log_buf_sz;
@ -2402,7 +2399,6 @@ static int generate_dummy_prog(void)
prog_insns, prog_insn_cnt, &opts);
ASSERT_STREQ(log_buf, "", "log_0");
ASSERT_GE(fd, 0, "prog_fd");
free(log_buf);
return fd;
}


@ -16,27 +16,27 @@ static void nsleep()
void test_vmlinux(void)
{
int duration = 0, err;
int err;
struct test_vmlinux* skel;
struct test_vmlinux__bss *bss;
skel = test_vmlinux__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
if (!ASSERT_OK_PTR(skel, "test_vmlinux__open_and_load"))
return;
bss = skel->bss;
err = test_vmlinux__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
if (!ASSERT_OK(err, "test_vmlinux__attach"))
goto cleanup;
/* trigger everything */
nsleep();
CHECK(!bss->tp_called, "tp", "not called\n");
CHECK(!bss->raw_tp_called, "raw_tp", "not called\n");
CHECK(!bss->tp_btf_called, "tp_btf", "not called\n");
CHECK(!bss->kprobe_called, "kprobe", "not called\n");
CHECK(!bss->fentry_called, "fentry", "not called\n");
ASSERT_TRUE(bss->tp_called, "tp");
ASSERT_TRUE(bss->raw_tp_called, "raw_tp");
ASSERT_TRUE(bss->tp_btf_called, "tp_btf");
ASSERT_TRUE(bss->kprobe_called, "kprobe");
ASSERT_TRUE(bss->fentry_called, "fentry");
cleanup:
test_vmlinux__destroy(skel);


@ -35,6 +35,8 @@ int dump_task_stack(struct bpf_iter__task *ctx)
return 0;
}
int num_user_stacks = 0;
SEC("iter/task")
int get_task_user_stacks(struct bpf_iter__task *ctx)
{
@ -51,6 +53,9 @@ int get_task_user_stacks(struct bpf_iter__task *ctx)
if (res <= 0)
return 0;
/* Only one task, the current one, should succeed */
++num_user_stacks;
buf_sz += res;
/* If the verifier doesn't refine bpf_get_task_stack res, and instead


@ -18,48 +18,48 @@
return *(u64 *)num; \
}
__msg(": R0_w=-2147483648 R10=fp0")
__msg(": R0_w=0xffffffff80000000 R10=fp0")
check_assert(s64, eq, int_min, INT_MIN);
__msg(": R0_w=2147483647 R10=fp0")
__msg(": R0_w=0x7fffffff R10=fp0")
check_assert(s64, eq, int_max, INT_MAX);
__msg(": R0_w=0 R10=fp0")
check_assert(s64, eq, zero, 0);
__msg(": R0_w=-9223372036854775808 R1_w=-9223372036854775808 R10=fp0")
__msg(": R0_w=0x8000000000000000 R1_w=0x8000000000000000 R10=fp0")
check_assert(s64, eq, llong_min, LLONG_MIN);
__msg(": R0_w=9223372036854775807 R1_w=9223372036854775807 R10=fp0")
__msg(": R0_w=0x7fffffffffffffff R1_w=0x7fffffffffffffff R10=fp0")
check_assert(s64, eq, llong_max, LLONG_MAX);
__msg(": R0_w=scalar(smax=2147483646) R10=fp0")
__msg(": R0_w=scalar(smax=0x7ffffffe) R10=fp0")
check_assert(s64, lt, pos, INT_MAX);
__msg(": R0_w=scalar(smax=-1,umin=9223372036854775808,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
__msg(": R0_w=scalar(smax=-1,umin=0x8000000000000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
check_assert(s64, lt, zero, 0);
__msg(": R0_w=scalar(smax=-2147483649,umin=9223372036854775808,umax=18446744071562067967,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
__msg(": R0_w=scalar(smax=0xffffffff7fffffff,umin=0x8000000000000000,umax=0xffffffff7fffffff,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
check_assert(s64, lt, neg, INT_MIN);
__msg(": R0_w=scalar(smax=2147483647) R10=fp0")
__msg(": R0_w=scalar(smax=0x7fffffff) R10=fp0")
check_assert(s64, le, pos, INT_MAX);
__msg(": R0_w=scalar(smax=0) R10=fp0")
check_assert(s64, le, zero, 0);
__msg(": R0_w=scalar(smax=-2147483648,umin=9223372036854775808,umax=18446744071562067968,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
__msg(": R0_w=scalar(smax=0xffffffff80000000,umin=0x8000000000000000,umax=0xffffffff80000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
check_assert(s64, le, neg, INT_MIN);
__msg(": R0_w=scalar(smin=umin=2147483648,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
__msg(": R0_w=scalar(smin=umin=0x80000000,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, gt, pos, INT_MAX);
__msg(": R0_w=scalar(smin=umin=1,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
__msg(": R0_w=scalar(smin=umin=1,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, gt, zero, 0);
__msg(": R0_w=scalar(smin=-2147483647) R10=fp0")
__msg(": R0_w=scalar(smin=0xffffffff80000001) R10=fp0")
check_assert(s64, gt, neg, INT_MIN);
__msg(": R0_w=scalar(smin=umin=2147483647,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
__msg(": R0_w=scalar(smin=umin=0x7fffffff,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, ge, pos, INT_MAX);
__msg(": R0_w=scalar(smin=0,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff)) R10=fp0")
__msg(": R0_w=scalar(smin=0,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff)) R10=fp0")
check_assert(s64, ge, zero, 0);
__msg(": R0_w=scalar(smin=-2147483648) R10=fp0")
__msg(": R0_w=scalar(smin=0xffffffff80000000) R10=fp0")
check_assert(s64, ge, neg, INT_MIN);
SEC("?tc")
__log_level(2) __failure
__msg(": R0=0 R1=ctx(off=0,imm=0) R2=scalar(smin=smin32=-2147483646,smax=smax32=2147483645) R10=fp0")
__msg(": R0=0 R1=ctx() R2=scalar(smin=0xffffffff80000002,smax=smax32=0x7ffffffd,smin32=0x80000002) R10=fp0")
int check_assert_range_s64(struct __sk_buff *ctx)
{
struct bpf_sock *sk = ctx->sk;
@ -75,7 +75,7 @@ int check_assert_range_s64(struct __sk_buff *ctx)
SEC("?tc")
__log_level(2) __failure
__msg(": R1=ctx(off=0,imm=0) R2=scalar(smin=umin=smin32=umin32=4096,smax=umax=smax32=umax32=8192,var_off=(0x0; 0x3fff))")
__msg(": R1=ctx() R2=scalar(smin=umin=smin32=umin32=4096,smax=umax=smax32=umax32=8192,var_off=(0x0; 0x3fff))")
int check_assert_range_u64(struct __sk_buff *ctx)
{
u64 num = ctx->len;
@ -86,7 +86,7 @@ int check_assert_range_u64(struct __sk_buff *ctx)
SEC("?tc")
__log_level(2) __failure
__msg(": R0=0 R1=ctx(off=0,imm=0) R2=4096 R10=fp0")
__msg(": R0=0 R1=ctx() R2=4096 R10=fp0")
int check_assert_single_range_s64(struct __sk_buff *ctx)
{
struct bpf_sock *sk = ctx->sk;
@ -103,7 +103,7 @@ int check_assert_single_range_s64(struct __sk_buff *ctx)
SEC("?tc")
__log_level(2) __failure
__msg(": R1=ctx(off=0,imm=0) R2=4096 R10=fp0")
__msg(": R1=ctx() R2=4096 R10=fp0")
int check_assert_single_range_u64(struct __sk_buff *ctx)
{
u64 num = ctx->len;
@ -114,7 +114,7 @@ int check_assert_single_range_u64(struct __sk_buff *ctx)
SEC("?tc")
__log_level(2) __failure
__msg(": R1=pkt(off=64,r=64,imm=0) R2=pkt_end(off=0,imm=0) R6=pkt(off=0,r=64,imm=0) R10=fp0")
__msg(": R1=pkt(off=64,r=64) R2=pkt_end() R6=pkt(r=64) R10=fp0")
int check_assert_generic(struct __sk_buff *ctx)
{
u8 *data_end = (void *)(long)ctx->data_end;


@ -1411,4 +1411,26 @@ __naked int checkpoint_states_deletion(void)
);
}
struct {
int data[32];
int n;
} loop_data;
SEC("raw_tp")
__success
int iter_arr_with_actual_elem_count(const void *ctx)
{
int i, n = loop_data.n, sum = 0;
if (n > ARRAY_SIZE(loop_data.data))
return 0;
bpf_for(i, 0, n) {
/* no rechecking of i against ARRAY_SIZE(loop_data.data) */
sum += loop_data.data[i];
}
return sum;
}
char _license[] SEC("license") = "GPL";


@ -14,6 +14,24 @@ struct node_data {
struct bpf_rb_node node;
};
struct refcounted_node {
long data;
struct bpf_rb_node rb_node;
struct bpf_refcount refcount;
};
struct stash {
struct bpf_spin_lock l;
struct refcounted_node __kptr *stashed;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, struct stash);
__uint(max_entries, 10);
} refcounted_node_stash SEC(".maps");
struct plain_local {
long key;
long data;
@ -38,6 +56,7 @@ struct map_value {
* Had to do the same w/ bpf_kfunc_call_test_release below
*/
struct node_data *just_here_because_btf_bug;
struct refcounted_node *just_here_because_btf_bug2;
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
@ -132,4 +151,56 @@ long stash_test_ref_kfunc(void *ctx)
return 0;
}
SEC("tc")
long refcount_acquire_without_unstash(void *ctx)
{
struct refcounted_node *p;
struct stash *s;
int ret = 0;
s = bpf_map_lookup_elem(&refcounted_node_stash, &ret);
if (!s)
return 1;
if (!s->stashed)
/* refcount_acquire failure is expected when no refcounted_node
* has been stashed before this program executes
*/
return 2;
p = bpf_refcount_acquire(s->stashed);
if (!p)
return 3;
ret = s->stashed ? s->stashed->data : -1;
bpf_obj_drop(p);
return ret;
}
/* Helper for refcount_acquire_without_unstash test */
SEC("tc")
long stash_refcounted_node(void *ctx)
{
struct refcounted_node *p;
struct stash *s;
int key = 0;
s = bpf_map_lookup_elem(&refcounted_node_stash, &key);
if (!s)
return 1;
p = bpf_obj_new(typeof(*p));
if (!p)
return 2;
p->data = 42;
p = bpf_kptr_xchg(&s->stashed, p);
if (p) {
bpf_obj_drop(p);
return 3;
}
return 0;
}
char _license[] SEC("license") = "GPL";


@ -1,4 +1,26 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 180
/* An llvm upstream commit in clang 18,
* https://github.com/llvm/llvm-project/commit/1a2e77cf9e11dbf56b5720c607313a566eebb16e,
* changed inlining behavior and caused a compilation failure: some branch
* target distances exceeded the 16-bit representation, which is the maximum
* for cpu v1/v2/v3. The macro __BPF_CPU_VERSION__ was later implemented in
* clang 18 to specify which cpu version is used for compilation, so a smaller
* unroll_count can be set if __BPF_CPU_VERSION__ is less than 4, which
* reduces some branch target distances and resolves the compilation failure.
*
* To cover the case where a developer/CI uses a clang 18 build that does not
* yet have __BPF_CPU_VERSION__, a smaller unroll_count is set as well to
* prevent potential compilation failures.
*/
#ifdef __BPF_CPU_VERSION__
#if __BPF_CPU_VERSION__ < 4
#define UNROLL_COUNT 90
#endif
#elif __clang_major__ == 18
#define UNROLL_COUNT 90
#endif
#include "pyperf.h"


@ -53,6 +53,25 @@ long rbtree_refcounted_node_ref_escapes(void *ctx)
return 0;
}
SEC("?tc")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
long refcount_acquire_maybe_null(void *ctx)
{
struct node_acquire *n, *m;
n = bpf_obj_new(typeof(*n));
/* Intentionally not testing !n
* it's MAYBE_NULL for refcount_acquire
*/
m = bpf_refcount_acquire(n);
if (m)
bpf_obj_drop(m);
if (n)
bpf_obj_drop(n);
return 0;
}
SEC("?tc")
__failure __msg("Unreleased reference id=3 alloc_insn=9")
long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)


@ -0,0 +1,71 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
__u32 target_ancestor_level;
__u64 target_ancestor_cgid;
int target_pid, target_hid;
struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
static int bpf_link_create_verify(int cmd)
{
struct cgroup *cgrp, *ancestor;
struct task_struct *task;
int ret = 0;
if (cmd != BPF_LINK_CREATE)
return 0;
task = bpf_get_current_task_btf();
/* Then it can run in parallel with others */
if (task->pid != target_pid)
return 0;
cgrp = bpf_task_get_cgroup1(task, target_hid);
if (!cgrp)
return 0;
/* Refuse it if its cgid or its ancestor's cgid is the target cgid */
if (cgrp->kn->id == target_ancestor_cgid)
ret = -1;
ancestor = bpf_cgroup_ancestor(cgrp, target_ancestor_level);
if (!ancestor)
goto out;
if (ancestor->kn->id == target_ancestor_cgid)
ret = -1;
bpf_cgroup_release(ancestor);
out:
bpf_cgroup_release(cgrp);
return ret;
}
SEC("lsm/bpf")
int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size)
{
return bpf_link_create_verify(cmd);
}
SEC("lsm.s/bpf")
int BPF_PROG(lsm_s_run, int cmd, union bpf_attr *attr, unsigned int size)
{
return bpf_link_create_verify(cmd);
}
SEC("fentry")
int BPF_PROG(fentry_run)
{
return 0;
}
char _license[] SEC("license") = "GPL";


@ -965,6 +965,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("bound check with JMP_JSLT for crossing 64-bit signed boundary")
__success __retval(0)
__flag(!BPF_F_TEST_REG_INVARIANTS) /* known invariants violation */
__naked void crossing_64_bit_signed_boundary_2(void)
{
asm volatile (" \
@ -1046,6 +1047,7 @@ l0_%=: r0 = 0; \
SEC("xdp")
__description("bound check with JMP32_JSLT for crossing 32-bit signed boundary")
__success __retval(0)
__flag(!BPF_F_TEST_REG_INVARIANTS) /* known invariants violation */
__naked void crossing_32_bit_signed_boundary_2(void)
{
asm volatile (" \


@ -153,6 +153,14 @@ static int parse_retval(const char *str, int *val, const char *name)
return parse_int(str, val, name);
}
static void update_flags(int *flags, int flag, bool clear)
{
if (clear)
*flags &= ~flag;
else
*flags |= flag;
}
/* Uses btf_decl_tag attributes to describe the expected test
* behavior, see bpf_misc.h for detailed description of each attribute
* and attribute combinations.
@ -171,6 +179,7 @@ static int parse_test_spec(struct test_loader *tester,
memset(spec, 0, sizeof(*spec));
spec->prog_name = bpf_program__name(prog);
spec->prog_flags = BPF_F_TEST_REG_INVARIANTS; /* by default be strict */
btf = bpf_object__btf(obj);
if (!btf) {
@ -187,7 +196,8 @@ static int parse_test_spec(struct test_loader *tester,
for (i = 1; i < btf__type_cnt(btf); i++) {
const char *s, *val, *msg;
const struct btf_type *t;
int tmp;
bool clear;
int flags;
t = btf__type_by_id(btf, i);
if (!btf_is_decl_tag(t))
@ -253,23 +263,30 @@ static int parse_test_spec(struct test_loader *tester,
goto cleanup;
} else if (str_has_pfx(s, TEST_TAG_PROG_FLAGS_PFX)) {
val = s + sizeof(TEST_TAG_PROG_FLAGS_PFX) - 1;
clear = val[0] == '!';
if (clear)
val++;
if (strcmp(val, "BPF_F_STRICT_ALIGNMENT") == 0) {
spec->prog_flags |= BPF_F_STRICT_ALIGNMENT;
update_flags(&spec->prog_flags, BPF_F_STRICT_ALIGNMENT, clear);
} else if (strcmp(val, "BPF_F_ANY_ALIGNMENT") == 0) {
spec->prog_flags |= BPF_F_ANY_ALIGNMENT;
update_flags(&spec->prog_flags, BPF_F_ANY_ALIGNMENT, clear);
} else if (strcmp(val, "BPF_F_TEST_RND_HI32") == 0) {
spec->prog_flags |= BPF_F_TEST_RND_HI32;
update_flags(&spec->prog_flags, BPF_F_TEST_RND_HI32, clear);
} else if (strcmp(val, "BPF_F_TEST_STATE_FREQ") == 0) {
spec->prog_flags |= BPF_F_TEST_STATE_FREQ;
update_flags(&spec->prog_flags, BPF_F_TEST_STATE_FREQ, clear);
} else if (strcmp(val, "BPF_F_SLEEPABLE") == 0) {
spec->prog_flags |= BPF_F_SLEEPABLE;
update_flags(&spec->prog_flags, BPF_F_SLEEPABLE, clear);
} else if (strcmp(val, "BPF_F_XDP_HAS_FRAGS") == 0) {
spec->prog_flags |= BPF_F_XDP_HAS_FRAGS;
update_flags(&spec->prog_flags, BPF_F_XDP_HAS_FRAGS, clear);
} else if (strcmp(val, "BPF_F_TEST_REG_INVARIANTS") == 0) {
update_flags(&spec->prog_flags, BPF_F_TEST_REG_INVARIANTS, clear);
} else /* assume numeric value */ {
err = parse_int(val, &tmp, "test prog flags");
err = parse_int(val, &flags, "test prog flags");
if (err)
goto cleanup;
spec->prog_flags |= tmp;
update_flags(&spec->prog_flags, flags, clear);
}
}
}
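
With the '!' prefix recognized above, a __flag() annotation can now clear a flag that parse_test_spec() sets by default, which is exactly how the verifier_bounds programs shown earlier drop BPF_F_TEST_REG_INVARIANTS. A tiny self-contained sketch of the update_flags() semantics, using made-up flag values instead of the real BPF_F_* constants:

#include <assert.h>
#include <stdbool.h>

/* Same helper as added above: set the flag normally, clear it when the
 * __flag() value was prefixed with '!'.
 */
static void update_flags(int *flags, int flag, bool clear)
{
	if (clear)
		*flags &= ~flag;
	else
		*flags |= flag;
}

int main(void)
{
	const int FLAG_A = 0x1, FLAG_B = 0x2;	/* illustrative values only */
	int prog_flags = FLAG_A;		/* default, like BPF_F_TEST_REG_INVARIANTS */

	update_flags(&prog_flags, FLAG_B, false);	/* __flag(FLAG_B)  */
	update_flags(&prog_flags, FLAG_A, true);	/* __flag(!FLAG_A) */

	assert(prog_flags == FLAG_B);
	return 0;
}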


@ -1396,13 +1396,18 @@ static void test_map_stress(void)
#define MAX_DELAY_US 50000
#define MIN_DELAY_RANGE_US 5000
static int map_update_retriable(int map_fd, const void *key, const void *value,
int flags, int attempts)
static bool retry_for_again_or_busy(int err)
{
return (err == EAGAIN || err == EBUSY);
}
int map_update_retriable(int map_fd, const void *key, const void *value, int flags, int attempts,
retry_for_error_fn need_retry)
{
int delay = rand() % MIN_DELAY_RANGE_US;
while (bpf_map_update_elem(map_fd, key, value, flags)) {
if (!attempts || (errno != EAGAIN && errno != EBUSY))
if (!attempts || !need_retry(errno))
return -errno;
if (delay <= MAX_DELAY_US / 2)
@ -1445,11 +1450,13 @@ static void test_update_delete(unsigned int fn, void *data)
key = value = i;
if (do_update) {
err = map_update_retriable(fd, &key, &value, BPF_NOEXIST, MAP_RETRIES);
err = map_update_retriable(fd, &key, &value, BPF_NOEXIST, MAP_RETRIES,
retry_for_again_or_busy);
if (err)
printf("error %d %d\n", err, errno);
assert(err == 0);
err = map_update_retriable(fd, &key, &value, BPF_EXIST, MAP_RETRIES);
err = map_update_retriable(fd, &key, &value, BPF_EXIST, MAP_RETRIES,
retry_for_again_or_busy);
if (err)
printf("error %d %d\n", err, errno);
assert(err == 0);


@ -4,6 +4,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#define CHECK(condition, tag, format...) ({ \
int __ret = !!(condition); \
@ -16,4 +17,8 @@
extern int skips;
typedef bool (*retry_for_error_fn)(int err);
int map_update_retriable(int map_fd, const void *key, const void *value, int flags, int attempts,
retry_for_error_fn need_retry);
#endif
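
Since map_update_retriable() is now declared here with a caller-supplied retry predicate, other map tests can plug in their own policy. A hedged sketch of such a caller, assuming it is compiled inside the selftests tree; retry_for_nomem() and update_with_retries() are hypothetical names, not part of this series.

#include <errno.h>
#include <stdbool.h>
#include <bpf/bpf.h>
#include "test_maps.h"

/* Hypothetical policy: besides the default EAGAIN/EBUSY, also retry when
 * the kernel side reports a transient out-of-memory condition.
 */
bool retry_for_nomem(int err)
{
	return err == EAGAIN || err == EBUSY || err == ENOMEM;
}

int update_with_retries(int map_fd, __u32 key, __u64 value)
{
	return map_update_retriable(map_fd, &key, &value, BPF_ANY,
				    10 /* attempts */, retry_for_nomem);
}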


@ -679,7 +679,7 @@ static int load_path(const struct sock_addr_test *test, const char *path)
bpf_program__set_type(prog, BPF_PROG_TYPE_CGROUP_SOCK_ADDR);
bpf_program__set_expected_attach_type(prog, test->expected_attach_type);
bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32);
bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS);
err = bpf_object__load(obj);
if (err) {


@ -1588,7 +1588,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
if (fixup_skips != skips)
return;
pflags = BPF_F_TEST_RND_HI32;
pflags = BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS;
if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
pflags |= BPF_F_STRICT_ALIGNMENT;
if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)


@ -276,7 +276,7 @@ int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
if (type != BPF_PROG_TYPE_UNSPEC && bpf_program__type(prog) != type)
bpf_program__set_type(prog, type);
flags = bpf_program__flags(prog) | BPF_F_TEST_RND_HI32;
flags = bpf_program__flags(prog) | BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS;
bpf_program__set_flags(prog, flags);
err = bpf_object__load(obj);
@ -299,7 +299,7 @@ int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
{
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.kern_version = kern_version,
.prog_flags = BPF_F_TEST_RND_HI32,
.prog_flags = BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS,
.log_level = extra_prog_load_log_flags,
.log_buf = log_buf,
.log_size = log_buf_sz,


@ -18,6 +18,7 @@
#include <libelf.h>
#include <gelf.h>
#include <float.h>
#include <math.h>
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
@ -99,6 +100,7 @@ struct stat_specs {
enum stat_id ids[ALL_STATS_CNT];
enum stat_variant variants[ALL_STATS_CNT];
bool asc[ALL_STATS_CNT];
bool abs[ALL_STATS_CNT];
int lens[ALL_STATS_CNT * 3]; /* 3x for comparison mode */
};
@ -133,6 +135,7 @@ struct filter {
int stat_id;
enum stat_variant stat_var;
long value;
bool abs;
};
static struct env {
@ -142,10 +145,12 @@ static struct env {
bool debug;
bool quiet;
bool force_checkpoints;
bool force_reg_invariants;
enum resfmt out_fmt;
bool show_version;
bool comparison_mode;
bool replay_mode;
int top_n;
int log_level;
int log_size;
@ -210,8 +215,7 @@ static const struct argp_option opts[] = {
{ "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" },
{ "log-fixed", OPT_LOG_FIXED, NULL, 0, "Disable verifier log rotation" },
{ "log-size", OPT_LOG_SIZE, "BYTES", 0, "Customize verifier log size (default to 16MB)" },
{ "test-states", 't', NULL, 0,
"Force frequent BPF verifier state checkpointing (set BPF_F_TEST_STATE_FREQ program flag)" },
{ "top-n", 'n', "N", 0, "Emit only up to first N results." },
{ "quiet", 'q', NULL, 0, "Quiet mode" },
{ "emit", 'e', "SPEC", 0, "Specify stats to be emitted" },
{ "sort", 's', "SPEC", 0, "Specify sort order" },
@ -219,6 +223,10 @@ static const struct argp_option opts[] = {
{ "compare", 'C', NULL, 0, "Comparison mode" },
{ "replay", 'R', NULL, 0, "Replay mode" },
{ "filter", 'f', "FILTER", 0, "Filter expressions (or @filename for file with expressions)." },
{ "test-states", 't', NULL, 0,
"Force frequent BPF verifier state checkpointing (set BPF_F_TEST_STATE_FREQ program flag)" },
{ "test-reg-invariants", 'r', NULL, 0,
"Force BPF verifier failure on register invariant violation (BPF_F_TEST_REG_INVARIANTS program flag)" },
{},
};
@ -290,6 +298,16 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
case 't':
env.force_checkpoints = true;
break;
case 'r':
env.force_reg_invariants = true;
break;
case 'n':
errno = 0;
env.top_n = strtol(arg, NULL, 10);
if (errno) {
fprintf(stderr, "invalid top N specifier: %s\n", arg);
argp_usage(state);
}
case 'C':
env.comparison_mode = true;
break;
@ -455,7 +473,8 @@ static struct {
{ OP_EQ, "=" },
};
static bool parse_stat_id_var(const char *name, size_t len, int *id, enum stat_variant *var);
static bool parse_stat_id_var(const char *name, size_t len, int *id,
enum stat_variant *var, bool *is_abs);
static int append_filter(struct filter **filters, int *cnt, const char *str)
{
@ -488,13 +507,14 @@ static int append_filter(struct filter **filters, int *cnt, const char *str)
long val;
const char *end = str;
const char *op_str;
bool is_abs;
op_str = operators[i].op_str;
p = strstr(str, op_str);
if (!p)
continue;
if (!parse_stat_id_var(str, p - str, &id, &var)) {
if (!parse_stat_id_var(str, p - str, &id, &var, &is_abs)) {
fprintf(stderr, "Unrecognized stat name in '%s'!\n", str);
return -EINVAL;
}
@ -533,6 +553,7 @@ static int append_filter(struct filter **filters, int *cnt, const char *str)
f->stat_id = id;
f->stat_var = var;
f->op = operators[i].op_kind;
f->abs = true;
f->value = val;
*cnt += 1;
@ -657,7 +678,8 @@ static struct stat_def {
[MARK_READ_MAX_LEN] = { "Max mark read length", {"max_mark_read_len", "mark_read"}, },
};
static bool parse_stat_id_var(const char *name, size_t len, int *id, enum stat_variant *var)
static bool parse_stat_id_var(const char *name, size_t len, int *id,
enum stat_variant *var, bool *is_abs)
{
static const char *var_sfxs[] = {
[VARIANT_A] = "_a",
@@ -667,6 +689,14 @@ static bool parse_stat_id_var(const char *name, size_t len, int *id, enum stat_v
};
int i, j, k;
/* |<stat>| means we take absolute value of given stat */
*is_abs = false;
if (len > 2 && name[0] == '|' && name[len - 1] == '|') {
*is_abs = true;
name += 1;
len -= 2;
}
for (i = 0; i < ARRAY_SIZE(stat_defs); i++) {
struct stat_def *def = &stat_defs[i];
size_t alias_len, sfx_len;
@@ -722,7 +752,7 @@ static bool is_desc_sym(char c)
static int parse_stat(const char *stat_name, struct stat_specs *specs)
{
int id;
bool has_order = false, is_asc = false;
bool has_order = false, is_asc = false, is_abs = false;
size_t len = strlen(stat_name);
enum stat_variant var;
@@ -737,7 +767,7 @@ static int parse_stat(const char *stat_name, struct stat_specs *specs)
len -= 1;
}
if (!parse_stat_id_var(stat_name, len, &id, &var)) {
if (!parse_stat_id_var(stat_name, len, &id, &var, &is_abs)) {
fprintf(stderr, "Unrecognized stat name '%s'\n", stat_name);
return -ESRCH;
}
@@ -745,6 +775,7 @@ static int parse_stat(const char *stat_name, struct stat_specs *specs)
specs->ids[specs->spec_cnt] = id;
specs->variants[specs->spec_cnt] = var;
specs->asc[specs->spec_cnt] = has_order ? is_asc : stat_defs[id].asc_by_default;
specs->abs[specs->spec_cnt] = is_abs;
specs->spec_cnt++;
return 0;
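
The parsing changes above let both sort (-s) and filter (-f) specs wrap a stat name in |...| to operate on its absolute value, which mostly matters for the signed diff variants in comparison mode. A sketch of such a run, with the CSV names as placeholders and assuming the usual "insns" alias and "_diff" variant suffix:

  # order by magnitude of the instruction-count change and drop small movers
  ./veristat -C before.csv after.csv -s '|insns_diff|' -f '|insns_diff|>100'
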
@@ -997,6 +1028,8 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
if (env.force_checkpoints)
bpf_program__set_flags(prog, bpf_program__flags(prog) | BPF_F_TEST_STATE_FREQ);
if (env.force_reg_invariants)
bpf_program__set_flags(prog, bpf_program__flags(prog) | BPF_F_TEST_REG_INVARIANTS);
err = bpf_object__load(obj);
env.progs_processed++;
@@ -1103,7 +1136,7 @@ cleanup:
}
static int cmp_stat(const struct verif_stats *s1, const struct verif_stats *s2,
enum stat_id id, bool asc)
enum stat_id id, bool asc, bool abs)
{
int cmp = 0;
@@ -1124,6 +1157,11 @@ static int cmp_stat(const struct verif_stats *s1, const struct verif_stats *s2,
long v1 = s1->stats[id];
long v2 = s2->stats[id];
if (abs) {
v1 = v1 < 0 ? -v1 : v1;
v2 = v2 < 0 ? -v2 : v2;
}
if (v1 != v2)
cmp = v1 < v2 ? -1 : 1;
break;
@@ -1142,7 +1180,8 @@ static int cmp_prog_stats(const void *v1, const void *v2)
int i, cmp;
for (i = 0; i < env.sort_spec.spec_cnt; i++) {
cmp = cmp_stat(s1, s2, env.sort_spec.ids[i], env.sort_spec.asc[i]);
cmp = cmp_stat(s1, s2, env.sort_spec.ids[i],
env.sort_spec.asc[i], env.sort_spec.abs[i]);
if (cmp != 0)
return cmp;
}
@@ -1211,7 +1250,8 @@ static void fetch_join_stat_value(const struct verif_stats_join *s,
static int cmp_join_stat(const struct verif_stats_join *s1,
const struct verif_stats_join *s2,
enum stat_id id, enum stat_variant var, bool asc)
enum stat_id id, enum stat_variant var,
bool asc, bool abs)
{
const char *str1 = NULL, *str2 = NULL;
double v1, v2;
@@ -1220,6 +1260,11 @@ static int cmp_join_stat(const struct verif_stats_join *s1,
fetch_join_stat_value(s1, id, var, &str1, &v1);
fetch_join_stat_value(s2, id, var, &str2, &v2);
if (abs) {
v1 = fabs(v1);
v2 = fabs(v2);
}
if (str1)
cmp = strcmp(str1, str2);
else if (v1 != v2)
@@ -1237,7 +1282,8 @@ static int cmp_join_stats(const void *v1, const void *v2)
cmp = cmp_join_stat(s1, s2,
env.sort_spec.ids[i],
env.sort_spec.variants[i],
env.sort_spec.asc[i]);
env.sort_spec.asc[i],
env.sort_spec.abs[i]);
if (cmp != 0)
return cmp;
}
@@ -1720,6 +1766,9 @@ static bool is_join_stat_filter_matched(struct filter *f, const struct verif_sta
fetch_join_stat_value(stats, f->stat_id, f->stat_var, &str, &value);
if (f->abs)
value = fabs(value);
switch (f->op) {
case OP_EQ: return value > f->value - eps && value < f->value + eps;
case OP_NEQ: return value < f->value - eps || value > f->value + eps;
@@ -1766,7 +1815,7 @@ static int handle_comparison_mode(void)
struct stat_specs base_specs = {}, comp_specs = {};
struct stat_specs tmp_sort_spec;
enum resfmt cur_fmt;
int err, i, j, last_idx;
int err, i, j, last_idx, cnt;
if (env.filename_cnt != 2) {
fprintf(stderr, "Comparison mode expects exactly two input CSV files!\n\n");
@@ -1879,7 +1928,7 @@ static int handle_comparison_mode(void)
env.join_stat_cnt += 1;
}
/* now sort joined results accorsing to sort spec */
/* now sort joined results according to sort spec */
qsort(env.join_stats, env.join_stat_cnt, sizeof(*env.join_stats), cmp_join_stats);
/* for human-readable table output we need to do extra pass to
@@ -1896,16 +1945,22 @@ one_more_time:
output_comp_headers(cur_fmt);
last_idx = -1;
cnt = 0;
for (i = 0; i < env.join_stat_cnt; i++) {
const struct verif_stats_join *join = &env.join_stats[i];
if (!should_output_join_stats(join))
continue;
if (env.top_n && cnt >= env.top_n)
break;
if (cur_fmt == RESFMT_TABLE_CALCLEN)
last_idx = i;
output_comp_stats(join, cur_fmt, i == last_idx);
cnt++;
}
if (cur_fmt == RESFMT_TABLE_CALCLEN) {
@@ -1920,6 +1975,9 @@ static bool is_stat_filter_matched(struct filter *f, const struct verif_stats *s
{
long value = stats->stats[f->stat_id];
if (f->abs)
value = value < 0 ? -value : value;
switch (f->op) {
case OP_EQ: return value == f->value;
case OP_NEQ: return value != f->value;
@@ -1964,7 +2022,7 @@ static bool should_output_stats(const struct verif_stats *stats)
static void output_prog_stats(void)
{
const struct verif_stats *stats;
int i, last_stat_idx = 0;
int i, last_stat_idx = 0, cnt = 0;
if (env.out_fmt == RESFMT_TABLE) {
/* calculate column widths */
@@ -1984,7 +2042,10 @@ static void output_prog_stats(void)
stats = &env.prog_stats[i];
if (!should_output_stats(stats))
continue;
if (env.top_n && cnt >= env.top_n)
break;
output_stats(stats, env.out_fmt, i == last_stat_idx);
cnt++;
}
}

diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh

@@ -36,7 +36,9 @@ DEFAULT_COMMAND="./test_progs"
MOUNT_DIR="mnt"
ROOTFS_IMAGE="root.img"
OUTPUT_DIR="$HOME/.bpf_selftests"
KCONFIG_REL_PATHS=("tools/testing/selftests/bpf/config" "tools/testing/selftests/bpf/config.${ARCH}")
KCONFIG_REL_PATHS=("tools/testing/selftests/bpf/config"
"tools/testing/selftests/bpf/config.vm"
"tools/testing/selftests/bpf/config.${ARCH}")
INDEX_URL="https://raw.githubusercontent.com/libbpf/ci/master/INDEX"
NUM_COMPILE_JOBS="$(nproc)"
LOG_FILE_BASE="$(date +"bpf_selftests.%Y-%m-%d_%H-%M-%S")"
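
With config.vm added to KCONFIG_REL_PATHS, every VM selftest kernel build now pulls in the shared VM config fragment alongside the base and per-architecture configs. As a rough illustration of how such a list is typically consumed (not the script's verbatim code; KERNEL_CHECKOUT and latest.config are placeholder names):

  # illustrative sketch only: fold all kconfig fragments into one file
  for rel_path in "${KCONFIG_REL_PATHS[@]}"; do
          cat "${KERNEL_CHECKOUT}/${rel_path}" >> "${OUTPUT_DIR}/latest.config"
  done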