bpf-next-for-netdev

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2023-02-17

We've added 64 non-merge commits during the last 7 day(s) which contain
a total of 158 files changed, 4190 insertions(+), 988 deletions(-).

The main changes are:

1) Add a rbtree data structure following the "next-gen data structure"
   precedent set by recently-added linked-list, that is, by using
   kfunc + kptr instead of adding a new BPF map type, from Dave Marchevsky.

2) Add a new benchmark for hashmap lookups to BPF selftests,
   from Anton Protopopov.

3) Fix bpf_fib_lookup to only return valid neighbors and add an option
   to skip the neigh table lookup, from Martin KaFai Lau.

4) Add cgroup.memory=nobpf kernel parameter option to disable BPF memory
   accounting for container environments, from Yafang Shao.

5) Batch of ice multi-buffer and driver performance fixes,
   from Alexander Lobakin.

6) Fix a bug in determining whether global subprog's argument is
   PTR_TO_CTX, which is based on type names which breaks kprobe progs,
   from Andrii Nakryiko.

7) Prep work for future -mcpu=v4 LLVM option which includes usage of
   BPF_ST insn. Thus improve BPF_ST-related value tracking in verifier,
   from Eduard Zingerman.

8) More prep work for later building selftests with Memory Sanitizer
   in order to detect usages of undefined memory, from Ilya Leoshkevich.

9) Fix xsk sockets to check IFF_UP earlier to avoid a NULL pointer
   dereference via sendmsg(), from Maciej Fijalkowski.

10) Implement BPF trampoline for RV64 JIT compiler, from Pu Lehui.

11) Fix BPF memory allocator in combination with BPF hashtab where it could
    corrupt special fields e.g. used in bpf_spin_lock, from Hou Tao.

12) Fix LoongArch BPF JIT to always use 4 instructions for function address
    so that instruction sequences don't change between passes, from Hengqi Chen.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (64 commits)
  selftests/bpf: Add bpf_fib_lookup test
  bpf: Add BPF_FIB_LOOKUP_SKIP_NEIGH for bpf_fib_lookup
  riscv, bpf: Add bpf trampoline support for RV64
  riscv, bpf: Add bpf_arch_text_poke support for RV64
  riscv, bpf: Factor out emit_call for kernel and bpf context
  riscv: Extend patch_text for multiple instructions
  Revert "bpf, test_run: fix &xdp_frame misplacement for LIVE_FRAMES"
  selftests/bpf: Add global subprog context passing tests
  selftests/bpf: Convert test_global_funcs test to test_loader framework
  bpf: Fix global subprog context argument resolution logic
  LoongArch, bpf: Use 4 instructions for function address in JIT
  bpf: bpf_fib_lookup should not return neigh in NUD_FAILED state
  bpf: Disable bh in bpf_test_run for xdp and tc prog
  xsk: check IFF_UP earlier in Tx path
  Fix typos in selftest/bpf files
  selftests/bpf: Use bpf_{btf,link,map,prog}_get_info_by_fd()
  samples/bpf: Use bpf_{btf,link,map,prog}_get_info_by_fd()
  bpftool: Use bpf_{btf,link,map,prog}_get_info_by_fd()
  libbpf: Use bpf_{btf,link,map,prog}_get_info_by_fd()
  libbpf: Introduce bpf_{btf,link,map,prog}_get_info_by_fd()
  ...
====================

Link: https://lore.kernel.org/r/20230217221737.31122-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit ee8d72a157
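
To illustrate item 3) above: the new BPF_FIB_LOOKUP_SKIP_NEIGH flag is passed in the flags argument of the existing bpf_fib_lookup() helper. A rough sketch of how a TC program might use it (field setup elided and names illustrative; not taken from this series):

	struct bpf_fib_lookup params = {};
	long rc;

	/* fill in params.family, params.ifindex, addresses, ... */
	rc = bpf_fib_lookup(skb, &params, sizeof(params),
			    BPF_FIB_LOOKUP_SKIP_NEIGH);
	if (rc == BPF_FIB_LKUP_RET_SUCCESS) {
		/* route resolved without requiring a resolved neighbour entry */
	}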

@@ -557,6 +557,7 @@
			Format: <string>
			nosocket -- Disable socket memory accounting.
			nokmem -- Disable kernel memory accounting.
+			nobpf -- Disable BPF memory accounting.

	checkreqprot=	[SELINUX] Set initial checkreqprot flag value.
			Format: { "0" | "1" }
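
The new option is enabled the same way as the existing cgroup.memory options, i.e. on the kernel command line. Illustrative only (combining options with a comma follows the existing nosocket/nokmem convention):

	cgroup.memory=nobpf
	cgroup.memory=nokmem,nobpf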

267	Documentation/bpf/graph_ds_impl.rst	Normal file
@@ -0,0 +1,267 @@
=========================
BPF Graph Data Structures
=========================

This document describes implementation details of new-style "graph" data
structures (linked_list, rbtree), with particular focus on the verifier's
implementation of semantics specific to those data structures.

Although no specific verifier code is referred to in this document, the document
assumes that the reader has general knowledge of BPF verifier internals, BPF
maps, and BPF program writing.

Note that the intent of this document is to describe the current state of
these graph data structures. **No guarantees** of stability for either
semantics or APIs are made or implied here.

.. contents::
   :local:
   :depth: 2

Introduction
------------

The BPF map API has historically been the main way to expose data structures
of various types for use within BPF programs. Some data structures fit naturally
with the map API (HASH, ARRAY), others less so. Consequently, programs
interacting with the latter group of data structures can be hard to parse
for kernel programmers without previous BPF experience.

Luckily, some restrictions which necessitated the use of BPF map semantics are
no longer relevant. With the introduction of kfuncs, kptrs, and the any-context
BPF allocator, it is now possible to implement BPF data structures whose API
and semantics more closely match those exposed to the rest of the kernel.

Two such data structures - linked_list and rbtree - have many verification
details in common. Because both have "root"s ("head" for linked_list) and
"node"s, the verifier code and this document refer to common functionality
as "graph_api", "graph_root", "graph_node", etc.

Unless otherwise stated, examples and semantics below apply to both graph data
structures.

Unstable API
------------

Data structures implemented using the BPF map API have historically used BPF
helper functions - either standard map API helpers like ``bpf_map_update_elem``
or map-specific helpers. The new-style graph data structures instead use kfuncs
to define their manipulation helpers. Because there are no stability guarantees
for kfuncs, the API and semantics for these data structures can be evolved in
a way that breaks backwards compatibility if necessary.

Root and node types for the new data structures are opaquely defined in the
``uapi/linux/bpf.h`` header.
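
For orientation, a BPF program typically declares the graph manipulation
kfuncs itself before using them. The following is only a sketch of how such
declarations might look, modeled on the selftests' ``bpf_experimental.h``
conventions around the time of this series; the exact signatures (including
the ``less`` comparator taken by ``bpf_rbtree_add`` and the hidden ``_impl``
arguments) are kfuncs and may change without notice:

.. code-block:: c

    extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
    extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;

    extern void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
                               bool (less)(struct bpf_rb_node *a,
                                           const struct bpf_rb_node *b)) __ksym;
    extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
                                                 struct bpf_rb_node *node) __ksym;
    extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;

    /* Convenience wrappers usually hide the _impl arguments, e.g.: */
    #define bpf_obj_new(type) \
        ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))
    #define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)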

Locking
-------

The new-style data structures are intrusive and are defined similarly to their
vanilla kernel counterparts:

.. code-block:: c

    struct node_data {
        long key;
        long data;
        struct bpf_rb_node node;
    };

    struct bpf_spin_lock glock;
    struct bpf_rb_root groot __contains(node_data, node);

The "root" type for both linked_list and rbtree expects to be in a map_value
which also contains a ``bpf_spin_lock`` - in the above example both global
variables are placed in a single-value arraymap. The verifier considers this
spin_lock to be associated with the ``bpf_rb_root`` by virtue of both being in
the same map_value and will enforce that the correct lock is held when
verifying BPF programs that manipulate the tree. Since this lock checking
happens at verification time, there is no runtime penalty.

Non-owning references
---------------------

**Motivation**

Consider the following BPF code:

.. code-block:: c

    struct node_data *n = bpf_obj_new(typeof(*n)); /* ACQUIRED */

    bpf_spin_lock(&lock);

    bpf_rbtree_add(&tree, n); /* PASSED */

    bpf_spin_unlock(&lock);

From the verifier's perspective, the pointer ``n`` returned from ``bpf_obj_new``
has type ``PTR_TO_BTF_ID | MEM_ALLOC``, with a ``btf_id`` of
``struct node_data`` and a nonzero ``ref_obj_id``. Because it holds ``n``, the
program has ownership of the pointee's (object pointed to by ``n``) lifetime.
The BPF program must pass off ownership before exiting - either via
``bpf_obj_drop``, which ``free``'s the object, or by adding it to ``tree`` with
``bpf_rbtree_add``.

(``ACQUIRED`` and ``PASSED`` comments in the example denote statements where
"ownership is acquired" and "ownership is passed", respectively)

What should the verifier do with ``n`` after ownership is passed off? If the
object was ``free``'d with ``bpf_obj_drop`` the answer is obvious: the verifier
should reject programs which attempt to access ``n`` after ``bpf_obj_drop`` as
the object is no longer valid. The underlying memory may have been reused for
some other allocation, unmapped, etc.

When ownership is passed to ``tree`` via ``bpf_rbtree_add`` the answer is less
obvious. The verifier could enforce the same semantics as for ``bpf_obj_drop``,
but that would result in programs with useful, common coding patterns being
rejected, e.g.:

.. code-block:: c

    int x;
    struct node_data *n = bpf_obj_new(typeof(*n)); /* ACQUIRED */

    bpf_spin_lock(&lock);

    bpf_rbtree_add(&tree, n); /* PASSED */
    x = n->data;
    n->data = 42;

    bpf_spin_unlock(&lock);

Both the read from and write to ``n->data`` would be rejected. The verifier
can do better, though, by taking advantage of two details:

* Graph data structure APIs can only be used when the ``bpf_spin_lock``
  associated with the graph root is held

* Both graph data structures have pointer stability

  * Because graph nodes are allocated with ``bpf_obj_new`` and
    adding / removing from the root involves fiddling with the
    ``bpf_{list,rb}_node`` field of the node struct, a graph node will
    remain at the same address after either operation.

Because the associated ``bpf_spin_lock`` must be held by any program adding
or removing, if we're in the critical section bounded by that lock, we know
that no other program can add or remove until the end of the critical section.
This combined with pointer stability means that, until the critical section
ends, we can safely access the graph node through ``n`` even after it was used
to pass ownership.

The verifier considers such a reference a *non-owning reference*. The ref
returned by ``bpf_obj_new`` is accordingly considered an *owning reference*.
Both terms currently only have meaning in the context of graph nodes and API.

**Details**

Let's enumerate the properties of both types of references.

*owning reference*

* This reference controls the lifetime of the pointee

* Ownership of pointee must be 'released' by passing it to some graph API
  kfunc, or via ``bpf_obj_drop``, which ``free``'s the pointee

* If not released before program ends, verifier considers program invalid

* Access to the pointee's memory will not page fault

*non-owning reference*

* This reference does not own the pointee

* It cannot be used to add the graph node to a graph root, nor ``free``'d via
  ``bpf_obj_drop``

* No explicit control of lifetime, but can infer valid lifetime based on
  non-owning ref existence (see explanation below)

* Access to the pointee's memory will not page fault

From verifier's perspective non-owning references can only exist
between spin_lock and spin_unlock. Why? After spin_unlock another program
can do arbitrary operations on the data structure like removing and ``free``-ing
via bpf_obj_drop. A non-owning ref to some chunk of memory that was remove'd,
``free``'d, and reused via bpf_obj_new would point to an entirely different thing.
Or the memory could go away.

To prevent this logic violation all non-owning references are invalidated by the
verifier after a critical section ends. This is necessary to ensure the "will
not page fault" property of non-owning references. So if the verifier hasn't
invalidated a non-owning ref, accessing it will not page fault.

Currently ``bpf_obj_drop`` is not allowed in the critical section, so
if there's a valid non-owning ref, we must be in a critical section, and can
conclude that the ref's memory hasn't been dropped-and- ``free``'d or
dropped-and-reused.
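
To make the invalidation point concrete, here is a schematic fragment (using
the same simplified call style as the examples above) that the verifier would
reject, since the non-owning reference does not survive the
``bpf_spin_unlock``:

.. code-block:: c

    int x;
    struct node_data *n = bpf_obj_new(typeof(*n)); /* owning ref */

    if (!n)
        return 0;

    bpf_spin_lock(&lock);

    bpf_rbtree_add(&tree, n);  /* n is now a non-owning ref */
    x = n->data;               /* OK: still inside the critical section */

    bpf_spin_unlock(&lock);    /* all non-owning refs are invalidated */

    x = n->data;               /* rejected by the verifier */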

Any reference to a node that is in an rbtree _must_ be non-owning, since
the tree has control of the pointee's lifetime. Similarly, any ref to a node
that isn't in rbtree _must_ be owning. This results in a nice property:
graph API add / remove implementations don't need to check if a node
has already been added (or already removed), as the ownership model
allows the verifier to prevent such a state from being valid by simply checking
types.

However, pointer aliasing poses an issue for the above "nice property".
Consider the following example:

.. code-block:: c

    struct node_data *n, *m, *o, *p;
    n = bpf_obj_new(typeof(*n));     /* 1 */

    bpf_spin_lock(&lock);

    bpf_rbtree_add(&tree, n);        /* 2 */
    m = bpf_rbtree_first(&tree);     /* 3 */

    o = bpf_rbtree_remove(&tree, n); /* 4 */
    p = bpf_rbtree_remove(&tree, m); /* 5 */

    bpf_spin_unlock(&lock);

    bpf_obj_drop(o);
    bpf_obj_drop(p); /* 6 */

Assume the tree is empty before this program runs. If we track verifier state
changes here using numbers in above comments:

1) n is an owning reference

2) n is a non-owning reference, it's been added to the tree

3) n and m are non-owning references, they both point to the same node

4) o is an owning reference, n and m non-owning, all point to same node

5) o and p are owning, n and m non-owning, all point to the same node

6) a double-free has occurred, since o and p point to same node and o was
   ``free``'d in previous statement

States 4 and 5 violate our "nice property", as there are non-owning refs to
a node which is not in an rbtree. Statement 5 will try to remove a node which
has already been removed as a result of this violation. State 6 is a dangerous
double-free.

At a minimum we should prevent state 6 from being possible. If we can't also
prevent state 5 then we must abandon our "nice property" and check whether a
node has already been removed at runtime.

We prevent both by generalizing the "invalidate non-owning references" behavior
of ``bpf_spin_unlock`` and doing similar invalidation after
``bpf_rbtree_remove``. The logic here being that any graph API kfunc which:

* takes an arbitrary node argument

* removes it from the data structure

* returns an owning reference to the removed node

May result in a state where some other non-owning reference points to the same
node. So ``remove``-type kfuncs must be considered a non-owning reference
invalidation point as well.

@@ -6,4 +6,5 @@ Other
   :maxdepth: 1

   ringbuf
   llvm_reloc
+  graph_ds_impl

@@ -4023,6 +4023,13 @@ L:	bpf@vger.kernel.org
S:	Maintained
F:	tools/testing/selftests/bpf/

+BPF [DOCUMENTATION] (Related to Standardization)
+R:	David Vernet <void@manifault.com>
+L:	bpf@vger.kernel.org
+L:	bpf@ietf.org
+S:	Maintained
+F:	Documentation/bpf/instruction-set.rst
+
BPF [MISC]
L:	bpf@vger.kernel.org
S:	Odd Fixes

@@ -841,7 +841,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
		if (ret < 0)
			return ret;

-		move_imm(ctx, t1, func_addr, is32);
+		move_addr(ctx, t1, func_addr);
		emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0);
		move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
		break;

@@ -82,6 +82,27 @@ static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, boo
	emit_insn(ctx, addiw, reg, reg, 0);
}

+static inline void move_addr(struct jit_ctx *ctx, enum loongarch_gpr rd, u64 addr)
+{
+	u64 imm_11_0, imm_31_12, imm_51_32, imm_63_52;
+
+	/* lu12iw rd, imm_31_12 */
+	imm_31_12 = (addr >> 12) & 0xfffff;
+	emit_insn(ctx, lu12iw, rd, imm_31_12);
+
+	/* ori rd, rd, imm_11_0 */
+	imm_11_0 = addr & 0xfff;
+	emit_insn(ctx, ori, rd, rd, imm_11_0);
+
+	/* lu32id rd, imm_51_32 */
+	imm_51_32 = (addr >> 32) & 0xfffff;
+	emit_insn(ctx, lu32id, rd, imm_51_32);
+
+	/* lu52id rd, rd, imm_63_52 */
+	imm_63_52 = (addr >> 52) & 0xfff;
+	emit_insn(ctx, lu52id, rd, rd, imm_63_52);
+}
+
static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm, bool is32)
{
	long imm_11_0, imm_31_12, imm_51_32, imm_63_52, imm_51_0, imm_51_31;
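
The point of move_addr() over move_imm() for function addresses is that it always emits exactly four instructions, so the length of the generated sequence cannot differ between JIT passes. A small host-side sketch (not kernel code; the helper name and sample values are made up) showing that the four immediates above always reassemble the original 64-bit address:

#include <assert.h>
#include <stdint.h>

/* Reassemble an address from the four fields move_addr() feeds to
 * lu12i.w / ori / lu32i.d / lu52i.d. The later instructions replace the
 * upper bits outright, so the result is exact for any 64-bit value.
 */
static uint64_t reassemble(uint64_t addr)
{
	uint64_t imm_11_0  = addr & 0xfff;
	uint64_t imm_31_12 = (addr >> 12) & 0xfffff;
	uint64_t imm_51_32 = (addr >> 32) & 0xfffff;
	uint64_t imm_63_52 = (addr >> 52) & 0xfff;

	return (imm_63_52 << 52) | (imm_51_32 << 32) | (imm_31_12 << 12) | imm_11_0;
}

int main(void)
{
	uint64_t samples[] = { 0x0, 0x7fffffffULL, 0x9000000012345678ULL, ~0ULL };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		assert(reassemble(samples[i]) == samples[i]);
	return 0;
}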

@@ -7,6 +7,6 @@
#define _ASM_RISCV_PATCH_H

int patch_text_nosync(void *addr, const void *insns, size_t len);
-int patch_text(void *addr, u32 insn);
+int patch_text(void *addr, u32 *insns, int ninsns);

#endif /* _ASM_RISCV_PATCH_H */

@@ -15,7 +15,8 @@

struct patch_insn {
	void *addr;
-	u32 insn;
+	u32 *insns;
+	int ninsns;
	atomic_t cpu_count;
};

@@ -102,12 +103,15 @@ NOKPROBE_SYMBOL(patch_text_nosync);
static int patch_text_cb(void *data)
{
	struct patch_insn *patch = data;
-	int ret = 0;
+	unsigned long len;
+	int i, ret = 0;

	if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
-		ret =
-		    patch_text_nosync(patch->addr, &patch->insn,
-				      GET_INSN_LENGTH(patch->insn));
+		for (i = 0; ret == 0 && i < patch->ninsns; i++) {
+			len = GET_INSN_LENGTH(patch->insns[i]);
+			ret = patch_text_nosync(patch->addr + i * len,
+						&patch->insns[i], len);
+		}
		atomic_inc(&patch->cpu_count);
	} else {
		while (atomic_read(&patch->cpu_count) <= num_online_cpus())

@@ -119,11 +123,12 @@ static int patch_text_cb(void *data)
}
NOKPROBE_SYMBOL(patch_text_cb);

-int patch_text(void *addr, u32 insn)
+int patch_text(void *addr, u32 *insns, int ninsns)
{
	struct patch_insn patch = {
		.addr = addr,
-		.insn = insn,
+		.insns = insns,
+		.ninsns = ninsns,
		.cpu_count = ATOMIC_INIT(0),
	};


@@ -23,13 +23,14 @@ post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);

static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
+	u32 insn = __BUG_INSN_32;
	unsigned long offset = GET_INSN_LENGTH(p->opcode);

	p->ainsn.api.restore = (unsigned long)p->addr + offset;

-	patch_text(p->ainsn.api.insn, p->opcode);
+	patch_text(p->ainsn.api.insn, &p->opcode, 1);
	patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset),
-		   __BUG_INSN_32);
+		   &insn, 1);
}

static void __kprobes arch_prepare_simulate(struct kprobe *p)

@@ -116,16 +117,16 @@ void *alloc_insn_page(void)
/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
-	if ((p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
-		patch_text(p->addr, __BUG_INSN_32);
-	else
-		patch_text(p->addr, __BUG_INSN_16);
+	u32 insn = (p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32 ?
+		   __BUG_INSN_32 : __BUG_INSN_16;
+
+	patch_text(p->addr, &insn, 1);
}

/* remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
-	patch_text(p->addr, p->opcode);
+	patch_text(p->addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)

@@ -573,6 +573,11 @@ static inline u32 rv_fence(u8 pred, u8 succ)
	return rv_i_insn(imm11_0, 0, 0, 0, 0xf);
}

+static inline u32 rv_nop(void)
+{
+	return rv_i_insn(0, 0, 0, 0, 0x13);
+}
+
/* RVC instrutions. */

static inline u16 rvc_addi4spn(u8 rd, u32 imm10)
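
For context, the instruction rv_nop() emits is the canonical RISC-V NOP, addi x0, x0, 0: with every field zero except the 0x13 (OP-IMM) opcode, the assembled word is 0x00000013. A tiny hypothetical usage sketch (not from this patch):

	/* All fields other than the 0x13 opcode are zero: addi x0, x0, 0 */
	u32 nop = rv_nop();	/* == 0x00000013 */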
@ -8,6 +8,8 @@
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/bpf.h>
|
||||
#include <linux/filter.h>
|
||||
#include <linux/memory.h>
|
||||
#include <linux/stop_machine.h>
|
||||
#include "bpf_jit.h"
|
||||
|
||||
#define RV_REG_TCC RV_REG_A6
|
||||
@ -238,7 +240,7 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
|
||||
if (!is_tail_call)
|
||||
emit_mv(RV_REG_A0, RV_REG_A5, ctx);
|
||||
emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
|
||||
is_tail_call ? 4 : 0, /* skip TCC init */
|
||||
is_tail_call ? 20 : 0, /* skip reserved nops and TCC init */
|
||||
ctx);
|
||||
}
|
||||
|
||||
@ -428,12 +430,12 @@ static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
|
||||
*rd = RV_REG_T2;
|
||||
}
|
||||
|
||||
static int emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr,
|
||||
static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr,
|
||||
struct rv_jit_context *ctx)
|
||||
{
|
||||
s64 upper, lower;
|
||||
|
||||
if (rvoff && is_21b_int(rvoff) && !force_jalr) {
|
||||
if (rvoff && fixed_addr && is_21b_int(rvoff)) {
|
||||
emit(rv_jal(rd, rvoff >> 1), ctx);
|
||||
return 0;
|
||||
} else if (in_auipc_jalr_range(rvoff)) {
|
||||
@ -454,24 +456,17 @@ static bool is_signed_bpf_cond(u8 cond)
|
||||
cond == BPF_JSGE || cond == BPF_JSLE;
|
||||
}
|
||||
|
||||
static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
|
||||
static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx)
|
||||
{
|
||||
s64 off = 0;
|
||||
u64 ip;
|
||||
u8 rd;
|
||||
int ret;
|
||||
|
||||
if (addr && ctx->insns) {
|
||||
ip = (u64)(long)(ctx->insns + ctx->ninsns);
|
||||
off = addr - ip;
|
||||
}
|
||||
|
||||
ret = emit_jump_and_link(RV_REG_RA, off, !fixed, ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
rd = bpf_to_rv_reg(BPF_REG_0, ctx);
|
||||
emit_mv(rd, RV_REG_A0, ctx);
|
||||
return 0;
|
||||
return emit_jump_and_link(RV_REG_RA, off, fixed_addr, ctx);
|
||||
}
|
||||
|
||||
static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
|
||||
@ -622,6 +617,401 @@ static int add_exception_handler(const struct bpf_insn *insn,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gen_call_or_nops(void *target, void *ip, u32 *insns)
|
||||
{
|
||||
s64 rvoff;
|
||||
int i, ret;
|
||||
struct rv_jit_context ctx;
|
||||
|
||||
ctx.ninsns = 0;
|
||||
ctx.insns = (u16 *)insns;
|
||||
|
||||
if (!target) {
|
||||
for (i = 0; i < 4; i++)
|
||||
emit(rv_nop(), &ctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
rvoff = (s64)(target - (ip + 4));
|
||||
emit(rv_sd(RV_REG_SP, -8, RV_REG_RA), &ctx);
|
||||
ret = emit_jump_and_link(RV_REG_RA, rvoff, false, &ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
emit(rv_ld(RV_REG_RA, -8, RV_REG_SP), &ctx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gen_jump_or_nops(void *target, void *ip, u32 *insns)
|
||||
{
|
||||
s64 rvoff;
|
||||
struct rv_jit_context ctx;
|
||||
|
||||
ctx.ninsns = 0;
|
||||
ctx.insns = (u16 *)insns;
|
||||
|
||||
if (!target) {
|
||||
emit(rv_nop(), &ctx);
|
||||
emit(rv_nop(), &ctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
rvoff = (s64)(target - ip);
|
||||
return emit_jump_and_link(RV_REG_ZERO, rvoff, false, &ctx);
|
||||
}
|
||||
|
||||
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
|
||||
void *old_addr, void *new_addr)
|
||||
{
|
||||
u32 old_insns[4], new_insns[4];
|
||||
bool is_call = poke_type == BPF_MOD_CALL;
|
||||
int (*gen_insns)(void *target, void *ip, u32 *insns);
|
||||
int ninsns = is_call ? 4 : 2;
|
||||
int ret;
|
||||
|
||||
if (!is_bpf_text_address((unsigned long)ip))
|
||||
return -ENOTSUPP;
|
||||
|
||||
gen_insns = is_call ? gen_call_or_nops : gen_jump_or_nops;
|
||||
|
||||
ret = gen_insns(old_addr, ip, old_insns);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (memcmp(ip, old_insns, ninsns * 4))
|
||||
return -EFAULT;
|
||||
|
||||
ret = gen_insns(new_addr, ip, new_insns);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
cpus_read_lock();
|
||||
mutex_lock(&text_mutex);
|
||||
if (memcmp(ip, new_insns, ninsns * 4))
|
||||
ret = patch_text(ip, new_insns, ninsns);
|
||||
mutex_unlock(&text_mutex);
|
||||
cpus_read_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void store_args(int nregs, int args_off, struct rv_jit_context *ctx)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nregs; i++) {
|
||||
emit_sd(RV_REG_FP, -args_off, RV_REG_A0 + i, ctx);
|
||||
args_off -= 8;
|
||||
}
|
||||
}
|
||||
|
||||
static void restore_args(int nregs, int args_off, struct rv_jit_context *ctx)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nregs; i++) {
|
||||
emit_ld(RV_REG_A0 + i, -args_off, RV_REG_FP, ctx);
|
||||
args_off -= 8;
|
||||
}
|
||||
}
|
||||
|
||||
static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_off,
|
||||
int run_ctx_off, bool save_ret, struct rv_jit_context *ctx)
|
||||
{
|
||||
int ret, branch_off;
|
||||
struct bpf_prog *p = l->link.prog;
|
||||
int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
|
||||
|
||||
if (l->cookie) {
|
||||
emit_imm(RV_REG_T1, l->cookie, ctx);
|
||||
emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_T1, ctx);
|
||||
} else {
|
||||
emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_ZERO, ctx);
|
||||
}
|
||||
|
||||
/* arg1: prog */
|
||||
emit_imm(RV_REG_A0, (const s64)p, ctx);
|
||||
/* arg2: &run_ctx */
|
||||
emit_addi(RV_REG_A1, RV_REG_FP, -run_ctx_off, ctx);
|
||||
ret = emit_call((const u64)bpf_trampoline_enter(p), true, ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* if (__bpf_prog_enter(prog) == 0)
|
||||
* goto skip_exec_of_prog;
|
||||
*/
|
||||
branch_off = ctx->ninsns;
|
||||
/* nop reserved for conditional jump */
|
||||
emit(rv_nop(), ctx);
|
||||
|
||||
/* store prog start time */
|
||||
emit_mv(RV_REG_S1, RV_REG_A0, ctx);
|
||||
|
||||
/* arg1: &args_off */
|
||||
emit_addi(RV_REG_A0, RV_REG_FP, -args_off, ctx);
|
||||
if (!p->jited)
|
||||
/* arg2: progs[i]->insnsi for interpreter */
|
||||
emit_imm(RV_REG_A1, (const s64)p->insnsi, ctx);
|
||||
ret = emit_call((const u64)p->bpf_func, true, ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (save_ret)
|
||||
emit_sd(RV_REG_FP, -retval_off, regmap[BPF_REG_0], ctx);
|
||||
|
||||
/* update branch with beqz */
|
||||
if (ctx->insns) {
|
||||
int offset = ninsns_rvoff(ctx->ninsns - branch_off);
|
||||
u32 insn = rv_beq(RV_REG_A0, RV_REG_ZERO, offset >> 1);
|
||||
*(u32 *)(ctx->insns + branch_off) = insn;
|
||||
}
|
||||
|
||||
/* arg1: prog */
|
||||
emit_imm(RV_REG_A0, (const s64)p, ctx);
|
||||
/* arg2: prog start time */
|
||||
emit_mv(RV_REG_A1, RV_REG_S1, ctx);
|
||||
/* arg3: &run_ctx */
|
||||
emit_addi(RV_REG_A2, RV_REG_FP, -run_ctx_off, ctx);
|
||||
ret = emit_call((const u64)bpf_trampoline_exit(p), true, ctx);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
|
||||
const struct btf_func_model *m,
|
||||
struct bpf_tramp_links *tlinks,
|
||||
void *func_addr, u32 flags,
|
||||
struct rv_jit_context *ctx)
|
||||
{
|
||||
int i, ret, offset;
|
||||
int *branches_off = NULL;
|
||||
int stack_size = 0, nregs = m->nr_args;
|
||||
int retaddr_off, fp_off, retval_off, args_off;
|
||||
int nregs_off, ip_off, run_ctx_off, sreg_off;
|
||||
struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
|
||||
struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
|
||||
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
|
||||
void *orig_call = func_addr;
|
||||
bool save_ret;
|
||||
u32 insn;
|
||||
|
||||
/* Generated trampoline stack layout:
|
||||
*
|
||||
* FP - 8 [ RA of parent func ] return address of parent
|
||||
* function
|
||||
* FP - retaddr_off [ RA of traced func ] return address of traced
|
||||
* function
|
||||
* FP - fp_off [ FP of parent func ]
|
||||
*
|
||||
* FP - retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or
|
||||
* BPF_TRAMP_F_RET_FENTRY_RET
|
||||
* [ argN ]
|
||||
* [ ... ]
|
||||
* FP - args_off [ arg1 ]
|
||||
*
|
||||
* FP - nregs_off [ regs count ]
|
||||
*
|
||||
* FP - ip_off [ traced func ] BPF_TRAMP_F_IP_ARG
|
||||
*
|
||||
* FP - run_ctx_off [ bpf_tramp_run_ctx ]
|
||||
*
|
||||
* FP - sreg_off [ callee saved reg ]
|
||||
*
|
||||
* [ pads ] pads for 16 bytes alignment
|
||||
*/
|
||||
|
||||
if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
|
||||
return -ENOTSUPP;
|
||||
|
||||
/* extra regiters for struct arguments */
|
||||
for (i = 0; i < m->nr_args; i++)
|
||||
if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
|
||||
nregs += round_up(m->arg_size[i], 8) / 8 - 1;
|
||||
|
||||
/* 8 arguments passed by registers */
|
||||
if (nregs > 8)
|
||||
return -ENOTSUPP;
|
||||
|
||||
/* room for parent function return address */
|
||||
stack_size += 8;
|
||||
|
||||
stack_size += 8;
|
||||
retaddr_off = stack_size;
|
||||
|
||||
stack_size += 8;
|
||||
fp_off = stack_size;
|
||||
|
||||
save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
|
||||
if (save_ret) {
|
||||
stack_size += 8;
|
||||
retval_off = stack_size;
|
||||
}
|
||||
|
||||
stack_size += nregs * 8;
|
||||
args_off = stack_size;
|
||||
|
||||
stack_size += 8;
|
||||
nregs_off = stack_size;
|
||||
|
||||
if (flags & BPF_TRAMP_F_IP_ARG) {
|
||||
stack_size += 8;
|
||||
ip_off = stack_size;
|
||||
}
|
||||
|
||||
stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8);
|
||||
run_ctx_off = stack_size;
|
||||
|
||||
stack_size += 8;
|
||||
sreg_off = stack_size;
|
||||
|
||||
stack_size = round_up(stack_size, 16);
|
||||
|
||||
emit_addi(RV_REG_SP, RV_REG_SP, -stack_size, ctx);
|
||||
|
||||
emit_sd(RV_REG_SP, stack_size - retaddr_off, RV_REG_RA, ctx);
|
||||
emit_sd(RV_REG_SP, stack_size - fp_off, RV_REG_FP, ctx);
|
||||
|
||||
emit_addi(RV_REG_FP, RV_REG_SP, stack_size, ctx);
|
||||
|
||||
/* callee saved register S1 to pass start time */
|
||||
emit_sd(RV_REG_FP, -sreg_off, RV_REG_S1, ctx);
|
||||
|
||||
/* store ip address of the traced function */
|
||||
if (flags & BPF_TRAMP_F_IP_ARG) {
|
||||
emit_imm(RV_REG_T1, (const s64)func_addr, ctx);
|
||||
emit_sd(RV_REG_FP, -ip_off, RV_REG_T1, ctx);
|
||||
}
|
||||
|
||||
emit_li(RV_REG_T1, nregs, ctx);
|
||||
emit_sd(RV_REG_FP, -nregs_off, RV_REG_T1, ctx);
|
||||
|
||||
store_args(nregs, args_off, ctx);
|
||||
|
||||
/* skip to actual body of traced function */
|
||||
if (flags & BPF_TRAMP_F_SKIP_FRAME)
|
||||
orig_call += 16;
|
||||
|
||||
if (flags & BPF_TRAMP_F_CALL_ORIG) {
|
||||
emit_imm(RV_REG_A0, (const s64)im, ctx);
|
||||
ret = emit_call((const u64)__bpf_tramp_enter, true, ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
for (i = 0; i < fentry->nr_links; i++) {
|
||||
ret = invoke_bpf_prog(fentry->links[i], args_off, retval_off, run_ctx_off,
|
||||
flags & BPF_TRAMP_F_RET_FENTRY_RET, ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (fmod_ret->nr_links) {
|
||||
branches_off = kcalloc(fmod_ret->nr_links, sizeof(int), GFP_KERNEL);
|
||||
if (!branches_off)
|
||||
return -ENOMEM;
|
||||
|
||||
/* cleanup to avoid garbage return value confusion */
|
||||
emit_sd(RV_REG_FP, -retval_off, RV_REG_ZERO, ctx);
|
||||
for (i = 0; i < fmod_ret->nr_links; i++) {
|
||||
ret = invoke_bpf_prog(fmod_ret->links[i], args_off, retval_off,
|
||||
run_ctx_off, true, ctx);
|
||||
if (ret)
|
||||
goto out;
|
||||
emit_ld(RV_REG_T1, -retval_off, RV_REG_FP, ctx);
|
||||
branches_off[i] = ctx->ninsns;
|
||||
/* nop reserved for conditional jump */
|
||||
emit(rv_nop(), ctx);
|
||||
}
|
||||
}
|
||||
|
||||
if (flags & BPF_TRAMP_F_CALL_ORIG) {
|
||||
restore_args(nregs, args_off, ctx);
|
||||
ret = emit_call((const u64)orig_call, true, ctx);
|
||||
if (ret)
|
||||
goto out;
|
||||
emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
|
||||
im->ip_after_call = ctx->insns + ctx->ninsns;
|
||||
/* 2 nops reserved for auipc+jalr pair */
|
||||
emit(rv_nop(), ctx);
|
||||
emit(rv_nop(), ctx);
|
||||
}
|
||||
|
||||
/* update branches saved in invoke_bpf_mod_ret with bnez */
|
||||
for (i = 0; ctx->insns && i < fmod_ret->nr_links; i++) {
|
||||
offset = ninsns_rvoff(ctx->ninsns - branches_off[i]);
|
||||
insn = rv_bne(RV_REG_T1, RV_REG_ZERO, offset >> 1);
|
||||
*(u32 *)(ctx->insns + branches_off[i]) = insn;
|
||||
}
|
||||
|
||||
for (i = 0; i < fexit->nr_links; i++) {
|
||||
ret = invoke_bpf_prog(fexit->links[i], args_off, retval_off,
|
||||
run_ctx_off, false, ctx);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (flags & BPF_TRAMP_F_CALL_ORIG) {
|
||||
im->ip_epilogue = ctx->insns + ctx->ninsns;
|
||||
emit_imm(RV_REG_A0, (const s64)im, ctx);
|
||||
ret = emit_call((const u64)__bpf_tramp_exit, true, ctx);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (flags & BPF_TRAMP_F_RESTORE_REGS)
|
||||
restore_args(nregs, args_off, ctx);
|
||||
|
||||
if (save_ret)
|
||||
emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
|
||||
|
||||
emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
|
||||
|
||||
if (flags & BPF_TRAMP_F_SKIP_FRAME)
|
||||
/* return address of parent function */
|
||||
emit_ld(RV_REG_RA, stack_size - 8, RV_REG_SP, ctx);
|
||||
else
|
||||
/* return address of traced function */
|
||||
emit_ld(RV_REG_RA, stack_size - retaddr_off, RV_REG_SP, ctx);
|
||||
|
||||
emit_ld(RV_REG_FP, stack_size - fp_off, RV_REG_SP, ctx);
|
||||
emit_addi(RV_REG_SP, RV_REG_SP, stack_size, ctx);
|
||||
|
||||
emit_jalr(RV_REG_ZERO, RV_REG_RA, 0, ctx);
|
||||
|
||||
ret = ctx->ninsns;
|
||||
out:
|
||||
kfree(branches_off);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
|
||||
void *image_end, const struct btf_func_model *m,
|
||||
u32 flags, struct bpf_tramp_links *tlinks,
|
||||
void *func_addr)
|
||||
{
|
||||
int ret;
|
||||
struct rv_jit_context ctx;
|
||||
|
||||
ctx.ninsns = 0;
|
||||
ctx.insns = NULL;
|
||||
ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (ninsns_rvoff(ret) > (long)image_end - (long)image)
|
||||
return -EFBIG;
|
||||
|
||||
ctx.ninsns = 0;
|
||||
ctx.insns = image;
|
||||
ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
bpf_flush_icache(ctx.insns, ctx.insns + ctx.ninsns);
|
||||
|
||||
return ninsns_rvoff(ret);
|
||||
}
|
||||
|
||||
int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
|
||||
bool extra_pass)
|
||||
{
|
||||
@ -913,7 +1303,7 @@ out_be:
|
||||
/* JUMP off */
|
||||
case BPF_JMP | BPF_JA:
|
||||
rvoff = rv_offset(i, off, ctx);
|
||||
ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
|
||||
ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
@ -1032,17 +1422,20 @@ out_be:
|
||||
/* function call */
|
||||
case BPF_JMP | BPF_CALL:
|
||||
{
|
||||
bool fixed;
|
||||
bool fixed_addr;
|
||||
u64 addr;
|
||||
|
||||
mark_call(ctx);
|
||||
ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
|
||||
&fixed);
|
||||
ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
|
||||
&addr, &fixed_addr);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ret = emit_call(fixed, addr, ctx);
|
||||
|
||||
ret = emit_call(addr, fixed_addr, ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
|
||||
break;
|
||||
}
|
||||
/* tail call */
|
||||
@ -1057,7 +1450,7 @@ out_be:
|
||||
break;
|
||||
|
||||
rvoff = epilogue_offset(ctx);
|
||||
ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
|
||||
ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
@ -1270,7 +1663,7 @@ out_be:
|
||||
|
||||
void bpf_jit_build_prologue(struct rv_jit_context *ctx)
|
||||
{
|
||||
int stack_adjust = 0, store_offset, bpf_stack_adjust;
|
||||
int i, stack_adjust = 0, store_offset, bpf_stack_adjust;
|
||||
|
||||
bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
|
||||
if (bpf_stack_adjust)
|
||||
@ -1297,6 +1690,10 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx)
|
||||
|
||||
store_offset = stack_adjust - 8;
|
||||
|
||||
/* reserve 4 nop insns */
|
||||
for (i = 0; i < 4; i++)
|
||||
emit(rv_nop(), ctx);
|
||||
|
||||
/* First instruction is always setting the tail-call-counter
|
||||
* (TCC) register. This instruction is skipped for tail calls.
|
||||
* Force using a 4-byte (non-compressed) instruction.
|
||||
|
@ -85,7 +85,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
|
||||
td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
|
||||
ICE_TX_DESC_CMD_RE;
|
||||
|
||||
tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
|
||||
tx_buf->type = ICE_TX_BUF_DUMMY;
|
||||
tx_buf->raw_buf = raw_packet;
|
||||
|
||||
tx_desc->cmd_type_offset_bsz =
|
||||
@ -112,31 +112,29 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
|
||||
static void
|
||||
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
|
||||
{
|
||||
if (tx_buf->skb) {
|
||||
if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) {
|
||||
devm_kfree(ring->dev, tx_buf->raw_buf);
|
||||
} else if (ice_ring_is_xdp(ring)) {
|
||||
if (ring->xsk_pool)
|
||||
xsk_buff_free(tx_buf->xdp);
|
||||
else
|
||||
page_frag_free(tx_buf->raw_buf);
|
||||
} else {
|
||||
dev_kfree_skb_any(tx_buf->skb);
|
||||
}
|
||||
if (dma_unmap_len(tx_buf, len))
|
||||
dma_unmap_single(ring->dev,
|
||||
dma_unmap_addr(tx_buf, dma),
|
||||
dma_unmap_len(tx_buf, len),
|
||||
DMA_TO_DEVICE);
|
||||
} else if (dma_unmap_len(tx_buf, len)) {
|
||||
if (dma_unmap_len(tx_buf, len))
|
||||
dma_unmap_page(ring->dev,
|
||||
dma_unmap_addr(tx_buf, dma),
|
||||
dma_unmap_len(tx_buf, len),
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
switch (tx_buf->type) {
|
||||
case ICE_TX_BUF_DUMMY:
|
||||
devm_kfree(ring->dev, tx_buf->raw_buf);
|
||||
break;
|
||||
case ICE_TX_BUF_SKB:
|
||||
dev_kfree_skb_any(tx_buf->skb);
|
||||
break;
|
||||
case ICE_TX_BUF_XDP_TX:
|
||||
page_frag_free(tx_buf->raw_buf);
|
||||
break;
|
||||
case ICE_TX_BUF_XDP_XMIT:
|
||||
xdp_return_frame(tx_buf->xdpf);
|
||||
break;
|
||||
}
|
||||
|
||||
tx_buf->next_to_watch = NULL;
|
||||
tx_buf->skb = NULL;
|
||||
tx_buf->type = ICE_TX_BUF_EMPTY;
|
||||
dma_unmap_len_set(tx_buf, len, 0);
|
||||
/* tx_buf must be completely set up in the transmit path */
|
||||
}
|
||||
@ -269,7 +267,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
/* clear tx_buf data */
|
||||
tx_buf->skb = NULL;
|
||||
tx_buf->type = ICE_TX_BUF_EMPTY;
|
||||
dma_unmap_len_set(tx_buf, len, 0);
|
||||
|
||||
/* unmap remaining buffers */
|
||||
@ -580,7 +578,7 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
|
||||
case XDP_TX:
|
||||
if (static_branch_unlikely(&ice_xdp_locking_key))
|
||||
spin_lock(&xdp_ring->tx_lock);
|
||||
ret = __ice_xmit_xdp_ring(xdp, xdp_ring);
|
||||
ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);
|
||||
if (static_branch_unlikely(&ice_xdp_locking_key))
|
||||
spin_unlock(&xdp_ring->tx_lock);
|
||||
if (ret == ICE_XDP_CONSUMED)
|
||||
@ -607,6 +605,25 @@ exit:
|
||||
ice_set_rx_bufs_act(xdp, rx_ring, ret);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_xmit_xdp_ring - submit frame to XDP ring for transmission
|
||||
* @xdpf: XDP frame that will be converted to XDP buff
|
||||
* @xdp_ring: XDP ring for transmission
|
||||
*/
|
||||
static int ice_xmit_xdp_ring(const struct xdp_frame *xdpf,
|
||||
struct ice_tx_ring *xdp_ring)
|
||||
{
|
||||
struct xdp_buff xdp;
|
||||
|
||||
xdp.data_hard_start = (void *)xdpf;
|
||||
xdp.data = xdpf->data;
|
||||
xdp.data_end = xdp.data + xdpf->len;
|
||||
xdp.frame_sz = xdpf->frame_sz;
|
||||
xdp.flags = xdpf->flags;
|
||||
|
||||
return __ice_xmit_xdp_ring(&xdp, xdp_ring, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_xdp_xmit - submit packets to XDP ring for transmission
|
||||
* @dev: netdev
|
||||
@ -652,7 +669,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
|
||||
|
||||
tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
|
||||
for (i = 0; i < n; i++) {
|
||||
struct xdp_frame *xdpf = frames[i];
|
||||
const struct xdp_frame *xdpf = frames[i];
|
||||
int err;
|
||||
|
||||
err = ice_xmit_xdp_ring(xdpf, xdp_ring);
|
||||
@ -1712,6 +1729,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
tx_buf = &tx_ring->tx_buf[i];
|
||||
tx_buf->type = ICE_TX_BUF_FRAG;
|
||||
}
|
||||
|
||||
/* record SW timestamp if HW timestamp is not available */
|
||||
@ -2358,6 +2376,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
|
||||
/* record the location of the first descriptor for this packet */
|
||||
first = &tx_ring->tx_buf[tx_ring->next_to_use];
|
||||
first->skb = skb;
|
||||
first->type = ICE_TX_BUF_SKB;
|
||||
first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
|
||||
first->gso_segs = 1;
|
||||
first->tx_flags = 0;
|
||||
@ -2530,11 +2549,11 @@ void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
|
||||
dma_unmap_addr(tx_buf, dma),
|
||||
dma_unmap_len(tx_buf, len),
|
||||
DMA_TO_DEVICE);
|
||||
if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
|
||||
if (tx_buf->type == ICE_TX_BUF_DUMMY)
|
||||
devm_kfree(tx_ring->dev, tx_buf->raw_buf);
|
||||
|
||||
/* clear next_to_watch to prevent false hangs */
|
||||
tx_buf->raw_buf = NULL;
|
||||
tx_buf->type = ICE_TX_BUF_EMPTY;
|
||||
tx_buf->tx_flags = 0;
|
||||
tx_buf->next_to_watch = NULL;
|
||||
dma_unmap_len_set(tx_buf, len, 0);
|
||||
|
@ -121,10 +121,7 @@ static inline int ice_skb_pad(void)
|
||||
#define ICE_TX_FLAGS_TSO BIT(0)
|
||||
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
|
||||
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
|
||||
/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be
|
||||
* freed instead of returned like skb packets.
|
||||
*/
|
||||
#define ICE_TX_FLAGS_DUMMY_PKT BIT(3)
|
||||
/* Free, was ICE_TX_FLAGS_DUMMY_PKT */
|
||||
#define ICE_TX_FLAGS_TSYN BIT(4)
|
||||
#define ICE_TX_FLAGS_IPV4 BIT(5)
|
||||
#define ICE_TX_FLAGS_IPV6 BIT(6)
|
||||
@ -149,22 +146,44 @@ static inline int ice_skb_pad(void)
|
||||
|
||||
#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
|
||||
|
||||
/**
|
||||
* enum ice_tx_buf_type - type of &ice_tx_buf to act on Tx completion
|
||||
* @ICE_TX_BUF_EMPTY: unused OR XSk frame, no action required
|
||||
* @ICE_TX_BUF_DUMMY: dummy Flow Director packet, unmap and kfree()
|
||||
* @ICE_TX_BUF_FRAG: mapped skb OR &xdp_buff frag, only unmap DMA
|
||||
* @ICE_TX_BUF_SKB: &sk_buff, unmap and consume_skb(), update stats
|
||||
* @ICE_TX_BUF_XDP_TX: &xdp_buff, unmap and page_frag_free(), stats
|
||||
* @ICE_TX_BUF_XDP_XMIT: &xdp_frame, unmap and xdp_return_frame(), stats
|
||||
* @ICE_TX_BUF_XSK_TX: &xdp_buff on XSk queue, xsk_buff_free(), stats
|
||||
*/
|
||||
enum ice_tx_buf_type {
|
||||
ICE_TX_BUF_EMPTY = 0U,
|
||||
ICE_TX_BUF_DUMMY,
|
||||
ICE_TX_BUF_FRAG,
|
||||
ICE_TX_BUF_SKB,
|
||||
ICE_TX_BUF_XDP_TX,
|
||||
ICE_TX_BUF_XDP_XMIT,
|
||||
ICE_TX_BUF_XSK_TX,
|
||||
};
|
||||
|
||||
struct ice_tx_buf {
|
||||
union {
|
||||
struct ice_tx_desc *next_to_watch;
|
||||
u32 rs_idx;
|
||||
};
|
||||
union {
|
||||
struct sk_buff *skb;
|
||||
void *raw_buf; /* used for XDP */
|
||||
struct xdp_buff *xdp; /* used for XDP_TX ZC */
|
||||
void *raw_buf; /* used for XDP_TX and FDir rules */
|
||||
struct sk_buff *skb; /* used for .ndo_start_xmit() */
|
||||
struct xdp_frame *xdpf; /* used for .ndo_xdp_xmit() */
|
||||
struct xdp_buff *xdp; /* used for XDP_TX ZC */
|
||||
};
|
||||
unsigned int bytecount;
|
||||
union {
|
||||
unsigned int gso_segs;
|
||||
unsigned int nr_frags; /* used for mbuf XDP */
|
||||
unsigned int nr_frags; /* used for mbuf XDP */
|
||||
};
|
||||
u32 tx_flags;
|
||||
u32 type:16; /* &ice_tx_buf_type */
|
||||
u32 tx_flags:16;
|
||||
DEFINE_DMA_UNMAP_LEN(len);
|
||||
DEFINE_DMA_UNMAP_ADDR(dma);
|
||||
};
|
||||
|
@ -222,18 +222,28 @@ ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
|
||||
|
||||
/**
|
||||
* ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
|
||||
* @xdp_ring: XDP Tx ring
|
||||
* @dev: device for DMA mapping
|
||||
* @tx_buf: Tx buffer to clean
|
||||
* @bq: XDP bulk flush struct
|
||||
*/
|
||||
static void
|
||||
ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
|
||||
ice_clean_xdp_tx_buf(struct device *dev, struct ice_tx_buf *tx_buf,
|
||||
struct xdp_frame_bulk *bq)
|
||||
{
|
||||
dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
|
||||
dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma),
|
||||
dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
|
||||
dma_unmap_len_set(tx_buf, len, 0);
|
||||
xdp_ring->xdp_tx_active--;
|
||||
page_frag_free(tx_buf->raw_buf);
|
||||
tx_buf->raw_buf = NULL;
|
||||
|
||||
switch (tx_buf->type) {
|
||||
case ICE_TX_BUF_XDP_TX:
|
||||
page_frag_free(tx_buf->raw_buf);
|
||||
break;
|
||||
case ICE_TX_BUF_XDP_XMIT:
|
||||
xdp_return_frame_bulk(tx_buf->xdpf, bq);
|
||||
break;
|
||||
}
|
||||
|
||||
tx_buf->type = ICE_TX_BUF_EMPTY;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -243,11 +253,13 @@ ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
|
||||
static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
|
||||
{
|
||||
int total_bytes = 0, total_pkts = 0;
|
||||
struct device *dev = xdp_ring->dev;
|
||||
u32 ntc = xdp_ring->next_to_clean;
|
||||
struct ice_tx_desc *tx_desc;
|
||||
u32 cnt = xdp_ring->count;
|
||||
struct xdp_frame_bulk bq;
|
||||
u32 frags, xdp_tx = 0;
|
||||
u32 ready_frames = 0;
|
||||
u32 frags;
|
||||
u32 idx;
|
||||
u32 ret;
|
||||
|
||||
@ -261,12 +273,16 @@ static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
|
||||
ready_frames = idx + cnt - ntc + 1;
|
||||
}
|
||||
|
||||
if (!ready_frames)
|
||||
if (unlikely(!ready_frames))
|
||||
return 0;
|
||||
ret = ready_frames;
|
||||
|
||||
xdp_frame_bulk_init(&bq);
|
||||
rcu_read_lock(); /* xdp_return_frame_bulk() */
|
||||
|
||||
while (ready_frames) {
|
||||
struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
|
||||
struct ice_tx_buf *head = tx_buf;
|
||||
|
||||
/* bytecount holds size of head + frags */
|
||||
total_bytes += tx_buf->bytecount;
|
||||
@ -274,11 +290,8 @@ static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
|
||||
total_pkts++;
|
||||
/* count head + frags */
|
||||
ready_frames -= frags + 1;
|
||||
xdp_tx++;
|
||||
|
||||
if (xdp_ring->xsk_pool)
|
||||
xsk_buff_free(tx_buf->xdp);
|
||||
else
|
||||
ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
|
||||
ntc++;
|
||||
if (ntc == cnt)
|
||||
ntc = 0;
|
||||
@ -286,15 +299,21 @@ static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
|
||||
for (int i = 0; i < frags; i++) {
|
||||
tx_buf = &xdp_ring->tx_buf[ntc];
|
||||
|
||||
ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
|
||||
ice_clean_xdp_tx_buf(dev, tx_buf, &bq);
|
||||
ntc++;
|
||||
if (ntc == cnt)
|
||||
ntc = 0;
|
||||
}
|
||||
|
||||
ice_clean_xdp_tx_buf(dev, head, &bq);
|
||||
}
|
||||
|
||||
xdp_flush_frame_bulk(&bq);
|
||||
rcu_read_unlock();
|
||||
|
||||
tx_desc->cmd_type_offset_bsz = 0;
|
||||
xdp_ring->next_to_clean = ntc;
|
||||
xdp_ring->xdp_tx_active -= xdp_tx;
|
||||
ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
|
||||
|
||||
return ret;
|
||||
@ -304,8 +323,10 @@ static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
|
||||
* __ice_xmit_xdp_ring - submit frame to XDP ring for transmission
|
||||
* @xdp: XDP buffer to be placed onto Tx descriptors
|
||||
* @xdp_ring: XDP ring for transmission
|
||||
* @frame: whether this comes from .ndo_xdp_xmit()
|
||||
*/
|
||||
int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
|
||||
int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
|
||||
bool frame)
|
||||
{
|
||||
struct skb_shared_info *sinfo = NULL;
|
||||
u32 size = xdp->data_end - xdp->data;
|
||||
@ -321,17 +342,17 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
|
||||
u32 frag = 0;
|
||||
|
||||
free_space = ICE_DESC_UNUSED(xdp_ring);
|
||||
|
||||
if (ICE_DESC_UNUSED(xdp_ring) < ICE_RING_QUARTER(xdp_ring))
|
||||
if (free_space < ICE_RING_QUARTER(xdp_ring))
|
||||
free_space += ice_clean_xdp_irq(xdp_ring);
|
||||
|
||||
if (unlikely(!free_space))
|
||||
goto busy;
|
||||
|
||||
if (unlikely(xdp_buff_has_frags(xdp))) {
|
||||
sinfo = xdp_get_shared_info_from_buff(xdp);
|
||||
nr_frags = sinfo->nr_frags;
|
||||
if (free_space < nr_frags + 1) {
|
||||
xdp_ring->ring_stats->tx_stats.tx_busy++;
|
||||
return ICE_XDP_CONSUMED;
|
||||
}
|
||||
if (free_space < nr_frags + 1)
|
||||
goto busy;
|
||||
}
|
||||
|
||||
tx_desc = ICE_TX_DESC(xdp_ring, ntu);
|
||||
@ -349,9 +370,15 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
|
||||
dma_unmap_len_set(tx_buf, len, size);
|
||||
dma_unmap_addr_set(tx_buf, dma, dma);
|
||||
|
||||
if (frame) {
|
||||
tx_buf->type = ICE_TX_BUF_FRAG;
|
||||
} else {
|
||||
tx_buf->type = ICE_TX_BUF_XDP_TX;
|
||||
tx_buf->raw_buf = data;
|
||||
}
|
||||
|
||||
tx_desc->buf_addr = cpu_to_le64(dma);
|
||||
tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
|
||||
tx_buf->raw_buf = data;
|
||||
|
||||
ntu++;
|
||||
if (ntu == cnt)
|
||||
@ -372,6 +399,11 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
|
||||
tx_head->bytecount = xdp_get_buff_len(xdp);
|
||||
tx_head->nr_frags = nr_frags;
|
||||
|
||||
if (frame) {
|
||||
tx_head->type = ICE_TX_BUF_XDP_XMIT;
|
||||
tx_head->xdpf = xdp->data_hard_start;
|
||||
}
|
||||
|
||||
/* update last descriptor from a frame with EOP */
|
||||
tx_desc->cmd_type_offset_bsz |=
|
||||
cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);
|
||||
@ -395,19 +427,11 @@ dma_unmap:
|
||||
ntu--;
|
||||
}
|
||||
return ICE_XDP_CONSUMED;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_xmit_xdp_ring - submit frame to XDP ring for transmission
|
||||
* @xdpf: XDP frame that will be converted to XDP buff
|
||||
* @xdp_ring: XDP ring for transmission
|
||||
*/
|
||||
int ice_xmit_xdp_ring(struct xdp_frame *xdpf, struct ice_tx_ring *xdp_ring)
|
||||
{
|
||||
struct xdp_buff xdp;
|
||||
busy:
|
||||
xdp_ring->ring_stats->tx_stats.tx_busy++;
|
||||
|
||||
xdp_convert_frame_to_buff(xdpf, &xdp);
|
||||
return __ice_xmit_xdp_ring(&xdp, xdp_ring);
|
||||
return ICE_XDP_CONSUMED;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -142,8 +142,8 @@ static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)
|
||||
|
||||
void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);
|
||||
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
|
||||
int ice_xmit_xdp_ring(struct xdp_frame *xdpf, struct ice_tx_ring *xdp_ring);
|
||||
int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
|
||||
int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
|
||||
bool frame);
|
||||
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
|
||||
void
|
||||
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
|
||||
|
@ -597,21 +597,6 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
|
||||
return skb;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
|
||||
* @xdp_ring: XDP Tx ring
|
||||
* @tx_buf: Tx buffer to clean
|
||||
*/
|
||||
static void
|
||||
ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
|
||||
{
|
||||
page_frag_free(tx_buf->raw_buf);
|
||||
xdp_ring->xdp_tx_active--;
|
||||
dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
|
||||
dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
|
||||
dma_unmap_len_set(tx_buf, len, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
|
||||
* @xdp_ring: XDP Tx ring
|
||||
@ -629,8 +614,8 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
|
||||
|
||||
last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
|
||||
tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
|
||||
if ((tx_desc->cmd_type_offset_bsz &
|
||||
cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
|
||||
if (tx_desc->cmd_type_offset_bsz &
|
||||
cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
|
||||
if (last_rs >= ntc)
|
||||
completed_frames = last_rs - ntc + 1;
|
||||
else
|
||||
@ -649,9 +634,10 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
|
||||
for (i = 0; i < completed_frames; i++) {
|
||||
tx_buf = &xdp_ring->tx_buf[ntc];
|
||||
|
||||
if (tx_buf->raw_buf) {
|
||||
ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
|
||||
tx_buf->raw_buf = NULL;
|
||||
if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
|
||||
tx_buf->type = ICE_TX_BUF_EMPTY;
|
||||
xsk_buff_free(tx_buf->xdp);
|
||||
xdp_ring->xdp_tx_active--;
|
||||
} else {
|
||||
xsk_frames++;
|
||||
}
|
||||
@ -703,6 +689,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
|
||||
|
||||
tx_buf = &xdp_ring->tx_buf[ntu];
|
||||
tx_buf->xdp = xdp;
|
||||
tx_buf->type = ICE_TX_BUF_XSK_TX;
|
||||
tx_desc = ICE_TX_DESC(xdp_ring, ntu);
|
||||
tx_desc->buf_addr = cpu_to_le64(dma);
|
||||
tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
|
||||
@ -1101,12 +1088,12 @@ void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
|
||||
while (ntc != ntu) {
|
||||
struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
|
||||
|
||||
if (tx_buf->xdp)
|
||||
if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
|
||||
tx_buf->type = ICE_TX_BUF_EMPTY;
|
||||
xsk_buff_free(tx_buf->xdp);
|
||||
else
|
||||
} else {
|
||||
xsk_frames++;
|
||||
|
||||
tx_buf->raw_buf = NULL;
|
||||
}
|
||||
|
||||
ntc++;
|
||||
if (ntc >= xdp_ring->count)
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include <linux/btf.h>
|
||||
#include <linux/rcupdate_trace.h>
|
||||
#include <linux/static_call.h>
|
||||
#include <linux/memcontrol.h>
|
||||
|
||||
struct bpf_verifier_env;
|
||||
struct bpf_verifier_log;
|
||||
@ -180,6 +181,10 @@ enum btf_field_type {
|
||||
BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF,
|
||||
BPF_LIST_HEAD = (1 << 4),
|
||||
BPF_LIST_NODE = (1 << 5),
|
||||
BPF_RB_ROOT = (1 << 6),
|
||||
BPF_RB_NODE = (1 << 7),
|
||||
BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD |
|
||||
BPF_RB_NODE | BPF_RB_ROOT,
|
||||
};
|
||||
|
||||
struct btf_field_kptr {
|
||||
@ -283,6 +288,10 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
|
||||
return "bpf_list_head";
|
||||
case BPF_LIST_NODE:
|
||||
return "bpf_list_node";
|
||||
case BPF_RB_ROOT:
|
||||
return "bpf_rb_root";
|
||||
case BPF_RB_NODE:
|
||||
return "bpf_rb_node";
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
return "unknown";
|
||||
@ -303,6 +312,10 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
|
||||
return sizeof(struct bpf_list_head);
|
||||
case BPF_LIST_NODE:
|
||||
return sizeof(struct bpf_list_node);
|
||||
case BPF_RB_ROOT:
|
||||
return sizeof(struct bpf_rb_root);
|
||||
case BPF_RB_NODE:
|
||||
return sizeof(struct bpf_rb_node);
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
return 0;
|
||||
@ -323,6 +336,10 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
|
||||
return __alignof__(struct bpf_list_head);
|
||||
case BPF_LIST_NODE:
|
||||
return __alignof__(struct bpf_list_node);
|
||||
case BPF_RB_ROOT:
|
||||
return __alignof__(struct bpf_rb_root);
|
||||
case BPF_RB_NODE:
|
||||
return __alignof__(struct bpf_rb_node);
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
return 0;
|
||||
@ -346,6 +363,13 @@ static inline void bpf_obj_init(const struct btf_field_offs *foffs, void *obj)
|
||||
memset(obj + foffs->field_off[i], 0, foffs->field_sz[i]);
|
||||
}
|
||||
|
||||
/* 'dst' must be a temporary buffer and should not point to memory that is being
 * used in parallel by a bpf program or bpf syscall, otherwise the access from
 * the bpf program or bpf syscall may be corrupted by the reinitialization,
 * leading to weird problems. Even if 'dst' is newly allocated from the bpf memory
 * allocator, it is still possible for 'dst' to be used in parallel by a bpf
 * program or bpf syscall.
 */
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	bpf_obj_init(map->field_offs, dst);
@ -433,6 +457,9 @@ void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
|
||||
void bpf_timer_cancel_and_free(void *timer);
|
||||
void bpf_list_head_free(const struct btf_field *field, void *list_head,
|
||||
struct bpf_spin_lock *spin_lock);
|
||||
void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
|
||||
struct bpf_spin_lock *spin_lock);
|
||||
|
||||
|
||||
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
|
||||
|
||||
@ -575,6 +602,11 @@ enum bpf_type_flag {
|
||||
/* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */
|
||||
MEM_RCU = BIT(13 + BPF_BASE_TYPE_BITS),
|
||||
|
||||
/* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
|
||||
* Currently only valid for linked-list and rbtree nodes.
|
||||
*/
|
||||
NON_OWN_REF = BIT(14 + BPF_BASE_TYPE_BITS),
|
||||
|
||||
__BPF_TYPE_FLAG_MAX,
|
||||
__BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
|
||||
};
|
||||
@ -1886,6 +1918,8 @@ struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
|
||||
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
|
||||
int node);
|
||||
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
|
||||
void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
|
||||
gfp_t flags);
|
||||
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
|
||||
size_t align, gfp_t flags);
|
||||
#else
|
||||
@ -1902,6 +1936,12 @@ bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
|
||||
return kzalloc(size, flags);
|
||||
}
|
||||
|
||||
static inline void *
|
||||
bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags)
|
||||
{
|
||||
return kvcalloc(n, size, flags);
|
||||
}
|
||||
|
||||
static inline void __percpu *
|
||||
bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
|
||||
gfp_t flags)
|
||||
@ -2925,4 +2965,11 @@ static inline bool type_is_alloc(u32 type)
|
||||
return type & MEM_ALLOC;
|
||||
}
|
||||
|
||||
static inline gfp_t bpf_memcg_flags(gfp_t flags)
|
||||
{
|
||||
if (memcg_bpf_enabled())
|
||||
return flags | __GFP_ACCOUNT;
|
||||
return flags;
|
||||
}
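For illustration only, the call-site pattern this helper enables, mirroring the kernel/bpf/core.c and kernel/bpf/syscall.c hunks later in this pull, would look roughly like the sketch below; the size variable and the error handling are assumptions, not part of the patch:

	/* __GFP_ACCOUNT is only added when BPF memcg accounting has not been
	 * switched off with the new cgroup.memory=nobpf kernel parameter.
	 */
	void *buf = kzalloc(size, bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));

	if (!buf)
		return -ENOMEM;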
|
||||
|
||||
#endif /* _LINUX_BPF_H */
|
||||
|
@@ -43,6 +43,22 @@ enum bpf_reg_liveness {
	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};

/* For every reg representing a map value or allocated object pointer,
 * we consider the tuple of (ptr, id) to be unique in the verifier
 * context and consider them to not alias each other for the purposes
 * of tracking lock state.
 */
struct bpf_active_lock {
	/* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
	 * there's no active lock held, and other fields have no
	 * meaning. If non-NULL, it indicates that a lock is held and
	 * id member has the reg->id of the register which can be >= 0.
	 */
	void *ptr;
	/* This will be reg->id */
	u32 id;
};

struct bpf_reg_state {
|
||||
/* Ordering of fields matters. See states_equal() */
|
||||
enum bpf_reg_type type;
|
||||
@ -226,11 +242,6 @@ struct bpf_reference_state {
|
||||
* exiting a callback function.
|
||||
*/
|
||||
int callback_ref;
|
||||
/* Mark the reference state to release the registers sharing the same id
|
||||
* on bpf_spin_unlock (for nodes that we will lose ownership to but are
|
||||
* safe to access inside the critical section).
|
||||
*/
|
||||
bool release_on_unlock;
|
||||
};
|
||||
|
||||
/* state of the program:
|
||||
@ -331,21 +342,8 @@ struct bpf_verifier_state {
|
||||
u32 branches;
|
||||
u32 insn_idx;
|
||||
u32 curframe;
|
||||
/* For every reg representing a map value or allocated object pointer,
|
||||
* we consider the tuple of (ptr, id) for them to be unique in verifier
|
||||
* context and conside them to not alias each other for the purposes of
|
||||
* tracking lock state.
|
||||
*/
|
||||
struct {
|
||||
/* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
|
||||
* there's no active lock held, and other fields have no
|
||||
* meaning. If non-NULL, it indicates that a lock is held and
|
||||
* id member has the reg->id of the register which can be >= 0.
|
||||
*/
|
||||
void *ptr;
|
||||
/* This will be reg->id */
|
||||
u32 id;
|
||||
} active_lock;
|
||||
|
||||
struct bpf_active_lock active_lock;
|
||||
bool speculative;
|
||||
bool active_rcu_lock;
|
||||
|
||||
|
@ -1754,6 +1754,12 @@ struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);
|
||||
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
|
||||
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
|
||||
|
||||
extern struct static_key_false memcg_bpf_enabled_key;
|
||||
static inline bool memcg_bpf_enabled(void)
|
||||
{
|
||||
return static_branch_likely(&memcg_bpf_enabled_key);
|
||||
}
|
||||
|
||||
extern struct static_key_false memcg_kmem_enabled_key;
|
||||
|
||||
static inline bool memcg_kmem_enabled(void)
|
||||
@ -1832,6 +1838,11 @@ static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline bool memcg_bpf_enabled(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool memcg_kmem_enabled(void)
|
||||
{
|
||||
return false;
|
||||
|
@@ -3134,6 +3134,11 @@ union bpf_attr {
 *		**BPF_FIB_LOOKUP_OUTPUT**
 *			Perform lookup from an egress perspective (default is
 *			ingress).
 *		**BPF_FIB_LOOKUP_SKIP_NEIGH**
 *			Skip the neighbour table lookup. *params*->dmac
 *			and *params*->smac will not be set as output. A common
 *			use case is to call **bpf_redirect_neigh**\ () after
 *			doing **bpf_fib_lookup**\ ().
 *
 *		*ctx* is either **struct xdp_md** for XDP programs or
 *		**struct sk_buff** for tc cls_act programs.
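As a rough illustration of the documented flow, a tc cls_act program could pair the new flag with bpf_redirect_neigh() as sketched below; the packet parsing, the includes and the constants (AF_INET, TC_ACT_OK) are assumed to be available and are not part of this patch:

SEC("tc")
int fwd_prog(struct __sk_buff *skb)
{
	struct bpf_fib_lookup params = {};
	int ret;

	params.family  = AF_INET;
	params.ifindex = skb->ingress_ifindex;
	/* fill ipv4_src, ipv4_dst, tot_len, etc. from the parsed headers here */

	/* Route lookup only; neighbour resolution is deferred to the redirect. */
	ret = bpf_fib_lookup(skb, &params, sizeof(params), BPF_FIB_LOOKUP_SKIP_NEIGH);
	if (ret != BPF_FIB_LKUP_RET_SUCCESS)
		return TC_ACT_OK;

	return bpf_redirect_neigh(params.ifindex, NULL, 0, 0);
}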
@@ -6750,6 +6755,7 @@ struct bpf_raw_tracepoint_args {
enum {
	BPF_FIB_LOOKUP_DIRECT = (1U << 0),
	BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
	BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
};
|
||||
|
||||
enum {
|
||||
@ -6917,6 +6923,17 @@ struct bpf_list_node {
|
||||
__u64 :64;
|
||||
} __attribute__((aligned(8)));
|
||||
|
||||
struct bpf_rb_root {
|
||||
__u64 :64;
|
||||
__u64 :64;
|
||||
} __attribute__((aligned(8)));
|
||||
|
||||
struct bpf_rb_node {
|
||||
__u64 :64;
|
||||
__u64 :64;
|
||||
__u64 :64;
|
||||
} __attribute__((aligned(8)));
|
||||
|
||||
struct bpf_sysctl {
|
||||
__u32 write; /* Sysctl is being read (= 0) or written (= 1).
|
||||
* Allows 1,2,4-byte read, but no write.
|
||||
|
@ -568,8 +568,8 @@ static struct bpf_local_storage_map *__bpf_local_storage_map_alloc(union bpf_att
|
||||
nbuckets = max_t(u32, 2, nbuckets);
|
||||
smap->bucket_log = ilog2(nbuckets);
|
||||
|
||||
smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
|
||||
GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
|
||||
smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
|
||||
nbuckets, GFP_USER | __GFP_NOWARN);
|
||||
if (!smap->buckets) {
|
||||
bpf_map_area_free(smap);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
kernel/bpf/btf.c
@ -3324,12 +3324,14 @@ static const char *btf_find_decl_tag_value(const struct btf *btf,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int btf_find_list_head(const struct btf *btf, const struct btf_type *pt,
|
||||
const struct btf_type *t, int comp_idx,
|
||||
u32 off, int sz, struct btf_field_info *info)
|
||||
static int
|
||||
btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
|
||||
const struct btf_type *t, int comp_idx, u32 off,
|
||||
int sz, struct btf_field_info *info,
|
||||
enum btf_field_type head_type)
|
||||
{
|
||||
const char *node_field_name;
|
||||
const char *value_type;
|
||||
const char *list_node;
|
||||
s32 id;
|
||||
|
||||
if (!__btf_type_is_struct(t))
|
||||
@ -3339,26 +3341,32 @@ static int btf_find_list_head(const struct btf *btf, const struct btf_type *pt,
|
||||
value_type = btf_find_decl_tag_value(btf, pt, comp_idx, "contains:");
|
||||
if (!value_type)
|
||||
return -EINVAL;
|
||||
list_node = strstr(value_type, ":");
|
||||
if (!list_node)
|
||||
node_field_name = strstr(value_type, ":");
|
||||
if (!node_field_name)
|
||||
return -EINVAL;
|
||||
value_type = kstrndup(value_type, list_node - value_type, GFP_KERNEL | __GFP_NOWARN);
|
||||
value_type = kstrndup(value_type, node_field_name - value_type, GFP_KERNEL | __GFP_NOWARN);
|
||||
if (!value_type)
|
||||
return -ENOMEM;
|
||||
id = btf_find_by_name_kind(btf, value_type, BTF_KIND_STRUCT);
|
||||
kfree(value_type);
|
||||
if (id < 0)
|
||||
return id;
|
||||
list_node++;
|
||||
if (str_is_empty(list_node))
|
||||
node_field_name++;
|
||||
if (str_is_empty(node_field_name))
|
||||
return -EINVAL;
|
||||
info->type = BPF_LIST_HEAD;
|
||||
info->type = head_type;
|
||||
info->off = off;
|
||||
info->graph_root.value_btf_id = id;
|
||||
info->graph_root.node_name = list_node;
|
||||
info->graph_root.node_name = node_field_name;
|
||||
return BTF_FIELD_FOUND;
|
||||
}
|
||||
|
||||
#define field_mask_test_name(field_type, field_type_str) \
|
||||
if (field_mask & field_type && !strcmp(name, field_type_str)) { \
|
||||
type = field_type; \
|
||||
goto end; \
|
||||
}
|
||||
|
||||
static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
|
||||
int *align, int *sz)
|
||||
{
|
||||
@ -3382,18 +3390,11 @@ static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
if (field_mask & BPF_LIST_HEAD) {
|
||||
if (!strcmp(name, "bpf_list_head")) {
|
||||
type = BPF_LIST_HEAD;
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
if (field_mask & BPF_LIST_NODE) {
|
||||
if (!strcmp(name, "bpf_list_node")) {
|
||||
type = BPF_LIST_NODE;
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head");
|
||||
field_mask_test_name(BPF_LIST_NODE, "bpf_list_node");
|
||||
field_mask_test_name(BPF_RB_ROOT, "bpf_rb_root");
|
||||
field_mask_test_name(BPF_RB_NODE, "bpf_rb_node");
|
||||
|
||||
/* Only return BPF_KPTR when all other types with matchable names fail */
|
||||
if (field_mask & BPF_KPTR) {
|
||||
type = BPF_KPTR_REF;
|
||||
@ -3406,6 +3407,8 @@ end:
|
||||
return type;
|
||||
}
|
||||
|
||||
#undef field_mask_test_name
|
||||
|
||||
static int btf_find_struct_field(const struct btf *btf,
|
||||
const struct btf_type *t, u32 field_mask,
|
||||
struct btf_field_info *info, int info_cnt)
|
||||
@ -3438,6 +3441,7 @@ static int btf_find_struct_field(const struct btf *btf,
|
||||
case BPF_SPIN_LOCK:
|
||||
case BPF_TIMER:
|
||||
case BPF_LIST_NODE:
|
||||
case BPF_RB_NODE:
|
||||
ret = btf_find_struct(btf, member_type, off, sz, field_type,
|
||||
idx < info_cnt ? &info[idx] : &tmp);
|
||||
if (ret < 0)
|
||||
@ -3451,8 +3455,11 @@ static int btf_find_struct_field(const struct btf *btf,
|
||||
return ret;
|
||||
break;
|
||||
case BPF_LIST_HEAD:
|
||||
ret = btf_find_list_head(btf, t, member_type, i, off, sz,
|
||||
idx < info_cnt ? &info[idx] : &tmp);
|
||||
case BPF_RB_ROOT:
|
||||
ret = btf_find_graph_root(btf, t, member_type,
|
||||
i, off, sz,
|
||||
idx < info_cnt ? &info[idx] : &tmp,
|
||||
field_type);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
break;
|
||||
@ -3499,6 +3506,7 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
|
||||
case BPF_SPIN_LOCK:
|
||||
case BPF_TIMER:
|
||||
case BPF_LIST_NODE:
|
||||
case BPF_RB_NODE:
|
||||
ret = btf_find_struct(btf, var_type, off, sz, field_type,
|
||||
idx < info_cnt ? &info[idx] : &tmp);
|
||||
if (ret < 0)
|
||||
@ -3512,8 +3520,11 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
|
||||
return ret;
|
||||
break;
|
||||
case BPF_LIST_HEAD:
|
||||
ret = btf_find_list_head(btf, var, var_type, -1, off, sz,
|
||||
idx < info_cnt ? &info[idx] : &tmp);
|
||||
case BPF_RB_ROOT:
|
||||
ret = btf_find_graph_root(btf, var, var_type,
|
||||
-1, off, sz,
|
||||
idx < info_cnt ? &info[idx] : &tmp,
|
||||
field_type);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
break;
|
||||
@ -3615,8 +3626,11 @@ end_btf:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
|
||||
struct btf_field_info *info)
|
||||
static int btf_parse_graph_root(const struct btf *btf,
|
||||
struct btf_field *field,
|
||||
struct btf_field_info *info,
|
||||
const char *node_type_name,
|
||||
size_t node_type_align)
|
||||
{
|
||||
const struct btf_type *t, *n = NULL;
|
||||
const struct btf_member *member;
|
||||
@ -3638,13 +3652,13 @@ static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
|
||||
n = btf_type_by_id(btf, member->type);
|
||||
if (!__btf_type_is_struct(n))
|
||||
return -EINVAL;
|
||||
if (strcmp("bpf_list_node", __btf_name_by_offset(btf, n->name_off)))
|
||||
if (strcmp(node_type_name, __btf_name_by_offset(btf, n->name_off)))
|
||||
return -EINVAL;
|
||||
offset = __btf_member_bit_offset(n, member);
|
||||
if (offset % 8)
|
||||
return -EINVAL;
|
||||
offset /= 8;
|
||||
if (offset % __alignof__(struct bpf_list_node))
|
||||
if (offset % node_type_align)
|
||||
return -EINVAL;
|
||||
|
||||
field->graph_root.btf = (struct btf *)btf;
|
||||
@ -3656,6 +3670,20 @@ static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
|
||||
struct btf_field_info *info)
|
||||
{
|
||||
return btf_parse_graph_root(btf, field, info, "bpf_list_node",
|
||||
__alignof__(struct bpf_list_node));
|
||||
}
|
||||
|
||||
static int btf_parse_rb_root(const struct btf *btf, struct btf_field *field,
|
||||
struct btf_field_info *info)
|
||||
{
|
||||
return btf_parse_graph_root(btf, field, info, "bpf_rb_node",
|
||||
__alignof__(struct bpf_rb_node));
|
||||
}
|
||||
|
||||
struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
|
||||
u32 field_mask, u32 value_size)
|
||||
{
|
||||
@ -3718,7 +3746,13 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
break;
|
||||
case BPF_RB_ROOT:
|
||||
ret = btf_parse_rb_root(btf, &rec->fields[i], &info_arr[i]);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
break;
|
||||
case BPF_LIST_NODE:
|
||||
case BPF_RB_NODE:
|
||||
break;
|
||||
default:
|
||||
ret = -EFAULT;
|
||||
@ -3727,8 +3761,33 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
|
||||
rec->cnt++;
|
||||
}
|
||||
|
||||
/* bpf_list_head requires bpf_spin_lock */
|
||||
if (btf_record_has_field(rec, BPF_LIST_HEAD) && rec->spin_lock_off < 0) {
|
||||
/* bpf_{list_head, rb_root} require bpf_spin_lock */
|
||||
if ((btf_record_has_field(rec, BPF_LIST_HEAD) ||
|
||||
btf_record_has_field(rec, BPF_RB_ROOT)) && rec->spin_lock_off < 0) {
|
||||
ret = -EINVAL;
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* need collection identity for non-owning refs before allowing this
 *
 * Consider a node type w/ both list and rb_node fields:
 *   struct node {
 *     struct bpf_list_node l;
 *     struct bpf_rb_node r;
 *   }
 *
 * Used like so:
 *   struct node *n = bpf_obj_new(....);
 *   bpf_list_push_front(&list_head, &n->l);
 *   bpf_rbtree_remove(&rb_root, &n->r);
 *
 * It should not be possible to rbtree_remove the node since it hasn't
 * been added to a tree. But push_front converts n to a non-owning
 * reference, and rbtree_remove accepts the non-owning reference to
 * a type w/ bpf_rb_node field.
 */
if (btf_record_has_field(rec, BPF_LIST_NODE) &&
    btf_record_has_field(rec, BPF_RB_NODE)) {
	ret = -EINVAL;
	goto end;
}
|
||||
@ -3739,22 +3798,28 @@ end:
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
#define GRAPH_ROOT_MASK (BPF_LIST_HEAD | BPF_RB_ROOT)
|
||||
#define GRAPH_NODE_MASK (BPF_LIST_NODE | BPF_RB_NODE)
|
||||
|
||||
int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* There are two owning types, kptr_ref and bpf_list_head. The former
|
||||
* only supports storing kernel types, which can never store references
|
||||
* to program allocated local types, atleast not yet. Hence we only need
|
||||
* to ensure that bpf_list_head ownership does not form cycles.
|
||||
/* There are three types that signify ownership of some other type:
|
||||
* kptr_ref, bpf_list_head, bpf_rb_root.
|
||||
* kptr_ref only supports storing kernel types, which can't store
|
||||
* references to program allocated local types.
|
||||
*
|
||||
* Hence we only need to ensure that bpf_{list_head,rb_root} ownership
|
||||
* does not form cycles.
|
||||
*/
|
||||
if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & BPF_LIST_HEAD))
|
||||
if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & GRAPH_ROOT_MASK))
|
||||
return 0;
|
||||
for (i = 0; i < rec->cnt; i++) {
|
||||
struct btf_struct_meta *meta;
|
||||
u32 btf_id;
|
||||
|
||||
if (!(rec->fields[i].type & BPF_LIST_HEAD))
|
||||
if (!(rec->fields[i].type & GRAPH_ROOT_MASK))
|
||||
continue;
|
||||
btf_id = rec->fields[i].graph_root.value_btf_id;
|
||||
meta = btf_find_struct_meta(btf, btf_id);
|
||||
@ -3762,39 +3827,47 @@ int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
|
||||
return -EFAULT;
|
||||
rec->fields[i].graph_root.value_rec = meta->record;
|
||||
|
||||
if (!(rec->field_mask & BPF_LIST_NODE))
|
||||
/* We need to set value_rec for all root types, but no need
|
||||
* to check ownership cycle for a type unless it's also a
|
||||
* node type.
|
||||
*/
|
||||
if (!(rec->field_mask & GRAPH_NODE_MASK))
|
||||
continue;
|
||||
|
||||
/* We need to ensure ownership acyclicity among all types. The
|
||||
* proper way to do it would be to topologically sort all BTF
|
||||
* IDs based on the ownership edges, since there can be multiple
|
||||
* bpf_list_head in a type. Instead, we use the following
|
||||
* reasoning:
|
||||
* bpf_{list_head,rb_node} in a type. Instead, we use the
|
||||
* following reasoning:
|
||||
*
|
||||
* - A type can only be owned by another type in user BTF if it
|
||||
* has a bpf_list_node.
|
||||
* has a bpf_{list,rb}_node. Let's call these node types.
|
||||
* - A type can only _own_ another type in user BTF if it has a
|
||||
* bpf_list_head.
|
||||
* bpf_{list_head,rb_root}. Let's call these root types.
|
||||
*
|
||||
* We ensure that if a type has both bpf_list_head and
|
||||
* bpf_list_node, its element types cannot be owning types.
|
||||
* We ensure that if a type is both a root and node, its
|
||||
* element types cannot be root types.
|
||||
*
|
||||
* To ensure acyclicity:
|
||||
*
|
||||
* When A only has bpf_list_head, ownership chain can be:
|
||||
* When A is an root type but not a node, its ownership
|
||||
* chain can be:
|
||||
* A -> B -> C
|
||||
* Where:
|
||||
* - B has both bpf_list_head and bpf_list_node.
|
||||
* - C only has bpf_list_node.
|
||||
* - A is an root, e.g. has bpf_rb_root.
|
||||
* - B is both a root and node, e.g. has bpf_rb_node and
|
||||
* bpf_list_head.
|
||||
* - C is only an root, e.g. has bpf_list_node
|
||||
*
|
||||
* When A has both bpf_list_head and bpf_list_node, some other
|
||||
* type already owns it in the BTF domain, hence it can not own
|
||||
* another owning type through any of the bpf_list_head edges.
|
||||
* When A is both a root and node, some other type already
|
||||
* owns it in the BTF domain, hence it can not own
|
||||
* another root type through any of the ownership edges.
|
||||
* A -> B
|
||||
* Where:
|
||||
* - B only has bpf_list_node.
|
||||
* - A is both an root and node.
|
||||
* - B is only an node.
|
||||
*/
|
||||
if (meta->record->field_mask & BPF_LIST_HEAD)
|
||||
if (meta->record->field_mask & GRAPH_ROOT_MASK)
|
||||
return -ELOOP;
|
||||
}
|
||||
return 0;
|
||||
@ -5256,6 +5329,8 @@ static const char *alloc_obj_fields[] = {
|
||||
"bpf_spin_lock",
|
||||
"bpf_list_head",
|
||||
"bpf_list_node",
|
||||
"bpf_rb_root",
|
||||
"bpf_rb_node",
|
||||
};
|
||||
|
||||
static struct btf_struct_metas *
|
||||
@ -5329,7 +5404,8 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
|
||||
|
||||
type = &tab->types[tab->cnt];
|
||||
type->btf_id = i;
|
||||
record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE, t->size);
|
||||
record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE |
|
||||
BPF_RB_ROOT | BPF_RB_NODE, t->size);
|
||||
/* The record cannot be unset, treat it as an error if so */
|
||||
if (IS_ERR_OR_NULL(record)) {
|
||||
ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT;
|
||||
@ -5593,6 +5669,7 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
|
||||
if (!ctx_struct)
|
||||
/* should not happen */
|
||||
return NULL;
|
||||
again:
|
||||
ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
|
||||
if (!ctx_tname) {
|
||||
/* should not happen */
|
||||
@ -5606,8 +5683,16 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
|
||||
* int socket_filter_bpf_prog(struct __sk_buff *skb)
|
||||
* { // no fields of skb are ever used }
|
||||
*/
|
||||
if (strcmp(ctx_tname, tname))
|
||||
return NULL;
|
||||
if (strcmp(ctx_tname, tname)) {
|
||||
/* bpf_user_pt_regs_t is a typedef, so resolve it to
|
||||
* underlying struct and check name again
|
||||
*/
|
||||
if (!btf_type_is_modifier(ctx_struct))
|
||||
return NULL;
|
||||
while (btf_type_is_modifier(ctx_struct))
|
||||
ctx_struct = btf_type_by_id(btf_vmlinux, ctx_struct->type);
|
||||
goto again;
|
||||
}
|
||||
return ctx_type;
|
||||
}
|
||||
|
||||
|
@ -35,6 +35,7 @@
|
||||
#include <linux/bpf_verifier.h>
|
||||
#include <linux/nodemask.h>
|
||||
#include <linux/bpf_mem_alloc.h>
|
||||
#include <linux/memcontrol.h>
|
||||
|
||||
#include <asm/barrier.h>
|
||||
#include <asm/unaligned.h>
|
||||
@ -87,7 +88,7 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
|
||||
|
||||
struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
|
||||
{
|
||||
gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
|
||||
gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
|
||||
struct bpf_prog_aux *aux;
|
||||
struct bpf_prog *fp;
|
||||
|
||||
@ -96,12 +97,12 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
|
||||
if (fp == NULL)
|
||||
return NULL;
|
||||
|
||||
aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
|
||||
aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
|
||||
if (aux == NULL) {
|
||||
vfree(fp);
|
||||
return NULL;
|
||||
}
|
||||
fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
|
||||
fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
|
||||
if (!fp->active) {
|
||||
vfree(fp);
|
||||
kfree(aux);
|
||||
@ -126,7 +127,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
|
||||
|
||||
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
|
||||
{
|
||||
gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
|
||||
gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
|
||||
struct bpf_prog *prog;
|
||||
int cpu;
|
||||
|
||||
@ -159,7 +160,7 @@ int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
|
||||
|
||||
prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
|
||||
sizeof(*prog->aux->jited_linfo),
|
||||
GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
|
||||
bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
|
||||
if (!prog->aux->jited_linfo)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -234,7 +235,7 @@ void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
|
||||
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
|
||||
gfp_t gfp_extra_flags)
|
||||
{
|
||||
gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
|
||||
gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
|
||||
struct bpf_prog *fp;
|
||||
u32 pages;
|
||||
|
||||
|
@ -1004,8 +1004,6 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
|
||||
l_new = ERR_PTR(-ENOMEM);
|
||||
goto dec_count;
|
||||
}
|
||||
check_and_init_map_value(&htab->map,
|
||||
l_new->key + round_up(key_size, 8));
|
||||
}
|
||||
|
||||
memcpy(l_new->key, key, key_size);
|
||||
@ -1592,6 +1590,7 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
|
||||
else
|
||||
copy_map_value(map, value, l->key +
|
||||
roundup_key_size);
|
||||
/* Zeroing special fields in the temp buffer */
|
||||
check_and_init_map_value(map, value);
|
||||
}
|
||||
|
||||
@ -1792,6 +1791,7 @@ again_nocopy:
|
||||
true);
|
||||
else
|
||||
copy_map_value(map, dst_val, value);
|
||||
/* Zeroing special fields in the temp buffer */
|
||||
check_and_init_map_value(map, dst_val);
|
||||
}
|
||||
if (do_delete) {
|
||||
|
@ -1772,6 +1772,46 @@ unlock:
|
||||
}
|
||||
}
|
||||
|
||||
/* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
|
||||
* 'rb_node *', so field name of rb_node within containing struct is not
|
||||
* needed.
|
||||
*
|
||||
* Since bpf_rb_tree's node type has a corresponding struct btf_field with
|
||||
* graph_root.node_offset, it's not necessary to know field name
|
||||
* or type of node struct
|
||||
*/
|
||||
#define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \
|
||||
for (pos = rb_first_postorder(root); \
|
||||
pos && ({ n = rb_next_postorder(pos); 1; }); \
|
||||
pos = n)
|
||||
|
||||
void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
|
||||
struct bpf_spin_lock *spin_lock)
|
||||
{
|
||||
struct rb_root_cached orig_root, *root = rb_root;
|
||||
struct rb_node *pos, *n;
|
||||
void *obj;
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root));
|
||||
BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root));
|
||||
|
||||
__bpf_spin_lock_irqsave(spin_lock);
|
||||
orig_root = *root;
|
||||
*root = RB_ROOT_CACHED;
|
||||
__bpf_spin_unlock_irqrestore(spin_lock);
|
||||
|
||||
bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) {
|
||||
obj = pos;
|
||||
obj -= field->graph_root.node_offset;
|
||||
|
||||
bpf_obj_free_fields(field->graph_root.value_rec, obj);
|
||||
|
||||
migrate_disable();
|
||||
bpf_mem_free(&bpf_global_ma, obj);
|
||||
migrate_enable();
|
||||
}
|
||||
}
|
||||
|
||||
__diag_push();
|
||||
__diag_ignore_all("-Wmissing-prototypes",
|
||||
"Global functions as their definitions will be in vmlinux BTF");
|
||||
@ -1844,6 +1884,56 @@ __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
|
||||
return __bpf_list_del(head, true);
|
||||
}
|
||||
|
||||
__bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
|
||||
struct bpf_rb_node *node)
|
||||
{
|
||||
struct rb_root_cached *r = (struct rb_root_cached *)root;
|
||||
struct rb_node *n = (struct rb_node *)node;
|
||||
|
||||
rb_erase_cached(n, r);
|
||||
RB_CLEAR_NODE(n);
|
||||
return (struct bpf_rb_node *)n;
|
||||
}
|
||||
|
||||
/* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
|
||||
* program
|
||||
*/
|
||||
static void __bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
|
||||
void *less)
|
||||
{
|
||||
struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
|
||||
bpf_callback_t cb = (bpf_callback_t)less;
|
||||
struct rb_node *parent = NULL;
|
||||
bool leftmost = true;
|
||||
|
||||
while (*link) {
|
||||
parent = *link;
|
||||
if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
|
||||
link = &parent->rb_left;
|
||||
} else {
|
||||
link = &parent->rb_right;
|
||||
leftmost = false;
|
||||
}
|
||||
}
|
||||
|
||||
rb_link_node((struct rb_node *)node, parent, link);
|
||||
rb_insert_color_cached((struct rb_node *)node,
|
||||
(struct rb_root_cached *)root, leftmost);
|
||||
}
|
||||
|
||||
__bpf_kfunc void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
|
||||
bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b))
|
||||
{
|
||||
__bpf_rbtree_add(root, node, (void *)less);
|
||||
}
|
||||
|
||||
__bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
|
||||
{
|
||||
struct rb_root_cached *r = (struct rb_root_cached *)root;
|
||||
|
||||
return (struct bpf_rb_node *)rb_first_cached(r);
|
||||
}
|
||||
|
||||
/**
|
||||
* bpf_task_acquire - Acquire a reference to a task. A task acquired by this
|
||||
* kfunc which is not stored in a map as a kptr, must be released by calling
|
||||
@ -2068,6 +2158,10 @@ BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
|
||||
BTF_ID_FLAGS(func, bpf_task_acquire_not_zero, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
|
||||
BTF_ID_FLAGS(func, bpf_task_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
|
||||
BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
|
||||
BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE)
|
||||
BTF_ID_FLAGS(func, bpf_rbtree_add)
|
||||
BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
|
||||
|
||||
#ifdef CONFIG_CGROUPS
|
||||
BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
|
||||
BTF_ID_FLAGS(func, bpf_cgroup_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
|
||||
|
@ -143,7 +143,7 @@ static void *__alloc(struct bpf_mem_cache *c, int node)
|
||||
return obj;
|
||||
}
|
||||
|
||||
return kmalloc_node(c->unit_size, flags, node);
|
||||
return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
|
||||
}
|
||||
|
||||
static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
|
||||
@ -395,7 +395,8 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
|
||||
unit_size = size;
|
||||
|
||||
#ifdef CONFIG_MEMCG_KMEM
|
||||
objcg = get_obj_cgroup_from_current();
|
||||
if (memcg_bpf_enabled())
|
||||
objcg = get_obj_cgroup_from_current();
|
||||
#endif
|
||||
for_each_possible_cpu(cpu) {
|
||||
c = per_cpu_ptr(pc, cpu);
|
||||
|
@ -309,7 +309,7 @@ static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
|
||||
* __GFP_RETRY_MAYFAIL to avoid such situations.
|
||||
*/
|
||||
|
||||
const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
|
||||
gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
|
||||
unsigned int flags = 0;
|
||||
unsigned long align = 1;
|
||||
void *area;
|
||||
@ -418,7 +418,8 @@ static void bpf_map_save_memcg(struct bpf_map *map)
|
||||
* So we have to check map->objcg for being NULL each time it's
|
||||
* being used.
|
||||
*/
|
||||
map->objcg = get_obj_cgroup_from_current();
|
||||
if (memcg_bpf_enabled())
|
||||
map->objcg = get_obj_cgroup_from_current();
|
||||
}
|
||||
|
||||
static void bpf_map_release_memcg(struct bpf_map *map)
|
||||
@ -464,6 +465,21 @@ void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
|
||||
return ptr;
|
||||
}
|
||||
|
||||
void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
|
||||
gfp_t flags)
|
||||
{
|
||||
struct mem_cgroup *memcg, *old_memcg;
|
||||
void *ptr;
|
||||
|
||||
memcg = bpf_map_get_memcg(map);
|
||||
old_memcg = set_active_memcg(memcg);
|
||||
ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
|
||||
set_active_memcg(old_memcg);
|
||||
mem_cgroup_put(memcg);
|
||||
|
||||
return ptr;
|
||||
}
|
||||
|
||||
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
|
||||
size_t align, gfp_t flags)
|
||||
{
|
||||
@ -521,9 +537,6 @@ void btf_record_free(struct btf_record *rec)
|
||||
return;
|
||||
for (i = 0; i < rec->cnt; i++) {
|
||||
switch (rec->fields[i].type) {
|
||||
case BPF_SPIN_LOCK:
|
||||
case BPF_TIMER:
|
||||
break;
|
||||
case BPF_KPTR_UNREF:
|
||||
case BPF_KPTR_REF:
|
||||
if (rec->fields[i].kptr.module)
|
||||
@ -532,7 +545,11 @@ void btf_record_free(struct btf_record *rec)
|
||||
break;
|
||||
case BPF_LIST_HEAD:
|
||||
case BPF_LIST_NODE:
|
||||
/* Nothing to release for bpf_list_head */
|
||||
case BPF_RB_ROOT:
|
||||
case BPF_RB_NODE:
|
||||
case BPF_SPIN_LOCK:
|
||||
case BPF_TIMER:
|
||||
/* Nothing to release */
|
||||
break;
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
@ -565,9 +582,6 @@ struct btf_record *btf_record_dup(const struct btf_record *rec)
|
||||
new_rec->cnt = 0;
|
||||
for (i = 0; i < rec->cnt; i++) {
|
||||
switch (fields[i].type) {
|
||||
case BPF_SPIN_LOCK:
|
||||
case BPF_TIMER:
|
||||
break;
|
||||
case BPF_KPTR_UNREF:
|
||||
case BPF_KPTR_REF:
|
||||
btf_get(fields[i].kptr.btf);
|
||||
@ -578,7 +592,11 @@ struct btf_record *btf_record_dup(const struct btf_record *rec)
|
||||
break;
|
||||
case BPF_LIST_HEAD:
|
||||
case BPF_LIST_NODE:
|
||||
/* Nothing to acquire for bpf_list_head */
|
||||
case BPF_RB_ROOT:
|
||||
case BPF_RB_NODE:
|
||||
case BPF_SPIN_LOCK:
|
||||
case BPF_TIMER:
|
||||
/* Nothing to acquire */
|
||||
break;
|
||||
default:
|
||||
ret = -EFAULT;
|
||||
@ -658,7 +676,13 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
|
||||
continue;
|
||||
bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
|
||||
break;
|
||||
case BPF_RB_ROOT:
|
||||
if (WARN_ON_ONCE(rec->spin_lock_off < 0))
|
||||
continue;
|
||||
bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
|
||||
break;
|
||||
case BPF_LIST_NODE:
|
||||
case BPF_RB_NODE:
|
||||
break;
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
@ -994,7 +1018,8 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
|
||||
return -EINVAL;
|
||||
|
||||
map->record = btf_parse_fields(btf, value_type,
|
||||
BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD,
|
||||
BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
|
||||
BPF_RB_ROOT,
|
||||
map->value_size);
|
||||
if (!IS_ERR_OR_NULL(map->record)) {
|
||||
int i;
|
||||
@ -1042,6 +1067,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
|
||||
}
|
||||
break;
|
||||
case BPF_LIST_HEAD:
|
||||
case BPF_RB_ROOT:
|
||||
if (map->map_type != BPF_MAP_TYPE_HASH &&
|
||||
map->map_type != BPF_MAP_TYPE_LRU_HASH &&
|
||||
map->map_type != BPF_MAP_TYPE_ARRAY) {
|
||||
|
@ -190,6 +190,10 @@ struct bpf_verifier_stack_elem {
|
||||
|
||||
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
|
||||
static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
|
||||
static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
|
||||
static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
|
||||
static int ref_set_non_owning(struct bpf_verifier_env *env,
|
||||
struct bpf_reg_state *reg);
|
||||
|
||||
static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
|
||||
{
|
||||
@ -457,6 +461,11 @@ static bool type_is_ptr_alloc_obj(u32 type)
|
||||
return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
|
||||
}
|
||||
|
||||
static bool type_is_non_owning_ref(u32 type)
|
||||
{
|
||||
return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
|
||||
}
|
||||
|
||||
static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
|
||||
{
|
||||
struct btf_record *rec = NULL;
|
||||
@ -1073,6 +1082,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
|
||||
verbose_a("id=%d", reg->id);
|
||||
if (reg->ref_obj_id)
|
||||
verbose_a("ref_obj_id=%d", reg->ref_obj_id);
|
||||
if (type_is_non_owning_ref(reg->type))
|
||||
verbose_a("%s", "non_own_ref");
|
||||
if (t != SCALAR_VALUE)
|
||||
verbose_a("off=%d", reg->off);
|
||||
if (type_is_pkt_pointer(t))
|
||||
@ -1632,6 +1643,16 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
|
||||
reg->type &= ~PTR_MAYBE_NULL;
|
||||
}
|
||||
|
||||
static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno,
|
||||
struct btf_field_graph_root *ds_head)
|
||||
{
|
||||
__mark_reg_known_zero(&regs[regno]);
|
||||
regs[regno].type = PTR_TO_BTF_ID | MEM_ALLOC;
|
||||
regs[regno].btf = ds_head->btf;
|
||||
regs[regno].btf_id = ds_head->value_btf_id;
|
||||
regs[regno].off = ds_head->node_offset;
|
||||
}
|
||||
|
||||
static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
|
||||
{
|
||||
return type_is_pkt_pointer(reg->type);
|
||||
@ -3452,6 +3473,11 @@ static void save_register_state(struct bpf_func_state *state,
|
||||
scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
|
||||
}
|
||||
|
||||
static bool is_bpf_st_mem(struct bpf_insn *insn)
|
||||
{
|
||||
return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM;
|
||||
}
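That is, it matches store-of-immediate instructions such as the one below, written with the kernel's BPF_ST_MEM() insn macro; with this series the verifier can track the written constant as a known scalar spill instead of marking the stack slot unknown:

	/* *(u64 *)(r10 - 8) = 42, i.e. a BPF_ST | BPF_MEM store of an immediate */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 42),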
|
||||
|
||||
/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
|
||||
* stack boundary and alignment are checked in check_mem_access()
|
||||
*/
|
||||
@ -3463,8 +3489,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
|
||||
{
|
||||
struct bpf_func_state *cur; /* state of the current function */
|
||||
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
|
||||
u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
|
||||
struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
|
||||
struct bpf_reg_state *reg = NULL;
|
||||
u32 dst_reg = insn->dst_reg;
|
||||
|
||||
err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
|
||||
if (err)
|
||||
@ -3517,6 +3544,13 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
|
||||
return err;
|
||||
}
|
||||
save_register_state(state, spi, reg, size);
|
||||
} else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
|
||||
insn->imm != 0 && env->bpf_capable) {
|
||||
struct bpf_reg_state fake_reg = {};
|
||||
|
||||
__mark_reg_known(&fake_reg, (u32)insn->imm);
|
||||
fake_reg.type = SCALAR_VALUE;
|
||||
save_register_state(state, spi, &fake_reg, size);
|
||||
} else if (reg && is_spillable_regtype(reg->type)) {
|
||||
/* register containing pointer is being spilled into stack */
|
||||
if (size != BPF_REG_SIZE) {
|
||||
@ -3551,7 +3585,8 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
|
||||
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
|
||||
|
||||
/* when we zero initialize stack slots mark them as such */
|
||||
if (reg && register_is_null(reg)) {
|
||||
if ((reg && register_is_null(reg)) ||
|
||||
(!reg && is_bpf_st_mem(insn) && insn->imm == 0)) {
|
||||
/* backtracking doesn't work for STACK_ZERO yet. */
|
||||
err = mark_chain_precision(env, value_regno);
|
||||
if (err)
|
||||
@ -3596,6 +3631,7 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
|
||||
int min_off, max_off;
|
||||
int i, err;
|
||||
struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
|
||||
struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
|
||||
bool writing_zero = false;
|
||||
/* set if the fact that we're writing a zero is used to let any
|
||||
* stack slots remain STACK_ZERO
|
||||
@ -3608,7 +3644,8 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
|
||||
max_off = ptr_reg->smax_value + off + size;
|
||||
if (value_regno >= 0)
|
||||
value_reg = &cur->regs[value_regno];
|
||||
if (value_reg && register_is_null(value_reg))
|
||||
if ((value_reg && register_is_null(value_reg)) ||
|
||||
(!value_reg && is_bpf_st_mem(insn) && insn->imm == 0))
|
||||
writing_zero = true;
|
||||
|
||||
err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
|
||||
@ -5052,7 +5089,8 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
if (type_is_alloc(reg->type) && !reg->ref_obj_id) {
|
||||
if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) &&
|
||||
!reg->ref_obj_id) {
|
||||
verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
@ -6042,9 +6080,7 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
|
||||
cur->active_lock.ptr = btf;
|
||||
cur->active_lock.id = reg->id;
|
||||
} else {
|
||||
struct bpf_func_state *fstate = cur_func(env);
|
||||
void *ptr;
|
||||
int i;
|
||||
|
||||
if (map)
|
||||
ptr = map;
|
||||
@ -6060,25 +6096,11 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
|
||||
verbose(env, "bpf_spin_unlock of different lock\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
invalidate_non_owning_refs(env);
|
||||
|
||||
cur->active_lock.ptr = NULL;
|
||||
cur->active_lock.id = 0;
|
||||
|
||||
for (i = fstate->acquired_refs - 1; i >= 0; i--) {
|
||||
int err;
|
||||
|
||||
/* Complain on error because this reference state cannot
|
||||
* be freed before this point, as bpf_spin_lock critical
|
||||
* section does not allow functions that release the
|
||||
* allocated object immediately.
|
||||
*/
|
||||
if (!fstate->refs[i].release_on_unlock)
|
||||
continue;
|
||||
err = release_reference(env, fstate->refs[i].id);
|
||||
if (err) {
|
||||
verbose(env, "failed to release release_on_unlock reference");
|
||||
return err;
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@ -6546,6 +6568,23 @@ found:
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct btf_field *
|
||||
reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields)
|
||||
{
|
||||
struct btf_field *field;
|
||||
struct btf_record *rec;
|
||||
|
||||
rec = reg_btf_record(reg);
|
||||
if (!rec)
|
||||
return NULL;
|
||||
|
||||
field = btf_record_find(rec, off, fields);
|
||||
if (!field)
|
||||
return NULL;
|
||||
|
||||
return field;
|
||||
}
|
||||
|
||||
int check_func_arg_reg_off(struct bpf_verifier_env *env,
|
||||
const struct bpf_reg_state *reg, int regno,
|
||||
enum bpf_arg_type arg_type)
|
||||
@ -6567,6 +6606,18 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
|
||||
*/
|
||||
if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK)
|
||||
return 0;
|
||||
|
||||
if ((type_is_ptr_alloc_obj(type) || type_is_non_owning_ref(type)) && reg->off) {
|
||||
if (reg_find_field_offset(reg, reg->off, BPF_GRAPH_NODE_OR_ROOT))
|
||||
return __check_ptr_off_reg(env, reg, regno, true);
|
||||
|
||||
verbose(env, "R%d must have zero offset when passed to release func\n",
|
||||
regno);
|
||||
verbose(env, "No graph node or root found at R%d type:%s off:%d\n", regno,
|
||||
kernel_type_name(reg->btf, reg->btf_id), reg->off);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Doing check_ptr_off_reg check for the offset will catch this
|
||||
* because fixed_off_ok is false, but checking here allows us
|
||||
* to give the user a better error message.
|
||||
@ -6601,6 +6652,7 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
|
||||
case PTR_TO_BTF_ID | PTR_TRUSTED:
|
||||
case PTR_TO_BTF_ID | MEM_RCU:
|
||||
case PTR_TO_BTF_ID | MEM_ALLOC | PTR_TRUSTED:
|
||||
case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF:
|
||||
/* When referenced PTR_TO_BTF_ID is passed to release function,
|
||||
* its fixed offset must be 0. In the other cases, fixed offset
|
||||
* can be non-zero. This was already checked above. So pass
|
||||
@ -6812,6 +6864,10 @@ skip_type_check:
|
||||
meta->ret_btf_id = reg->btf_id;
|
||||
break;
|
||||
case ARG_PTR_TO_SPIN_LOCK:
|
||||
if (in_rbtree_lock_required_cb(env)) {
|
||||
verbose(env, "can't spin_{lock,unlock} in rbtree cb\n");
|
||||
return -EACCES;
|
||||
}
|
||||
if (meta->func_id == BPF_FUNC_spin_lock) {
|
||||
err = process_spin_lock(env, regno, true);
|
||||
if (err)
|
||||
@ -7363,6 +7419,17 @@ static int release_reference(struct bpf_verifier_env *env,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void invalidate_non_owning_refs(struct bpf_verifier_env *env)
|
||||
{
|
||||
struct bpf_func_state *unused;
|
||||
struct bpf_reg_state *reg;
|
||||
|
||||
bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
|
||||
if (type_is_non_owning_ref(reg->type))
|
||||
__mark_reg_unknown(env, reg);
|
||||
}));
|
||||
}
|
||||
|
||||
static void clear_caller_saved_regs(struct bpf_verifier_env *env,
|
||||
struct bpf_reg_state *regs)
|
||||
{
|
||||
@ -7384,6 +7451,8 @@ static int set_callee_state(struct bpf_verifier_env *env,
|
||||
struct bpf_func_state *caller,
|
||||
struct bpf_func_state *callee, int insn_idx);
|
||||
|
||||
static bool is_callback_calling_kfunc(u32 btf_id);
|
||||
|
||||
static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
|
||||
int *insn_idx, int subprog,
|
||||
set_callee_state_fn set_callee_state_cb)
|
||||
@ -7438,10 +7507,18 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
|
||||
* interested in validating only BPF helpers that can call subprogs as
|
||||
* callbacks
|
||||
*/
|
||||
if (set_callee_state_cb != set_callee_state && !is_callback_calling_function(insn->imm)) {
|
||||
verbose(env, "verifier bug: helper %s#%d is not marked as callback-calling\n",
|
||||
func_id_name(insn->imm), insn->imm);
|
||||
return -EFAULT;
|
||||
if (set_callee_state_cb != set_callee_state) {
|
||||
if (bpf_pseudo_kfunc_call(insn) &&
|
||||
!is_callback_calling_kfunc(insn->imm)) {
|
||||
verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
|
||||
func_id_name(insn->imm), insn->imm);
|
||||
return -EFAULT;
|
||||
} else if (!bpf_pseudo_kfunc_call(insn) &&
|
||||
!is_callback_calling_function(insn->imm)) { /* helper */
|
||||
verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n",
|
||||
func_id_name(insn->imm), insn->imm);
|
||||
return -EFAULT;
|
||||
}
|
||||
}
|
||||
|
||||
if (insn->code == (BPF_JMP | BPF_CALL) &&
|
||||
@ -7706,6 +7783,63 @@ static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
|
||||
struct bpf_func_state *caller,
|
||||
struct bpf_func_state *callee,
|
||||
int insn_idx)
|
||||
{
|
||||
/* void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
|
||||
* bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b));
|
||||
*
|
||||
* 'struct bpf_rb_node *node' arg to bpf_rbtree_add is the same PTR_TO_BTF_ID w/ offset
|
||||
* that 'less' callback args will be receiving. However, 'node' arg was release_reference'd
|
||||
* by this point, so look at 'root'
|
||||
*/
|
||||
struct btf_field *field;
|
||||
|
||||
field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off,
|
||||
BPF_RB_ROOT);
|
||||
if (!field || !field->graph_root.value_btf_id)
|
||||
return -EFAULT;
|
||||
|
||||
mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root);
|
||||
ref_set_non_owning(env, &callee->regs[BPF_REG_1]);
|
||||
mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root);
|
||||
ref_set_non_owning(env, &callee->regs[BPF_REG_2]);
|
||||
|
||||
__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
|
||||
__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
|
||||
__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
|
||||
callee->in_callback_fn = true;
|
||||
callee->callback_ret_range = tnum_range(0, 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool is_rbtree_lock_required_kfunc(u32 btf_id);
|
||||
|
||||
/* Are we currently verifying the callback for a rbtree helper that must
|
||||
* be called with lock held? If so, no need to complain about unreleased
|
||||
* lock
|
||||
*/
|
||||
static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
|
||||
{
|
||||
struct bpf_verifier_state *state = env->cur_state;
|
||||
struct bpf_insn *insn = env->prog->insnsi;
|
||||
struct bpf_func_state *callee;
|
||||
int kfunc_btf_id;
|
||||
|
||||
if (!state->curframe)
|
||||
return false;
|
||||
|
||||
callee = state->frame[state->curframe];
|
||||
|
||||
if (!callee->in_callback_fn)
|
||||
return false;
|
||||
|
||||
kfunc_btf_id = insn[callee->callsite].imm;
|
||||
return is_rbtree_lock_required_kfunc(kfunc_btf_id);
|
||||
}
|
||||
|
||||
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
|
||||
{
|
||||
struct bpf_verifier_state *state = env->cur_state;
|
||||
@ -8474,6 +8608,7 @@ struct bpf_kfunc_call_arg_meta {
|
||||
bool r0_rdonly;
|
||||
u32 ret_btf_id;
|
||||
u64 r0_size;
|
||||
u32 subprogno;
|
||||
struct {
|
||||
u64 value;
|
||||
bool found;
|
||||
@ -8485,6 +8620,9 @@ struct bpf_kfunc_call_arg_meta {
|
||||
struct {
|
||||
struct btf_field *field;
|
||||
} arg_list_head;
|
||||
struct {
|
||||
struct btf_field *field;
|
||||
} arg_rbtree_root;
|
||||
};
|
||||
|
||||
static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta)
|
||||
@ -8596,12 +8734,16 @@ enum {
|
||||
KF_ARG_DYNPTR_ID,
|
||||
KF_ARG_LIST_HEAD_ID,
|
||||
KF_ARG_LIST_NODE_ID,
|
||||
KF_ARG_RB_ROOT_ID,
|
||||
KF_ARG_RB_NODE_ID,
|
||||
};
|
||||
|
||||
BTF_ID_LIST(kf_arg_btf_ids)
|
||||
BTF_ID(struct, bpf_dynptr_kern)
|
||||
BTF_ID(struct, bpf_list_head)
|
||||
BTF_ID(struct, bpf_list_node)
|
||||
BTF_ID(struct, bpf_rb_root)
|
||||
BTF_ID(struct, bpf_rb_node)
|
||||
|
||||
static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
|
||||
const struct btf_param *arg, int type)
|
||||
@ -8635,6 +8777,28 @@ static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param
|
||||
return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_NODE_ID);
|
||||
}
|
||||
|
||||
static bool is_kfunc_arg_rbtree_root(const struct btf *btf, const struct btf_param *arg)
|
||||
{
|
||||
return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_ROOT_ID);
|
||||
}
|
||||
|
||||
static bool is_kfunc_arg_rbtree_node(const struct btf *btf, const struct btf_param *arg)
|
||||
{
|
||||
return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_NODE_ID);
|
||||
}
|
||||
|
||||
static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf,
|
||||
const struct btf_param *arg)
|
||||
{
|
||||
const struct btf_type *t;
|
||||
|
||||
t = btf_type_resolve_func_ptr(btf, arg->type, NULL);
|
||||
if (!t)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Returns true if struct is composed of scalars, 4 levels of nesting allowed */
|
||||
static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env,
|
||||
const struct btf *btf,
|
||||
@ -8694,6 +8858,9 @@ enum kfunc_ptr_arg_type {
|
||||
KF_ARG_PTR_TO_BTF_ID, /* Also covers reg2btf_ids conversions */
|
||||
KF_ARG_PTR_TO_MEM,
|
||||
KF_ARG_PTR_TO_MEM_SIZE, /* Size derived from next argument, skip it */
|
||||
KF_ARG_PTR_TO_CALLBACK,
|
||||
KF_ARG_PTR_TO_RB_ROOT,
|
||||
KF_ARG_PTR_TO_RB_NODE,
|
||||
};
|
||||
|
||||
enum special_kfunc_type {
|
||||
@ -8707,6 +8874,9 @@ enum special_kfunc_type {
|
||||
KF_bpf_rdonly_cast,
|
||||
KF_bpf_rcu_read_lock,
|
||||
KF_bpf_rcu_read_unlock,
|
||||
KF_bpf_rbtree_remove,
|
||||
KF_bpf_rbtree_add,
|
||||
KF_bpf_rbtree_first,
|
||||
};
|
||||
|
||||
BTF_SET_START(special_kfunc_set)
|
||||
@ -8718,6 +8888,9 @@ BTF_ID(func, bpf_list_pop_front)
|
||||
BTF_ID(func, bpf_list_pop_back)
|
||||
BTF_ID(func, bpf_cast_to_kern_ctx)
|
||||
BTF_ID(func, bpf_rdonly_cast)
|
||||
BTF_ID(func, bpf_rbtree_remove)
|
||||
BTF_ID(func, bpf_rbtree_add)
|
||||
BTF_ID(func, bpf_rbtree_first)
|
||||
BTF_SET_END(special_kfunc_set)
|
||||
|
||||
BTF_ID_LIST(special_kfunc_list)
|
||||
@ -8731,6 +8904,9 @@ BTF_ID(func, bpf_cast_to_kern_ctx)
|
||||
BTF_ID(func, bpf_rdonly_cast)
|
||||
BTF_ID(func, bpf_rcu_read_lock)
|
||||
BTF_ID(func, bpf_rcu_read_unlock)
|
||||
BTF_ID(func, bpf_rbtree_remove)
|
||||
BTF_ID(func, bpf_rbtree_add)
|
||||
BTF_ID(func, bpf_rbtree_first)
|
||||
|
||||
static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta)
|
||||
{
|
||||
@ -8792,6 +8968,12 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
|
||||
if (is_kfunc_arg_list_node(meta->btf, &args[argno]))
|
||||
return KF_ARG_PTR_TO_LIST_NODE;
|
||||
|
||||
if (is_kfunc_arg_rbtree_root(meta->btf, &args[argno]))
|
||||
return KF_ARG_PTR_TO_RB_ROOT;
|
||||
|
||||
if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno]))
|
||||
return KF_ARG_PTR_TO_RB_NODE;
|
||||
|
||||
if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
|
||||
if (!btf_type_is_struct(ref_t)) {
|
||||
verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
|
||||
@ -8801,6 +8983,9 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
|
||||
return KF_ARG_PTR_TO_BTF_ID;
|
||||
}
|
||||
|
||||
if (is_kfunc_arg_callback(env, meta->btf, &args[argno]))
|
||||
return KF_ARG_PTR_TO_CALLBACK;
|
||||
|
||||
if (argno + 1 < nargs && is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]))
|
||||
arg_mem_size = true;
|
||||
|
||||
@ -8915,38 +9100,54 @@ static int process_kf_arg_ptr_to_kptr(struct bpf_verifier_env *env,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ref_set_release_on_unlock(struct bpf_verifier_env *env, u32 ref_obj_id)
|
||||
static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
|
||||
{
|
||||
struct bpf_func_state *state = cur_func(env);
|
||||
struct bpf_verifier_state *state = env->cur_state;
|
||||
|
||||
if (!state->active_lock.ptr) {
|
||||
verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (type_flag(reg->type) & NON_OWN_REF) {
|
||||
verbose(env, "verifier internal error: NON_OWN_REF already set\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
reg->type |= NON_OWN_REF;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id)
|
||||
{
|
||||
struct bpf_func_state *state, *unused;
|
||||
struct bpf_reg_state *reg;
|
||||
int i;
|
||||
|
||||
/* bpf_spin_lock only allows calling list_push and list_pop, no BPF
|
||||
* subprogs, no global functions. This means that the references would
|
||||
* not be released inside the critical section but they may be added to
|
||||
* the reference state, and the acquired_refs are never copied out for a
|
||||
* different frame as BPF to BPF calls don't work in bpf_spin_lock
|
||||
* critical sections.
|
||||
*/
|
||||
state = cur_func(env);
|
||||
|
||||
if (!ref_obj_id) {
|
||||
verbose(env, "verifier internal error: ref_obj_id is zero for release_on_unlock\n");
|
||||
verbose(env, "verifier internal error: ref_obj_id is zero for "
|
||||
"owning -> non-owning conversion\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
for (i = 0; i < state->acquired_refs; i++) {
|
||||
if (state->refs[i].id == ref_obj_id) {
|
||||
if (state->refs[i].release_on_unlock) {
|
||||
verbose(env, "verifier internal error: expected false release_on_unlock");
|
||||
return -EFAULT;
|
||||
if (state->refs[i].id != ref_obj_id)
|
||||
continue;
|
||||
|
||||
/* Clear ref_obj_id here so release_reference doesn't clobber
|
||||
* the whole reg
|
||||
*/
|
||||
bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
|
||||
if (reg->ref_obj_id == ref_obj_id) {
|
||||
reg->ref_obj_id = 0;
|
||||
ref_set_non_owning(env, reg);
|
||||
}
|
||||
state->refs[i].release_on_unlock = true;
|
||||
/* Now mark everyone sharing same ref_obj_id as untrusted */
|
||||
bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
|
||||
if (reg->ref_obj_id == ref_obj_id)
|
||||
reg->type |= PTR_UNTRUSTED;
|
||||
}));
|
||||
return 0;
|
||||
}
|
||||
}));
|
||||
return 0;
|
||||
}
|
||||
|
||||
verbose(env, "verifier internal error: ref state missing for ref_obj_id\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
@@ -9032,46 +9233,207 @@ static bool is_bpf_list_api_kfunc(u32 btf_id)
btf_id == special_kfunc_list[KF_bpf_list_pop_back];
}

static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
struct bpf_reg_state *reg, u32 regno,
struct bpf_kfunc_call_arg_meta *meta)
static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
{
return btf_id == special_kfunc_list[KF_bpf_rbtree_add] ||
btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
btf_id == special_kfunc_list[KF_bpf_rbtree_first];
}

static bool is_bpf_graph_api_kfunc(u32 btf_id)
{
return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id);
}

static bool is_callback_calling_kfunc(u32 btf_id)
{
return btf_id == special_kfunc_list[KF_bpf_rbtree_add];
}

static bool is_rbtree_lock_required_kfunc(u32 btf_id)
{
return is_bpf_rbtree_api_kfunc(btf_id);
}
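The rbtree kfuncs recognized by is_bpf_rbtree_api_kfunc() above are meant to be driven from a program roughly as follows; this is a sketch modelled on the selftests added in this series, assuming bpf_experimental.h-style declarations for bpf_obj_new(), bpf_obj_drop() and the rbtree kfuncs (names are illustrative):

struct node_data {
	long key;
	struct bpf_rb_node node;
};

private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);

static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *na = container_of(a, struct node_data, node);
	struct node_data *nb = container_of(b, struct node_data, node);

	return na->key < nb->key;
}

SEC("tc")
int rbtree_example(void *ctx)
{
	struct node_data *n;
	struct bpf_rb_node *res;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 0;
	n->key = 42;

	bpf_spin_lock(&glock);			/* all rbtree kfuncs require the lock */
	bpf_rbtree_add(&groot, &n->node, less);	/* n becomes non-owning */
	res = bpf_rbtree_first(&groot);		/* non-owning result */
	if (res)
		res = bpf_rbtree_remove(&groot, res);	/* owning again */
	bpf_spin_unlock(&glock);

	if (res)
		bpf_obj_drop(container_of(res, struct node_data, node));
	return 0;
}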
static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env,
enum btf_field_type head_field_type,
u32 kfunc_btf_id)
{
bool ret;

switch (head_field_type) {
case BPF_LIST_HEAD:
ret = is_bpf_list_api_kfunc(kfunc_btf_id);
break;
case BPF_RB_ROOT:
ret = is_bpf_rbtree_api_kfunc(kfunc_btf_id);
break;
default:
verbose(env, "verifier internal error: unexpected graph root argument type %s\n",
btf_field_type_name(head_field_type));
return false;
}

if (!ret)
verbose(env, "verifier internal error: %s head arg for unknown kfunc\n",
btf_field_type_name(head_field_type));
return ret;
}

static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
enum btf_field_type node_field_type,
u32 kfunc_btf_id)
{
bool ret;

switch (node_field_type) {
case BPF_LIST_NODE:
ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front] ||
kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back]);
break;
case BPF_RB_NODE:
ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add]);
break;
default:
verbose(env, "verifier internal error: unexpected graph node argument type %s\n",
btf_field_type_name(node_field_type));
return false;
}

if (!ret)
verbose(env, "verifier internal error: %s node arg for unknown kfunc\n",
btf_field_type_name(node_field_type));
return ret;
}
static int
|
||||
__process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env,
|
||||
struct bpf_reg_state *reg, u32 regno,
|
||||
struct bpf_kfunc_call_arg_meta *meta,
|
||||
enum btf_field_type head_field_type,
|
||||
struct btf_field **head_field)
|
||||
{
|
||||
const char *head_type_name;
|
||||
struct btf_field *field;
|
||||
struct btf_record *rec;
|
||||
u32 list_head_off;
|
||||
u32 head_off;
|
||||
|
||||
if (meta->btf != btf_vmlinux || !is_bpf_list_api_kfunc(meta->func_id)) {
|
||||
verbose(env, "verifier internal error: bpf_list_head argument for unknown kfunc\n");
|
||||
if (meta->btf != btf_vmlinux) {
|
||||
verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id))
|
||||
return -EFAULT;
|
||||
|
||||
head_type_name = btf_field_type_name(head_field_type);
|
||||
if (!tnum_is_const(reg->var_off)) {
|
||||
verbose(env,
|
||||
"R%d doesn't have constant offset. bpf_list_head has to be at the constant offset\n",
|
||||
regno);
|
||||
"R%d doesn't have constant offset. %s has to be at the constant offset\n",
|
||||
regno, head_type_name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rec = reg_btf_record(reg);
|
||||
list_head_off = reg->off + reg->var_off.value;
|
||||
field = btf_record_find(rec, list_head_off, BPF_LIST_HEAD);
|
||||
head_off = reg->off + reg->var_off.value;
|
||||
field = btf_record_find(rec, head_off, head_field_type);
|
||||
if (!field) {
|
||||
verbose(env, "bpf_list_head not found at offset=%u\n", list_head_off);
|
||||
verbose(env, "%s not found at offset=%u\n", head_type_name, head_off);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* All functions require bpf_list_head to be protected using a bpf_spin_lock */
|
||||
if (check_reg_allocation_locked(env, reg)) {
|
||||
verbose(env, "bpf_spin_lock at off=%d must be held for bpf_list_head\n",
|
||||
rec->spin_lock_off);
|
||||
verbose(env, "bpf_spin_lock at off=%d must be held for %s\n",
|
||||
rec->spin_lock_off, head_type_name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (meta->arg_list_head.field) {
|
||||
verbose(env, "verifier internal error: repeating bpf_list_head arg\n");
|
||||
if (*head_field) {
|
||||
verbose(env, "verifier internal error: repeating %s arg\n", head_type_name);
|
||||
return -EFAULT;
|
||||
}
|
||||
meta->arg_list_head.field = field;
|
||||
*head_field = field;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
|
||||
struct bpf_reg_state *reg, u32 regno,
|
||||
struct bpf_kfunc_call_arg_meta *meta)
|
||||
{
|
||||
return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_LIST_HEAD,
|
||||
&meta->arg_list_head.field);
|
||||
}
|
||||
|
||||
static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env *env,
|
||||
struct bpf_reg_state *reg, u32 regno,
|
||||
struct bpf_kfunc_call_arg_meta *meta)
|
||||
{
|
||||
return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_RB_ROOT,
|
||||
&meta->arg_rbtree_root.field);
|
||||
}
|
||||
|
||||
static int
|
||||
__process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env,
|
||||
struct bpf_reg_state *reg, u32 regno,
|
||||
struct bpf_kfunc_call_arg_meta *meta,
|
||||
enum btf_field_type head_field_type,
|
||||
enum btf_field_type node_field_type,
|
||||
struct btf_field **node_field)
|
||||
{
|
||||
const char *node_type_name;
|
||||
const struct btf_type *et, *t;
|
||||
struct btf_field *field;
|
||||
u32 node_off;
|
||||
|
||||
if (meta->btf != btf_vmlinux) {
|
||||
verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id))
|
||||
return -EFAULT;
|
||||
|
||||
node_type_name = btf_field_type_name(node_field_type);
|
||||
if (!tnum_is_const(reg->var_off)) {
|
||||
verbose(env,
|
||||
"R%d doesn't have constant offset. %s has to be at the constant offset\n",
|
||||
regno, node_type_name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
node_off = reg->off + reg->var_off.value;
|
||||
field = reg_find_field_offset(reg, node_off, node_field_type);
|
||||
if (!field || field->offset != node_off) {
|
||||
verbose(env, "%s not found at offset=%u\n", node_type_name, node_off);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
field = *node_field;
|
||||
|
||||
et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id);
|
||||
t = btf_type_by_id(reg->btf, reg->btf_id);
|
||||
if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf,
|
||||
field->graph_root.value_btf_id, true)) {
|
||||
verbose(env, "operation on %s expects arg#1 %s at offset=%d "
|
||||
"in struct %s, but arg is at offset=%d in struct %s\n",
|
||||
btf_field_type_name(head_field_type),
|
||||
btf_field_type_name(node_field_type),
|
||||
field->graph_root.node_offset,
|
||||
btf_name_by_offset(field->graph_root.btf, et->name_off),
|
||||
node_off, btf_name_by_offset(reg->btf, t->name_off));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (node_off != field->graph_root.node_offset) {
|
||||
verbose(env, "arg#1 offset=%d, but expected %s at offset=%d in struct %s\n",
|
||||
node_off, btf_field_type_name(node_field_type),
|
||||
field->graph_root.node_offset,
|
||||
btf_name_by_offset(field->graph_root.btf, et->name_off));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -9079,55 +9441,18 @@ static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
|
||||
struct bpf_reg_state *reg, u32 regno,
|
||||
struct bpf_kfunc_call_arg_meta *meta)
|
||||
{
|
||||
const struct btf_type *et, *t;
|
||||
struct btf_field *field;
|
||||
struct btf_record *rec;
|
||||
u32 list_node_off;
|
||||
return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
|
||||
BPF_LIST_HEAD, BPF_LIST_NODE,
|
||||
&meta->arg_list_head.field);
|
||||
}
|
||||
|
||||
if (meta->btf != btf_vmlinux ||
|
||||
(meta->func_id != special_kfunc_list[KF_bpf_list_push_front] &&
|
||||
meta->func_id != special_kfunc_list[KF_bpf_list_push_back])) {
|
||||
verbose(env, "verifier internal error: bpf_list_node argument for unknown kfunc\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (!tnum_is_const(reg->var_off)) {
|
||||
verbose(env,
|
||||
"R%d doesn't have constant offset. bpf_list_node has to be at the constant offset\n",
|
||||
regno);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rec = reg_btf_record(reg);
|
||||
list_node_off = reg->off + reg->var_off.value;
|
||||
field = btf_record_find(rec, list_node_off, BPF_LIST_NODE);
|
||||
if (!field || field->offset != list_node_off) {
|
||||
verbose(env, "bpf_list_node not found at offset=%u\n", list_node_off);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
field = meta->arg_list_head.field;
|
||||
|
||||
et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id);
|
||||
t = btf_type_by_id(reg->btf, reg->btf_id);
|
||||
if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf,
|
||||
field->graph_root.value_btf_id, true)) {
|
||||
verbose(env, "operation on bpf_list_head expects arg#1 bpf_list_node at offset=%d "
|
||||
"in struct %s, but arg is at offset=%d in struct %s\n",
|
||||
field->graph_root.node_offset,
|
||||
btf_name_by_offset(field->graph_root.btf, et->name_off),
|
||||
list_node_off, btf_name_by_offset(reg->btf, t->name_off));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (list_node_off != field->graph_root.node_offset) {
|
||||
verbose(env, "arg#1 offset=%d, but expected bpf_list_node at offset=%d in struct %s\n",
|
||||
list_node_off, field->graph_root.node_offset,
|
||||
btf_name_by_offset(field->graph_root.btf, et->name_off));
|
||||
return -EINVAL;
|
||||
}
|
||||
/* Set arg#1 for expiration after unlock */
|
||||
return ref_set_release_on_unlock(env, reg->ref_obj_id);
|
||||
static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env,
|
||||
struct bpf_reg_state *reg, u32 regno,
|
||||
struct bpf_kfunc_call_arg_meta *meta)
|
||||
{
|
||||
return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
|
||||
BPF_RB_ROOT, BPF_RB_NODE,
|
||||
&meta->arg_rbtree_root.field);
|
||||
}
|
||||
|
||||
static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta)
|
||||
@ -9264,8 +9589,11 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
|
||||
case KF_ARG_PTR_TO_DYNPTR:
|
||||
case KF_ARG_PTR_TO_LIST_HEAD:
|
||||
case KF_ARG_PTR_TO_LIST_NODE:
|
||||
case KF_ARG_PTR_TO_RB_ROOT:
|
||||
case KF_ARG_PTR_TO_RB_NODE:
|
||||
case KF_ARG_PTR_TO_MEM:
|
||||
case KF_ARG_PTR_TO_MEM_SIZE:
|
||||
case KF_ARG_PTR_TO_CALLBACK:
|
||||
/* Trusted by default */
|
||||
break;
|
||||
default:
|
||||
@ -9342,6 +9670,20 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
break;
|
||||
case KF_ARG_PTR_TO_RB_ROOT:
|
||||
if (reg->type != PTR_TO_MAP_VALUE &&
|
||||
reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
|
||||
verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
|
||||
verbose(env, "allocated object must be referenced\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
ret = process_kf_arg_ptr_to_rbtree_root(env, reg, regno, meta);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
break;
|
||||
case KF_ARG_PTR_TO_LIST_NODE:
|
||||
if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
|
||||
verbose(env, "arg#%d expected pointer to allocated object\n", i);
|
||||
@ -9355,6 +9697,31 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
break;
|
||||
case KF_ARG_PTR_TO_RB_NODE:
|
||||
if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove]) {
|
||||
if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) {
|
||||
verbose(env, "rbtree_remove node input must be non-owning ref\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (in_rbtree_lock_required_cb(env)) {
|
||||
verbose(env, "rbtree_remove not allowed in rbtree cb\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
|
||||
verbose(env, "arg#%d expected pointer to allocated object\n", i);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!reg->ref_obj_id) {
|
||||
verbose(env, "allocated object must be referenced\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
ret = process_kf_arg_ptr_to_rbtree_node(env, reg, regno, meta);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
break;
|
||||
case KF_ARG_PTR_TO_BTF_ID:
|
||||
/* Only base_type is checked, further checks are done here */
|
||||
if ((base_type(reg->type) != PTR_TO_BTF_ID ||
|
||||
@ -9390,6 +9757,9 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
|
||||
/* Skip next '__sz' argument */
|
||||
i++;
|
||||
break;
|
||||
case KF_ARG_PTR_TO_CALLBACK:
|
||||
meta->subprogno = reg->subprogno;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@ -9406,11 +9776,11 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
|
||||
int *insn_idx_p)
|
||||
{
|
||||
const struct btf_type *t, *func, *func_proto, *ptr_type;
|
||||
u32 i, nargs, func_id, ptr_type_id, release_ref_obj_id;
|
||||
struct bpf_reg_state *regs = cur_regs(env);
|
||||
const char *func_name, *ptr_type_name;
|
||||
bool sleepable, rcu_lock, rcu_unlock;
|
||||
struct bpf_kfunc_call_arg_meta meta;
|
||||
u32 i, nargs, func_id, ptr_type_id;
|
||||
int err, insn_idx = *insn_idx_p;
|
||||
const struct btf_param *args;
|
||||
const struct btf_type *ret_t;
|
||||
@ -9505,6 +9875,35 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
|
||||
}
|
||||
}
|
||||
|
||||
if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front] ||
|
||||
meta.func_id == special_kfunc_list[KF_bpf_list_push_back] ||
|
||||
meta.func_id == special_kfunc_list[KF_bpf_rbtree_add]) {
|
||||
release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
|
||||
err = ref_convert_owning_non_owning(env, release_ref_obj_id);
|
||||
if (err) {
|
||||
verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n",
|
||||
func_name, func_id);
|
||||
return err;
|
||||
}
|
||||
|
||||
err = release_reference(env, release_ref_obj_id);
|
||||
if (err) {
|
||||
verbose(env, "kfunc %s#%d reference has not been acquired before\n",
|
||||
func_name, func_id);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add]) {
|
||||
err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
|
||||
set_rbtree_add_callback_state);
|
||||
if (err) {
|
||||
verbose(env, "kfunc %s#%d failed callback verification\n",
|
||||
func_name, func_id);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < CALLER_SAVED_REGS; i++)
|
||||
mark_reg_not_init(env, regs, caller_saved[i]);
|
||||
|
||||
@ -9569,11 +9968,12 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
|
||||
meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) {
|
||||
struct btf_field *field = meta.arg_list_head.field;
|
||||
|
||||
mark_reg_known_zero(env, regs, BPF_REG_0);
|
||||
regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
|
||||
regs[BPF_REG_0].btf = field->graph_root.btf;
|
||||
regs[BPF_REG_0].btf_id = field->graph_root.value_btf_id;
|
||||
regs[BPF_REG_0].off = field->graph_root.node_offset;
|
||||
mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
|
||||
} else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
|
||||
meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
|
||||
struct btf_field *field = meta.arg_rbtree_root.field;
|
||||
|
||||
mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
|
||||
} else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
|
||||
mark_reg_known_zero(env, regs, BPF_REG_0);
|
||||
regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED;
|
||||
@ -9639,7 +10039,13 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
|
||||
if (is_kfunc_ret_null(&meta))
|
||||
regs[BPF_REG_0].id = id;
|
||||
regs[BPF_REG_0].ref_obj_id = id;
|
||||
} else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
|
||||
ref_set_non_owning(env, &regs[BPF_REG_0]);
|
||||
}
|
||||
|
||||
if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove])
|
||||
invalidate_non_owning_refs(env);
|
||||
|
||||
if (reg_may_point_to_spin_lock(&regs[BPF_REG_0]) && !regs[BPF_REG_0].id)
|
||||
regs[BPF_REG_0].id = ++env->id_gen;
|
||||
} /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */
|
||||
@ -11825,8 +12231,10 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
|
||||
*/
|
||||
if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0)))
|
||||
return;
|
||||
if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL) && WARN_ON_ONCE(reg->off))
|
||||
if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) &&
|
||||
WARN_ON_ONCE(reg->off))
|
||||
return;
|
||||
|
||||
if (is_null) {
|
||||
reg->type = SCALAR_VALUE;
|
||||
/* We don't need id and ref_obj_id from this point
|
||||
@ -14335,7 +14743,7 @@ static int do_check(struct bpf_verifier_env *env)
|
||||
if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
|
||||
(insn->src_reg == BPF_PSEUDO_CALL) ||
|
||||
(insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
|
||||
(insn->off != 0 || !is_bpf_list_api_kfunc(insn->imm)))) {
|
||||
(insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) {
|
||||
verbose(env, "function calls are not allowed while holding a lock\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -14371,7 +14779,8 @@ static int do_check(struct bpf_verifier_env *env)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (env->cur_state->active_lock.ptr) {
|
||||
if (env->cur_state->active_lock.ptr &&
|
||||
!in_rbtree_lock_required_cb(env)) {
|
||||
verbose(env, "bpf_spin_unlock is missing\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -14633,9 +15042,10 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
|
||||
{
|
||||
enum bpf_prog_type prog_type = resolve_prog_type(prog);
|
||||
|
||||
if (btf_record_has_field(map->record, BPF_LIST_HEAD)) {
|
||||
if (btf_record_has_field(map->record, BPF_LIST_HEAD) ||
|
||||
btf_record_has_field(map->record, BPF_RB_ROOT)) {
|
||||
if (is_tracing_prog_type(prog_type)) {
|
||||
verbose(env, "tracing progs cannot use bpf_list_head yet\n");
|
||||
verbose(env, "tracing progs cannot use bpf_{list_head,rb_root} yet\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
@ -88,6 +88,9 @@ static bool cgroup_memory_nosocket __ro_after_init;
|
||||
/* Kernel memory accounting disabled? */
|
||||
static bool cgroup_memory_nokmem __ro_after_init;
|
||||
|
||||
/* BPF memory accounting disabled? */
|
||||
static bool cgroup_memory_nobpf __ro_after_init;
|
||||
|
||||
#ifdef CONFIG_CGROUP_WRITEBACK
|
||||
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
|
||||
#endif
|
||||
@ -347,6 +350,9 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
|
||||
*/
|
||||
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
|
||||
EXPORT_SYMBOL(memcg_kmem_enabled_key);
|
||||
|
||||
DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
|
||||
EXPORT_SYMBOL(memcg_bpf_enabled_key);
|
||||
#endif
|
||||
|
||||
/**
|
||||
@ -5357,6 +5363,11 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
|
||||
if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
|
||||
static_branch_inc(&memcg_sockets_enabled_key);
|
||||
|
||||
#if defined(CONFIG_MEMCG_KMEM)
|
||||
if (!cgroup_memory_nobpf)
|
||||
static_branch_inc(&memcg_bpf_enabled_key);
|
||||
#endif
|
||||
|
||||
return &memcg->css;
|
||||
}
|
||||
|
||||
@ -5441,6 +5452,11 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
|
||||
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
|
||||
static_branch_dec(&memcg_sockets_enabled_key);
|
||||
|
||||
#if defined(CONFIG_MEMCG_KMEM)
|
||||
if (!cgroup_memory_nobpf)
|
||||
static_branch_dec(&memcg_bpf_enabled_key);
|
||||
#endif
|
||||
|
||||
vmpressure_cleanup(&memcg->vmpressure);
|
||||
cancel_work_sync(&memcg->high_work);
|
||||
mem_cgroup_remove_from_trees(memcg);
|
||||
@ -7269,6 +7285,8 @@ static int __init cgroup_memory(char *s)
|
||||
cgroup_memory_nosocket = true;
|
||||
if (!strcmp(token, "nokmem"))
|
||||
cgroup_memory_nokmem = true;
|
||||
if (!strcmp(token, "nobpf"))
|
||||
cgroup_memory_nobpf = true;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
@@ -396,10 +396,12 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
do {
run_ctx.prog_item = &item;
local_bh_disable();
if (xdp)
*retval = bpf_prog_run_xdp(prog, ctx);
else
*retval = bpf_prog_run(prog, ctx);
local_bh_enable();
} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
bpf_reset_run_ctx(old_ctx);
bpf_test_timer_leave(&t);
|
@ -5722,12 +5722,8 @@ static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
|
||||
static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
|
||||
const struct neighbour *neigh,
|
||||
const struct net_device *dev, u32 mtu)
|
||||
static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, u32 mtu)
|
||||
{
|
||||
memcpy(params->dmac, neigh->ha, ETH_ALEN);
|
||||
memcpy(params->smac, dev->dev_addr, ETH_ALEN);
|
||||
params->h_vlan_TCI = 0;
|
||||
params->h_vlan_proto = 0;
|
||||
if (mtu)
|
||||
@ -5838,21 +5834,29 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
|
||||
if (likely(nhc->nhc_gw_family != AF_INET6)) {
|
||||
if (nhc->nhc_gw_family)
|
||||
params->ipv4_dst = nhc->nhc_gw.ipv4;
|
||||
|
||||
neigh = __ipv4_neigh_lookup_noref(dev,
|
||||
(__force u32)params->ipv4_dst);
|
||||
} else {
|
||||
struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst;
|
||||
|
||||
params->family = AF_INET6;
|
||||
*dst = nhc->nhc_gw.ipv6;
|
||||
neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
|
||||
}
|
||||
|
||||
if (!neigh)
|
||||
return BPF_FIB_LKUP_RET_NO_NEIGH;
|
||||
if (flags & BPF_FIB_LOOKUP_SKIP_NEIGH)
|
||||
goto set_fwd_params;
|
||||
|
||||
return bpf_fib_set_fwd_params(params, neigh, dev, mtu);
|
||||
if (likely(nhc->nhc_gw_family != AF_INET6))
|
||||
neigh = __ipv4_neigh_lookup_noref(dev,
|
||||
(__force u32)params->ipv4_dst);
|
||||
else
|
||||
neigh = __ipv6_neigh_lookup_noref_stub(dev, params->ipv6_dst);
|
||||
|
||||
if (!neigh || !(neigh->nud_state & NUD_VALID))
|
||||
return BPF_FIB_LKUP_RET_NO_NEIGH;
|
||||
memcpy(params->dmac, neigh->ha, ETH_ALEN);
|
||||
memcpy(params->smac, dev->dev_addr, ETH_ALEN);
|
||||
|
||||
set_fwd_params:
|
||||
return bpf_fib_set_fwd_params(params, mtu);
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -5960,24 +5964,33 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
|
||||
params->rt_metric = res.f6i->fib6_metric;
|
||||
params->ifindex = dev->ifindex;
|
||||
|
||||
if (flags & BPF_FIB_LOOKUP_SKIP_NEIGH)
|
||||
goto set_fwd_params;
|
||||
|
||||
/* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
|
||||
* not needed here.
|
||||
*/
|
||||
neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
|
||||
if (!neigh)
|
||||
if (!neigh || !(neigh->nud_state & NUD_VALID))
|
||||
return BPF_FIB_LKUP_RET_NO_NEIGH;
|
||||
memcpy(params->dmac, neigh->ha, ETH_ALEN);
|
||||
memcpy(params->smac, dev->dev_addr, ETH_ALEN);
|
||||
|
||||
return bpf_fib_set_fwd_params(params, neigh, dev, mtu);
|
||||
set_fwd_params:
|
||||
return bpf_fib_set_fwd_params(params, mtu);
|
||||
}
|
||||
#endif
|
||||
|
||||
#define BPF_FIB_LOOKUP_MASK (BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT | \
|
||||
BPF_FIB_LOOKUP_SKIP_NEIGH)
|
||||
|
||||
BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
|
||||
struct bpf_fib_lookup *, params, int, plen, u32, flags)
|
||||
{
|
||||
if (plen < sizeof(*params))
|
||||
return -EINVAL;
|
||||
|
||||
if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
|
||||
if (flags & ~BPF_FIB_LOOKUP_MASK)
|
||||
return -EINVAL;
|
||||
|
||||
switch (params->family) {
|
||||
@ -6015,7 +6028,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
|
||||
if (plen < sizeof(*params))
|
||||
return -EINVAL;
|
||||
|
||||
if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
|
||||
if (flags & ~BPF_FIB_LOOKUP_MASK)
|
||||
return -EINVAL;
|
||||
|
||||
if (params->tot_len)
|
||||
|
@ -511,7 +511,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
|
||||
return skb;
|
||||
}
|
||||
|
||||
static int xsk_generic_xmit(struct sock *sk)
|
||||
static int __xsk_generic_xmit(struct sock *sk)
|
||||
{
|
||||
struct xdp_sock *xs = xdp_sk(sk);
|
||||
u32 max_batch = TX_BATCH_SIZE;
|
||||
@ -594,22 +594,13 @@ out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int xsk_xmit(struct sock *sk)
|
||||
static int xsk_generic_xmit(struct sock *sk)
|
||||
{
|
||||
struct xdp_sock *xs = xdp_sk(sk);
|
||||
int ret;
|
||||
|
||||
if (unlikely(!(xs->dev->flags & IFF_UP)))
|
||||
return -ENETDOWN;
|
||||
if (unlikely(!xs->tx))
|
||||
return -ENOBUFS;
|
||||
|
||||
if (xs->zc)
|
||||
return xsk_wakeup(xs, XDP_WAKEUP_TX);
|
||||
|
||||
/* Drop the RCU lock since the SKB path might sleep. */
|
||||
rcu_read_unlock();
|
||||
ret = xsk_generic_xmit(sk);
|
||||
ret = __xsk_generic_xmit(sk);
|
||||
/* Reaquire RCU lock before going into common code. */
|
||||
rcu_read_lock();
|
||||
|
||||
@ -627,17 +618,31 @@ static bool xsk_no_wakeup(struct sock *sk)
|
||||
#endif
|
||||
}
|
||||
|
||||
static int xsk_check_common(struct xdp_sock *xs)
|
||||
{
|
||||
if (unlikely(!xsk_is_bound(xs)))
|
||||
return -ENXIO;
|
||||
if (unlikely(!(xs->dev->flags & IFF_UP)))
|
||||
return -ENETDOWN;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
|
||||
{
|
||||
bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
|
||||
struct sock *sk = sock->sk;
|
||||
struct xdp_sock *xs = xdp_sk(sk);
|
||||
struct xsk_buff_pool *pool;
|
||||
int err;
|
||||
|
||||
if (unlikely(!xsk_is_bound(xs)))
|
||||
return -ENXIO;
|
||||
err = xsk_check_common(xs);
|
||||
if (err)
|
||||
return err;
|
||||
if (unlikely(need_wait))
|
||||
return -EOPNOTSUPP;
|
||||
if (unlikely(!xs->tx))
|
||||
return -ENOBUFS;
|
||||
|
||||
if (sk_can_busy_loop(sk)) {
|
||||
if (xs->zc)
|
||||
@ -649,8 +654,11 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
|
||||
return 0;
|
||||
|
||||
pool = xs->pool;
|
||||
if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
|
||||
return xsk_xmit(sk);
|
||||
if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
|
||||
if (xs->zc)
|
||||
return xsk_wakeup(xs, XDP_WAKEUP_TX);
|
||||
return xsk_generic_xmit(sk);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -670,11 +678,11 @@ static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int
|
||||
bool need_wait = !(flags & MSG_DONTWAIT);
|
||||
struct sock *sk = sock->sk;
|
||||
struct xdp_sock *xs = xdp_sk(sk);
|
||||
int err;
|
||||
|
||||
if (unlikely(!xsk_is_bound(xs)))
|
||||
return -ENXIO;
|
||||
if (unlikely(!(xs->dev->flags & IFF_UP)))
|
||||
return -ENETDOWN;
|
||||
err = xsk_check_common(xs);
|
||||
if (err)
|
||||
return err;
|
||||
if (unlikely(!xs->rx))
|
||||
return -ENOBUFS;
|
||||
if (unlikely(need_wait))
|
||||
@ -713,21 +721,20 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
|
||||
sock_poll_wait(file, sock, wait);
|
||||
|
||||
rcu_read_lock();
|
||||
if (unlikely(!xsk_is_bound(xs))) {
|
||||
rcu_read_unlock();
|
||||
return mask;
|
||||
}
|
||||
if (xsk_check_common(xs))
|
||||
goto skip_tx;
|
||||
|
||||
pool = xs->pool;
|
||||
|
||||
if (pool->cached_need_wakeup) {
|
||||
if (xs->zc)
|
||||
xsk_wakeup(xs, pool->cached_need_wakeup);
|
||||
else
|
||||
else if (xs->tx)
|
||||
/* Poll needs to drive Tx also in copy mode */
|
||||
xsk_xmit(sk);
|
||||
xsk_generic_xmit(sk);
|
||||
}
|
||||
|
||||
skip_tx:
|
||||
if (xs->rx && !xskq_prod_is_empty(xs->rx))
|
||||
mask |= EPOLLIN | EPOLLRDNORM;
|
||||
if (xs->tx && xsk_tx_writeable(xs))
|
||||
|
@ -38,7 +38,7 @@ static void check_map_id(int inner_map_fd, int map_in_map_fd, uint32_t key)
|
||||
uint32_t info_len = sizeof(info);
|
||||
int ret, id;
|
||||
|
||||
ret = bpf_obj_get_info_by_fd(inner_map_fd, &info, &info_len);
|
||||
ret = bpf_map_get_info_by_fd(inner_map_fd, &info, &info_len);
|
||||
assert(!ret);
|
||||
|
||||
ret = bpf_map_lookup_elem(map_in_map_fd, &key, &id);
|
||||
|
@ -153,7 +153,7 @@ int main(int argc, char **argv)
|
||||
return 1;
|
||||
}
|
||||
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
if (err) {
|
||||
printf("can't get prog info - %s\n", strerror(errno));
|
||||
return err;
|
||||
|
@ -184,7 +184,7 @@ int main(int argc, char **argv)
|
||||
return 1;
|
||||
}
|
||||
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
if (err) {
|
||||
printf("can't get prog info - %s\n", strerror(errno));
|
||||
return 1;
|
||||
|
@ -76,9 +76,9 @@ static int do_detach(int ifindex, const char *ifname, const char *app_name)
|
||||
return prog_fd;
|
||||
}
|
||||
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len);
|
||||
if (err) {
|
||||
printf("ERROR: bpf_obj_get_info_by_fd failed (%s)\n",
|
||||
printf("ERROR: bpf_prog_get_info_by_fd failed (%s)\n",
|
||||
strerror(errno));
|
||||
goto close_out;
|
||||
}
|
||||
|
@ -494,9 +494,9 @@ int main(int argc, char **argv)
|
||||
goto end_cpu;
|
||||
}
|
||||
|
||||
ret = bpf_obj_get_info_by_fd(bpf_map__fd(skel->maps.cpu_map), &info, &infosz);
|
||||
ret = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.cpu_map), &info, &infosz);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Failed bpf_obj_get_info_by_fd for cpumap: %s\n",
|
||||
fprintf(stderr, "Failed bpf_map_get_info_by_fd for cpumap: %s\n",
|
||||
strerror(errno));
|
||||
goto end_cpu;
|
||||
}
|
||||
|
@ -602,7 +602,7 @@ int main(int argc, char **argv)
|
||||
return EXIT_FAIL_XDP;
|
||||
}
|
||||
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
if (err) {
|
||||
printf("can't get prog info - %s\n", strerror(errno));
|
||||
return err;
|
||||
|
@ -35,7 +35,7 @@ static int do_attach(int idx, int fd, const char *name)
|
||||
return err;
|
||||
}
|
||||
|
||||
err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(fd, &info, &info_len);
|
||||
if (err) {
|
||||
printf("can't get prog info - %s\n", strerror(errno));
|
||||
return err;
|
||||
|
@ -295,7 +295,7 @@ int main(int argc, char **argv)
|
||||
return 1;
|
||||
}
|
||||
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
if (err) {
|
||||
printf("can't get prog info - %s\n", strerror(errno));
|
||||
return err;
|
||||
|
@ -537,7 +537,7 @@ static bool btf_is_kernel_module(__u32 btf_id)
|
||||
len = sizeof(btf_info);
|
||||
btf_info.name = ptr_to_u64(btf_name);
|
||||
btf_info.name_len = sizeof(btf_name);
|
||||
err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
|
||||
err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
|
||||
close(btf_fd);
|
||||
if (err) {
|
||||
p_err("can't get BTF (ID %u) object info: %s", btf_id, strerror(errno));
|
||||
@ -606,7 +606,7 @@ static int do_dump(int argc, char **argv)
|
||||
if (fd < 0)
|
||||
return -1;
|
||||
|
||||
err = bpf_obj_get_info_by_fd(fd, &info, &len);
|
||||
err = bpf_prog_get_info_by_fd(fd, &info, &len);
|
||||
if (err) {
|
||||
p_err("can't get prog info: %s", strerror(errno));
|
||||
goto done;
|
||||
@ -789,7 +789,10 @@ build_btf_type_table(struct hashmap *tab, enum bpf_obj_type type,
|
||||
}
|
||||
|
||||
memset(info, 0, *len);
|
||||
err = bpf_obj_get_info_by_fd(fd, info, len);
|
||||
if (type == BPF_OBJ_PROG)
|
||||
err = bpf_prog_get_info_by_fd(fd, info, len);
|
||||
else
|
||||
err = bpf_map_get_info_by_fd(fd, info, len);
|
||||
close(fd);
|
||||
if (err) {
|
||||
p_err("can't get %s info: %s", names[type],
|
||||
@ -931,7 +934,7 @@ show_btf(int fd, struct hashmap *btf_prog_table,
|
||||
int err;
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
err = bpf_obj_get_info_by_fd(fd, &info, &len);
|
||||
err = bpf_btf_get_info_by_fd(fd, &info, &len);
|
||||
if (err) {
|
||||
p_err("can't get BTF object info: %s", strerror(errno));
|
||||
return -1;
|
||||
@ -943,7 +946,7 @@ show_btf(int fd, struct hashmap *btf_prog_table,
|
||||
info.name = ptr_to_u64(name);
|
||||
len = sizeof(info);
|
||||
|
||||
err = bpf_obj_get_info_by_fd(fd, &info, &len);
|
||||
err = bpf_btf_get_info_by_fd(fd, &info, &len);
|
||||
if (err) {
|
||||
p_err("can't get BTF object info: %s", strerror(errno));
|
||||
return -1;
|
||||
|
@ -57,7 +57,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
|
||||
if (prog_fd < 0)
|
||||
goto print;
|
||||
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
if (err)
|
||||
goto print;
|
||||
|
||||
@ -70,7 +70,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
|
||||
info.func_info_rec_size = finfo_rec_size;
|
||||
info.func_info = ptr_to_u64(&finfo);
|
||||
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
if (err)
|
||||
goto print;
|
||||
|
||||
|
@ -82,7 +82,7 @@ static void guess_vmlinux_btf_id(__u32 attach_btf_obj_id)
|
||||
if (fd < 0)
|
||||
return;
|
||||
|
||||
err = bpf_obj_get_info_by_fd(fd, &btf_info, &btf_len);
|
||||
err = bpf_btf_get_info_by_fd(fd, &btf_info, &btf_len);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
@ -108,7 +108,7 @@ static int show_bpf_prog(int id, enum bpf_attach_type attach_type,
|
||||
if (prog_fd < 0)
|
||||
return -1;
|
||||
|
||||
if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) {
|
||||
if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len)) {
|
||||
close(prog_fd);
|
||||
return -1;
|
||||
}
|
||||
|
@ -353,7 +353,7 @@ void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
|
||||
info.func_info_rec_size = sizeof(finfo);
|
||||
info.func_info = ptr_to_u64(&finfo);
|
||||
|
||||
if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len))
|
||||
if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len))
|
||||
goto copy_name;
|
||||
|
||||
prog_btf = btf__load_from_kernel_by_id(info.btf_id);
|
||||
@ -488,7 +488,7 @@ static int do_build_table_cb(const char *fpath, const struct stat *sb,
|
||||
goto out_close;
|
||||
|
||||
memset(&pinned_info, 0, sizeof(pinned_info));
|
||||
if (bpf_obj_get_info_by_fd(fd, &pinned_info, &len))
|
||||
if (bpf_prog_get_info_by_fd(fd, &pinned_info, &len))
|
||||
goto out_close;
|
||||
|
||||
path = strdup(fpath);
|
||||
@ -756,7 +756,7 @@ static int prog_fd_by_nametag(void *nametag, int **fds, bool tag)
|
||||
goto err_close_fds;
|
||||
}
|
||||
|
||||
err = bpf_obj_get_info_by_fd(fd, &info, &len);
|
||||
err = bpf_prog_get_info_by_fd(fd, &info, &len);
|
||||
if (err) {
|
||||
p_err("can't get prog info (%u): %s",
|
||||
id, strerror(errno));
|
||||
@ -916,7 +916,7 @@ static int map_fd_by_name(char *name, int **fds)
|
||||
goto err_close_fds;
|
||||
}
|
||||
|
||||
err = bpf_obj_get_info_by_fd(fd, &info, &len);
|
||||
err = bpf_map_get_info_by_fd(fd, &info, &len);
|
||||
if (err) {
|
||||
p_err("can't get map info (%u): %s",
|
||||
id, strerror(errno));
|
||||
@ -1026,7 +1026,8 @@ exit_free:
|
||||
return fd;
|
||||
}
|
||||
|
||||
int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len)
|
||||
int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info,
|
||||
__u32 *info_len)
|
||||
{
|
||||
int err;
|
||||
int fd;
|
||||
@ -1035,7 +1036,7 @@ int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len)
|
||||
if (fd < 0)
|
||||
return -1;
|
||||
|
||||
err = bpf_obj_get_info_by_fd(fd, info, info_len);
|
||||
err = bpf_map_get_info_by_fd(fd, info, info_len);
|
||||
if (err) {
|
||||
p_err("can't get map info: %s", strerror(errno));
|
||||
close(fd);
|
||||
|
@ -145,7 +145,7 @@ static int get_prog_info(int prog_id, struct bpf_prog_info *info)
|
||||
return prog_fd;
|
||||
|
||||
memset(info, 0, sizeof(*info));
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, info, &len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fd, info, &len);
|
||||
if (err)
|
||||
p_err("can't get prog info: %s", strerror(errno));
|
||||
close(prog_fd);
|
||||
@ -327,7 +327,7 @@ static int do_show_link(int fd)
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
again:
|
||||
err = bpf_obj_get_info_by_fd(fd, &info, &len);
|
||||
err = bpf_link_get_info_by_fd(fd, &info, &len);
|
||||
if (err) {
|
||||
p_err("can't get link info: %s",
|
||||
strerror(errno));
|
||||
|
@ -168,7 +168,8 @@ int prog_parse_fd(int *argc, char ***argv);
|
||||
int prog_parse_fds(int *argc, char ***argv, int **fds);
|
||||
int map_parse_fd(int *argc, char ***argv);
|
||||
int map_parse_fds(int *argc, char ***argv, int **fds);
|
||||
int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len);
|
||||
int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info,
|
||||
__u32 *info_len);
|
||||
|
||||
struct bpf_prog_linfo;
|
||||
#if defined(HAVE_LLVM_SUPPORT) || defined(HAVE_LIBBFD_SUPPORT)
|
||||
|
@ -638,7 +638,7 @@ static int do_show_subset(int argc, char **argv)
|
||||
if (json_output && nb_fds > 1)
|
||||
jsonw_start_array(json_wtr); /* root array */
|
||||
for (i = 0; i < nb_fds; i++) {
|
||||
err = bpf_obj_get_info_by_fd(fds[i], &info, &len);
|
||||
err = bpf_map_get_info_by_fd(fds[i], &info, &len);
|
||||
if (err) {
|
||||
p_err("can't get map info: %s",
|
||||
strerror(errno));
|
||||
@ -708,7 +708,7 @@ static int do_show(int argc, char **argv)
|
||||
break;
|
||||
}
|
||||
|
||||
err = bpf_obj_get_info_by_fd(fd, &info, &len);
|
||||
err = bpf_map_get_info_by_fd(fd, &info, &len);
|
||||
if (err) {
|
||||
p_err("can't get map info: %s", strerror(errno));
|
||||
close(fd);
|
||||
@ -764,7 +764,7 @@ static int maps_have_btf(int *fds, int nb_fds)
|
||||
int err, i;
|
||||
|
||||
for (i = 0; i < nb_fds; i++) {
|
||||
err = bpf_obj_get_info_by_fd(fds[i], &info, &len);
|
||||
err = bpf_map_get_info_by_fd(fds[i], &info, &len);
|
||||
if (err) {
|
||||
p_err("can't get map info: %s", strerror(errno));
|
||||
return -1;
|
||||
@ -925,7 +925,7 @@ static int do_dump(int argc, char **argv)
|
||||
if (wtr && nb_fds > 1)
|
||||
jsonw_start_array(wtr); /* root array */
|
||||
for (i = 0; i < nb_fds; i++) {
|
||||
if (bpf_obj_get_info_by_fd(fds[i], &info, &len)) {
|
||||
if (bpf_map_get_info_by_fd(fds[i], &info, &len)) {
|
||||
p_err("can't get map info: %s", strerror(errno));
|
||||
break;
|
||||
}
|
||||
|
@ -198,7 +198,7 @@ static void show_prog_maps(int fd, __u32 num_maps)
|
||||
info.nr_map_ids = num_maps;
|
||||
info.map_ids = ptr_to_u64(map_ids);
|
||||
|
||||
err = bpf_obj_get_info_by_fd(fd, &info, &len);
|
||||
err = bpf_prog_get_info_by_fd(fd, &info, &len);
|
||||
if (err || !info.nr_map_ids)
|
||||
return;
|
||||
|
||||
@ -231,7 +231,7 @@ static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
|
||||
|
||||
memset(&prog_info, 0, sizeof(prog_info));
|
||||
prog_info_len = sizeof(prog_info);
|
||||
ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
|
||||
ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
|
||||
if (ret)
|
||||
return NULL;
|
||||
|
||||
@ -248,7 +248,7 @@ static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
|
||||
prog_info.map_ids = ptr_to_u64(map_ids);
|
||||
prog_info_len = sizeof(prog_info);
|
||||
|
||||
ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
|
||||
ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
|
||||
if (ret)
|
||||
goto free_map_ids;
|
||||
|
||||
@ -259,7 +259,7 @@ static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
|
||||
|
||||
memset(map_info, 0, sizeof(*map_info));
|
||||
map_info_len = sizeof(*map_info);
|
||||
ret = bpf_obj_get_info_by_fd(map_fd, map_info, &map_info_len);
|
||||
ret = bpf_map_get_info_by_fd(map_fd, map_info, &map_info_len);
|
||||
if (ret < 0) {
|
||||
close(map_fd);
|
||||
goto free_map_ids;
|
||||
@ -580,7 +580,7 @@ static int show_prog(int fd)
|
||||
__u32 len = sizeof(info);
|
||||
int err;
|
||||
|
||||
err = bpf_obj_get_info_by_fd(fd, &info, &len);
|
||||
err = bpf_prog_get_info_by_fd(fd, &info, &len);
|
||||
if (err) {
|
||||
p_err("can't get prog info: %s", strerror(errno));
|
||||
return -1;
|
||||
@ -949,7 +949,7 @@ static int do_dump(int argc, char **argv)
|
||||
for (i = 0; i < nb_fds; i++) {
|
||||
memset(&info, 0, sizeof(info));
|
||||
|
||||
err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(fds[i], &info, &info_len);
|
||||
if (err) {
|
||||
p_err("can't get prog info: %s", strerror(errno));
|
||||
break;
|
||||
@ -961,7 +961,7 @@ static int do_dump(int argc, char **argv)
|
||||
break;
|
||||
}
|
||||
|
||||
err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(fds[i], &info, &info_len);
|
||||
if (err) {
|
||||
p_err("can't get prog info: %s", strerror(errno));
|
||||
break;
|
||||
@ -2170,9 +2170,9 @@ static char *profile_target_name(int tgt_fd)
|
||||
char *name = NULL;
|
||||
int err;
|
||||
|
||||
err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(tgt_fd, &info, &info_len);
|
||||
if (err) {
|
||||
p_err("failed to bpf_obj_get_info_by_fd for prog FD %d", tgt_fd);
|
||||
p_err("failed to get info for prog FD %d", tgt_fd);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -2183,7 +2183,7 @@ static char *profile_target_name(int tgt_fd)
|
||||
|
||||
func_info_rec_size = info.func_info_rec_size;
|
||||
if (info.nr_func_info == 0) {
|
||||
p_err("bpf_obj_get_info_by_fd for prog FD %d found 0 func_info", tgt_fd);
|
||||
p_err("found 0 func_info for prog FD %d", tgt_fd);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -2192,7 +2192,7 @@ static char *profile_target_name(int tgt_fd)
|
||||
info.func_info_rec_size = func_info_rec_size;
|
||||
info.func_info = ptr_to_u64(&func_info);
|
||||
|
||||
err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(tgt_fd, &info, &info_len);
|
||||
if (err) {
|
||||
p_err("failed to get func_info for prog FD %d", tgt_fd);
|
||||
goto out;
|
||||
|
@ -151,7 +151,7 @@ static int get_next_struct_ops_map(const char *name, int *res_fd,
|
||||
return -1;
|
||||
}
|
||||
|
||||
err = bpf_obj_get_info_by_fd(fd, info, &info_len);
|
||||
err = bpf_map_get_info_by_fd(fd, info, &info_len);
|
||||
if (err) {
|
||||
p_err("can't get map info: %s", strerror(errno));
|
||||
close(fd);
|
||||
@ -262,7 +262,7 @@ static struct res do_one_id(const char *id_str, work_func func, void *data,
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (bpf_obj_get_info_by_fd(fd, info, &info_len)) {
|
||||
if (bpf_map_get_info_by_fd(fd, info, &info_len)) {
|
||||
p_err("can't get map info: %s", strerror(errno));
|
||||
res.nr_errs++;
|
||||
goto done;
|
||||
@ -522,7 +522,7 @@ static int do_register(int argc, char **argv)
|
||||
bpf_link__disconnect(link);
|
||||
bpf_link__destroy(link);
|
||||
|
||||
if (!bpf_obj_get_info_by_fd(bpf_map__fd(map), &info,
|
||||
if (!bpf_map_get_info_by_fd(bpf_map__fd(map), &info,
|
||||
&info_len))
|
||||
p_info("Registered %s %s id %u",
|
||||
get_kern_struct_ops_name(&info),
|
||||
|
@ -6,6 +6,8 @@
|
||||
#include "../../arch/s390/include/uapi/asm/bpf_perf_event.h"
|
||||
#elif defined(__riscv)
|
||||
#include "../../arch/riscv/include/uapi/asm/bpf_perf_event.h"
|
||||
#elif defined(__loongarch__)
|
||||
#include "../../arch/loongarch/include/uapi/asm/bpf_perf_event.h"
|
||||
#else
|
||||
#include <uapi/asm-generic/bpf_perf_event.h>
|
||||
#endif
|
||||
@@ -3134,6 +3134,11 @@ union bpf_attr {
 *		**BPF_FIB_LOOKUP_OUTPUT**
 *			Perform lookup from an egress perspective (default is
 *			ingress).
 *		**BPF_FIB_LOOKUP_SKIP_NEIGH**
 *			Skip the neighbour table lookup. *params*->dmac
 *			and *params*->smac will not be set as output. A common
 *			use case is to call **bpf_redirect_neigh**\ () after
 *			doing **bpf_fib_lookup**\ ().
 *
 *		*ctx* is either **struct xdp_md** for XDP programs or
 *		**struct sk_buff** tc cls_act programs.
@@ -6750,6 +6755,7 @@ struct bpf_raw_tracepoint_args {
enum {
BPF_FIB_LOOKUP_DIRECT = (1U << 0),
BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
};
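A hedged sketch of the documented pattern for the new flag in a tc program, where the neighbour resolution is deferred to bpf_redirect_neigh(); the address setup is abbreviated and the program name is illustrative:

SEC("tc")
int fwd_skip_neigh(struct __sk_buff *skb)
{
	struct bpf_fib_lookup params = {};
	int rc;

	params.family = AF_INET;
	params.ifindex = skb->ingress_ifindex;
	/* ... fill the IPv4 addresses and ToS from the packet ... */

	rc = bpf_fib_lookup(skb, &params, sizeof(params),
			    BPF_FIB_LOOKUP_SKIP_NEIGH);
	if (rc != BPF_FIB_LKUP_RET_SUCCESS)
		return TC_ACT_OK;

	/* dmac/smac are intentionally not filled in; let the stack
	 * resolve the neighbour when redirecting.
	 */
	return bpf_redirect_neigh(params.ifindex, NULL, 0, 0);
}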

enum {
@@ -6917,6 +6923,17 @@ struct bpf_list_node {
__u64 :64;
} __attribute__((aligned(8)));

struct bpf_rb_root {
__u64 :64;
__u64 :64;
} __attribute__((aligned(8)));

struct bpf_rb_node {
__u64 :64;
__u64 :64;
__u64 :64;
} __attribute__((aligned(8)));

struct bpf_sysctl {
__u32 write; /* Sysctl is being read (= 0) or written (= 1).
* Allows 1,2,4-byte read, but no write.
|
@@ -1044,6 +1044,26 @@ int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
return libbpf_err_errno(err);
}

int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len)
{
return bpf_obj_get_info_by_fd(prog_fd, info, info_len);
}

int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len)
{
return bpf_obj_get_info_by_fd(map_fd, info, info_len);
}

int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len)
{
return bpf_obj_get_info_by_fd(btf_fd, info, info_len);
}

int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len)
{
return bpf_obj_get_info_by_fd(link_fd, info, info_len);
}

int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint);

@@ -386,6 +386,15 @@ LIBBPF_API int bpf_link_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_link_get_fd_by_id_opts(__u32 id,
const struct bpf_get_fd_by_id_opts *opts);
LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len);
/* Type-safe variants of bpf_obj_get_info_by_fd(). The callers still needs to
 * pass info_len, which should normally be
 * sizeof(struct bpf_{prog,map,btf,link}_info), in order to be compatible with
 * different libbpf and kernel versions.
 */
LIBBPF_API int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len);
LIBBPF_API int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len);
LIBBPF_API int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len);
LIBBPF_API int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len);

struct bpf_prog_query_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
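A short userspace sketch of how the new type-safe wrappers are meant to be called; this is illustrative only, with error handling trimmed, and the function name here is an assumption rather than part of the diff:

#include <stdio.h>
#include <bpf/bpf.h>

static int show_prog(int prog_fd)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);	/* info_len is still passed explicitly */
	int err;

	err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
	if (err)
		return err;

	printf("prog %u: %s\n", info.id, info.name);
	return 0;
}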
|
@ -1350,9 +1350,9 @@ struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
|
||||
void *ptr;
|
||||
int err;
|
||||
|
||||
/* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
|
||||
/* we won't know btf_size until we call bpf_btf_get_info_by_fd(). so
|
||||
* let's start with a sane default - 4KiB here - and resize it only if
|
||||
* bpf_obj_get_info_by_fd() needs a bigger buffer.
|
||||
* bpf_btf_get_info_by_fd() needs a bigger buffer.
|
||||
*/
|
||||
last_size = 4096;
|
||||
ptr = malloc(last_size);
|
||||
@ -1362,7 +1362,7 @@ struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
|
||||
memset(&btf_info, 0, sizeof(btf_info));
|
||||
btf_info.btf = ptr_to_u64(ptr);
|
||||
btf_info.btf_size = last_size;
|
||||
err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
|
||||
err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
|
||||
|
||||
if (!err && btf_info.btf_size > last_size) {
|
||||
void *temp_ptr;
|
||||
@ -1380,7 +1380,7 @@ struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
|
||||
btf_info.btf = ptr_to_u64(ptr);
|
||||
btf_info.btf_size = last_size;
|
||||
|
||||
err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
|
||||
err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
|
||||
}
|
||||
|
||||
if (err || btf_info.btf_size > last_size) {
|
||||
|
@ -4345,7 +4345,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
|
||||
char *new_name;
|
||||
|
||||
memset(&info, 0, len);
|
||||
err = bpf_obj_get_info_by_fd(fd, &info, &len);
|
||||
err = bpf_map_get_info_by_fd(fd, &info, &len);
|
||||
if (err && errno == EINVAL)
|
||||
err = bpf_get_map_info_from_fdinfo(fd, &info);
|
||||
if (err)
|
||||
@ -4729,7 +4729,7 @@ static int probe_module_btf(void)
|
||||
* kernel's module BTF support coincides with support for
|
||||
* name/name_len fields in struct bpf_btf_info.
|
||||
*/
|
||||
err = bpf_obj_get_info_by_fd(fd, &info, &len);
|
||||
err = bpf_btf_get_info_by_fd(fd, &info, &len);
|
||||
close(fd);
|
||||
return !err;
|
||||
}
|
||||
@ -4892,7 +4892,7 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
|
||||
int err;
|
||||
|
||||
memset(&map_info, 0, map_info_len);
|
||||
err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
|
||||
err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
|
||||
if (err && errno == EINVAL)
|
||||
err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
|
||||
if (err) {
|
||||
@ -5437,7 +5437,7 @@ static int load_module_btfs(struct bpf_object *obj)
|
||||
info.name = ptr_to_u64(name);
|
||||
info.name_len = sizeof(name);
|
||||
|
||||
err = bpf_obj_get_info_by_fd(fd, &info, &len);
|
||||
err = bpf_btf_get_info_by_fd(fd, &info, &len);
|
||||
if (err) {
|
||||
err = -errno;
|
||||
pr_warn("failed to get BTF object #%d info: %d\n", id, err);
|
||||
@ -9030,9 +9030,9 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
|
||||
int err;
|
||||
|
||||
memset(&info, 0, info_len);
|
||||
err = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len);
|
||||
if (err) {
|
||||
pr_warn("failed bpf_obj_get_info_by_fd for FD %d: %d\n",
|
||||
pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n",
|
||||
attach_prog_fd, err);
|
||||
return err;
|
||||
}
|
||||
@ -11741,7 +11741,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
|
||||
/* best-effort sanity checks */
|
||||
memset(&map, 0, sizeof(map));
|
||||
map_info_len = sizeof(map);
|
||||
err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
|
||||
err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len);
|
||||
if (err) {
|
||||
err = -errno;
|
||||
/* if BPF_OBJ_GET_INFO_BY_FD is supported, will return
|
||||
|
@ -384,4 +384,9 @@ LIBBPF_1.1.0 {
|
||||
} LIBBPF_1.0.0;
|
||||
|
||||
LIBBPF_1.2.0 {
|
||||
global:
|
||||
bpf_btf_get_info_by_fd;
|
||||
bpf_link_get_info_by_fd;
|
||||
bpf_map_get_info_by_fd;
|
||||
bpf_prog_get_info_by_fd;
|
||||
} LIBBPF_1.1.0;
|
||||
|
@ -689,7 +689,7 @@ static int tc_add_fd_and_name(struct libbpf_nla_req *req, int fd)
|
||||
int len, ret;
|
||||
|
||||
memset(&info, 0, info_len);
|
||||
ret = bpf_obj_get_info_by_fd(fd, &info, &info_len);
|
||||
ret = bpf_prog_get_info_by_fd(fd, &info, &info_len);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -83,7 +83,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
|
||||
err = bpf_obj_get_info_by_fd(map_fd, &info, &len);
|
||||
err = bpf_map_get_info_by_fd(map_fd, &info, &len);
|
||||
if (err) {
|
||||
err = -errno;
|
||||
pr_warn("ringbuf: failed to get map info for fd=%d: %d\n",
|
||||
@ -359,7 +359,7 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
|
||||
err = bpf_obj_get_info_by_fd(map_fd, &info, &len);
|
||||
err = bpf_map_get_info_by_fd(map_fd, &info, &len);
|
||||
if (err) {
|
||||
err = -errno;
|
||||
pr_warn("user ringbuf: failed to get map info for fd=%d: %d\n", map_fd, err);
|
||||
|
@ -152,14 +152,13 @@ endif
|
||||
# NOTE: Semicolon at the end is critical to override lib.mk's default static
|
||||
# rule for binaries.
|
||||
$(notdir $(TEST_GEN_PROGS) \
|
||||
$(TEST_PROGS) \
|
||||
$(TEST_PROGS_EXTENDED) \
|
||||
$(TEST_GEN_PROGS_EXTENDED) \
|
||||
$(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ;
|
||||
|
||||
# sort removes libbpf duplicates when not cross-building
|
||||
MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \
|
||||
$(HOST_BUILD_DIR)/bpftool $(HOST_BUILD_DIR)/resolve_btfids \
|
||||
MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \
|
||||
$(BUILD_DIR)/bpftool $(HOST_BUILD_DIR)/bpftool \
|
||||
$(HOST_BUILD_DIR)/resolve_btfids \
|
||||
$(RUNQSLOWER_OUTPUT) $(INCLUDE_DIR))
|
||||
$(MAKE_DIRS):
|
||||
$(call msg,MKDIR,,$@)
|
||||
@ -209,6 +208,14 @@ $(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(wildcard bpf_testmod/Makefile bpf_tes
|
||||
$(Q)cp bpf_testmod/bpf_testmod.ko $@
|
||||
|
||||
DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool
|
||||
ifneq ($(CROSS_COMPILE),)
|
||||
CROSS_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool
|
||||
TRUNNER_BPFTOOL := $(CROSS_BPFTOOL)
|
||||
USE_BOOTSTRAP := ""
|
||||
else
|
||||
TRUNNER_BPFTOOL := $(DEFAULT_BPFTOOL)
|
||||
USE_BOOTSTRAP := "bootstrap/"
|
||||
endif
|
||||
|
||||
$(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT)
|
||||
$(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \
|
||||
@ -220,7 +227,7 @@ $(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT)
|
||||
EXTRA_LDFLAGS='$(SAN_LDFLAGS)' && \
|
||||
cp $(RUNQSLOWER_OUTPUT)runqslower $@
|
||||
|
||||
TEST_GEN_PROGS_EXTENDED += $(DEFAULT_BPFTOOL)
|
||||
TEST_GEN_PROGS_EXTENDED += $(TRUNNER_BPFTOOL)
|
||||
|
||||
$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ)
|
||||
|
||||
@ -258,6 +265,18 @@ $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
|
||||
LIBBPF_DESTDIR=$(HOST_SCRATCH_DIR)/ \
|
||||
prefix= DESTDIR=$(HOST_SCRATCH_DIR)/ install-bin
|
||||
|
||||
ifneq ($(CROSS_COMPILE),)
|
||||
$(CROSS_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
|
||||
$(BPFOBJ) | $(BUILD_DIR)/bpftool
|
||||
$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
|
||||
ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) \
|
||||
EXTRA_CFLAGS='-g -O0' \
|
||||
OUTPUT=$(BUILD_DIR)/bpftool/ \
|
||||
LIBBPF_OUTPUT=$(BUILD_DIR)/libbpf/ \
|
||||
LIBBPF_DESTDIR=$(SCRATCH_DIR)/ \
|
||||
prefix= DESTDIR=$(SCRATCH_DIR)/ install-bin
|
||||
endif
|
||||
|
||||
all: docs
|
||||
|
||||
docs:
|
||||
@ -523,11 +542,12 @@ endif
|
||||
$(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
|
||||
$(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \
|
||||
$(RESOLVE_BTFIDS) \
|
||||
$(TRUNNER_BPFTOOL) \
|
||||
| $(TRUNNER_BINARY)-extras
|
||||
$$(call msg,BINARY,,$$@)
|
||||
$(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
|
||||
$(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.bpf.o $$@
|
||||
$(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/bootstrap/bpftool \
|
||||
$(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/$(USE_BOOTSTRAP)bpftool \
|
||||
$(OUTPUT)/$(if $2,$2/)bpftool
|
||||
|
||||
endef
|
||||
@ -618,6 +638,7 @@ $(OUTPUT)/bench_strncmp.o: $(OUTPUT)/strncmp_bench.skel.h
|
||||
$(OUTPUT)/bench_bpf_hashmap_full_update.o: $(OUTPUT)/bpf_hashmap_full_update_bench.skel.h
|
||||
$(OUTPUT)/bench_local_storage.o: $(OUTPUT)/local_storage_bench.skel.h
|
||||
$(OUTPUT)/bench_local_storage_rcu_tasks_trace.o: $(OUTPUT)/local_storage_rcu_tasks_trace_bench.skel.h
|
||||
$(OUTPUT)/bench_bpf_hashmap_lookup.o: $(OUTPUT)/bpf_hashmap_lookup.skel.h
|
||||
$(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ)
|
||||
$(OUTPUT)/bench: LDLIBS += -lm
|
||||
$(OUTPUT)/bench: $(OUTPUT)/bench.o \
|
||||
@ -632,7 +653,9 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \
|
||||
$(OUTPUT)/bench_strncmp.o \
|
||||
$(OUTPUT)/bench_bpf_hashmap_full_update.o \
|
||||
$(OUTPUT)/bench_local_storage.o \
|
||||
$(OUTPUT)/bench_local_storage_rcu_tasks_trace.o
|
||||
$(OUTPUT)/bench_local_storage_rcu_tasks_trace.o \
|
||||
$(OUTPUT)/bench_bpf_hashmap_lookup.o \
|
||||
#
|
||||
$(call msg,BINARY,,$@)
|
||||
$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
|
||||
|
||||
|
@ -16,6 +16,7 @@ struct env env = {
|
||||
.warmup_sec = 1,
|
||||
.duration_sec = 5,
|
||||
.affinity = false,
|
||||
.quiet = false,
|
||||
.consumer_cnt = 1,
|
||||
.producer_cnt = 1,
|
||||
};
|
||||
@ -262,6 +263,7 @@ static const struct argp_option opts[] = {
|
||||
{ "consumers", 'c', "NUM", 0, "Number of consumer threads"},
|
||||
{ "verbose", 'v', NULL, 0, "Verbose debug output"},
|
||||
{ "affinity", 'a', NULL, 0, "Set consumer/producer thread affinity"},
|
||||
{ "quiet", 'q', NULL, 0, "Be more quiet"},
|
||||
{ "prod-affinity", ARG_PROD_AFFINITY_SET, "CPUSET", 0,
|
||||
"Set of CPUs for producer threads; implies --affinity"},
|
||||
{ "cons-affinity", ARG_CONS_AFFINITY_SET, "CPUSET", 0,
|
||||
@ -275,6 +277,7 @@ extern struct argp bench_bpf_loop_argp;
|
||||
extern struct argp bench_local_storage_argp;
|
||||
extern struct argp bench_local_storage_rcu_tasks_trace_argp;
|
||||
extern struct argp bench_strncmp_argp;
|
||||
extern struct argp bench_hashmap_lookup_argp;
|
||||
|
||||
static const struct argp_child bench_parsers[] = {
|
||||
{ &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 },
|
||||
@ -284,13 +287,15 @@ static const struct argp_child bench_parsers[] = {
|
||||
{ &bench_strncmp_argp, 0, "bpf_strncmp helper benchmark", 0 },
|
||||
{ &bench_local_storage_rcu_tasks_trace_argp, 0,
|
||||
"local_storage RCU Tasks Trace slowdown benchmark", 0 },
|
||||
{ &bench_hashmap_lookup_argp, 0, "Hashmap lookup benchmark", 0 },
|
||||
{},
|
||||
};
|
||||
|
||||
/* Make pos_args global, so that we can run argp_parse twice, if necessary */
|
||||
static int pos_args;
|
||||
|
||||
static error_t parse_arg(int key, char *arg, struct argp_state *state)
|
||||
{
|
||||
static int pos_args;
|
||||
|
||||
switch (key) {
|
||||
case 'v':
|
||||
env.verbose = true;
|
||||
@ -329,6 +334,9 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
|
||||
case 'a':
|
||||
env.affinity = true;
|
||||
break;
|
||||
case 'q':
|
||||
env.quiet = true;
|
||||
break;
|
||||
case ARG_PROD_AFFINITY_SET:
|
||||
env.affinity = true;
|
||||
if (parse_num_list(arg, &env.prod_cpus.cpus,
|
||||
@ -359,7 +367,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void parse_cmdline_args(int argc, char **argv)
|
||||
static void parse_cmdline_args_init(int argc, char **argv)
|
||||
{
|
||||
static const struct argp argp = {
|
||||
.options = opts,
|
||||
@ -369,9 +377,25 @@ static void parse_cmdline_args(int argc, char **argv)
|
||||
};
|
||||
if (argp_parse(&argp, argc, argv, 0, NULL, NULL))
|
||||
exit(1);
|
||||
if (!env.list && !env.bench_name) {
|
||||
argp_help(&argp, stderr, ARGP_HELP_DOC, "bench");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
static void parse_cmdline_args_final(int argc, char **argv)
|
||||
{
|
||||
struct argp_child bench_parsers[2] = {};
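/* Slot 0 holds the selected benchmark's own argp child parser (if any);
 * the second, zero-initialized element terminates the array, as argp requires.
 */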
const struct argp argp = {
|
||||
.options = opts,
|
||||
.parser = parse_arg,
|
||||
.doc = argp_program_doc,
|
||||
.children = bench_parsers,
|
||||
};
|
||||
|
||||
/* Parse arguments the second time with the correct set of parsers */
|
||||
if (bench->argp) {
|
||||
bench_parsers[0].argp = bench->argp;
|
||||
bench_parsers[0].header = bench->name;
|
||||
pos_args = 0;
|
||||
if (argp_parse(&argp, argc, argv, 0, NULL, NULL))
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
@ -490,6 +514,7 @@ extern const struct bench bench_local_storage_cache_seq_get;
|
||||
extern const struct bench bench_local_storage_cache_interleaved_get;
|
||||
extern const struct bench bench_local_storage_cache_hashmap_control;
|
||||
extern const struct bench bench_local_storage_tasks_trace;
|
||||
extern const struct bench bench_bpf_hashmap_lookup;
|
||||
|
||||
static const struct bench *benchs[] = {
|
||||
&bench_count_global,
|
||||
@ -529,17 +554,17 @@ static const struct bench *benchs[] = {
|
||||
&bench_local_storage_cache_interleaved_get,
|
||||
&bench_local_storage_cache_hashmap_control,
|
||||
&bench_local_storage_tasks_trace,
|
||||
&bench_bpf_hashmap_lookup,
|
||||
};
|
||||
|
||||
static void setup_benchmark()
|
||||
static void find_benchmark(void)
|
||||
{
|
||||
int i, err;
|
||||
int i;
|
||||
|
||||
if (!env.bench_name) {
|
||||
fprintf(stderr, "benchmark name is not specified\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(benchs); i++) {
|
||||
if (strcmp(benchs[i]->name, env.bench_name) == 0) {
|
||||
bench = benchs[i];
|
||||
@ -550,8 +575,14 @@ static void setup_benchmark()
|
||||
fprintf(stderr, "benchmark '%s' not found\n", env.bench_name);
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
printf("Setting up benchmark '%s'...\n", bench->name);
|
||||
static void setup_benchmark(void)
|
||||
{
|
||||
int i, err;
|
||||
|
||||
if (!env.quiet)
|
||||
printf("Setting up benchmark '%s'...\n", bench->name);
|
||||
|
||||
state.producers = calloc(env.producer_cnt, sizeof(*state.producers));
|
||||
state.consumers = calloc(env.consumer_cnt, sizeof(*state.consumers));
|
||||
@ -597,7 +628,8 @@ static void setup_benchmark()
|
||||
next_cpu(&env.prod_cpus));
|
||||
}
|
||||
|
||||
printf("Benchmark '%s' started.\n", bench->name);
|
||||
if (!env.quiet)
|
||||
printf("Benchmark '%s' started.\n", bench->name);
|
||||
}
|
||||
|
||||
static pthread_mutex_t bench_done_mtx = PTHREAD_MUTEX_INITIALIZER;
|
||||
@ -621,7 +653,7 @@ static void collect_measurements(long delta_ns) {
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
parse_cmdline_args(argc, argv);
|
||||
parse_cmdline_args_init(argc, argv);
|
||||
|
||||
if (env.list) {
|
||||
int i;
|
||||
@ -633,6 +665,9 @@ int main(int argc, char **argv)
|
||||
return 0;
|
||||
}
|
||||
|
||||
find_benchmark();
|
||||
parse_cmdline_args_final(argc, argv);
|
||||
|
||||
setup_benchmark();
|
||||
|
||||
setup_timer();
|
||||
|
@ -24,6 +24,7 @@ struct env {
|
||||
bool verbose;
|
||||
bool list;
|
||||
bool affinity;
|
||||
bool quiet;
|
||||
int consumer_cnt;
|
||||
int producer_cnt;
|
||||
struct cpu_set prod_cpus;
|
||||
@ -47,6 +48,7 @@ struct bench_res {
|
||||
|
||||
struct bench {
|
||||
const char *name;
|
||||
const struct argp *argp;
|
||||
void (*validate)(void);
|
||||
void (*setup)(void);
|
||||
void *(*producer_thread)(void *ctx);
|
||||
|
@ -428,6 +428,7 @@ static void *consumer(void *input)
|
||||
|
||||
const struct bench bench_bloom_lookup = {
|
||||
.name = "bloom-lookup",
|
||||
.argp = &bench_bloom_map_argp,
|
||||
.validate = validate,
|
||||
.setup = bloom_lookup_setup,
|
||||
.producer_thread = producer,
|
||||
@ -439,6 +440,7 @@ const struct bench bench_bloom_lookup = {
|
||||
|
||||
const struct bench bench_bloom_update = {
|
||||
.name = "bloom-update",
|
||||
.argp = &bench_bloom_map_argp,
|
||||
.validate = validate,
|
||||
.setup = bloom_update_setup,
|
||||
.producer_thread = producer,
|
||||
@ -450,6 +452,7 @@ const struct bench bench_bloom_update = {
|
||||
|
||||
const struct bench bench_bloom_false_positive = {
|
||||
.name = "bloom-false-positive",
|
||||
.argp = &bench_bloom_map_argp,
|
||||
.validate = validate,
|
||||
.setup = false_positive_setup,
|
||||
.producer_thread = producer,
|
||||
@ -461,6 +464,7 @@ const struct bench bench_bloom_false_positive = {
|
||||
|
||||
const struct bench bench_hashmap_without_bloom = {
|
||||
.name = "hashmap-without-bloom",
|
||||
.argp = &bench_bloom_map_argp,
|
||||
.validate = validate,
|
||||
.setup = hashmap_no_bloom_setup,
|
||||
.producer_thread = producer,
|
||||
@ -472,6 +476,7 @@ const struct bench bench_hashmap_without_bloom = {
|
||||
|
||||
const struct bench bench_hashmap_with_bloom = {
|
||||
.name = "hashmap-with-bloom",
|
||||
.argp = &bench_bloom_map_argp,
|
||||
.validate = validate,
|
||||
.setup = hashmap_with_bloom_setup,
|
||||
.producer_thread = producer,
|
||||
|
@ -1,7 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2022 Bytedance */
|
||||
|
||||
#include <argp.h>
|
||||
#include "bench.h"
|
||||
#include "bpf_hashmap_full_update_bench.skel.h"
|
||||
#include "bpf_util.h"
|
||||
@ -68,7 +67,7 @@ static void setup(void)
|
||||
bpf_map_update_elem(map_fd, &i, &i, BPF_ANY);
|
||||
}
|
||||
|
||||
void hashmap_report_final(struct bench_res res[], int res_cnt)
|
||||
static void hashmap_report_final(struct bench_res res[], int res_cnt)
|
||||
{
|
||||
unsigned int nr_cpus = bpf_num_possible_cpus();
|
||||
int i;
|
||||
@ -85,7 +84,7 @@ void hashmap_report_final(struct bench_res res[], int res_cnt)
|
||||
}
|
||||
|
||||
const struct bench bench_bpf_hashmap_full_update = {
|
||||
.name = "bpf-hashmap-ful-update",
|
||||
.name = "bpf-hashmap-full-update",
|
||||
.validate = validate,
|
||||
.setup = setup,
|
||||
.producer_thread = producer,
|
||||
|
tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c (new file, 283 lines)
@ -0,0 +1,283 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2023 Isovalent */
|
||||
|
||||
#include <sys/random.h>
|
||||
#include <argp.h>
|
||||
#include "bench.h"
|
||||
#include "bpf_hashmap_lookup.skel.h"
|
||||
#include "bpf_util.h"
|
||||
|
||||
/* BPF triggering benchmarks */
|
||||
static struct ctx {
|
||||
struct bpf_hashmap_lookup *skel;
|
||||
} ctx;
|
||||
|
||||
/* only available to kernel, so define it here */
|
||||
#define BPF_MAX_LOOPS (1<<23)
|
||||
|
||||
#define MAX_KEY_SIZE 1024 /* the size of the key map */
|
||||
|
||||
static struct {
|
||||
__u32 key_size;
|
||||
__u32 map_flags;
|
||||
__u32 max_entries;
|
||||
__u32 nr_entries;
|
||||
__u32 nr_loops;
|
||||
} args = {
|
||||
.key_size = 4,
|
||||
.map_flags = 0,
|
||||
.max_entries = 1000,
|
||||
.nr_entries = 500,
|
||||
.nr_loops = 1000000,
|
||||
};
|
||||
|
||||
enum {
|
||||
ARG_KEY_SIZE = 8001,
|
||||
ARG_MAP_FLAGS,
|
||||
ARG_MAX_ENTRIES,
|
||||
ARG_NR_ENTRIES,
|
||||
ARG_NR_LOOPS,
|
||||
};
|
||||
|
||||
static const struct argp_option opts[] = {
|
||||
{ "key_size", ARG_KEY_SIZE, "KEY_SIZE", 0,
|
||||
"The hashmap key size (max 1024)"},
|
||||
{ "map_flags", ARG_MAP_FLAGS, "MAP_FLAGS", 0,
|
||||
"The hashmap flags passed to BPF_MAP_CREATE"},
|
||||
{ "max_entries", ARG_MAX_ENTRIES, "MAX_ENTRIES", 0,
|
||||
"The hashmap max entries"},
|
||||
{ "nr_entries", ARG_NR_ENTRIES, "NR_ENTRIES", 0,
|
||||
"The number of entries to insert/lookup"},
|
||||
{ "nr_loops", ARG_NR_LOOPS, "NR_LOOPS", 0,
|
||||
"The number of loops for the benchmark"},
|
||||
{},
|
||||
};
|
||||
|
||||
static error_t parse_arg(int key, char *arg, struct argp_state *state)
|
||||
{
|
||||
long ret;
|
||||
|
||||
switch (key) {
|
||||
case ARG_KEY_SIZE:
|
||||
ret = strtol(arg, NULL, 10);
|
||||
if (ret < 1 || ret > MAX_KEY_SIZE) {
|
||||
fprintf(stderr, "invalid key_size");
|
||||
argp_usage(state);
|
||||
}
|
||||
args.key_size = ret;
|
||||
break;
|
||||
case ARG_MAP_FLAGS:
|
||||
ret = strtol(arg, NULL, 0);
|
||||
if (ret < 0 || ret > UINT_MAX) {
|
||||
fprintf(stderr, "invalid map_flags");
|
||||
argp_usage(state);
|
||||
}
|
||||
args.map_flags = ret;
|
||||
break;
|
||||
case ARG_MAX_ENTRIES:
|
||||
ret = strtol(arg, NULL, 10);
|
||||
if (ret < 1 || ret > UINT_MAX) {
|
||||
fprintf(stderr, "invalid max_entries");
|
||||
argp_usage(state);
|
||||
}
|
||||
args.max_entries = ret;
|
||||
break;
|
||||
case ARG_NR_ENTRIES:
|
||||
ret = strtol(arg, NULL, 10);
|
||||
if (ret < 1 || ret > UINT_MAX) {
|
||||
fprintf(stderr, "invalid nr_entries");
|
||||
argp_usage(state);
|
||||
}
|
||||
args.nr_entries = ret;
|
||||
break;
|
||||
case ARG_NR_LOOPS:
|
||||
ret = strtol(arg, NULL, 10);
|
||||
if (ret < 1 || ret > BPF_MAX_LOOPS) {
|
||||
fprintf(stderr, "invalid nr_loops: %ld (min=1 max=%u)\n",
|
||||
ret, BPF_MAX_LOOPS);
|
||||
argp_usage(state);
|
||||
}
|
||||
args.nr_loops = ret;
|
||||
break;
|
||||
default:
|
||||
return ARGP_ERR_UNKNOWN;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct argp bench_hashmap_lookup_argp = {
|
||||
.options = opts,
|
||||
.parser = parse_arg,
|
||||
};
|
||||
|
||||
static void validate(void)
|
||||
{
|
||||
if (env.consumer_cnt != 1) {
|
||||
fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (args.nr_entries > args.max_entries) {
|
||||
fprintf(stderr, "args.nr_entries is too big! (max %u, got %u)\n",
|
||||
args.max_entries, args.nr_entries);
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
static void *producer(void *input)
|
||||
{
|
||||
while (true) {
|
||||
/* trigger the bpf program */
|
||||
syscall(__NR_getpgid);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *consumer(void *input)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void measure(struct bench_res *res)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void patch_key(u32 i, u32 *key)
|
||||
{
|
||||
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||
*key = i + 1;
|
||||
#else
|
||||
*key = __builtin_bswap32(i + 1);
|
||||
#endif
|
||||
/* the rest of key is random */
|
||||
}
|
||||
|
||||
static void setup(void)
|
||||
{
|
||||
struct bpf_link *link;
|
||||
int map_fd;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
setup_libbpf();
|
||||
|
||||
ctx.skel = bpf_hashmap_lookup__open();
|
||||
if (!ctx.skel) {
|
||||
fprintf(stderr, "failed to open skeleton\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
bpf_map__set_max_entries(ctx.skel->maps.hash_map_bench, args.max_entries);
|
||||
bpf_map__set_key_size(ctx.skel->maps.hash_map_bench, args.key_size);
|
||||
bpf_map__set_value_size(ctx.skel->maps.hash_map_bench, 8);
|
||||
bpf_map__set_map_flags(ctx.skel->maps.hash_map_bench, args.map_flags);
|
||||
|
||||
ctx.skel->bss->nr_entries = args.nr_entries;
|
||||
ctx.skel->bss->nr_loops = args.nr_loops / args.nr_entries;
|
||||
|
||||
if (args.key_size > 4) {
|
||||
for (i = 1; i < args.key_size/4; i++)
|
||||
ctx.skel->bss->key[i] = 2654435761 * i;
|
||||
}
|
||||
|
||||
ret = bpf_hashmap_lookup__load(ctx.skel);
|
||||
if (ret) {
|
||||
bpf_hashmap_lookup__destroy(ctx.skel);
|
||||
fprintf(stderr, "failed to load map: %s", strerror(-ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* fill in the hash_map */
|
||||
map_fd = bpf_map__fd(ctx.skel->maps.hash_map_bench);
|
||||
for (u64 i = 0; i < args.nr_entries; i++) {
|
||||
patch_key(i, ctx.skel->bss->key);
|
||||
bpf_map_update_elem(map_fd, ctx.skel->bss->key, &i, BPF_ANY);
|
||||
}
|
||||
|
||||
link = bpf_program__attach(ctx.skel->progs.benchmark);
|
||||
if (!link) {
|
||||
fprintf(stderr, "failed to attach program!\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
static inline double events_from_time(u64 time)
|
||||
{
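/* Roughly args.nr_loops map lookups were executed in 'time' nanoseconds;
 * return the rate in millions of lookups per second.
 */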
if (time)
|
||||
return args.nr_loops * 1000000000llu / time / 1000000.0L;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int compute_events(u64 *times, double *events_mean, double *events_stddev, u64 *mean_time)
|
||||
{
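/* Average the (up to 32) per-sample times and the derived lookup rates, and
 * compute the sample standard deviation of the rates; returns the number of
 * valid samples found.
 */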
int i, n = 0;
|
||||
|
||||
*events_mean = 0;
|
||||
*events_stddev = 0;
|
||||
*mean_time = 0;
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
if (!times[i])
|
||||
break;
|
||||
*mean_time += times[i];
|
||||
*events_mean += events_from_time(times[i]);
|
||||
n += 1;
|
||||
}
|
||||
if (!n)
|
||||
return 0;
|
||||
|
||||
*mean_time /= n;
|
||||
*events_mean /= n;
|
||||
|
||||
if (n > 1) {
|
||||
for (i = 0; i < n; i++) {
|
||||
double events_i = *events_mean - events_from_time(times[i]);
|
||||
*events_stddev += events_i * events_i / (n - 1);
|
||||
}
|
||||
*events_stddev = sqrt(*events_stddev);
|
||||
}
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
static void hashmap_report_final(struct bench_res res[], int res_cnt)
|
||||
{
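/* Summarize the per-CPU samples collected by the BPF program in percpu_times:
 * quiet mode prints only the mean rate, otherwise print mean +/- 2*stddev in
 * millions of lookups per second along with the sample count.
 */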
unsigned int nr_cpus = bpf_num_possible_cpus();
|
||||
double events_mean, events_stddev;
|
||||
u64 mean_time;
|
||||
int i, n;
|
||||
|
||||
for (i = 0; i < nr_cpus; i++) {
|
||||
n = compute_events(ctx.skel->bss->percpu_times[i], &events_mean,
|
||||
&events_stddev, &mean_time);
|
||||
if (n == 0)
|
||||
continue;
|
||||
|
||||
if (env.quiet) {
|
||||
/* we expect only one cpu to be present */
|
||||
if (env.affinity)
|
||||
printf("%.3lf\n", events_mean);
|
||||
else
|
||||
printf("cpu%02d %.3lf\n", i, events_mean);
|
||||
} else {
|
||||
printf("cpu%02d: lookup %.3lfM ± %.3lfM events/sec"
|
||||
" (approximated from %d samples of ~%lums)\n",
|
||||
i, events_mean, 2*events_stddev,
|
||||
n, mean_time / 1000000);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const struct bench bench_bpf_hashmap_lookup = {
|
||||
.name = "bpf-hashmap-lookup",
|
||||
.argp = &bench_hashmap_lookup_argp,
|
||||
.validate = validate,
|
||||
.setup = setup,
|
||||
.producer_thread = producer,
|
||||
.consumer_thread = consumer,
|
||||
.measure = measure,
|
||||
.report_progress = NULL,
|
||||
.report_final = hashmap_report_final,
|
||||
};
|
@ -95,6 +95,7 @@ static void setup(void)
|
||||
|
||||
const struct bench bench_bpf_loop = {
|
||||
.name = "bpf-loop",
|
||||
.argp = &bench_bpf_loop_argp,
|
||||
.validate = validate,
|
||||
.setup = setup,
|
||||
.producer_thread = producer,
|
||||
|
@ -255,6 +255,7 @@ static void *producer(void *input)
|
||||
*/
|
||||
const struct bench bench_local_storage_cache_seq_get = {
|
||||
.name = "local-storage-cache-seq-get",
|
||||
.argp = &bench_local_storage_argp,
|
||||
.validate = validate,
|
||||
.setup = local_storage_cache_get_setup,
|
||||
.producer_thread = producer,
|
||||
@ -266,6 +267,7 @@ const struct bench bench_local_storage_cache_seq_get = {
|
||||
|
||||
const struct bench bench_local_storage_cache_interleaved_get = {
|
||||
.name = "local-storage-cache-int-get",
|
||||
.argp = &bench_local_storage_argp,
|
||||
.validate = validate,
|
||||
.setup = local_storage_cache_get_interleaved_setup,
|
||||
.producer_thread = producer,
|
||||
@ -277,6 +279,7 @@ const struct bench bench_local_storage_cache_interleaved_get = {
|
||||
|
||||
const struct bench bench_local_storage_cache_hashmap_control = {
|
||||
.name = "local-storage-cache-hashmap-control",
|
||||
.argp = &bench_local_storage_argp,
|
||||
.validate = validate,
|
||||
.setup = hashmap_setup,
|
||||
.producer_thread = producer,
|
||||
|
@ -12,17 +12,14 @@
|
||||
static struct {
|
||||
__u32 nr_procs;
|
||||
__u32 kthread_pid;
|
||||
bool quiet;
|
||||
} args = {
|
||||
.nr_procs = 1000,
|
||||
.kthread_pid = 0,
|
||||
.quiet = false,
|
||||
};
|
||||
|
||||
enum {
|
||||
ARG_NR_PROCS = 7000,
|
||||
ARG_KTHREAD_PID = 7001,
|
||||
ARG_QUIET = 7002,
|
||||
};
|
||||
|
||||
static const struct argp_option opts[] = {
|
||||
@ -30,8 +27,6 @@ static const struct argp_option opts[] = {
|
||||
"Set number of user processes to spin up"},
|
||||
{ "kthread_pid", ARG_KTHREAD_PID, "PID", 0,
|
||||
"Pid of rcu_tasks_trace kthread for ticks tracking"},
|
||||
{ "quiet", ARG_QUIET, "{0,1}", 0,
|
||||
"If true, don't report progress"},
|
||||
{},
|
||||
};
|
||||
|
||||
@ -56,14 +51,6 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
|
||||
}
|
||||
args.kthread_pid = ret;
|
||||
break;
|
||||
case ARG_QUIET:
|
||||
ret = strtol(arg, NULL, 10);
|
||||
if (ret < 0 || ret > 1) {
|
||||
fprintf(stderr, "invalid quiet %ld\n", ret);
|
||||
argp_usage(state);
|
||||
}
|
||||
args.quiet = ret;
|
||||
break;
|
||||
break;
|
||||
default:
|
||||
return ARGP_ERR_UNKNOWN;
|
||||
@ -230,7 +217,7 @@ static void report_progress(int iter, struct bench_res *res, long delta_ns)
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (args.quiet)
|
||||
if (env.quiet)
|
||||
return;
|
||||
|
||||
printf("Iter %d\t avg tasks_trace grace period latency\t%lf ns\n",
|
||||
@ -271,6 +258,7 @@ static void report_final(struct bench_res res[], int res_cnt)
|
||||
*/
|
||||
const struct bench bench_local_storage_tasks_trace = {
|
||||
.name = "local-storage-tasks-trace",
|
||||
.argp = &bench_local_storage_rcu_tasks_trace_argp,
|
||||
.validate = validate,
|
||||
.setup = local_storage_tasks_trace_setup,
|
||||
.producer_thread = producer,
|
||||
|
@ -518,6 +518,7 @@ static void *perfbuf_custom_consumer(void *input)
|
||||
|
||||
const struct bench bench_rb_libbpf = {
|
||||
.name = "rb-libbpf",
|
||||
.argp = &bench_ringbufs_argp,
|
||||
.validate = bufs_validate,
|
||||
.setup = ringbuf_libbpf_setup,
|
||||
.producer_thread = bufs_sample_producer,
|
||||
@ -529,6 +530,7 @@ const struct bench bench_rb_libbpf = {
|
||||
|
||||
const struct bench bench_rb_custom = {
|
||||
.name = "rb-custom",
|
||||
.argp = &bench_ringbufs_argp,
|
||||
.validate = bufs_validate,
|
||||
.setup = ringbuf_custom_setup,
|
||||
.producer_thread = bufs_sample_producer,
|
||||
@ -540,6 +542,7 @@ const struct bench bench_rb_custom = {
|
||||
|
||||
const struct bench bench_pb_libbpf = {
|
||||
.name = "pb-libbpf",
|
||||
.argp = &bench_ringbufs_argp,
|
||||
.validate = bufs_validate,
|
||||
.setup = perfbuf_libbpf_setup,
|
||||
.producer_thread = bufs_sample_producer,
|
||||
@ -551,6 +554,7 @@ const struct bench bench_pb_libbpf = {
|
||||
|
||||
const struct bench bench_pb_custom = {
|
||||
.name = "pb-custom",
|
||||
.argp = &bench_ringbufs_argp,
|
||||
.validate = bufs_validate,
|
||||
.setup = perfbuf_libbpf_setup,
|
||||
.producer_thread = bufs_sample_producer,
|
||||
|
@ -140,6 +140,7 @@ static void strncmp_measure(struct bench_res *res)
|
||||
|
||||
const struct bench bench_strncmp_no_helper = {
|
||||
.name = "strncmp-no-helper",
|
||||
.argp = &bench_strncmp_argp,
|
||||
.validate = strncmp_validate,
|
||||
.setup = strncmp_no_helper_setup,
|
||||
.producer_thread = strncmp_producer,
|
||||
@ -151,6 +152,7 @@ const struct bench bench_strncmp_no_helper = {
|
||||
|
||||
const struct bench bench_strncmp_helper = {
|
||||
.name = "strncmp-helper",
|
||||
.argp = &bench_strncmp_argp,
|
||||
.validate = strncmp_validate,
|
||||
.setup = strncmp_helper_setup,
|
||||
.producer_thread = strncmp_producer,
|
||||
|
@ -6,6 +6,6 @@ source ./benchs/run_common.sh
|
||||
set -eufo pipefail
|
||||
|
||||
nr_threads=`expr $(cat /proc/cpuinfo | grep "processor"| wc -l) - 1`
|
||||
summary=$($RUN_BENCH -p $nr_threads bpf-hashmap-ful-update)
|
||||
summary=$($RUN_BENCH -p $nr_threads bpf-hashmap-full-update)
|
||||
printf "$summary"
|
||||
printf "\n"
|
||||
|
@ -8,4 +8,4 @@ if [ -z $kthread_pid ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
./bench --nr_procs 15000 --kthread_pid $kthread_pid -d 600 --quiet 1 local-storage-tasks-trace
|
||||
./bench --nr_procs 15000 --kthread_pid $kthread_pid -d 600 --quiet local-storage-tasks-trace
|
||||
|
@ -65,4 +65,28 @@ extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ks
|
||||
*/
|
||||
extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
|
||||
|
||||
/* Description
|
||||
* Remove 'node' from rbtree with root 'root'
|
||||
* Returns
|
||||
* Pointer to the removed node, or NULL if 'root' didn't contain 'node'
|
||||
*/
|
||||
extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
|
||||
struct bpf_rb_node *node) __ksym;
|
||||
|
||||
/* Description
|
||||
* Add 'node' to rbtree with root 'root' using comparator 'less'
|
||||
* Returns
|
||||
* Nothing
|
||||
*/
|
||||
extern void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
|
||||
bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b)) __ksym;
|
||||
|
||||
/* Description
|
||||
* Return the first (leftmost) node in input tree
|
||||
* Returns
|
||||
* Pointer to the node, which is _not_ removed from the tree. If the tree
|
||||
* contains no nodes, returns NULL.
|
||||
*/
|
||||
extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;
|
||||
|
||||
#endif
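The declarations above only give the kfunc signatures; below is a minimal, hypothetical sketch of how a BPF program might drive the new rbtree API, modeled on the linked-list kfunc conventions. The struct node_data layout, the private()/__contains() map annotations, container_of(), and the bpf_obj_new()/bpf_obj_drop() allocator kfuncs are assumed from the selftests environment and are not part of this hunk.

/* Hypothetical usage sketch; not part of this patch. */
struct node_data {
	long key;
	struct bpf_rb_node node;
};

private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);

static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *na = container_of(a, struct node_data, node);
	struct node_data *nb = container_of(b, struct node_data, node);

	return na->key < nb->key;
}

SEC("tc")
long rbtree_sketch(void *ctx)
{
	struct node_data *n;
	struct bpf_rb_node *res;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 0;
	n->key = 42;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);	/* tree takes ownership of n */
	res = bpf_rbtree_first(&groot);		/* non-owning peek at leftmost node */
	if (res)
		res = bpf_rbtree_remove(&groot, res);
	bpf_spin_unlock(&glock);

	if (res)
		bpf_obj_drop(container_of(res, struct node_data, node));
	return 0;
}

Ownership mirrors the list kfuncs: once added, the node is owned by the tree, bpf_rbtree_first() only yields a non-owning reference under the lock, and a node detached with bpf_rbtree_remove() must be released with bpf_obj_drop().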
@ -18,7 +18,7 @@ static __u32 get_map_id_from_fd(int map_fd)
|
||||
uint32_t info_len = sizeof(map_info);
|
||||
int ret;
|
||||
|
||||
ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
|
||||
ret = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len);
|
||||
CHECK(ret < 0, "Finding map info failed", "error:%s\n",
|
||||
strerror(errno));
|
||||
|
||||
|
@ -195,8 +195,8 @@ static void check_bpf_link_info(const struct bpf_program *prog)
|
||||
return;
|
||||
|
||||
info_len = sizeof(info);
|
||||
err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
|
||||
ASSERT_OK(err, "bpf_obj_get_info_by_fd");
|
||||
err = bpf_link_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
|
||||
ASSERT_OK(err, "bpf_link_get_info_by_fd");
|
||||
ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");
|
||||
|
||||
bpf_link__destroy(link);
|
||||
@ -684,13 +684,13 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
|
||||
|
||||
/* setup filtering map_id in bpf program */
|
||||
map_info_len = sizeof(map_info);
|
||||
err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
|
||||
err = bpf_map_get_info_by_fd(map1_fd, &map_info, &map_info_len);
|
||||
if (CHECK(err, "get_map_info", "get map info failed: %s\n",
|
||||
strerror(errno)))
|
||||
goto free_map2;
|
||||
skel->bss->map1_id = map_info.id;
|
||||
|
||||
err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
|
||||
err = bpf_map_get_info_by_fd(map2_fd, &map_info, &map_info_len);
|
||||
if (CHECK(err, "get_map_info", "get map info failed: %s\n",
|
||||
strerror(errno)))
|
||||
goto free_map2;
|
||||
|
@ -44,7 +44,7 @@ void serial_test_bpf_obj_id(void)
|
||||
CHECK(err >= 0 || errno != ENOENT,
|
||||
"get-fd-by-notexist-link-id", "err %d errno %d\n", err, errno);
|
||||
|
||||
/* Check bpf_obj_get_info_by_fd() */
|
||||
/* Check bpf_map_get_info_by_fd() */
|
||||
bzero(zeros, sizeof(zeros));
|
||||
for (i = 0; i < nr_iters; i++) {
|
||||
now = time(NULL);
|
||||
@ -79,7 +79,7 @@ void serial_test_bpf_obj_id(void)
|
||||
/* Check getting map info */
|
||||
info_len = sizeof(struct bpf_map_info) * 2;
|
||||
bzero(&map_infos[i], info_len);
|
||||
err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
|
||||
err = bpf_map_get_info_by_fd(map_fds[i], &map_infos[i],
|
||||
&info_len);
|
||||
if (CHECK(err ||
|
||||
map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
|
||||
@ -118,8 +118,8 @@ void serial_test_bpf_obj_id(void)
|
||||
err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
|
||||
if (CHECK_FAIL(err))
|
||||
goto done;
|
||||
err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
|
||||
&info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fds[i], &prog_infos[i],
|
||||
&info_len);
|
||||
load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
|
||||
+ (prog_infos[i].load_time / nsec_per_sec);
|
||||
if (CHECK(err ||
|
||||
@ -161,8 +161,8 @@ void serial_test_bpf_obj_id(void)
|
||||
bzero(&link_infos[i], info_len);
|
||||
link_infos[i].raw_tracepoint.tp_name = ptr_to_u64(&tp_name);
|
||||
link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name);
|
||||
err = bpf_obj_get_info_by_fd(bpf_link__fd(links[i]),
|
||||
&link_infos[i], &info_len);
|
||||
err = bpf_link_get_info_by_fd(bpf_link__fd(links[i]),
|
||||
&link_infos[i], &info_len);
|
||||
if (CHECK(err ||
|
||||
link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT ||
|
||||
link_infos[i].prog_id != prog_infos[i].id ||
|
||||
@ -217,7 +217,7 @@ void serial_test_bpf_obj_id(void)
|
||||
* prog_info.map_ids = NULL
|
||||
*/
|
||||
prog_info.nr_map_ids = 1;
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len);
|
||||
if (CHECK(!err || errno != EFAULT,
|
||||
"get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
|
||||
err, errno, EFAULT))
|
||||
@ -228,7 +228,7 @@ void serial_test_bpf_obj_id(void)
|
||||
saved_map_id = *(int *)((long)prog_infos[i].map_ids);
|
||||
prog_info.map_ids = prog_infos[i].map_ids;
|
||||
prog_info.nr_map_ids = 2;
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len);
|
||||
prog_infos[i].jited_prog_insns = 0;
|
||||
prog_infos[i].xlated_prog_insns = 0;
|
||||
CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
|
||||
@ -277,7 +277,7 @@ void serial_test_bpf_obj_id(void)
|
||||
if (CHECK_FAIL(err))
|
||||
goto done;
|
||||
|
||||
err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
|
||||
err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len);
|
||||
CHECK(err || info_len != sizeof(struct bpf_map_info) ||
|
||||
memcmp(&map_info, &map_infos[i], info_len) ||
|
||||
array_value != array_magic_value,
|
||||
@ -322,7 +322,7 @@ void serial_test_bpf_obj_id(void)
|
||||
|
||||
nr_id_found++;
|
||||
|
||||
err = bpf_obj_get_info_by_fd(link_fd, &link_info, &info_len);
|
||||
err = bpf_link_get_info_by_fd(link_fd, &link_info, &info_len);
|
||||
cmp_res = memcmp(&link_info, &link_infos[i],
|
||||
offsetof(struct bpf_link_info, raw_tracepoint));
|
||||
CHECK(err || info_len != sizeof(link_info) || cmp_res,
|
||||
|
@ -4422,7 +4422,7 @@ static int test_big_btf_info(unsigned int test_num)
|
||||
info->btf = ptr_to_u64(user_btf);
|
||||
info->btf_size = raw_btf_size;
|
||||
|
||||
err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len);
|
||||
err = bpf_btf_get_info_by_fd(btf_fd, info, &info_len);
|
||||
if (CHECK(!err, "!err")) {
|
||||
err = -1;
|
||||
goto done;
|
||||
@ -4435,7 +4435,7 @@ static int test_big_btf_info(unsigned int test_num)
|
||||
* to userspace.
|
||||
*/
|
||||
info_garbage.garbage = 0;
|
||||
err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len);
|
||||
err = bpf_btf_get_info_by_fd(btf_fd, info, &info_len);
|
||||
if (CHECK(err || info_len != sizeof(*info),
|
||||
"err:%d errno:%d info_len:%u sizeof(*info):%zu",
|
||||
err, errno, info_len, sizeof(*info))) {
|
||||
@ -4499,7 +4499,7 @@ static int test_btf_id(unsigned int test_num)
|
||||
|
||||
/* Test BPF_OBJ_GET_INFO_BY_ID on btf_id */
|
||||
info_len = sizeof(info[0]);
|
||||
err = bpf_obj_get_info_by_fd(btf_fd[0], &info[0], &info_len);
|
||||
err = bpf_btf_get_info_by_fd(btf_fd[0], &info[0], &info_len);
|
||||
if (CHECK(err, "errno:%d", errno)) {
|
||||
err = -1;
|
||||
goto done;
|
||||
@ -4512,7 +4512,7 @@ static int test_btf_id(unsigned int test_num)
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
err = bpf_obj_get_info_by_fd(btf_fd[1], &info[1], &info_len);
|
||||
err = bpf_btf_get_info_by_fd(btf_fd[1], &info[1], &info_len);
|
||||
if (CHECK(err || info[0].id != info[1].id ||
|
||||
info[0].btf_size != info[1].btf_size ||
|
||||
(ret = memcmp(user_btf[0], user_btf[1], info[0].btf_size)),
|
||||
@ -4535,7 +4535,7 @@ static int test_btf_id(unsigned int test_num)
|
||||
}
|
||||
|
||||
info_len = sizeof(map_info);
|
||||
err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
|
||||
err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len);
|
||||
if (CHECK(err || map_info.btf_id != info[0].id ||
|
||||
map_info.btf_key_type_id != 1 || map_info.btf_value_type_id != 2,
|
||||
"err:%d errno:%d info.id:%u btf_id:%u btf_key_type_id:%u btf_value_type_id:%u",
|
||||
@ -4638,7 +4638,7 @@ static void do_test_get_info(unsigned int test_num)
|
||||
info.btf_size = user_btf_size;
|
||||
|
||||
ret = 0;
|
||||
err = bpf_obj_get_info_by_fd(btf_fd, &info, &info_len);
|
||||
err = bpf_btf_get_info_by_fd(btf_fd, &info, &info_len);
|
||||
if (CHECK(err || !info.id || info_len != sizeof(info) ||
|
||||
info.btf_size != raw_btf_size ||
|
||||
(ret = memcmp(raw_btf, user_btf, expected_nbytes)),
|
||||
@ -4755,7 +4755,7 @@ static void do_test_file(unsigned int test_num)
|
||||
|
||||
/* get necessary program info */
|
||||
info_len = sizeof(struct bpf_prog_info);
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
|
||||
if (CHECK(err < 0, "invalid get info (1st) errno:%d", errno)) {
|
||||
fprintf(stderr, "%s\n", btf_log_buf);
|
||||
@ -4787,7 +4787,7 @@ static void do_test_file(unsigned int test_num)
|
||||
info.func_info_rec_size = rec_size;
|
||||
info.func_info = ptr_to_u64(func_info);
|
||||
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
|
||||
if (CHECK(err < 0, "invalid get info (2nd) errno:%d", errno)) {
|
||||
fprintf(stderr, "%s\n", btf_log_buf);
|
||||
@ -6405,7 +6405,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
|
||||
|
||||
/* get necessary lens */
|
||||
info_len = sizeof(struct bpf_prog_info);
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
if (CHECK(err < 0, "invalid get info (1st) errno:%d", errno)) {
|
||||
fprintf(stderr, "%s\n", btf_log_buf);
|
||||
return -1;
|
||||
@ -6435,7 +6435,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
|
||||
info.nr_func_info = nr_func_info;
|
||||
info.func_info_rec_size = rec_size;
|
||||
info.func_info = ptr_to_u64(func_info);
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
if (CHECK(err < 0, "invalid get info (2nd) errno:%d", errno)) {
|
||||
fprintf(stderr, "%s\n", btf_log_buf);
|
||||
err = -1;
|
||||
@ -6499,7 +6499,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
|
||||
nr_jited_func_lens = nr_jited_ksyms;
|
||||
|
||||
info_len = sizeof(struct bpf_prog_info);
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
if (CHECK(err < 0, "err:%d errno:%d", err, errno)) {
|
||||
err = -1;
|
||||
goto done;
|
||||
@ -6573,7 +6573,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
|
||||
info.jited_func_lens = ptr_to_u64(jited_func_lens);
|
||||
}
|
||||
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
|
||||
/*
|
||||
* Only recheck the info.*line_info* fields.
|
||||
|
@ -14,7 +14,7 @@ static __u32 bpf_map_id(struct bpf_map *map)
|
||||
int err;
|
||||
|
||||
memset(&info, 0, info_len);
|
||||
err = bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, &info_len);
|
||||
err = bpf_map_get_info_by_fd(bpf_map__fd(map), &info, &info_len);
|
||||
if (err)
|
||||
return 0;
|
||||
return info.id;
|
||||
|
@ -8,9 +8,6 @@
|
||||
#include "cgrp_kfunc_failure.skel.h"
|
||||
#include "cgrp_kfunc_success.skel.h"
|
||||
|
||||
static size_t log_buf_sz = 1 << 20; /* 1 MB */
|
||||
static char obj_log_buf[1048576];
|
||||
|
||||
static struct cgrp_kfunc_success *open_load_cgrp_kfunc_skel(void)
|
||||
{
|
||||
struct cgrp_kfunc_success *skel;
|
||||
@ -89,65 +86,6 @@ static const char * const success_tests[] = {
|
||||
"test_cgrp_get_ancestors",
|
||||
};
|
||||
|
||||
static struct {
|
||||
const char *prog_name;
|
||||
const char *expected_err_msg;
|
||||
} failure_tests[] = {
|
||||
{"cgrp_kfunc_acquire_untrusted", "Possibly NULL pointer passed to trusted arg0"},
|
||||
{"cgrp_kfunc_acquire_fp", "arg#0 pointer type STRUCT cgroup must point"},
|
||||
{"cgrp_kfunc_acquire_unsafe_kretprobe", "reg type unsupported for arg#0 function"},
|
||||
{"cgrp_kfunc_acquire_trusted_walked", "R1 must be referenced or trusted"},
|
||||
{"cgrp_kfunc_acquire_null", "Possibly NULL pointer passed to trusted arg0"},
|
||||
{"cgrp_kfunc_acquire_unreleased", "Unreleased reference"},
|
||||
{"cgrp_kfunc_get_non_kptr_param", "arg#0 expected pointer to map value"},
|
||||
{"cgrp_kfunc_get_non_kptr_acquired", "arg#0 expected pointer to map value"},
|
||||
{"cgrp_kfunc_get_null", "arg#0 expected pointer to map value"},
|
||||
{"cgrp_kfunc_xchg_unreleased", "Unreleased reference"},
|
||||
{"cgrp_kfunc_get_unreleased", "Unreleased reference"},
|
||||
{"cgrp_kfunc_release_untrusted", "arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket"},
|
||||
{"cgrp_kfunc_release_fp", "arg#0 pointer type STRUCT cgroup must point"},
|
||||
{"cgrp_kfunc_release_null", "arg#0 is ptr_or_null_ expected ptr_ or socket"},
|
||||
{"cgrp_kfunc_release_unacquired", "release kernel function bpf_cgroup_release expects"},
|
||||
};
|
||||
|
||||
static void verify_fail(const char *prog_name, const char *expected_err_msg)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_object_open_opts, opts);
|
||||
struct cgrp_kfunc_failure *skel;
|
||||
int err, i;
|
||||
|
||||
opts.kernel_log_buf = obj_log_buf;
|
||||
opts.kernel_log_size = log_buf_sz;
|
||||
opts.kernel_log_level = 1;
|
||||
|
||||
skel = cgrp_kfunc_failure__open_opts(&opts);
|
||||
if (!ASSERT_OK_PTR(skel, "cgrp_kfunc_failure__open_opts"))
|
||||
goto cleanup;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
|
||||
struct bpf_program *prog;
|
||||
const char *curr_name = failure_tests[i].prog_name;
|
||||
|
||||
prog = bpf_object__find_program_by_name(skel->obj, curr_name);
|
||||
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
|
||||
goto cleanup;
|
||||
|
||||
bpf_program__set_autoload(prog, !strcmp(curr_name, prog_name));
|
||||
}
|
||||
|
||||
err = cgrp_kfunc_failure__load(skel);
|
||||
if (!ASSERT_ERR(err, "unexpected load success"))
|
||||
goto cleanup;
|
||||
|
||||
if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) {
|
||||
fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg);
|
||||
fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
|
||||
}
|
||||
|
||||
cleanup:
|
||||
cgrp_kfunc_failure__destroy(skel);
|
||||
}
|
||||
|
||||
void test_cgrp_kfunc(void)
|
||||
{
|
||||
int i, err;
|
||||
@ -163,12 +101,7 @@ void test_cgrp_kfunc(void)
|
||||
run_success_test(success_tests[i]);
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
|
||||
if (!test__start_subtest(failure_tests[i].prog_name))
|
||||
continue;
|
||||
|
||||
verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg);
|
||||
}
|
||||
RUN_TESTS(cgrp_kfunc_failure);
|
||||
|
||||
cleanup:
|
||||
cleanup_cgroup_environment();
|
||||
|
@ -59,7 +59,7 @@ static void test_check_mtu_xdp_attach(void)
|
||||
|
||||
memset(&link_info, 0, sizeof(link_info));
|
||||
fd = bpf_link__fd(link);
|
||||
err = bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
|
||||
err = bpf_link_get_info_by_fd(fd, &link_info, &link_info_len);
|
||||
if (CHECK(err, "link_info", "failed: %d\n", err))
|
||||
goto out;
|
||||
|
||||
|
@ -5,14 +5,10 @@
|
||||
#include "dynptr_fail.skel.h"
|
||||
#include "dynptr_success.skel.h"
|
||||
|
||||
static struct {
|
||||
const char *prog_name;
|
||||
const char *expected_err_msg;
|
||||
} dynptr_tests[] = {
|
||||
/* success cases */
|
||||
{"test_read_write", NULL},
|
||||
{"test_data_slice", NULL},
|
||||
{"test_ringbuf", NULL},
|
||||
static const char * const success_tests[] = {
|
||||
"test_read_write",
|
||||
"test_data_slice",
|
||||
"test_ringbuf",
|
||||
};
|
||||
|
||||
static void verify_success(const char *prog_name)
|
||||
@ -53,11 +49,11 @@ void test_dynptr(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dynptr_tests); i++) {
|
||||
if (!test__start_subtest(dynptr_tests[i].prog_name))
|
||||
for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
|
||||
if (!test__start_subtest(success_tests[i]))
|
||||
continue;
|
||||
|
||||
verify_success(dynptr_tests[i].prog_name);
|
||||
verify_success(success_tests[i]);
|
||||
}
|
||||
|
||||
RUN_TESTS(dynptr_fail);
|
||||
|
@ -28,7 +28,7 @@ void test_enable_stats(void)
|
||||
|
||||
prog_fd = bpf_program__fd(skel->progs.test_enable_stats);
|
||||
memset(&info, 0, info_len);
|
||||
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
|
||||
if (CHECK(err, "get_prog_info",
|
||||
"failed to get bpf_prog_info for fd %d\n", prog_fd))
|
||||
goto cleanup;
|
||||
|
@ -79,7 +79,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
|
||||
return;
|
||||
|
||||
info_len = sizeof(prog_info);
|
||||
err = bpf_obj_get_info_by_fd(tgt_fd, &prog_info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(tgt_fd, &prog_info, &info_len);
|
||||
if (!ASSERT_OK(err, "tgt_fd_get_info"))
|
||||
goto close_prog;
|
||||
|
||||
@ -136,8 +136,8 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
|
||||
|
||||
info_len = sizeof(link_info);
|
||||
memset(&link_info, 0, sizeof(link_info));
|
||||
err = bpf_obj_get_info_by_fd(bpf_link__fd(link[i]),
|
||||
&link_info, &info_len);
|
||||
err = bpf_link_get_info_by_fd(bpf_link__fd(link[i]),
|
||||
&link_info, &info_len);
|
||||
ASSERT_OK(err, "link_fd_get_info");
|
||||
ASSERT_EQ(link_info.tracing.attach_type,
|
||||
bpf_program__expected_attach_type(prog[i]),
|
||||
@ -417,7 +417,7 @@ static int find_prog_btf_id(const char *name, __u32 attach_prog_fd)
|
||||
struct btf *btf;
|
||||
int ret;
|
||||
|
||||
ret = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len);
|
||||
ret = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -483,12 +483,12 @@ static void test_fentry_to_cgroup_bpf(void)
|
||||
if (!ASSERT_GE(fentry_fd, 0, "load_fentry"))
|
||||
goto cleanup;
|
||||
|
||||
/* Make sure bpf_obj_get_info_by_fd works correctly when attaching
|
||||
/* Make sure bpf_prog_get_info_by_fd works correctly when attaching
|
||||
* to another BPF program.
|
||||
*/
|
||||
|
||||
ASSERT_OK(bpf_obj_get_info_by_fd(fentry_fd, &info, &info_len),
|
||||
"bpf_obj_get_info_by_fd");
|
||||
ASSERT_OK(bpf_prog_get_info_by_fd(fentry_fd, &info, &info_len),
|
||||
"bpf_prog_get_info_by_fd");
|
||||
|
||||
ASSERT_EQ(info.btf_id, 0, "info.btf_id");
|
||||
ASSERT_EQ(info.attach_btf_id, btf_id, "info.attach_btf_id");
|
||||
|
tools/testing/selftests/bpf/prog_tests/fib_lookup.c (new file, 187 lines)
@ -0,0 +1,187 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <net/if.h>
|
||||
|
||||
#include "test_progs.h"
|
||||
#include "network_helpers.h"
|
||||
#include "fib_lookup.skel.h"
|
||||
|
||||
#define SYS(fmt, ...) \
|
||||
({ \
|
||||
char cmd[1024]; \
|
||||
snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \
|
||||
if (!ASSERT_OK(system(cmd), cmd)) \
|
||||
goto fail; \
|
||||
})
|
||||
|
||||
#define NS_TEST "fib_lookup_ns"
|
||||
#define IPV6_IFACE_ADDR "face::face"
|
||||
#define IPV6_NUD_FAILED_ADDR "face::1"
|
||||
#define IPV6_NUD_STALE_ADDR "face::2"
|
||||
#define IPV4_IFACE_ADDR "10.0.0.254"
|
||||
#define IPV4_NUD_FAILED_ADDR "10.0.0.1"
|
||||
#define IPV4_NUD_STALE_ADDR "10.0.0.2"
|
||||
#define DMAC "11:11:11:11:11:11"
|
||||
#define DMAC_INIT { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, }
|
||||
|
||||
struct fib_lookup_test {
|
||||
const char *desc;
|
||||
const char *daddr;
|
||||
int expected_ret;
|
||||
int lookup_flags;
|
||||
__u8 dmac[6];
|
||||
};
|
||||
|
||||
static const struct fib_lookup_test tests[] = {
|
||||
{ .desc = "IPv6 failed neigh",
|
||||
.daddr = IPV6_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_NO_NEIGH, },
|
||||
{ .desc = "IPv6 stale neigh",
|
||||
.daddr = IPV6_NUD_STALE_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
|
||||
.dmac = DMAC_INIT, },
|
||||
{ .desc = "IPv6 skip neigh",
|
||||
.daddr = IPV6_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
|
||||
.lookup_flags = BPF_FIB_LOOKUP_SKIP_NEIGH, },
|
||||
{ .desc = "IPv4 failed neigh",
|
||||
.daddr = IPV4_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_NO_NEIGH, },
|
||||
{ .desc = "IPv4 stale neigh",
|
||||
.daddr = IPV4_NUD_STALE_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
|
||||
.dmac = DMAC_INIT, },
|
||||
{ .desc = "IPv4 skip neigh",
|
||||
.daddr = IPV4_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS,
|
||||
.lookup_flags = BPF_FIB_LOOKUP_SKIP_NEIGH, },
|
||||
};
|
||||
|
||||
static int ifindex;
|
||||
|
||||
static int setup_netns(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
SYS("ip link add veth1 type veth peer name veth2");
|
||||
SYS("ip link set dev veth1 up");
|
||||
|
||||
SYS("ip addr add %s/64 dev veth1 nodad", IPV6_IFACE_ADDR);
|
||||
SYS("ip neigh add %s dev veth1 nud failed", IPV6_NUD_FAILED_ADDR);
|
||||
SYS("ip neigh add %s dev veth1 lladdr %s nud stale", IPV6_NUD_STALE_ADDR, DMAC);
|
||||
|
||||
SYS("ip addr add %s/24 dev veth1 nodad", IPV4_IFACE_ADDR);
|
||||
SYS("ip neigh add %s dev veth1 nud failed", IPV4_NUD_FAILED_ADDR);
|
||||
SYS("ip neigh add %s dev veth1 lladdr %s nud stale", IPV4_NUD_STALE_ADDR, DMAC);
|
||||
|
||||
err = write_sysctl("/proc/sys/net/ipv4/conf/veth1/forwarding", "1");
|
||||
if (!ASSERT_OK(err, "write_sysctl(net.ipv4.conf.veth1.forwarding)"))
|
||||
goto fail;
|
||||
|
||||
err = write_sysctl("/proc/sys/net/ipv6/conf/veth1/forwarding", "1");
|
||||
if (!ASSERT_OK(err, "write_sysctl(net.ipv6.conf.veth1.forwarding)"))
|
||||
goto fail;
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int set_lookup_params(struct bpf_fib_lookup *params, const char *daddr)
|
||||
{
|
||||
int ret;
|
||||
|
||||
memset(params, 0, sizeof(*params));
|
||||
|
||||
params->l4_protocol = IPPROTO_TCP;
|
||||
params->ifindex = ifindex;
|
||||
|
||||
if (inet_pton(AF_INET6, daddr, params->ipv6_dst) == 1) {
|
||||
params->family = AF_INET6;
|
||||
ret = inet_pton(AF_INET6, IPV6_IFACE_ADDR, params->ipv6_src);
|
||||
if (!ASSERT_EQ(ret, 1, "inet_pton(IPV6_IFACE_ADDR)"))
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = inet_pton(AF_INET, daddr, &params->ipv4_dst);
|
||||
if (!ASSERT_EQ(ret, 1, "convert IP[46] address"))
|
||||
return -1;
|
||||
params->family = AF_INET;
|
||||
ret = inet_pton(AF_INET, IPV4_IFACE_ADDR, &params->ipv4_src);
|
||||
if (!ASSERT_EQ(ret, 1, "inet_pton(IPV4_IFACE_ADDR)"))
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mac_str(char *b, const __u8 *mac)
|
||||
{
|
||||
sprintf(b, "%02X:%02X:%02X:%02X:%02X:%02X",
|
||||
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
|
||||
}
|
||||
|
||||
void test_fib_lookup(void)
|
||||
{
|
||||
struct bpf_fib_lookup *fib_params;
|
||||
struct nstoken *nstoken = NULL;
|
||||
struct __sk_buff skb = { };
|
||||
struct fib_lookup *skel;
|
||||
int prog_fd, err, ret, i;
|
||||
|
||||
/* The test does not use the skb->data, so
|
||||
* use pkt_v6 for both v6 and v4 test.
|
||||
*/
|
||||
LIBBPF_OPTS(bpf_test_run_opts, run_opts,
|
||||
.data_in = &pkt_v6,
|
||||
.data_size_in = sizeof(pkt_v6),
|
||||
.ctx_in = &skb,
|
||||
.ctx_size_in = sizeof(skb),
|
||||
);
|
||||
|
||||
skel = fib_lookup__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "skel open_and_load"))
|
||||
return;
|
||||
prog_fd = bpf_program__fd(skel->progs.fib_lookup);
|
||||
|
||||
SYS("ip netns add %s", NS_TEST);
|
||||
|
||||
nstoken = open_netns(NS_TEST);
|
||||
if (!ASSERT_OK_PTR(nstoken, "open_netns"))
|
||||
goto fail;
|
||||
|
||||
if (setup_netns())
|
||||
goto fail;
|
||||
|
||||
ifindex = if_nametoindex("veth1");
|
||||
skb.ifindex = ifindex;
|
||||
fib_params = &skel->bss->fib_params;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(tests); i++) {
|
||||
printf("Testing %s\n", tests[i].desc);
|
||||
|
||||
if (set_lookup_params(fib_params, tests[i].daddr))
|
||||
continue;
|
||||
skel->bss->fib_lookup_ret = -1;
|
||||
skel->bss->lookup_flags = BPF_FIB_LOOKUP_OUTPUT |
|
||||
tests[i].lookup_flags;
|
||||
|
||||
err = bpf_prog_test_run_opts(prog_fd, &run_opts);
|
||||
if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
|
||||
continue;
|
||||
|
||||
ASSERT_EQ(tests[i].expected_ret, skel->bss->fib_lookup_ret,
|
||||
"fib_lookup_ret");
|
||||
|
||||
ret = memcmp(tests[i].dmac, fib_params->dmac, sizeof(tests[i].dmac));
|
||||
if (!ASSERT_EQ(ret, 0, "dmac not match")) {
|
||||
char expected[18], actual[18];
|
||||
|
||||
mac_str(expected, tests[i].dmac);
|
||||
mac_str(actual, fib_params->dmac);
|
||||
printf("dmac expected %s actual %s\n", expected, actual);
|
||||
}
|
||||
}
|
||||
|
||||
fail:
|
||||
if (nstoken)
|
||||
close_netns(nstoken);
|
||||
system("ip netns del " NS_TEST " &> /dev/null");
|
||||
fib_lookup__destroy(skel);
|
||||
}
|
@ -60,9 +60,9 @@ static __u32 query_prog_id(int prog)
|
||||
__u32 info_len = sizeof(info);
|
||||
int err;
|
||||
|
||||
err = bpf_obj_get_info_by_fd(prog, &info, &info_len);
|
||||
err = bpf_prog_get_info_by_fd(prog, &info, &info_len);
|
||||
if (CHECK_FAIL(err || info_len != sizeof(info))) {
|
||||
perror("bpf_obj_get_info_by_fd");
|
||||
perror("bpf_prog_get_info_by_fd");
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -497,7 +497,7 @@ static void test_link_get_info(int netns, int prog1, int prog2)
|
||||
}
|
||||
|
||||
info_len = sizeof(info);
|
||||
err = bpf_obj_get_info_by_fd(link, &info, &info_len);
|
||||
err = bpf_link_get_info_by_fd(link, &info, &info_len);
|
||||
if (CHECK_FAIL(err)) {
|
||||
perror("bpf_obj_get_info");
|
||||
goto out_unlink;
|
||||
@ -521,7 +521,7 @@ static void test_link_get_info(int netns, int prog1, int prog2)
|
||||
|
||||
link_id = info.id;
|
||||
info_len = sizeof(info);
|
||||
err = bpf_obj_get_info_by_fd(link, &info, &info_len);
|
||||
err = bpf_link_get_info_by_fd(link, &info, &info_len);
|
||||
if (CHECK_FAIL(err)) {
|
||||
perror("bpf_obj_get_info");
|
||||
goto out_unlink;
|
||||
@ -546,7 +546,7 @@ static void test_link_get_info(int netns, int prog1, int prog2)
|
||||
netns = -1;
|
||||
|
||||
info_len = sizeof(info);
|
||||
err = bpf_obj_get_info_by_fd(link, &info, &info_len);
|
||||
err = bpf_link_get_info_by_fd(link, &info, &info_len);
|
||||
if (CHECK_FAIL(err)) {
|
||||
perror("bpf_obj_get_info");
|
||||
goto out_unlink;
|
||||
|
tools/testing/selftests/bpf/prog_tests/htab_reuse.c (new file, 101 lines)
@ -0,0 +1,101 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
|
||||
#define _GNU_SOURCE
|
||||
#include <sched.h>
|
||||
#include <stdbool.h>
|
||||
#include <test_progs.h>
|
||||
#include "htab_reuse.skel.h"
|
||||
|
||||
struct htab_op_ctx {
|
||||
int fd;
|
||||
int loop;
|
||||
bool stop;
|
||||
};
|
||||
|
||||
struct htab_val {
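/* Userspace mirror of the BPF map value: 'lock' overlays the struct
 * bpf_spin_lock that BPF_F_LOCK operates on, 'data' is the payload.
 */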
unsigned int lock;
|
||||
unsigned int data;
|
||||
};
|
||||
|
||||
static void *htab_lookup_fn(void *arg)
|
||||
{
|
||||
struct htab_op_ctx *ctx = arg;
|
||||
int i = 0;
|
||||
|
||||
while (i++ < ctx->loop && !ctx->stop) {
|
||||
struct htab_val value;
|
||||
unsigned int key;
|
||||
|
||||
/* Use BPF_F_LOCK to use spin-lock in map value. */
|
||||
key = 7;
|
||||
bpf_map_lookup_elem_flags(ctx->fd, &key, &value, BPF_F_LOCK);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *htab_update_fn(void *arg)
|
||||
{
|
||||
struct htab_op_ctx *ctx = arg;
|
||||
int i = 0;
|
||||
|
||||
while (i++ < ctx->loop && !ctx->stop) {
|
||||
struct htab_val value;
|
||||
unsigned int key;
|
||||
|
||||
key = 7;
|
||||
value.lock = 0;
|
||||
value.data = key;
|
||||
bpf_map_update_elem(ctx->fd, &key, &value, BPF_F_LOCK);
|
||||
bpf_map_delete_elem(ctx->fd, &key);
|
||||
|
||||
key = 24;
|
||||
value.lock = 0;
|
||||
value.data = key;
|
||||
bpf_map_update_elem(ctx->fd, &key, &value, BPF_F_LOCK);
|
||||
bpf_map_delete_elem(ctx->fd, &key);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void test_htab_reuse(void)
|
||||
{
|
||||
unsigned int i, wr_nr = 1, rd_nr = 4;
|
||||
pthread_t tids[wr_nr + rd_nr];
|
||||
struct htab_reuse *skel;
|
||||
struct htab_op_ctx ctx;
|
||||
int err;
|
||||
|
||||
skel = htab_reuse__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "htab_reuse__open_and_load"))
|
||||
return;
|
||||
|
||||
ctx.fd = bpf_map__fd(skel->maps.htab);
|
||||
ctx.loop = 500;
|
||||
ctx.stop = false;
|
||||
|
||||
memset(tids, 0, sizeof(tids));
|
||||
for (i = 0; i < wr_nr; i++) {
|
||||
err = pthread_create(&tids[i], NULL, htab_update_fn, &ctx);
|
||||
if (!ASSERT_OK(err, "pthread_create")) {
|
||||
ctx.stop = true;
|
||||
goto reap;
|
||||
}
|
||||
}
|
||||
for (i = 0; i < rd_nr; i++) {
|
||||
err = pthread_create(&tids[i + wr_nr], NULL, htab_lookup_fn, &ctx);
|
||||
if (!ASSERT_OK(err, "pthread_create")) {
|
||||
ctx.stop = true;
|
||||
goto reap;
|
||||
}
|
||||
}
|
||||
|
||||
reap:
|
||||
for (i = 0; i < wr_nr + rd_nr; i++) {
|
||||
if (!tids[i])
|
||||
continue;
|
||||
pthread_join(tids[i], NULL);
|
||||
}
|
||||
htab_reuse__destroy(skel);
|
||||
}
|
@ -10,17 +10,11 @@
|
||||
#include <test_progs.h>
|
||||
#include "test_kfunc_dynptr_param.skel.h"
|
||||
|
||||
static size_t log_buf_sz = 1048576; /* 1 MB */
|
||||
static char obj_log_buf[1048576];
|
||||
|
||||
static struct {
|
||||
const char *prog_name;
|
||||
const char *expected_verifier_err_msg;
|
||||
int expected_runtime_err;
|
||||
} kfunc_dynptr_tests[] = {
|
||||
{"not_valid_dynptr", "cannot pass in dynptr at an offset=-8", 0},
|
||||
{"not_ptr_to_stack", "arg#0 expected pointer to stack or dynptr_ptr", 0},
|
||||
{"dynptr_data_null", NULL, -EBADMSG},
|
||||
{"dynptr_data_null", -EBADMSG},
|
||||
};
|
||||
|
||||
static bool kfunc_not_supported;
|
@@ -38,29 +32,15 @@ static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt,
	return 0;
}

static void verify_fail(const char *prog_name, const char *expected_err_msg)
static bool has_pkcs7_kfunc_support(void)
{
	struct test_kfunc_dynptr_param *skel;
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	libbpf_print_fn_t old_print_cb;
	struct bpf_program *prog;
	int err;

	opts.kernel_log_buf = obj_log_buf;
	opts.kernel_log_size = log_buf_sz;
	opts.kernel_log_level = 1;

	skel = test_kfunc_dynptr_param__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "test_kfunc_dynptr_param__open_opts"))
		goto cleanup;

	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
		goto cleanup;

	bpf_program__set_autoload(prog, true);

	bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize());
	skel = test_kfunc_dynptr_param__open();
	if (!ASSERT_OK_PTR(skel, "test_kfunc_dynptr_param__open"))
		return false;

	kfunc_not_supported = false;

@@ -72,26 +52,18 @@ static void verify_fail(const char *prog_name, const char *expected_err_msg)
		fprintf(stderr,
			"%s:SKIP:bpf_verify_pkcs7_signature() kfunc not supported\n",
			__func__);
		test__skip();
		goto cleanup;
		test_kfunc_dynptr_param__destroy(skel);
		return false;
	}

	if (!ASSERT_ERR(err, "unexpected load success"))
		goto cleanup;

	if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) {
		fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg);
		fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
	}

cleanup:
	test_kfunc_dynptr_param__destroy(skel);

	return true;
}

static void verify_success(const char *prog_name, int expected_runtime_err)
{
	struct test_kfunc_dynptr_param *skel;
	libbpf_print_fn_t old_print_cb;
	struct bpf_program *prog;
	struct bpf_link *link;
	__u32 next_id;
@@ -103,21 +75,7 @@ static void verify_success(const char *prog_name, int expected_runtime_err)

	skel->bss->pid = getpid();

	bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize());

	kfunc_not_supported = false;

	old_print_cb = libbpf_set_print(libbpf_print_cb);
	err = test_kfunc_dynptr_param__load(skel);
	libbpf_set_print(old_print_cb);

	if (err < 0 && kfunc_not_supported) {
		fprintf(stderr,
			"%s:SKIP:bpf_verify_pkcs7_signature() kfunc not supported\n",
			__func__);
		test__skip();
		goto cleanup;
	}

	if (!ASSERT_OK(err, "test_kfunc_dynptr_param__load"))
		goto cleanup;
@@ -147,15 +105,15 @@ void test_kfunc_dynptr_param(void)
{
	int i;

	if (!has_pkcs7_kfunc_support())
		return;

	for (i = 0; i < ARRAY_SIZE(kfunc_dynptr_tests); i++) {
		if (!test__start_subtest(kfunc_dynptr_tests[i].prog_name))
			continue;

		if (kfunc_dynptr_tests[i].expected_verifier_err_msg)
			verify_fail(kfunc_dynptr_tests[i].prog_name,
				    kfunc_dynptr_tests[i].expected_verifier_err_msg);
		else
			verify_success(kfunc_dynptr_tests[i].prog_name,
				       kfunc_dynptr_tests[i].expected_runtime_err);
		verify_success(kfunc_dynptr_tests[i].prog_name,
			       kfunc_dynptr_tests[i].expected_runtime_err);
	}
	RUN_TESTS(test_kfunc_dynptr_param);
}
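The hunks above move this test onto the selftests test_loader flow: the open-coded verify_fail() path that grepped the verifier log is dropped, and expected verifier failures are instead annotated on the BPF programs and driven by RUN_TESTS(). A minimal sketch of that pattern follows; the file and program names are hypothetical, and the annotations are the __failure/__msg helpers from tools/testing/selftests/bpf/progs/bpf_misc.h.

/* progs/demo_fail.c (hypothetical): the expected verifier outcome is declared
 * next to the program itself.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

char _license[] SEC("license") = "GPL";

SEC("?tc")
__failure __msg("invalid access to packet")
int demo_unchecked_pkt_read(struct __sk_buff *ctx)
{
	int *p = (void *)(long)ctx->data;

	/* no bounds check against ctx->data_end, so the verifier must reject
	 * this load; __msg() asks the loader to look for that message in the
	 * verifier log
	 */
	return *p;
}

/* prog_tests/demo.c (hypothetical): the runner collapses to one macro call
 * that loads every program in the skeleton and checks its annotations.
 */
#include <test_progs.h>
#include "demo_fail.skel.h"

void test_demo_fail(void)
{
	RUN_TESTS(demo_fail);
}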
@@ -29,9 +29,9 @@ void test_libbpf_get_fd_by_id_opts(void)
	if (!ASSERT_OK(ret, "test_libbpf_get_fd_by_id_opts__attach"))
		goto close_prog;

	ret = bpf_obj_get_info_by_fd(bpf_map__fd(skel->maps.data_input),
	ret = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.data_input),
				     &info_m, &len);
	if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd"))
	if (!ASSERT_OK(ret, "bpf_map_get_info_by_fd"))
		goto close_prog;

	fd = bpf_map_get_fd_by_id(info_m.id);
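This hunk, and the similar ones below, are mechanical conversions to the typed wrappers introduced earlier in this series ("libbpf: Introduce bpf_{btf,link,map,prog}_get_info_by_fd()"). A small stand-alone user-space sketch of the new calls, not taken from this diff:

/* Hypothetical example; assumes a libbpf that already ships the typed
 * getters added by this series.
 */
#include <stdio.h>
#include <bpf/bpf.h>

static int print_map_info(int map_fd)
{
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	int err;

	/* same BPF_OBJ_GET_INFO_BY_FD syscall command as the generic
	 * bpf_obj_get_info_by_fd(), but the prototype now states that a map
	 * fd and a struct bpf_map_info are expected
	 */
	err = bpf_map_get_info_by_fd(map_fd, &info, &len);
	if (err)
		return err;

	printf("map id=%u type=%u name=%s max_entries=%u\n",
	       info.id, info.type, info.name, info.max_entries);
	return 0;
}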
@@ -58,12 +58,12 @@ static struct {
	TEST(inner_map, pop_front)
	TEST(inner_map, pop_back)
#undef TEST
	{ "map_compat_kprobe", "tracing progs cannot use bpf_list_head yet" },
	{ "map_compat_kretprobe", "tracing progs cannot use bpf_list_head yet" },
	{ "map_compat_tp", "tracing progs cannot use bpf_list_head yet" },
	{ "map_compat_perf", "tracing progs cannot use bpf_list_head yet" },
	{ "map_compat_raw_tp", "tracing progs cannot use bpf_list_head yet" },
	{ "map_compat_raw_tp_w", "tracing progs cannot use bpf_list_head yet" },
	{ "map_compat_kprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_kretprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_perf", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_raw_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_raw_tp_w", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" },
	{ "obj_new_no_composite", "bpf_obj_new type ID argument must be of a struct" },
	{ "obj_new_no_struct", "bpf_obj_new type ID argument must be of a struct" },
@@ -78,8 +78,6 @@ static struct {
	{ "direct_write_head", "direct access to bpf_list_head is disallowed" },
	{ "direct_read_node", "direct access to bpf_list_node is disallowed" },
	{ "direct_write_node", "direct access to bpf_list_node is disallowed" },
	{ "write_after_push_front", "only read is supported" },
	{ "write_after_push_back", "only read is supported" },
	{ "use_after_unlock_push_front", "invalid mem access 'scalar'" },
	{ "use_after_unlock_push_back", "invalid mem access 'scalar'" },
	{ "double_push_front", "arg#1 expected pointer to allocated object" },
@@ -717,6 +715,43 @@ static void test_btf(void)
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: list_node and rb_node in same struct")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;

		id = btf__add_struct(btf, "bpf_rb_node", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct bpf_rb_node"))
			break;
		id = btf__add_struct(btf, "bar", 40);
		if (!ASSERT_EQ(id, 6, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "c", 5, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;

		id = btf__add_struct(btf, "foo", 20);
		if (!ASSERT_EQ(id, 7, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:a", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:bar:a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}
}

void test_linked_list(void)
@@ -47,7 +47,8 @@ static __u32 query_prog_cnt(int cgroup_fd, const char *attach_func)

		fd = bpf_prog_get_fd_by_id(p.prog_ids[i]);
		ASSERT_GE(fd, 0, "prog_get_fd_by_id");
		ASSERT_OK(bpf_obj_get_info_by_fd(fd, &info, &info_len), "prog_info_by_fd");
		ASSERT_OK(bpf_prog_get_info_by_fd(fd, &info, &info_len),
			  "prog_info_by_fd");
		close(fd);

		if (info.attach_btf_id ==
@@ -16,7 +16,7 @@ static int duration;
static int prog_holds_map(int prog_fd, int map_fd)
{
	struct bpf_prog_info prog_info = {};
	struct bpf_prog_info map_info = {};
	struct bpf_map_info map_info = {};
	__u32 prog_info_len;
	__u32 map_info_len;
	__u32 *map_ids;
@@ -25,12 +25,12 @@ static int prog_holds_map(int prog_fd, int map_fd)
	int i;

	map_info_len = sizeof(map_info);
	ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
	ret = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
	if (ret)
		return -errno;

	prog_info_len = sizeof(prog_info);
	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
	ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
	if (ret)
		return -errno;

@@ -44,7 +44,7 @@ static int prog_holds_map(int prog_fd, int map_fd)
	prog_info.map_ids = ptr_to_u64(map_ids);
	prog_info_len = sizeof(prog_info);

	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
	ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
	if (ret) {
		ret = -errno;
		goto free_map_ids;
@@ -488,7 +488,7 @@ static void run_test(struct migrate_reuseport_test_case *test_case,
		goto close_servers;
	}

	/* Tie requests to the first four listners */
	/* Tie requests to the first four listeners */
	err = start_clients(test_case);
	if (!ASSERT_OK(err, "start_clients"))
		goto close_clients;
@@ -64,7 +64,7 @@ void test_mmap(void)

	/* get map's ID */
	memset(&map_info, 0, map_info_sz);
	err = bpf_obj_get_info_by_fd(data_map_fd, &map_info, &map_info_sz);
	err = bpf_map_get_info_by_fd(data_map_fd, &map_info, &map_info_sz);
	if (CHECK(err, "map_get_info", "failed %d\n", errno))
		goto cleanup;
	data_map_id = map_info.id;
@@ -54,7 +54,7 @@ void serial_test_perf_link(void)
		goto cleanup;

	memset(&info, 0, sizeof(info));
	err = bpf_obj_get_info_by_fd(link_fd, &info, &info_len);
	err = bpf_link_get_info_by_fd(link_fd, &info, &info_len);
	if (!ASSERT_OK(err, "link_get_info"))
		goto cleanup;

@@ -18,7 +18,7 @@ __u32 get_map_id(struct bpf_object *obj, const char *name)
	if (CHECK(!map, "find map", "NULL map"))
		return 0;

	err = bpf_obj_get_info_by_fd(bpf_map__fd(map),
	err = bpf_map_get_info_by_fd(bpf_map__fd(map),
				     &map_info, &map_info_len);
	CHECK(err, "get map info", "err %d errno %d", err, errno);
	return map_info.id;
@@ -12,7 +12,7 @@ static void check_run_cnt(int prog_fd, __u64 run_cnt)
	__u32 info_len = sizeof(info);
	int err;

	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	if (CHECK(err, "get_prog_info", "failed to get bpf_prog_info for fd %d\n", prog_fd))
		return;

117	tools/testing/selftests/bpf/prog_tests/rbtree.c	Normal file
@@ -0,0 +1,117 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#include <test_progs.h>
#include <network_helpers.h>

#include "rbtree.skel.h"
#include "rbtree_fail.skel.h"
#include "rbtree_btf_fail__wrong_node_type.skel.h"
#include "rbtree_btf_fail__add_wrong_type.skel.h"

static void test_rbtree_add_nodes(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct rbtree *skel;
	int ret;

	skel = rbtree__open_and_load();
	if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
		return;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes), &opts);
	ASSERT_OK(ret, "rbtree_add_nodes run");
	ASSERT_OK(opts.retval, "rbtree_add_nodes retval");
	ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes less_callback_ran");

	rbtree__destroy(skel);
}

static void test_rbtree_add_and_remove(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct rbtree *skel;
	int ret;

	skel = rbtree__open_and_load();
	if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
		return;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove), &opts);
	ASSERT_OK(ret, "rbtree_add_and_remove");
	ASSERT_OK(opts.retval, "rbtree_add_and_remove retval");
	ASSERT_EQ(skel->data->removed_key, 5, "rbtree_add_and_remove first removed key");

	rbtree__destroy(skel);
}

static void test_rbtree_first_and_remove(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct rbtree *skel;
	int ret;

	skel = rbtree__open_and_load();
	if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
		return;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_first_and_remove), &opts);
	ASSERT_OK(ret, "rbtree_first_and_remove");
	ASSERT_OK(opts.retval, "rbtree_first_and_remove retval");
	ASSERT_EQ(skel->data->first_data[0], 2, "rbtree_first_and_remove first rbtree_first()");
	ASSERT_EQ(skel->data->removed_key, 1, "rbtree_first_and_remove first removed key");
	ASSERT_EQ(skel->data->first_data[1], 4, "rbtree_first_and_remove second rbtree_first()");

	rbtree__destroy(skel);
}

void test_rbtree_success(void)
{
	if (test__start_subtest("rbtree_add_nodes"))
		test_rbtree_add_nodes();
	if (test__start_subtest("rbtree_add_and_remove"))
		test_rbtree_add_and_remove();
	if (test__start_subtest("rbtree_first_and_remove"))
		test_rbtree_first_and_remove();
}

#define BTF_FAIL_TEST(suffix)									  \
void test_rbtree_btf_fail__##suffix(void)							  \
{												  \
	struct rbtree_btf_fail__##suffix *skel;							  \
												  \
	skel = rbtree_btf_fail__##suffix##__open_and_load();					  \
	if (!ASSERT_ERR_PTR(skel,								  \
			    "rbtree_btf_fail__" #suffix "__open_and_load unexpected success"))	  \
		rbtree_btf_fail__##suffix##__destroy(skel);					  \
}

#define RUN_BTF_FAIL_TEST(suffix)				\
	if (test__start_subtest("rbtree_btf_fail__" #suffix))	\
		test_rbtree_btf_fail__##suffix();

BTF_FAIL_TEST(wrong_node_type);
BTF_FAIL_TEST(add_wrong_type);

void test_rbtree_btf_fail(void)
{
	RUN_BTF_FAIL_TEST(wrong_node_type);
	RUN_BTF_FAIL_TEST(add_wrong_type);
}

void test_rbtree_fail(void)
{
	RUN_TESTS(rbtree_fail);
}
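The new rbtree.c above is only the user-space runner; the BPF programs it exercises live in progs/rbtree.c, which is not shown in this truncated diff. A rough, illustrative sketch of what such a BPF-side rbtree user looks like, assuming the kfuncs and helpers declared in the selftests' bpf_experimental.h (bpf_obj_new, bpf_rbtree_add, bpf_spin_lock/bpf_spin_unlock) and hypothetical names (node_data, glock, groot, rbtree_add_one), not the exact selftest program:

/* Hypothetical sketch, loosely modeled on progs/rbtree.c. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

#ifndef container_of
#define container_of(ptr, type, member)	\
	((type *)((void *)(ptr) - __builtin_offsetof(type, member)))
#endif

struct node_data {
	long key;
	long data;
	struct bpf_rb_node node;
};

/* the tree root and the spin_lock protecting it live in a mapped section */
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);

static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *node_a, *node_b;

	node_a = container_of(a, struct node_data, node);
	node_b = container_of(b, struct node_data, node);

	return node_a->key < node_b->key;
}

SEC("tc")
long rbtree_add_one(void *ctx)
{
	struct node_data *n;

	n = bpf_obj_new(typeof(*n));	/* owned allocated object */
	if (!n)
		return 1;
	n->key = 42;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);	/* ownership passes to the tree */
	bpf_spin_unlock(&glock);
	return 0;
}

char _license[] SEC("license") = "GPL";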
@@ -31,8 +31,8 @@ void test_recursion(void)
	bpf_map_delete_elem(bpf_map__fd(skel->maps.hash2), &key);
	ASSERT_EQ(skel->bss->pass2, 2, "pass2 == 2");

	err = bpf_obj_get_info_by_fd(bpf_program__fd(skel->progs.on_delete),
				     &prog_info, &prog_info_len);
	err = bpf_prog_get_info_by_fd(bpf_program__fd(skel->progs.on_delete),
				      &prog_info, &prog_info_len);
	if (!ASSERT_OK(err, "get_prog_info"))
		goto out;
	ASSERT_EQ(prog_info.recursion_misses, 2, "recursion_misses");
@@ -299,9 +299,9 @@ static __u32 query_prog_id(int prog_fd)
	__u32 info_len = sizeof(info);
	int err;

	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
	if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd") ||
	    !ASSERT_EQ(info_len, sizeof(info), "bpf_obj_get_info_by_fd"))
	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd") ||
	    !ASSERT_EQ(info_len, sizeof(info), "bpf_prog_get_info_by_fd"))
		return 0;

	return info.id;
@@ -119,19 +119,19 @@ static void test_recursion(void)

	prog_fd = bpf_program__fd(skel->progs.on_lookup);
	memset(&info, 0, sizeof(info));
	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	ASSERT_OK(err, "get prog info");
	ASSERT_GT(info.recursion_misses, 0, "on_lookup prog recursion");

	prog_fd = bpf_program__fd(skel->progs.on_update);
	memset(&info, 0, sizeof(info));
	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	ASSERT_OK(err, "get prog info");
	ASSERT_EQ(info.recursion_misses, 0, "on_update prog recursion");

	prog_fd = bpf_program__fd(skel->progs.on_enter);
	memset(&info, 0, sizeof(info));
	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	ASSERT_OK(err, "get prog info");
	ASSERT_EQ(info.recursion_misses, 0, "on_enter prog recursion");

@@ -221,7 +221,7 @@ static void test_nodeadlock(void)

	info_len = sizeof(info);
	prog_fd = bpf_program__fd(skel->progs.socket_post_create);
	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	ASSERT_OK(err, "get prog info");
	ASSERT_EQ(info.recursion_misses, 0, "prog recursion");

@@ -29,8 +29,8 @@ static int test_tc_bpf_basic(const struct bpf_tc_hook *hook, int fd)
	__u32 info_len = sizeof(info);
	int ret;

	ret = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd"))
	ret = bpf_prog_get_info_by_fd(fd, &info, &info_len);
	if (!ASSERT_OK(ret, "bpf_prog_get_info_by_fd"))
		return ret;

	ret = bpf_tc_attach(hook, &opts);
Some files were not shown because too many files have changed in this diff.