bpf-for-netdev

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQTFp0I1jqZrAX+hPRXbK58LschIgwUCZmykPwAKCRDbK58LschI
 g7LOAQDVPkJ9k50/xrWIBtgvkGq1jCrMlpwEh49QYO0xoqh1IgEA+6Xje9jCIsdp
 AHz9WmZ6G0EpTuDgFq50K1NVZ7MgSQE=
 =zKfv
 -----END PGP SIGNATURE-----

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2024-06-14

We've added 8 non-merge commits during the last 2 day(s) which contain
a total of 9 files changed, 92 insertions(+), 11 deletions(-).

The main changes are:

1) Silence a syzkaller splat under CONFIG_DEBUG_NET=y in pskb_pull_reason()
   triggered via __bpf_try_make_writable(), from Florian Westphal.

2) Fix removal of kfuncs during the linking phase, which then throws a kernel
   build warning from resolve_btfids about unresolved symbols,
   from Tony Ambardar.

3) Fix a UML x86_64 compilation failure in BPF, as the pcpu_hot symbol
   is not available on User Mode Linux, from Maciej Żenczykowski.

4) Fix a register corruption in reg_set_min_max triggering an invariant
   violation in the BPF verifier, from Daniel Borkmann.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf: Harden __bpf_kfunc tag against linker kfunc removal
  compiler_types.h: Define __retain for __attribute__((__retain__))
  bpf: Avoid splat in pskb_pull_reason
  bpf: fix UML x86_64 compile failure
  selftests/bpf: Add test coverage for reg_set_min_max handling
  bpf: Reduce stack consumption in check_stack_write_fixed_off
  bpf: Fix reg_set_min_max corruption of fake_reg
  MAINTAINERS: mailmap: Update Stanislav's email address
====================

Link: https://lore.kernel.org/r/20240614203223.26500-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Committed by: Jakub Kicinski, 2024-06-14 17:57:09 -07:00
Commit: c64da10adb
9 changed files with 92 additions and 11 deletions

@@ -608,6 +608,7 @@ Simon Kelley <simon@thekelleys.org.uk>
 Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org>
 Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org>
 Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org>
+Stanislav Fomichev <sdf@fomichev.me> <sdf@google.com>
 Stefan Wahren <wahrenst@gmx.net> <stefan.wahren@i2se.com>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
 Stephen Hemminger <stephen@networkplumber.org> <shemminger@linux-foundation.org>


@@ -3980,7 +3980,7 @@ R: Song Liu <song@kernel.org>
 R: Yonghong Song <yonghong.song@linux.dev>
 R: John Fastabend <john.fastabend@gmail.com>
 R: KP Singh <kpsingh@kernel.org>
-R: Stanislav Fomichev <sdf@google.com>
+R: Stanislav Fomichev <sdf@fomichev.me>
 R: Hao Luo <haoluo@google.com>
 R: Jiri Olsa <jolsa@kernel.org>
 L: bpf@vger.kernel.org


@@ -746,6 +746,8 @@ struct bpf_verifier_env {
 	/* Same as scratched_regs but for stack slots */
 	u64 scratched_stack_slots;
 	u64 prev_log_pos, prev_insn_print_pos;
+	/* buffer used to temporary hold constants as scalar registers */
+	struct bpf_reg_state fake_reg[2];
 	/* buffer used to generate temporary string representations,
 	 * e.g., in reg_type_str() to generate reg_type string
 	 */


@@ -82,7 +82,7 @@
  * as to avoid issues such as the compiler inlining or eliding either a static
  * kfunc, or a global kfunc in an LTO build.
  */
-#define __bpf_kfunc __used noinline
+#define __bpf_kfunc __used __retain noinline
 
 #define __bpf_kfunc_start_defs()					\
 	__diag_push();							\
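
A minimal sketch of how a kfunc definition picks up the hardened tag (the
function bpf_example_square() below is hypothetical and not part of this
commit; only the __bpf_kfunc machinery is real):

/* Kernel-context sketch: with the change above, __bpf_kfunc expands to
 * __used __retain noinline, so the symbol survives both compiler-level
 * dead-code elimination and linker-level section garbage collection
 * (LTO / --gc-sections), and resolve_btfids can still find it.
 */
__bpf_kfunc_start_defs();

__bpf_kfunc u64 bpf_example_square(u64 x)
{
	return x * x;
}

__bpf_kfunc_end_defs();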


@@ -143,6 +143,29 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
 # define __preserve_most
 #endif
 
+/*
+ * Annotating a function/variable with __retain tells the compiler to place
+ * the object in its own section and set the flag SHF_GNU_RETAIN. This flag
+ * instructs the linker to retain the object during garbage-cleanup or LTO
+ * phases.
+ *
+ * Note that the __used macro is also used to prevent functions or data
+ * being optimized out, but operates at the compiler/IR-level and may still
+ * allow unintended removal of objects during linking.
+ *
+ * Optional: only supported since gcc >= 11, clang >= 13
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-retain-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#retain
+ */
+#if __has_attribute(__retain__) && \
+	(defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || \
+	 defined(CONFIG_LTO_CLANG))
+# define __retain __attribute__((__retain__))
+#else
+# define __retain
+#endif
+
 /* Compiler specific macros. */
 #ifdef __clang__
 #include <linux/compiler-clang.h>
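
The __used vs __retain gap described in the comment can also be seen outside
the kernel. The toy program below (file name retain_demo.c and functions
used_only_fn()/kept_fn() are made up for illustration) builds with function
sections and linker garbage collection so the difference is observable:

/* Toy demonstration of SHF_GNU_RETAIN; needs gcc >= 11 or clang >= 13 and
 * binutils >= 2.36. Build with:
 *   cc -O2 -ffunction-sections -Wl,--gc-sections retain_demo.c -o retain_demo
 * then inspect with `nm retain_demo`: kept_fn should survive, while
 * used_only_fn may be discarded by the linker despite __used__, which is
 * exactly the gap __retain closes.
 */
#include <stdio.h>

#define __retain __attribute__((__retain__))
#define __used   __attribute__((__used__))

/* Emitted by the compiler, but its section is still eligible for
 * --gc-sections removal because nothing references it.
 */
__used static int used_only_fn(void) { return 1; }

/* __retain sets SHF_GNU_RETAIN on the section, so the linker keeps it. */
__used __retain static int kept_fn(void) { return 2; }

int main(void)
{
	puts("inspect the binary with: nm retain_demo");
	return 0;
}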


@@ -4549,11 +4549,12 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 		state->stack[spi].spilled_ptr.id = 0;
 	} else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
 		   env->bpf_capable) {
-		struct bpf_reg_state fake_reg = {};
+		struct bpf_reg_state *tmp_reg = &env->fake_reg[0];
 
-		__mark_reg_known(&fake_reg, insn->imm);
-		fake_reg.type = SCALAR_VALUE;
-		save_register_state(env, state, spi, &fake_reg, size);
+		memset(tmp_reg, 0, sizeof(*tmp_reg));
+		__mark_reg_known(tmp_reg, insn->imm);
+		tmp_reg->type = SCALAR_VALUE;
+		save_register_state(env, state, spi, tmp_reg, size);
 	} else if (reg && is_spillable_regtype(reg->type)) {
 		/* register containing pointer is being spilled into stack */
 		if (size != BPF_REG_SIZE) {
@@ -15113,7 +15114,6 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
 	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
 	struct bpf_reg_state *eq_branch_regs;
-	struct bpf_reg_state fake_reg = {};
 	u8 opcode = BPF_OP(insn->code);
 	bool is_jmp32;
 	int pred = -1;
@@ -15179,7 +15179,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
 			return -EINVAL;
 		}
-		src_reg = &fake_reg;
+		src_reg = &env->fake_reg[0];
+		memset(src_reg, 0, sizeof(*src_reg));
 		src_reg->type = SCALAR_VALUE;
 		__mark_reg_known(src_reg, insn->imm);
 	}
@@ -15239,10 +15240,16 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 				       &other_branch_regs[insn->src_reg],
 				       dst_reg, src_reg, opcode, is_jmp32);
 	} else /* BPF_SRC(insn->code) == BPF_K */ {
+		/* reg_set_min_max() can mangle the fake_reg. Make a copy
+		 * so that these are two different memory locations. The
+		 * src_reg is not used beyond here in context of K.
+		 */
+		memcpy(&env->fake_reg[1], &env->fake_reg[0],
+		       sizeof(env->fake_reg[0]));
 		err = reg_set_min_max(env,
 				      &other_branch_regs[insn->dst_reg],
-				      src_reg /* fake one */,
-				      dst_reg, src_reg /* same fake one */,
+				      &env->fake_reg[0],
+				      dst_reg, &env->fake_reg[1],
 				      opcode, is_jmp32);
 	}
 	if (err)
@@ -20313,7 +20320,7 @@ patch_map_ops_generic:
 			goto next_insn;
 		}
 
-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
 		/* Implement bpf_get_smp_processor_id() inline. */
 		if (insn->imm == BPF_FUNC_get_smp_processor_id &&
 		    prog->jit_requested && bpf_jit_supports_percpu_insn()) {
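
For intuition on why the memcpy above is needed, here is a small standalone
sketch (toy_reg and toy_set_min_max are made-up stand-ins, not the verifier's
real code) of the aliasing hazard: when the same scalar state is passed as
both branch arguments, the first bound update corrupts the input of the
second, yielding the kind of smin > smax invariant violation mentioned in the
pull request:

#include <stdio.h>

/* Toy stand-ins for bpf_reg_state and reg_set_min_max(): the callee
 * narrows the "branch not taken" and "branch taken" states, so if the
 * two pointers alias one object, the second write sees a value the
 * first one already clobbered.
 */
struct toy_reg { long smin, smax; };

static void toy_set_min_max(struct toy_reg *false_reg, struct toy_reg *true_reg, long imm)
{
	false_reg->smax = imm - 1;	/* branch-not-taken upper bound */
	true_reg->smin = imm;		/* branch-taken lower bound */
}

int main(void)
{
	struct toy_reg fake = { .smin = 0, .smax = 100 };
	struct toy_reg copy = fake;

	/* Buggy pattern: one object on both sides -> smin 50, smax 49. */
	toy_set_min_max(&fake, &fake, 50);
	printf("aliased:  smin=%ld smax=%ld (invalid range)\n", fake.smin, fake.smax);

	/* Fixed pattern mirrors the commit: two distinct memory locations. */
	fake = copy;
	toy_set_min_max(&fake, &copy, 50);
	printf("separate: false [%ld,%ld] true [%ld,%ld]\n",
	       fake.smin, fake.smax, copy.smin, copy.smax);
	return 0;
}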


@@ -1665,6 +1665,11 @@ static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
 static inline int __bpf_try_make_writable(struct sk_buff *skb,
 					  unsigned int write_len)
 {
+#ifdef CONFIG_DEBUG_NET
+	/* Avoid a splat in pskb_may_pull_reason() */
+	if (write_len > INT_MAX)
+		return -EINVAL;
+#endif
 	return skb_ensure_writable(skb, write_len);
 }
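
To make the write_len path concrete, a minimal BPF program sketch follows
(the program and its name pull_too_much are hypothetical; bpf_skb_pull_data()
is just one helper whose length argument funnels into
__bpf_try_make_writable(), and the original syzkaller reproducer is not part
of this pull request):

// SPDX-License-Identifier: GPL-2.0
/* Sketch only: asks for more than INT_MAX bytes to be made writable.
 * With the hunk above, the request now fails with -EINVAL instead of
 * tripping a CONFIG_DEBUG_NET warning deeper in pskb_may_pull_reason().
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int pull_too_much(struct __sk_buff *skb)
{
	/* 0x80000000 > INT_MAX; the length reaches __bpf_try_make_writable() */
	bpf_skb_pull_data(skb, 0x80000000);
	return 0;
}

char _license[] SEC("license") = "GPL";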


@@ -53,6 +53,7 @@
 #include "verifier_movsx.skel.h"
 #include "verifier_netfilter_ctx.skel.h"
 #include "verifier_netfilter_retcode.skel.h"
+#include "verifier_or_jmp32_k.skel.h"
 #include "verifier_precision.skel.h"
 #include "verifier_prevent_map_lookup.skel.h"
 #include "verifier_raw_stack.skel.h"
@@ -170,6 +171,7 @@ void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
 void test_verifier_movsx(void) { RUN(verifier_movsx); }
 void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); }
 void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); }
+void test_verifier_or_jmp32_k(void) { RUN(verifier_or_jmp32_k); }
 void test_verifier_precision(void) { RUN(verifier_precision); }
 void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); }
 void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); }


@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("socket")
+__description("or_jmp32_k: bit ops + branch on unknown value")
+__failure
+__msg("R0 invalid mem access 'scalar'")
+__naked void or_jmp32_k(void)
+{
+	asm volatile ("				\
+	r0 = 0xffffffff;			\
+	r0 /= 1;				\
+	r1 = 0;					\
+	w1 = -1;				\
+	w1 >>= 1;				\
+	w0 &= w1;				\
+	w0 |= 2;				\
+	if w0 != 0x7ffffffd goto l1;		\
+	r0 = 1;					\
+	exit;					\
+l3:						\
+	r0 = 5;					\
+	*(u64*)(r0 - 8) = r0;			\
+	exit;					\
+l2:						\
+	w0 -= 0xe;				\
+	if w0 == 1 goto l3;			\
+	r0 = 4;					\
+	exit;					\
+l1:						\
+	w0 -= 0x7ffffff0;			\
+	if w0 s>= 0xe goto l2;			\
+	r0 = 3;					\
+	exit;					\
+" ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";