Merge branch 'x86-bpf-fixes-for-the-bpf-jit-with-retbleed-stuff'

Joan Bruguera Micó says:

====================
x86/bpf: Fixes for the BPF JIT with retbleed=stuff

From: Joan Bruguera Micó <joanbrugueram@gmail.com>

Fixes two issues that cause kernel panics when using the BPF JIT with
the call depth tracking / stuffing mitigation for Skylake processors
(`retbleed=stuff`). Both issues can be triggered by running simple
BPF programs (e.g. running the test suite should trigger both).

The first (a resubmit) fixes a trivial issue in how the destination IP is
calculated for call instructions when call depth tracking is enabled.
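For illustration, a minimal standalone sketch of the displacement math (not
the kernel code; the helper names and "thunk_size" are made up): an E8 near
call encodes its target as rel32 relative to the end of the 5-byte
instruction, so once the call depth accounting thunk is emitted in front of
the call, the call itself sits thunk_size bytes further on and the IP used
to compute rel32 must advance with it.

    #include <stdint.h>

    /* rel32 of an E8 near call is relative to the end of the 5-byte
     * instruction, i.e. the address of the next instruction. */
    static int32_t call_rel32(uint64_t func, uint64_t call_ip)
    {
            return (int32_t)(func - (call_ip + 5));
    }

    /* With retbleed=stuff, an accounting thunk of thunk_size bytes is
     * emitted before the call, so the call instruction really lives at
     * ip + thunk_size; computing rel32 against the original ip makes
     * the emitted call land thunk_size bytes past the target. */
    static int32_t call_rel32_after_accounting(uint64_t func, uint64_t ip,
                                               unsigned int thunk_size)
    {
            return call_rel32(func, ip + thunk_size);
    }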

The second fixes the IP used for relocations, which broke with the recently
introduced %rip-relative addressing for PER_CPU_VAR.
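Roughly: the call depth thunk template now contains a %rip-relative
reference to the per-CPU call depth counter, and when the template is copied
that displacement has to be recomputed against the address the copy will
actually run from, not the temporary buffer the JIT writes it into. A
hypothetical helper (not the kernel's apply_relocation()) showing the
fix-up:

    #include <stdint.h>

    /* Recompute one RIP-relative displacement when an instruction is
     * copied to a new location. insn_end_src/insn_end_dst are the
     * addresses just past the instruction (where RIP points at run
     * time); the absolute target stays fixed, so the displacement has
     * to change with the address the copy executes from. */
    static int32_t fixup_riprel(int32_t old_disp,
                                uint64_t insn_end_src, uint64_t insn_end_dst)
    {
            uint64_t target = insn_end_src + (int64_t)old_disp;

            return (int32_t)(target - insn_end_dst);
    }

If the destination passed to the relocation step is the JIT's temporary
write buffer instead of the final image address, the displacement is
computed for the wrong run-time location, hence passing the real IP.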

Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
---
v2:
  Simplify calculation of "ip".
  Add more details to the commit message.

Joan Bruguera Micó (1):
  x86/bpf: Fix IP for relocating call depth accounting
====================

Link: https://lore.kernel.org/r/20240401185821.224068-1-ubizjak@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit 8c3fe029d7
Alexei Starovoitov  2024-04-01 20:37:56 -07:00
3 changed files with 12 additions and 15 deletions

--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h

@@ -117,7 +117,7 @@ extern void callthunks_patch_builtin_calls(void);
 extern void callthunks_patch_module_calls(struct callthunk_sites *sites,
					   struct module *mod);
 extern void *callthunks_translate_call_dest(void *dest);
-extern int x86_call_depth_emit_accounting(u8 **pprog, void *func);
+extern int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip);
 #else
 static __always_inline void callthunks_patch_builtin_calls(void) {}
 static __always_inline void
@@ -128,7 +128,7 @@ static __always_inline void *callthunks_translate_call_dest(void *dest)
	return dest;
 }
 static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
-							   void *func)
+							   void *func, void *ip)
 {
	return 0;
 }

--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c

@@ -314,7 +314,7 @@ static bool is_callthunk(void *addr)
	return !bcmp(pad, insn_buff, tmpl_size);
 }
 
-int x86_call_depth_emit_accounting(u8 **pprog, void *func)
+int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
 {
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];
@@ -327,7 +327,7 @@ int x86_call_depth_emit_accounting(u8 **pprog, void *func)
		return 0;
 
	memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
-	apply_relocation(insn_buff, tmpl_size, *pprog,
+	apply_relocation(insn_buff, tmpl_size, ip,
			 skl_call_thunk_template, tmpl_size);
 
	memcpy(*pprog, insn_buff, tmpl_size);

--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c

@@ -480,7 +480,7 @@ static int emit_call(u8 **pprog, void *func, void *ip)
 static int emit_rsb_call(u8 **pprog, void *func, void *ip)
 {
	OPTIMIZER_HIDE_VAR(func);
-	x86_call_depth_emit_accounting(pprog, func);
+	ip += x86_call_depth_emit_accounting(pprog, func, ip);
	return emit_patch(pprog, func, ip, 0xE8);
 }
 
@@ -1972,20 +1972,17 @@ populate_extable:
 
			/* call */
		case BPF_JMP | BPF_CALL: {
-			int offs;
+			u8 *ip = image + addrs[i - 1];
 
			func = (u8 *) __bpf_call_base + imm32;
			if (tail_call_reachable) {
				RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
-				if (!imm32)
-					return -EINVAL;
-				offs = 7 + x86_call_depth_emit_accounting(&prog, func);
-			} else {
-				if (!imm32)
-					return -EINVAL;
-				offs = x86_call_depth_emit_accounting(&prog, func);
+				ip += 7;
			}
-			if (emit_call(&prog, func, image + addrs[i - 1] + offs))
+			if (!imm32)
+				return -EINVAL;
+			ip += x86_call_depth_emit_accounting(&prog, func, ip);
+			if (emit_call(&prog, func, ip))
				return -EINVAL;
			break;
		}
@@ -2835,7 +2832,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
		 * Direct-call fentry stub, as such it needs accounting for the
		 * __fentry__ call.
		 */
-		x86_call_depth_emit_accounting(&prog, NULL);
+		x86_call_depth_emit_accounting(&prog, NULL, image);
	}
	EMIT1(0x55);		 /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */