Mirror of https://github.com/torvalds/linux.git (synced 2024-11-10 14:11:52 +00:00)
bpf: Support new 32bit offset jmp instruction
Add interpreter/jit/verifier support for the 32-bit offset jmp instruction.
If a conditional jmp instruction needs more than a 16-bit offset, it can be
simulated with a conditional jmp plus a 32-bit jmp insn.

Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20230728011231.3716103-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 7058e3a31e
commit 4cd58e9af8
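For context, a minimal sketch (not part of this commit's diff) of the pattern the commit message describes: a conditional branch whose target lies beyond the s16 'off' range is inverted so that it skips over a BPF_JMP32 | BPF_JA instruction, which carries the full 32-bit displacement in 'imm' rather than 'off'. FAR_OFF is a hypothetical displacement made up for this example; BPF_JMP_REG() is the existing insn-building macro from include/linux/filter.h.

#include <linux/filter.h>

/* Hypothetical displacement too large for the s16 'off' field. */
#define FAR_OFF 100000

/* Emulates "if r1 == r2 goto +FAR_OFF". */
static const struct bpf_insn far_cond_jmp[] = {
	/* Inverted test: if r1 != r2, skip the wide jump below. */
	BPF_JMP_REG(BPF_JNE, BPF_REG_1, BPF_REG_2, 1),
	/* BPF_JMP32 | BPF_JA: the 32-bit displacement lives in insn->imm;
	 * insn->off must stay 0 (enforced by do_check() in this commit).
	 */
	{ .code = BPF_JMP32 | BPF_JA, .imm = FAR_OFF },
};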
arch/x86/net/bpf_jit_comp.c
@@ -1815,16 +1815,24 @@ emit_cond_jmp:		/* Convert BPF opcode to x86 */
 			break;
 
 		case BPF_JMP | BPF_JA:
-			if (insn->off == -1)
-				/* -1 jmp instructions will always jump
-				 * backwards two bytes. Explicitly handling
-				 * this case avoids wasting too many passes
-				 * when there are long sequences of replaced
-				 * dead code.
-				 */
-				jmp_offset = -2;
-			else
-				jmp_offset = addrs[i + insn->off] - addrs[i];
+		case BPF_JMP32 | BPF_JA:
+			if (BPF_CLASS(insn->code) == BPF_JMP) {
+				if (insn->off == -1)
+					/* -1 jmp instructions will always jump
+					 * backwards two bytes. Explicitly handling
+					 * this case avoids wasting too many passes
+					 * when there are long sequences of replaced
+					 * dead code.
+					 */
+					jmp_offset = -2;
+				else
+					jmp_offset = addrs[i + insn->off] - addrs[i];
+			} else {
+				if (insn->imm == -1)
+					jmp_offset = -2;
+				else
+					jmp_offset = addrs[i + insn->imm] - addrs[i];
+			}
 
 			if (!jmp_offset) {
 				/*
kernel/bpf/core.c
@@ -373,7 +373,12 @@ static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
 {
 	const s32 off_min = S16_MIN, off_max = S16_MAX;
 	s32 delta = end_new - end_old;
-	s32 off = insn->off;
+	s32 off;
+
+	if (insn->code == (BPF_JMP32 | BPF_JA))
+		off = insn->imm;
+	else
+		off = insn->off;
 
 	if (curr < pos && curr + off + 1 >= end_old)
 		off += delta;
@@ -381,8 +386,12 @@
 		off -= delta;
 	if (off < off_min || off > off_max)
 		return -ERANGE;
-	if (!probe_pass)
-		insn->off = off;
+	if (!probe_pass) {
+		if (insn->code == (BPF_JMP32 | BPF_JA))
+			insn->imm = off;
+		else
+			insn->off = off;
+	}
 	return 0;
 }
 
@@ -1593,6 +1602,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
 	INSN_3(JMP, JSLE, K),			\
 	INSN_3(JMP, JSET, K),			\
 	INSN_2(JMP, JA),			\
+	INSN_2(JMP32, JA),			\
 	/* Store instructions. */		\
 	/* Register based. */			\
 	INSN_3(STX, MEM, B),			\
@@ -1989,6 +1999,9 @@ out:
 	JMP_JA:
 		insn += insn->off;
 		CONT;
+	JMP32_JA:
+		insn += insn->imm;
+		CONT;
 	JMP_EXIT:
 		return BPF_R0;
 	/* JMP */
kernel/bpf/verifier.c
@@ -2855,7 +2855,10 @@ static int check_subprogs(struct bpf_verifier_env *env)
 			goto next;
 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
 			goto next;
-		off = i + insn[i].off + 1;
+		if (code == (BPF_JMP32 | BPF_JA))
+			off = i + insn[i].imm + 1;
+		else
+			off = i + insn[i].off + 1;
 		if (off < subprog_start || off >= subprog_end) {
 			verbose(env, "jump out of range from insn %d to %d\n", i, off);
 			return -EINVAL;
@@ -2867,6 +2870,7 @@ next:
 		 * or unconditional jump back
 		 */
 		if (code != (BPF_JMP | BPF_EXIT) &&
+		    code != (BPF_JMP32 | BPF_JA) &&
 		    code != (BPF_JMP | BPF_JA)) {
 			verbose(env, "last insn is not an exit or jmp\n");
 			return -EINVAL;
@@ -14792,7 +14796,7 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
 static int visit_insn(int t, struct bpf_verifier_env *env)
 {
 	struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
-	int ret;
+	int ret, off;
 
 	if (bpf_pseudo_func(insn))
 		return visit_func_call_insn(t, insns, env, true);
@@ -14840,14 +14844,19 @@
 		if (BPF_SRC(insn->code) != BPF_K)
 			return -EINVAL;
 
+		if (BPF_CLASS(insn->code) == BPF_JMP)
+			off = insn->off;
+		else
+			off = insn->imm;
+
 		/* unconditional jump with single edge */
-		ret = push_insn(t, t + insn->off + 1, FALLTHROUGH, env,
+		ret = push_insn(t, t + off + 1, FALLTHROUGH, env,
 				true);
 		if (ret)
 			return ret;
 
-		mark_prune_point(env, t + insn->off + 1);
-		mark_jmp_point(env, t + insn->off + 1);
+		mark_prune_point(env, t + off + 1);
+		mark_jmp_point(env, t + off + 1);
 
 		return ret;
@@ -16643,15 +16652,18 @@ static int do_check(struct bpf_verifier_env *env)
 				mark_reg_scratched(env, BPF_REG_0);
 			} else if (opcode == BPF_JA) {
 				if (BPF_SRC(insn->code) != BPF_K ||
-				    insn->imm != 0 ||
 				    insn->src_reg != BPF_REG_0 ||
 				    insn->dst_reg != BPF_REG_0 ||
-				    class == BPF_JMP32) {
+				    (class == BPF_JMP && insn->imm != 0) ||
+				    (class == BPF_JMP32 && insn->off != 0)) {
 					verbose(env, "BPF_JA uses reserved fields\n");
 					return -EINVAL;
 				}
 
-				env->insn_idx += insn->off + 1;
+				if (class == BPF_JMP)
+					env->insn_idx += insn->off + 1;
+				else
+					env->insn_idx += insn->imm + 1;
 				continue;
 
 			} else if (opcode == BPF_EXIT) {
@@ -17498,13 +17510,13 @@ static bool insn_is_cond_jump(u8 code)
 {
 	u8 op;
 
+	op = BPF_OP(code);
 	if (BPF_CLASS(code) == BPF_JMP32)
-		return true;
+		return op != BPF_JA;
 
 	if (BPF_CLASS(code) != BPF_JMP)
 		return false;
 
-	op = BPF_OP(code);
 	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
 }
 