bpf: Refactor BPF_PSEUDO_CALL checking as a helper function
There is no functionality change. This refactoring is intended to facilitate the next patch, which introduces BPF_PSEUDO_FUNC.

Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20210204234827.1628953-1-yhs@fb.com
commit 23a2d70c7a
parent ecda49c522
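For reference only (not part of the commit): the new helper simply bundles the two open-coded comparisons that each call site used to repeat. Below is a minimal userspace sketch that exercises the same check, compiled against the UAPI BPF headers (<linux/bpf.h> pulls in <linux/bpf_common.h> for BPF_JMP/BPF_CALL); the instruction values are made up for the example.

/*
 * Illustrative sketch only, not part of the commit: same predicate as the
 * new verifier helper, built against the UAPI headers so it can be compiled
 * outside the kernel tree. The instruction values below are made up.
 */
#include <stdbool.h>
#include <stdio.h>
#include <linux/bpf.h>

static bool bpf_pseudo_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_CALL;
}

int main(void)
{
	/* Call to another BPF subprogram: src_reg carries BPF_PSEUDO_CALL. */
	struct bpf_insn subprog_call = {
		.code    = BPF_JMP | BPF_CALL,
		.src_reg = BPF_PSEUDO_CALL,
		.imm     = 1,	/* relative target, resolved by the verifier */
	};
	/* Call to a kernel helper: src_reg is 0, so it is not a pseudo call. */
	struct bpf_insn helper_call = {
		.code = BPF_JMP | BPF_CALL,
		.imm  = BPF_FUNC_trace_printk,
	};

	printf("subprog_call: %d\n", bpf_pseudo_call(&subprog_call));	/* prints 1 */
	printf("helper_call:  %d\n", bpf_pseudo_call(&helper_call));	/* prints 0 */
	return 0;
}

Centralizing the check in one predicate also means a follow-up change (such as the BPF_PSEUDO_FUNC patch this commit prepares for) has a single place to extend instead of every call site.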
@@ -228,6 +228,12 @@ static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
 			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
 }
 
+static bool bpf_pseudo_call(const struct bpf_insn *insn)
+{
+	return insn->code == (BPF_JMP | BPF_CALL) &&
+	       insn->src_reg == BPF_PSEUDO_CALL;
+}
+
 struct bpf_call_arg_meta {
 	struct bpf_map *map_ptr;
 	bool raw_mode;
@@ -1486,9 +1492,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
 
 	/* determine subprog starts. The end is one before the next starts */
 	for (i = 0; i < insn_cnt; i++) {
-		if (insn[i].code != (BPF_JMP | BPF_CALL))
-			continue;
-		if (insn[i].src_reg != BPF_PSEUDO_CALL)
+		if (!bpf_pseudo_call(insn + i))
 			continue;
 		if (!env->bpf_capable) {
 			verbose(env,
@@ -3074,9 +3078,7 @@ process_func:
 continue_func:
 	subprog_end = subprog[idx + 1].start;
 	for (; i < subprog_end; i++) {
-		if (insn[i].code != (BPF_JMP | BPF_CALL))
-			continue;
-		if (insn[i].src_reg != BPF_PSEUDO_CALL)
+		if (!bpf_pseudo_call(insn + i))
 			continue;
 		/* remember insn and function to return to */
 		ret_insn[frame] = i + 1;
@@ -10846,8 +10848,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		return 0;
 
 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
-		if (insn->code != (BPF_JMP | BPF_CALL) ||
-		    insn->src_reg != BPF_PSEUDO_CALL)
+		if (!bpf_pseudo_call(insn))
 			continue;
 		/* Upon error here we cannot fall back to interpreter but
 		 * need a hard reject of the program. Thus -EFAULT is
@@ -10976,8 +10977,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 	for (i = 0; i < env->subprog_cnt; i++) {
 		insn = func[i]->insnsi;
 		for (j = 0; j < func[i]->len; j++, insn++) {
-			if (insn->code != (BPF_JMP | BPF_CALL) ||
-			    insn->src_reg != BPF_PSEUDO_CALL)
+			if (!bpf_pseudo_call(insn))
 				continue;
 			subprog = insn->off;
 			insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
@@ -11022,8 +11022,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 	 * later look the same as if they were interpreted only.
 	 */
 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
-		if (insn->code != (BPF_JMP | BPF_CALL) ||
-		    insn->src_reg != BPF_PSEUDO_CALL)
+		if (!bpf_pseudo_call(insn))
 			continue;
 		insn->off = env->insn_aux_data[i].call_imm;
 		subprog = find_subprog(env, i + insn->off + 1);
@@ -11052,8 +11051,7 @@ out_undo_insn:
 	/* cleanup main prog to be interpreted */
 	prog->jit_requested = 0;
 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
-		if (insn->code != (BPF_JMP | BPF_CALL) ||
-		    insn->src_reg != BPF_PSEUDO_CALL)
+		if (!bpf_pseudo_call(insn))
 			continue;
 		insn->off = 0;
 		insn->imm = env->insn_aux_data[i].call_imm;
@@ -11088,8 +11086,7 @@ static int fixup_call_args(struct bpf_verifier_env *env)
 		return -EINVAL;
 	}
 	for (i = 0; i < prog->len; i++, insn++) {
-		if (insn->code != (BPF_JMP | BPF_CALL) ||
-		    insn->src_reg != BPF_PSEUDO_CALL)
+		if (!bpf_pseudo_call(insn))
 			continue;
 		depth = get_callee_stack_depth(env, insn, i);
 		if (depth < 0)