bpf: Add bpf_get_func_ip helper for tracing programs
Adding bpf_get_func_ip helper for BPF_PROG_TYPE_TRACING programs, specifically for all trampoline attach types. The trampoline's caller IP address is stored at (ctx - 8), so there's no reason to actually call the helper; instead, the verifier fixes up the call instruction and returns the [ctx - 8] value directly. Signed-off-by: Jiri Olsa <jolsa@kernel.org> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Link: https://lore.kernel.org/bpf/20210714094400.396467-4-jolsa@kernel.org
This commit is contained in:
committed by
Alexei Starovoitov
parent
1e37392ccc
commit
9b99edcae5
@@ -4841,6 +4841,12 @@ union bpf_attr {
|
|||||||
* **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
|
* **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
|
||||||
* **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
|
* **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
|
||||||
* own timer which would have led to a deadlock otherwise.
|
* own timer which would have led to a deadlock otherwise.
|
||||||
|
*
|
||||||
|
* u64 bpf_get_func_ip(void *ctx)
|
||||||
|
* Description
|
||||||
|
* Get address of the traced function (for tracing programs).
|
||||||
|
* Return
|
||||||
|
* Address of the traced function.
|
||||||
*/
|
*/
|
||||||
#define __BPF_FUNC_MAPPER(FN) \
|
#define __BPF_FUNC_MAPPER(FN) \
|
||||||
FN(unspec), \
|
FN(unspec), \
|
||||||
@@ -5016,6 +5022,7 @@ union bpf_attr {
|
|||||||
FN(timer_set_callback), \
|
FN(timer_set_callback), \
|
||||||
FN(timer_start), \
|
FN(timer_start), \
|
||||||
FN(timer_cancel), \
|
FN(timer_cancel), \
|
||||||
|
FN(get_func_ip), \
|
||||||
/* */
|
/* */
|
||||||
|
|
||||||
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
|
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
|
||||||
|
|||||||
@@ -6161,6 +6161,27 @@ static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
|
|||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int check_get_func_ip(struct bpf_verifier_env *env)
|
||||||
|
{
|
||||||
|
enum bpf_attach_type eatype = env->prog->expected_attach_type;
|
||||||
|
enum bpf_prog_type type = resolve_prog_type(env->prog);
|
||||||
|
int func_id = BPF_FUNC_get_func_ip;
|
||||||
|
|
||||||
|
if (type == BPF_PROG_TYPE_TRACING) {
|
||||||
|
if (eatype != BPF_TRACE_FENTRY && eatype != BPF_TRACE_FEXIT &&
|
||||||
|
eatype != BPF_MODIFY_RETURN) {
|
||||||
|
verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
|
||||||
|
func_id_name(func_id), func_id);
|
||||||
|
return -ENOTSUPP;
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
verbose(env, "func %s#%d not supported for program type %d\n",
|
||||||
|
func_id_name(func_id), func_id, type);
|
||||||
|
return -ENOTSUPP;
|
||||||
|
}
|
||||||
|
|
||||||
static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
|
static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
|
||||||
int *insn_idx_p)
|
int *insn_idx_p)
|
||||||
{
|
{
|
||||||
@@ -6439,6 +6460,12 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
|
|||||||
if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
|
if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
|
||||||
env->prog->call_get_stack = true;
|
env->prog->call_get_stack = true;
|
||||||
|
|
||||||
|
if (func_id == BPF_FUNC_get_func_ip) {
|
||||||
|
if (check_get_func_ip(env))
|
||||||
|
return -ENOTSUPP;
|
||||||
|
env->prog->call_get_func_ip = true;
|
||||||
|
}
|
||||||
|
|
||||||
if (changes_data)
|
if (changes_data)
|
||||||
clear_all_pkt_pointers(env);
|
clear_all_pkt_pointers(env);
|
||||||
return 0;
|
return 0;
|
||||||
@@ -12632,6 +12659,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
|
|||||||
{
|
{
|
||||||
struct bpf_prog *prog = env->prog;
|
struct bpf_prog *prog = env->prog;
|
||||||
bool expect_blinding = bpf_jit_blinding_enabled(prog);
|
bool expect_blinding = bpf_jit_blinding_enabled(prog);
|
||||||
|
enum bpf_prog_type prog_type = resolve_prog_type(prog);
|
||||||
struct bpf_insn *insn = prog->insnsi;
|
struct bpf_insn *insn = prog->insnsi;
|
||||||
const struct bpf_func_proto *fn;
|
const struct bpf_func_proto *fn;
|
||||||
const int insn_cnt = prog->len;
|
const int insn_cnt = prog->len;
|
||||||
@@ -12998,6 +13026,21 @@ patch_map_ops_generic:
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Implement bpf_get_func_ip inline. */
|
||||||
|
if (prog_type == BPF_PROG_TYPE_TRACING &&
|
||||||
|
insn->imm == BPF_FUNC_get_func_ip) {
|
||||||
|
/* Load IP address from ctx - 8 */
|
||||||
|
insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
|
||||||
|
|
||||||
|
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
|
||||||
|
if (!new_prog)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
env->prog = prog = new_prog;
|
||||||
|
insn = new_prog->insnsi + i + delta;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
patch_call_imm:
|
patch_call_imm:
|
||||||
fn = env->ops->get_func_proto(insn->imm, env->prog);
|
fn = env->ops->get_func_proto(insn->imm, env->prog);
|
||||||
/* all functions that have prototype and verifier allowed
|
/* all functions that have prototype and verifier allowed
|
||||||
|
|||||||
@@ -948,6 +948,19 @@ const struct bpf_func_proto bpf_snprintf_btf_proto = {
|
|||||||
.arg5_type = ARG_ANYTHING,
|
.arg5_type = ARG_ANYTHING,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
|
||||||
|
{
|
||||||
|
/* This helper call is inlined by verifier. */
|
||||||
|
return ((u64 *)ctx)[-1];
|
||||||
|
}
|
||||||
|
|
||||||
|
static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
|
||||||
|
.func = bpf_get_func_ip_tracing,
|
||||||
|
.gpl_only = true,
|
||||||
|
.ret_type = RET_INTEGER,
|
||||||
|
.arg1_type = ARG_PTR_TO_CTX,
|
||||||
|
};
|
||||||
|
|
||||||
const struct bpf_func_proto *
|
const struct bpf_func_proto *
|
||||||
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||||
{
|
{
|
||||||
@@ -1058,6 +1071,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
|||||||
return &bpf_for_each_map_elem_proto;
|
return &bpf_for_each_map_elem_proto;
|
||||||
case BPF_FUNC_snprintf:
|
case BPF_FUNC_snprintf:
|
||||||
return &bpf_snprintf_proto;
|
return &bpf_snprintf_proto;
|
||||||
|
case BPF_FUNC_get_func_ip:
|
||||||
|
return &bpf_get_func_ip_proto_tracing;
|
||||||
default:
|
default:
|
||||||
return bpf_base_func_proto(func_id);
|
return bpf_base_func_proto(func_id);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4841,6 +4841,12 @@ union bpf_attr {
|
|||||||
* **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
|
* **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
|
||||||
* **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
|
* **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
|
||||||
* own timer which would have led to a deadlock otherwise.
|
* own timer which would have led to a deadlock otherwise.
|
||||||
|
*
|
||||||
|
* u64 bpf_get_func_ip(void *ctx)
|
||||||
|
* Description
|
||||||
|
* Get address of the traced function (for tracing programs).
|
||||||
|
* Return
|
||||||
|
* Address of the traced function.
|
||||||
*/
|
*/
|
||||||
#define __BPF_FUNC_MAPPER(FN) \
|
#define __BPF_FUNC_MAPPER(FN) \
|
||||||
FN(unspec), \
|
FN(unspec), \
|
||||||
@@ -5016,6 +5022,7 @@ union bpf_attr {
|
|||||||
FN(timer_set_callback), \
|
FN(timer_set_callback), \
|
||||||
FN(timer_start), \
|
FN(timer_start), \
|
||||||
FN(timer_cancel), \
|
FN(timer_cancel), \
|
||||||
|
FN(get_func_ip), \
|
||||||
/* */
|
/* */
|
||||||
|
|
||||||
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
|
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
|
||||||
|
|||||||
Reference in New Issue
Block a user