Merge branch 'pkt-access-fixes'
Daniel Borkmann says:

====================
Several fixes to get direct packet access in order from the verifier
side. Also a test suite fix to run cg_skb as unpriv, and an improvement
to make direct packet writes less error prone in the future.
====================

Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit 4d716e10e3
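For readers landing here without context (this note and the sketch below are not part of the diff): "direct packet access" means a BPF program dereferences ctx->data / ctx->data_end itself after proving bounds to the verifier, instead of calling bpf_skb_load_bytes(). A minimal tc/sched_cls-style sketch of the idiom whose verifier-side gating this series fixes; the SEC() definition is inlined so the snippet stands alone, and the program/section names are invented:

	/* Illustrative only -- not part of this series. */
	#include <linux/bpf.h>
	#include <linux/pkt_cls.h>
	#include <linux/if_ether.h>

	#define SEC(name) __attribute__((section(name), used))

	SEC("classifier")
	int parse_eth(struct __sk_buff *skb)
	{
		void *data     = (void *)(long)skb->data;
		void *data_end = (void *)(long)skb->data_end;
		struct ethhdr *eth = data;

		/* Without this bounds check the verifier rejects the load
		 * below; may_access_direct_pkt_data(), patched later in this
		 * diff, decides whether such loads/stores are allowed at all
		 * for the program type.
		 */
		if (data + sizeof(*eth) > data_end)
			return TC_ACT_OK;

		if (eth->h_proto == 0)	/* direct read of packet data */
			return TC_ACT_SHOT;
		return TC_ACT_OK;
	}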
@@ -99,7 +99,6 @@ BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
 const struct bpf_func_proto bpf_map_pop_elem_proto = {
 	.func		= bpf_map_pop_elem,
 	.gpl_only	= false,
-	.pkt_access	= true,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_CONST_MAP_PTR,
 	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
@@ -113,7 +112,6 @@ BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
 const struct bpf_func_proto bpf_map_peek_elem_proto = {
 	.func		= bpf_map_pop_elem,
 	.gpl_only	= false,
-	.pkt_access	= true,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_CONST_MAP_PTR,
 	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
@@ -122,6 +122,7 @@ static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
 	raw_spin_lock_irqsave(&qs->lock, flags);
 
 	if (queue_stack_map_is_empty(qs)) {
+		memset(value, 0, qs->map.value_size);
 		err = -ENOENT;
 		goto out;
 	}
@@ -151,6 +152,7 @@ static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
 	raw_spin_lock_irqsave(&qs->lock, flags);
 
 	if (queue_stack_map_is_empty(qs)) {
+		memset(value, 0, qs->map.value_size);
 		err = -ENOENT;
 		goto out;
 	}
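A note on why the two memset() additions above matter (commentary, not part of the diff): bpf_map_pop_elem()/bpf_map_peek_elem() declare their output buffer as ARG_PTR_TO_UNINIT_MAP_VALUE, so the verifier lets programs pass uninitialized stack memory and treats it as initialized after the call regardless of the return value. Zeroing the buffer on the empty/-ENOENT path keeps that assumption true instead of leaving stale stack bytes readable; the .pkt_access removal in the first two hunks is the companion fix that stops a packet pointer from being handed to these helpers as the write target. A hedged sketch of the calling pattern (the queue map "vals" and the value type are invented for illustration):

	/* Illustrative BPF program fragment, not from the diff. Assumes a
	 * BPF_MAP_TYPE_QUEUE map named "vals" with __u64 values is defined
	 * elsewhere and that the usual bpf_map_pop_elem() helper wrapper is
	 * available.
	 */
	__u64 v;	/* intentionally left uninitialized by the caller */

	if (bpf_map_pop_elem(&vals, &v)) {
		/* Queue empty: thanks to the memset() above, v now reads as
		 * 0 rather than whatever was on the stack before.
		 */
		return 0;
	}
	/* v holds a real element here */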
@@ -1387,21 +1387,24 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
 				       enum bpf_access_type t)
 {
 	switch (env->prog->type) {
+	/* Program types only with direct read access go here! */
 	case BPF_PROG_TYPE_LWT_IN:
 	case BPF_PROG_TYPE_LWT_OUT:
 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
 	case BPF_PROG_TYPE_SK_REUSEPORT:
-	/* dst_input() and dst_output() can't write for now */
+	case BPF_PROG_TYPE_FLOW_DISSECTOR:
+	case BPF_PROG_TYPE_CGROUP_SKB:
 		if (t == BPF_WRITE)
 			return false;
 		/* fallthrough */
+
+	/* Program types with direct read + write access go here! */
 	case BPF_PROG_TYPE_SCHED_CLS:
 	case BPF_PROG_TYPE_SCHED_ACT:
 	case BPF_PROG_TYPE_XDP:
 	case BPF_PROG_TYPE_LWT_XMIT:
 	case BPF_PROG_TYPE_SK_SKB:
 	case BPF_PROG_TYPE_SK_MSG:
-	case BPF_PROG_TYPE_FLOW_DISSECTOR:
 		if (meta)
 			return meta->pkt_access;
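Commentary on the switch above (not part of the diff): the two groups share the read path through the fallthrough; the read-only types, which now include flow dissector and cg_skb, bail out only for BPF_WRITE. Concretely, a cgroup_skb program like the hedged sketch below can load packet bytes directly (per the cg_skb_is_valid_access() change further down, only when the loader has CAP_SYS_ADMIN), while turning the load into a store, or passing the packet pointer to a writing helper, is refused. Program and section names are invented:

	/* Illustrative cgroup_skb program, not from the diff. SEC() as in
	 * the earlier sketch; returning 1 means "allow" for cgroup_skb.
	 */
	SEC("cgroup/skb")
	int cg_peek(struct __sk_buff *skb)
	{
		void *data     = (void *)(long)skb->data;
		void *data_end = (void *)(long)skb->data_end;
		unsigned char *first = data;

		if (data + 1 > data_end)
			return 1;
		if (*first == 0x45)	/* direct read: fine for cg_skb */
			return 1;
		/* *first = 0; */	/* direct write: rejected, cg_skb is read-only */
		return 1;
	}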
@@ -5706,7 +5709,11 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 	bool is_narrower_load;
 	u32 target_size;
 
-	if (ops->gen_prologue) {
+	if (ops->gen_prologue || env->seen_direct_write) {
+		if (!ops->gen_prologue) {
+			verbose(env, "bpf verifier is misconfigured\n");
+			return -EINVAL;
+		}
 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
 					env->prog);
 		if (cnt >= ARRAY_SIZE(insn_buf)) {
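The convert_ctx_accesses() change above turns a silent assumption into a hard failure: any program type for which the verifier has recorded seen_direct_write must provide a gen_prologue callback, even a do-nothing one, or loading fails with "bpf verifier is misconfigured". That is what the bpf_noop_prologue() additions later in this diff are for. A hypothetical wiring sketch (the foo_* names are invented; only the .gen_prologue line is the point):

	/* Hypothetical verifier_ops for some packet-writing program type. */
	const struct bpf_verifier_ops foo_verifier_ops = {
		.get_func_proto		= foo_func_proto,
		.is_valid_access	= foo_is_valid_access,
		.convert_ctx_access	= foo_convert_ctx_access,
		.gen_prologue		= bpf_noop_prologue,	/* or a real prologue */
	};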
@@ -5496,7 +5496,13 @@ static bool cg_skb_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct __sk_buff, data_meta):
 	case bpf_ctx_range(struct __sk_buff, flow_keys):
 		return false;
+	case bpf_ctx_range(struct __sk_buff, data):
+	case bpf_ctx_range(struct __sk_buff, data_end):
+		if (!capable(CAP_SYS_ADMIN))
+			return false;
+		break;
 	}
 
 	if (type == BPF_WRITE) {
 		switch (off) {
 		case bpf_ctx_range(struct __sk_buff, mark):
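The CAP_SYS_ADMIN gate above pairs with the test_verifier changes below: "invalid bpf_context access off=76 size=4" is the error an unprivileged loader now gets for a 4-byte read of __sk_buff.data in a cgroup_skb program (offset 76 in the UAPI struct at the time). A hedged sketch of the kind of instruction such a test exercises; the exact test body is not shown in this diff:

	/* Illustrative instruction sequence, not copied from the test.
	 * BPF_REG_1 holds the context (struct __sk_buff *) on entry.
	 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),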
@@ -5638,6 +5644,15 @@ static bool sock_filter_is_valid_access(int off, int size,
 					  prog->expected_attach_type);
 }
 
+static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write,
+			     const struct bpf_prog *prog)
+{
+	/* Neither direct read nor direct write requires any preliminary
+	 * action.
+	 */
+	return 0;
+}
+
 static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
 				const struct bpf_prog *prog, int drop_verdict)
 {
@@ -7204,6 +7219,7 @@ const struct bpf_verifier_ops xdp_verifier_ops = {
 	.get_func_proto		= xdp_func_proto,
 	.is_valid_access	= xdp_is_valid_access,
 	.convert_ctx_access	= xdp_convert_ctx_access,
+	.gen_prologue		= bpf_noop_prologue,
 };
 
 const struct bpf_prog_ops xdp_prog_ops = {
@@ -7302,6 +7318,7 @@ const struct bpf_verifier_ops sk_msg_verifier_ops = {
 	.get_func_proto		= sk_msg_func_proto,
 	.is_valid_access	= sk_msg_is_valid_access,
 	.convert_ctx_access	= sk_msg_convert_ctx_access,
+	.gen_prologue		= bpf_noop_prologue,
 };
 
 const struct bpf_prog_ops sk_msg_prog_ops = {
@@ -4891,6 +4891,8 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "invalid bpf_context access off=76 size=4",
 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
 	},
 	{
@@ -5146,6 +5148,7 @@ static struct bpf_test tests[] = {
 		.fixup_cgroup_storage = { 1 },
 		.result = REJECT,
 		.errstr = "get_local_storage() doesn't support non-zero flags",
+		.errstr_unpriv = "R2 leaks addr into helper function",
 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
 	},
 	{
@@ -5261,6 +5264,7 @@ static struct bpf_test tests[] = {
 		.fixup_percpu_cgroup_storage = { 1 },
 		.result = REJECT,
 		.errstr = "get_local_storage() doesn't support non-zero flags",
+		.errstr_unpriv = "R2 leaks addr into helper function",
 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
 	},
 	{
@@ -14050,6 +14054,13 @@ static void get_unpriv_disabled()
 	fclose(fd);
 }
 
+static bool test_as_unpriv(struct bpf_test *test)
+{
+	return !test->prog_type ||
+	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
+	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
+}
+
 static int do_test(bool unpriv, unsigned int from, unsigned int to)
 {
 	int i, passes = 0, errors = 0, skips = 0;
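The new test_as_unpriv() helper above is what lets the CGROUP_SKB cases earlier in the diff run a second, unprivileged pass (hence their new .result_unpriv / .errstr_unpriv fields). A hedged sketch of a table entry that now gets both passes; the description and instructions are invented, only the field layout follows the existing struct bpf_test style:

	{
		"cgroup/skb: trivial accept (illustrative only)",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = ACCEPT,	/* exercised now that cg_skb runs as unpriv */
		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
	},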
@@ -14060,10 +14071,10 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to)
 		/* Program types that are not supported by non-root we
 		 * skip right away.
 		 */
-		if (!test->prog_type && unpriv_disabled) {
+		if (test_as_unpriv(test) && unpriv_disabled) {
 			printf("#%d/u %s SKIP\n", i, test->descr);
 			skips++;
-		} else if (!test->prog_type) {
+		} else if (test_as_unpriv(test)) {
 			if (!unpriv)
 				set_admin(false);
 			printf("#%d/u %s ", i, test->descr);