bpf: Centralize permissions checks for all BPF map types

This allows for more centralized decisions later on, and generally
makes it very explicit which maps are privileged and which are not
(e.g., LRU_HASH and LRU_PERCPU_HASH, which are privileged HASH variants,
as opposed to unprivileged HASH and PERCPU_HASH; now this is explicit
and easy to verify).

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/bpf/20230613223533.3689589-4-andrii@kernel.org
Authored by Andrii Nakryiko on 2023-06-13 15:35:32 -07:00; committed by Daniel Borkmann
parent 22db41226b
commit 6c3eba1c5e
14 changed files with 52 additions and 44 deletions
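
A minimal userspace sketch of the HASH vs LRU_HASH split described above (not
part of the patch): it assumes libbpf >= 0.7 for bpf_map_create(), a kernel
carrying this commit with kernel.unprivileged_bpf_disabled=0, and a process
without CAP_BPF; the try_map() helper is invented for the demo. Expected
output: HASH is created, LRU_HASH fails with "Operation not permitted".

/* demo.c - build with: cc demo.c -lbpf */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>

static void try_map(enum bpf_map_type type, const char *name)
{
        /* 4-byte key/value, one entry: valid attributes for both HASH
         * variants, so any failure below is a permission decision,
         * not an -EINVAL sanity-check failure.
         */
        int fd = bpf_map_create(type, NULL, sizeof(int), sizeof(int), 1, NULL);

        if (fd >= 0) {
                printf("%s: created\n", name);
                close(fd);
        } else {
                printf("%s: %s\n", name, strerror(errno));
        }
}

int main(void)
{
        try_map(BPF_MAP_TYPE_HASH, "HASH");         /* unprivileged per the new switch */
        try_map(BPF_MAP_TYPE_LRU_HASH, "LRU_HASH"); /* requires bpf_capable() */
        return 0;
}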

kernel/bpf/bloom_filter.c

@@ -86,9 +86,6 @@ static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
         int numa_node = bpf_map_attr_numa_node(attr);
         struct bpf_bloom_filter *bloom;
 
-        if (!bpf_capable())
-                return ERR_PTR(-EPERM);
-
         if (attr->key_size != 0 || attr->value_size == 0 ||
             attr->max_entries == 0 ||
             attr->map_flags & ~BLOOM_CREATE_FLAG_MASK ||

kernel/bpf/bpf_local_storage.c

@@ -723,9 +723,6 @@ int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
             !attr->btf_key_type_id || !attr->btf_value_type_id)
                 return -EINVAL;
 
-        if (!bpf_capable())
-                return -EPERM;
-
         if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
                 return -E2BIG;

kernel/bpf/bpf_struct_ops.c

@@ -655,9 +655,6 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
         const struct btf_type *t, *vt;
         struct bpf_map *map;
 
-        if (!bpf_capable())
-                return ERR_PTR(-EPERM);
-
         st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
         if (!st_ops)
                 return ERR_PTR(-ENOTSUPP);

kernel/bpf/cpumap.c

@@ -28,7 +28,6 @@
 #include <linux/sched.h>
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
-#include <linux/capability.h>
 #include <trace/events/xdp.h>
 #include <linux/btf_ids.h>
@@ -89,9 +88,6 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
         u32 value_size = attr->value_size;
         struct bpf_cpu_map *cmap;
 
-        if (!bpf_capable())
-                return ERR_PTR(-EPERM);
-
         /* check sanity of attributes */
         if (attr->max_entries == 0 || attr->key_size != 4 ||
             (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&

kernel/bpf/devmap.c

@@ -160,9 +160,6 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
         struct bpf_dtab *dtab;
         int err;
 
-        if (!capable(CAP_NET_ADMIN))
-                return ERR_PTR(-EPERM);
-
         dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
         if (!dtab)
                 return ERR_PTR(-ENOMEM);

kernel/bpf/hashtab.c

@@ -422,12 +422,6 @@ static int htab_map_alloc_check(union bpf_attr *attr)
         BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
                      offsetof(struct htab_elem, hash_node.pprev));
 
-        if (lru && !bpf_capable())
-                /* LRU implementation is much complicated than other
-                 * maps. Hence, limit to CAP_BPF.
-                 */
-                return -EPERM;
-
         if (zero_seed && !capable(CAP_SYS_ADMIN))
                 /* Guard against local DoS, and discourage production use. */
                 return -EPERM;

kernel/bpf/lpm_trie.c

@@ -544,9 +544,6 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 {
         struct lpm_trie *trie;
 
-        if (!bpf_capable())
-                return ERR_PTR(-EPERM);
-
         /* check sanity of attributes */
         if (attr->max_entries == 0 ||
             !(attr->map_flags & BPF_F_NO_PREALLOC) ||

kernel/bpf/queue_stack_maps.c

@@ -7,7 +7,6 @@
 #include <linux/bpf.h>
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <linux/capability.h>
 #include <linux/btf_ids.h>
 #include "percpu_freelist.h"
@@ -46,9 +45,6 @@ static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
 /* Called from syscall */
 static int queue_stack_map_alloc_check(union bpf_attr *attr)
 {
-        if (!bpf_capable())
-                return -EPERM;
-
         /* check sanity of attributes */
         if (attr->max_entries == 0 || attr->key_size != 0 ||
             attr->value_size == 0 ||

kernel/bpf/reuseport_array.c

@@ -151,9 +151,6 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
         int numa_node = bpf_map_attr_numa_node(attr);
         struct reuseport_array *array;
 
-        if (!bpf_capable())
-                return ERR_PTR(-EPERM);
-
         /* allocate all map elements and zero-initialize them */
         array = bpf_map_area_alloc(struct_size(array, ptrs, attr->max_entries), numa_node);
         if (!array)

kernel/bpf/stackmap.c

@@ -74,9 +74,6 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
         u64 cost, n_buckets;
         int err;
 
-        if (!bpf_capable())
-                return ERR_PTR(-EPERM);
-
         if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
                 return ERR_PTR(-EINVAL);

kernel/bpf/syscall.c

@@ -1156,6 +1156,53 @@ static int map_create(union bpf_attr *attr)
         if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
                 return -EPERM;
 
+        /* check privileged map type permissions */
+        switch (map_type) {
+        case BPF_MAP_TYPE_ARRAY:
+        case BPF_MAP_TYPE_PERCPU_ARRAY:
+        case BPF_MAP_TYPE_PROG_ARRAY:
+        case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+        case BPF_MAP_TYPE_CGROUP_ARRAY:
+        case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+        case BPF_MAP_TYPE_HASH:
+        case BPF_MAP_TYPE_PERCPU_HASH:
+        case BPF_MAP_TYPE_HASH_OF_MAPS:
+        case BPF_MAP_TYPE_RINGBUF:
+        case BPF_MAP_TYPE_USER_RINGBUF:
+        case BPF_MAP_TYPE_CGROUP_STORAGE:
+        case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
+                /* unprivileged */
+                break;
+        case BPF_MAP_TYPE_SK_STORAGE:
+        case BPF_MAP_TYPE_INODE_STORAGE:
+        case BPF_MAP_TYPE_TASK_STORAGE:
+        case BPF_MAP_TYPE_CGRP_STORAGE:
+        case BPF_MAP_TYPE_BLOOM_FILTER:
+        case BPF_MAP_TYPE_LPM_TRIE:
+        case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
+        case BPF_MAP_TYPE_STACK_TRACE:
+        case BPF_MAP_TYPE_QUEUE:
+        case BPF_MAP_TYPE_STACK:
+        case BPF_MAP_TYPE_LRU_HASH:
+        case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+        case BPF_MAP_TYPE_STRUCT_OPS:
+        case BPF_MAP_TYPE_CPUMAP:
+                if (!bpf_capable())
+                        return -EPERM;
+                break;
+        case BPF_MAP_TYPE_SOCKMAP:
+        case BPF_MAP_TYPE_SOCKHASH:
+        case BPF_MAP_TYPE_DEVMAP:
+        case BPF_MAP_TYPE_DEVMAP_HASH:
+        case BPF_MAP_TYPE_XSKMAP:
+                if (!capable(CAP_NET_ADMIN))
+                        return -EPERM;
+                break;
+        default:
+                WARN(1, "unsupported map type %d", map_type);
+                return -EPERM;
+        }
+
         map = ops->map_alloc(attr);
         if (IS_ERR(map))
                 return PTR_ERR(map);
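
With this hunk, map_create() becomes the single permission gate: the
capability check now runs once, keyed off attr->map_type, before
ops->map_alloc() is ever called, which is what lets each per-map allocator
drop its own check. The default branch fails closed: a newly added map type
that is not explicitly classified trips the WARN and is rejected with -EPERM
instead of silently becoming unprivileged.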

net/core/sock_map.c

@@ -32,8 +32,6 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 {
         struct bpf_stab *stab;
 
-        if (!capable(CAP_NET_ADMIN))
-                return ERR_PTR(-EPERM);
         if (attr->max_entries == 0 ||
             attr->key_size != 4 ||
             (attr->value_size != sizeof(u32) &&
@@ -1085,8 +1083,6 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
         struct bpf_shtab *htab;
         int i, err;
 
-        if (!capable(CAP_NET_ADMIN))
-                return ERR_PTR(-EPERM);
         if (attr->max_entries == 0 ||
             attr->key_size == 0 ||
             (attr->value_size != sizeof(u32) &&

net/xdp/xskmap.c

@@ -5,7 +5,6 @@
 #include <linux/bpf.h>
 #include <linux/filter.h>
-#include <linux/capability.h>
 #include <net/xdp_sock.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
@@ -68,9 +67,6 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
         int numa_node;
         u64 size;
 
-        if (!capable(CAP_NET_ADMIN))
-                return ERR_PTR(-EPERM);
-
         if (attr->max_entries == 0 || attr->key_size != 4 ||
             attr->value_size != 4 ||
             attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))

tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c

@@ -171,7 +171,11 @@ static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *s
                                 prog_insns, prog_insn_cnt, &load_opts),
                   -EPERM, "prog_load_fails");
 
-        for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_BLOOM_FILTER; i++)
+        /* some map types require particular correct parameters which could be
+         * sanity-checked before enforcing -EPERM, so only validate that
+         * the simple ARRAY and HASH maps are failing with -EPERM
+         */
+        for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_ARRAY; i++)
                 ASSERT_EQ(bpf_map_create(i, NULL, sizeof(int), sizeof(int), 1, NULL),
                           -EPERM, "map_create_fails");
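
The new loop bounds rely on the UAPI enum order in include/uapi/linux/bpf.h,
where BPF_MAP_TYPE_HASH (1) and BPF_MAP_TYPE_ARRAY (2) are adjacent, so the
loop now covers exactly the two attribute-wise simple map types that remain
unprivileged-eligible.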