bpf: Centralize permissions checks for all BPF map types
This allows for more centralized decisions later on, and generally makes it very explicit which maps are privileged and which are not (e.g., LRU_HASH and LRU_PERCPU_HASH are privileged variants of HASH, as opposed to unprivileged HASH and PERCPU_HASH; now this is explicit and easy to verify).

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/bpf/20230613223533.3689589-4-andrii@kernel.org
commit 6c3eba1c5e (parent 22db41226b)
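For context, here is an illustrative userspace probe (not part of this commit) of the resulting policy, using libbpf's bpf_map_create(). It assumes libbpf >= 1.0 and kernel.unprivileged_bpf_disabled=0, and should be run without CAP_BPF, CAP_SYS_ADMIN or CAP_NET_ADMIN; probe() is a made-up helper name.

/* Illustrative only, not part of this commit: probe which map types an
 * unprivileged process may create after this change.
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>

static void probe(enum bpf_map_type type, const char *name)
{
	/* same minimal attributes the selftest at the end of this diff uses */
	int fd = bpf_map_create(type, NULL, sizeof(int), sizeof(int), 1, NULL);

	if (fd >= 0) {
		printf("%-12s: allowed for unprivileged users\n", name);
		close(fd);
	} else {
		printf("%-12s: %s\n", name,
		       fd == -EPERM ? "requires privileges" : "rejected for other reasons");
	}
}

int main(void)
{
	probe(BPF_MAP_TYPE_ARRAY, "ARRAY");       /* unprivileged after this patch */
	probe(BPF_MAP_TYPE_HASH, "HASH");         /* unprivileged after this patch */
	probe(BPF_MAP_TYPE_LRU_HASH, "LRU_HASH"); /* needs bpf_capable() */
	probe(BPF_MAP_TYPE_DEVMAP, "DEVMAP");     /* needs CAP_NET_ADMIN */
	return 0;
}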
@@ -86,9 +86,6 @@ static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
 	int numa_node = bpf_map_attr_numa_node(attr);
 	struct bpf_bloom_filter *bloom;
 
-	if (!bpf_capable())
-		return ERR_PTR(-EPERM);
-
 	if (attr->key_size != 0 || attr->value_size == 0 ||
 	    attr->max_entries == 0 ||
 	    attr->map_flags & ~BLOOM_CREATE_FLAG_MASK ||
@@ -723,9 +723,6 @@ int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
 	    !attr->btf_key_type_id || !attr->btf_value_type_id)
 		return -EINVAL;
 
-	if (!bpf_capable())
-		return -EPERM;
-
 	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
 		return -E2BIG;
 
@@ -655,9 +655,6 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
 	const struct btf_type *t, *vt;
 	struct bpf_map *map;
 
-	if (!bpf_capable())
-		return ERR_PTR(-EPERM);
-
 	st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
 	if (!st_ops)
 		return ERR_PTR(-ENOTSUPP);
@@ -28,7 +28,6 @@
 #include <linux/sched.h>
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
-#include <linux/capability.h>
 #include <trace/events/xdp.h>
 #include <linux/btf_ids.h>
 
@@ -89,9 +88,6 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 	u32 value_size = attr->value_size;
 	struct bpf_cpu_map *cmap;
 
-	if (!bpf_capable())
-		return ERR_PTR(-EPERM);
-
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
 	    (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
@@ -160,9 +160,6 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	struct bpf_dtab *dtab;
 	int err;
 
-	if (!capable(CAP_NET_ADMIN))
-		return ERR_PTR(-EPERM);
-
 	dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
 	if (!dtab)
 		return ERR_PTR(-ENOMEM);
@@ -422,12 +422,6 @@ static int htab_map_alloc_check(union bpf_attr *attr)
 	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
 		     offsetof(struct htab_elem, hash_node.pprev));
 
-	if (lru && !bpf_capable())
-		/* LRU implementation is much complicated than other
-		 * maps.  Hence, limit to CAP_BPF.
-		 */
-		return -EPERM;
-
 	if (zero_seed && !capable(CAP_SYS_ADMIN))
 		/* Guard against local DoS, and discourage production use. */
 		return -EPERM;
@@ -544,9 +544,6 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 {
 	struct lpm_trie *trie;
 
-	if (!bpf_capable())
-		return ERR_PTR(-EPERM);
-
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 ||
 	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
@@ -7,7 +7,6 @@
 #include <linux/bpf.h>
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <linux/capability.h>
 #include <linux/btf_ids.h>
 #include "percpu_freelist.h"
 
@@ -46,9 +45,6 @@ static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
 /* Called from syscall */
 static int queue_stack_map_alloc_check(union bpf_attr *attr)
 {
-	if (!bpf_capable())
-		return -EPERM;
-
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 0 ||
 	    attr->value_size == 0 ||
@@ -151,9 +151,6 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
 	int numa_node = bpf_map_attr_numa_node(attr);
 	struct reuseport_array *array;
 
-	if (!bpf_capable())
-		return ERR_PTR(-EPERM);
-
 	/* allocate all map elements and zero-initialize them */
 	array = bpf_map_area_alloc(struct_size(array, ptrs, attr->max_entries), numa_node);
 	if (!array)
@@ -74,9 +74,6 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	u64 cost, n_buckets;
 	int err;
 
-	if (!bpf_capable())
-		return ERR_PTR(-EPERM);
-
 	if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
 		return ERR_PTR(-EINVAL);
 
@@ -1156,6 +1156,53 @@ static int map_create(union bpf_attr *attr)
 	if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
 		return -EPERM;
 
+	/* check privileged map type permissions */
+	switch (map_type) {
+	case BPF_MAP_TYPE_ARRAY:
+	case BPF_MAP_TYPE_PERCPU_ARRAY:
+	case BPF_MAP_TYPE_PROG_ARRAY:
+	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+	case BPF_MAP_TYPE_CGROUP_ARRAY:
+	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+	case BPF_MAP_TYPE_HASH:
+	case BPF_MAP_TYPE_PERCPU_HASH:
+	case BPF_MAP_TYPE_HASH_OF_MAPS:
+	case BPF_MAP_TYPE_RINGBUF:
+	case BPF_MAP_TYPE_USER_RINGBUF:
+	case BPF_MAP_TYPE_CGROUP_STORAGE:
+	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
+		/* unprivileged */
+		break;
+	case BPF_MAP_TYPE_SK_STORAGE:
+	case BPF_MAP_TYPE_INODE_STORAGE:
+	case BPF_MAP_TYPE_TASK_STORAGE:
+	case BPF_MAP_TYPE_CGRP_STORAGE:
+	case BPF_MAP_TYPE_BLOOM_FILTER:
+	case BPF_MAP_TYPE_LPM_TRIE:
+	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
+	case BPF_MAP_TYPE_STACK_TRACE:
+	case BPF_MAP_TYPE_QUEUE:
+	case BPF_MAP_TYPE_STACK:
+	case BPF_MAP_TYPE_LRU_HASH:
+	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+	case BPF_MAP_TYPE_STRUCT_OPS:
+	case BPF_MAP_TYPE_CPUMAP:
+		if (!bpf_capable())
+			return -EPERM;
+		break;
+	case BPF_MAP_TYPE_SOCKMAP:
+	case BPF_MAP_TYPE_SOCKHASH:
+	case BPF_MAP_TYPE_DEVMAP:
+	case BPF_MAP_TYPE_DEVMAP_HASH:
+	case BPF_MAP_TYPE_XSKMAP:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		break;
+	default:
+		WARN(1, "unsupported map type %d", map_type);
+		return -EPERM;
+	}
+
 	map = ops->map_alloc(attr);
 	if (IS_ERR(map))
 		return PTR_ERR(map);
@@ -32,8 +32,6 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_stab *stab;
 
-	if (!capable(CAP_NET_ADMIN))
-		return ERR_PTR(-EPERM);
 	if (attr->max_entries == 0 ||
 	    attr->key_size != 4 ||
 	    (attr->value_size != sizeof(u32) &&
@@ -1085,8 +1083,6 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
 	struct bpf_shtab *htab;
 	int i, err;
 
-	if (!capable(CAP_NET_ADMIN))
-		return ERR_PTR(-EPERM);
 	if (attr->max_entries == 0 ||
 	    attr->key_size == 0 ||
 	    (attr->value_size != sizeof(u32) &&
@@ -5,7 +5,6 @@
 
 #include <linux/bpf.h>
 #include <linux/filter.h>
-#include <linux/capability.h>
 #include <net/xdp_sock.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
@@ -68,9 +67,6 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 	int numa_node;
 	u64 size;
 
-	if (!capable(CAP_NET_ADMIN))
-		return ERR_PTR(-EPERM);
-
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
 	    attr->value_size != 4 ||
 	    attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
@@ -171,7 +171,11 @@ static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *s
 					prog_insns, prog_insn_cnt, &load_opts),
 		  -EPERM, "prog_load_fails");
 
-	for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_BLOOM_FILTER; i++)
+	/* some map types require particular correct parameters which could be
+	 * sanity-checked before enforcing -EPERM, so only validate that
+	 * the simple ARRAY and HASH maps are failing with -EPERM
+	 */
+	for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_ARRAY; i++)
 		ASSERT_EQ(bpf_map_create(i, NULL, sizeof(int), sizeof(int), 1, NULL),
 			  -EPERM, "map_create_fails");
 
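The narrowed selftest loop works because HASH and ARRAY are the first two real entries of the UAPI map type enum, so i <= BPF_MAP_TYPE_ARRAY covers exactly those two simple types. For context, an abridged excerpt of the relevant values from include/uapi/linux/bpf.h (not part of this diff):

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,      /* 0 */
	BPF_MAP_TYPE_HASH,        /* 1 */
	BPF_MAP_TYPE_ARRAY,       /* 2 */
	BPF_MAP_TYPE_PROG_ARRAY,  /* 3 */
	/* ... */
};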