linux/kernel/bpf/queue_stack_maps.c

// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"
#define QUEUE_STACK_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
struct bpf_queue_stack {
struct bpf_map map;
raw_spinlock_t lock;
u32 head, tail;
u32 size; /* max_entries + 1 */
	char elements[] __aligned(8);
};
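
/*
 * Layout note: both map types share one contiguous ring buffer of 'size'
 * fixed-size slots.  'head' indexes the next slot to be written, 'tail'
 * the oldest element.  One slot is kept unused so that head == tail can
 * only mean "empty", while "full" is head + 1 == tail (modulo size).
 * E.g. with max_entries = 3 the buffer has 4 slots and holds at most 3
 * elements at a time.
 */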
static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}
/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	if (!bpf_capable())
		return -EPERM;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
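
/*
 * Sketch (not part of the kernel sources): the checks above imply that
 * user space creates a queue or stack map with key_size == 0, e.g. via
 * libbpf:
 *
 *	int fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, "my_queue",
 *				0, sizeof(__u32), 16, NULL);
 *
 * ("my_queue", sizeof(__u32) and 16 are made-up example values for
 * map_name, value_size and max_entries.)
 */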
static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_queue_stack *qs;
	u64 size, queue_size;

	size = (u64) attr->max_entries + 1;
	queue_size = sizeof(*qs) + size * attr->value_size;

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&qs->map, attr);

	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}
/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	bpf_map_area_free(qs);
}
static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	/* FIFO: the oldest element sits at the tail */
	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}
static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	/* LIFO: the newest element sits just below the head.  When head
	 * is 0, the u32 subtraction wraps around; the index >= size check
	 * catches that and pulls the index back to the last slot.
	 */
	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}
/* Called from syscall or from eBPF program */
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}
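
/*
 * Sketch (not part of the kernel sources): from a BPF program the ops
 * above are reached through the bpf_map_push_elem(), bpf_map_pop_elem()
 * and bpf_map_peek_elem() helpers, e.g.:
 *
 *	__u32 val = 42;
 *
 *	bpf_map_push_elem(&my_queue, &val, BPF_ANY);
 *	bpf_map_pop_elem(&my_queue, &val);
 *
 * ('my_queue' is a made-up map; BPF_ANY (0) is the plain push that
 * fails with -E2BIG when the map is full.)
 */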
/* Called from syscall or from eBPF program */
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in case the
	 * map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	raw_spin_lock_irqsave(&qs->lock, irq_flags);

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}
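
/*
 * Usage note: pushing with BPF_EXIST turns the map into a lossy ring
 * buffer that silently drops the oldest element when full, whereas
 * pushing with flags == 0 (BPF_ANY) fails with -E2BIG once max_entries
 * elements are queued.
 */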
/* Called from syscall or from eBPF program */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}

BTF_ID_LIST_SINGLE(queue_map_btf_ids, struct, bpf_queue_stack)
const struct bpf_map_ops queue_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_id = &queue_map_btf_ids[0],
};

const struct bpf_map_ops stack_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_id = &queue_map_btf_ids[0],
};
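
/*
 * Sketch (not part of the kernel sources): user space drives the ops
 * above through the generic map syscalls with a NULL key, e.g. via the
 * libbpf wrappers:
 *
 *	__u32 val;
 *
 *	bpf_map_update_elem(fd, NULL, &val, BPF_ANY);	  push
 *	bpf_map_lookup_elem(fd, NULL, &val);		  peek
 *	bpf_map_lookup_and_delete_elem(fd, NULL, &val);	  pop
 *
 * ('fd' is a made-up file descriptor of a queue or stack map.)
 */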