mirror of
https://github.com/torvalds/linux.git
synced 2024-11-11 14:42:24 +00:00
libbpf: only add BPF_F_MMAPABLE flag for data maps with global vars
Teach libbpf to not add BPF_F_MMAPABLE flag unnecessarily for ARRAY maps that are backing data sections, if such data sections don't expose any variables to user-space. Exposed variables are those that have STB_GLOBAL or STB_WEAK ELF binding and correspond to BTF VAR's BTF_VAR_GLOBAL_ALLOCATED linkage. The overall idea is that if some data section doesn't have any variable that is exposed through BPF skeleton, then there is no reason to make such BPF array mmapable. Making BPF array mmapable is not a free no-op action, because BPF verifier doesn't allow users to put special objects (such as BPF spin locks, RB tree nodes, linked list nodes, kptrs, etc; anything that has a sensitive internal state that should not be modified arbitrarily from user space) into mmapable arrays, as there is no way to prevent user space from corrupting such sensitive state through direct memory access through memory-mapped region. By making sure that libbpf doesn't add BPF_F_MMAPABLE flag to BPF array maps corresponding to data sections that only have static variables (which are not supposed to be visible to user space according to libbpf and BPF skeleton rules), users now can have spinlocks, kptrs, etc in either default .bss/.data sections or custom .data.* sections (assuming there are no global variables in such sections). The only possible hiccup with this approach is the need to use global variables during BPF static linking, even if it's not intended to be shared with user space through BPF skeleton. To allow such scenarios, extend libbpf's STV_HIDDEN ELF visibility attribute handling to variables. Libbpf is already treating global hidden BPF subprograms as static subprograms and adjusts BTF accordingly to make BPF verifier verify such subprograms as static subprograms with preserving entire BPF verifier state between subprog calls. This patch teaches libbpf to treat global hidden variables as static ones and adjust BTF information accordingly as well. 
This allows sharing variables between multiple object files during static linking, but still keeps them internal to the BPF program and not exposed through BPF skeleton. Note that if the user has some advanced scenario where they absolutely need BPF_F_MMAPABLE flag on .data/.bss/.rodata BPF array map despite only having static variables, they can still achieve this by forcing it through the explicit bpf_map__set_map_flags() API. Acked-by: Stanislav Fomichev <sdf@google.com> Signed-off-by: Andrii Nakryiko <andrii@kernel.org> Acked-by: Dave Marchevsky <davemarchevsky@fb.com> Link: https://lore.kernel.org/r/20221019002816.359650-3-andrii@kernel.org Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent
f33f742d56
commit
4fcac46c7e
@ -1577,7 +1577,38 @@ static char *internal_map_name(struct bpf_object *obj, const char *real_name)
|
||||
}
|
||||
|
||||
static int
|
||||
bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map);
|
||||
map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);
|
||||
|
||||
/* Internal BPF map is mmap()'able only if at least one of corresponding
 * DATASEC's VARs are to be exposed through BPF skeleton. I.e., it's a GLOBAL
 * variable and it's not marked as __hidden (which turns it into, effectively,
 * a STATIC variable).
 */
static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
{
	const struct btf_type *t, *vt;
	struct btf_var_secinfo *vsi;
	int i, n;

	/* No BTF value type recorded means this map has no DATASEC backing
	 * it, so there is nothing to expose through a skeleton.
	 */
	if (!map->btf_value_type_id)
		return false;

	t = btf__type_by_id(obj->btf, map->btf_value_type_id);
	/* only DATASEC-backed global-data maps (.data/.bss/.rodata/etc) qualify */
	if (!btf_is_datasec(t))
		return false;

	/* Scan every VAR in the DATASEC: a single non-static variable is
	 * enough to require mmap()-ability, because that variable has to be
	 * reachable from user space through the skeleton's memory-mapped
	 * view. Note that __hidden (STV_HIDDEN/STV_INTERNAL) globals were
	 * already downgraded to BTF_VAR_STATIC earlier (see
	 * btf_fixup_datasec()), so they correctly don't count here.
	 */
	vsi = btf_var_secinfos(t);
	for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) {
		vt = btf__type_by_id(obj->btf, vsi->type);
		if (!btf_is_var(vt))
			continue;

		if (btf_var(vt)->linkage != BTF_VAR_STATIC)
			return true;
	}

	/* only static VARs found: no need to make the array mmap()'able */
	return false;
}
|
||||
|
||||
static int
|
||||
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
|
||||
@ -1609,7 +1640,12 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
|
||||
def->max_entries = 1;
|
||||
def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
|
||||
? BPF_F_RDONLY_PROG : 0;
|
||||
def->map_flags |= BPF_F_MMAPABLE;
|
||||
|
||||
/* failures are fine because of maps like .rodata.str1.1 */
|
||||
(void) map_fill_btf_type_info(obj, map);
|
||||
|
||||
if (map_is_mmapable(obj, map))
|
||||
def->map_flags |= BPF_F_MMAPABLE;
|
||||
|
||||
pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
|
||||
map->name, map->sec_idx, map->sec_offset, def->map_flags);
|
||||
@ -1626,9 +1662,6 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
|
||||
return err;
|
||||
}
|
||||
|
||||
/* failures are fine because of maps like .rodata.str1.1 */
|
||||
(void) bpf_map_find_btf_info(obj, map);
|
||||
|
||||
if (data)
|
||||
memcpy(map->mmaped, data, data_sz);
|
||||
|
||||
@ -2540,7 +2573,7 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
|
||||
fill_map_from_def(map->inner_map, &inner_def);
|
||||
}
|
||||
|
||||
err = bpf_map_find_btf_info(obj, map);
|
||||
err = map_fill_btf_type_info(obj, map);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -2848,6 +2881,7 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
|
||||
__u32 size = 0, i, vars = btf_vlen(t);
|
||||
const char *sec_name = btf__name_by_offset(btf, t->name_off);
|
||||
struct btf_var_secinfo *vsi;
|
||||
bool fixup_offsets = false;
|
||||
int err;
|
||||
|
||||
if (!sec_name) {
|
||||
@ -2855,21 +2889,34 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
/* extern-backing datasecs (.ksyms, .kconfig) have their size and
|
||||
* variable offsets set at the previous step, so we skip any fixups
|
||||
* for such sections
|
||||
/* Extern-backing datasecs (.ksyms, .kconfig) have their size and
|
||||
* variable offsets set at the previous step. Further, not every
|
||||
* extern BTF VAR has corresponding ELF symbol preserved, so we skip
|
||||
* all fixups altogether for such sections and go straight to sorting
|
||||
* VARs within their DATASEC.
|
||||
*/
|
||||
if (t->size)
|
||||
if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0)
|
||||
goto sort_vars;
|
||||
|
||||
err = find_elf_sec_sz(obj, sec_name, &size);
|
||||
if (err || !size) {
|
||||
pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n",
|
||||
sec_name, size, err);
|
||||
return -ENOENT;
|
||||
}
|
||||
/* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to
|
||||
* fix this up. But BPF static linker already fixes this up and fills
|
||||
* all the sizes and offsets during static linking. So this step has
|
||||
* to be optional. But the STV_HIDDEN handling is non-optional for any
|
||||
* non-extern DATASEC, so the variable fixup loop below handles both
|
||||
* functions at the same time, paying the cost of BTF VAR <-> ELF
|
||||
* symbol matching just once.
|
||||
*/
|
||||
if (t->size == 0) {
|
||||
err = find_elf_sec_sz(obj, sec_name, &size);
|
||||
if (err || !size) {
|
||||
pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n",
|
||||
sec_name, size, err);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
t->size = size;
|
||||
t->size = size;
|
||||
fixup_offsets = true;
|
||||
}
|
||||
|
||||
for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
|
||||
const struct btf_type *t_var;
|
||||
@ -2901,7 +2948,19 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
vsi->offset = sym->st_value;
|
||||
if (fixup_offsets)
|
||||
vsi->offset = sym->st_value;
|
||||
|
||||
/* if variable is a global/weak symbol, but has restricted
|
||||
* (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR
|
||||
* as static. This follows similar logic for functions (BPF
|
||||
* subprogs) and influences libbpf's further decisions about
|
||||
* whether to make global data BPF array maps as
|
||||
* BPF_F_MMAPABLE.
|
||||
*/
|
||||
if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
|
||||
|| ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)
|
||||
var->linkage = BTF_VAR_STATIC;
|
||||
}
|
||||
|
||||
sort_vars:
|
||||
@ -4223,7 +4282,7 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Dat
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
|
||||
static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map)
|
||||
{
|
||||
int id;
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user