bpf: fix refcnt overflow
On a system with more than 32 GB of physical memory and an unlimited
RLIMIT_MEMLOCK, a malicious application can overflow the 32-bit bpf
program refcnt. It's also possible to overflow the map refcnt on a
1 TB system. Impose a 32k hard limit, which means that the same bpf
program or map cannot be shared by more than 32k processes.
Fixes: 1be7f75d16 ("bpf: enable non-root eBPF programs")
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 92117d8443 (parent bd34cf66cc)
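The core of the fix is a bounded take-a-reference operation: bump the count with atomic_inc_return(), and if the new value exceeds BPF_MAX_REFCNT, undo the increment and return -EBUSY instead of letting the 32-bit counter wrap. Below is a minimal userspace model of that pattern, for illustration only: the BPF_MAX_REFCNT value and the inc/check/back-off logic mirror the patch, while the object type and helper names are made up.

/* Minimal userspace model of the bounded-refcount pattern from this patch.
 * Only BPF_MAX_REFCNT and the inc/check/back-off logic mirror the kernel
 * change; "struct object" and the helper names are illustrative.
 */
#include <errno.h>
#include <stdatomic.h>

#define BPF_MAX_REFCNT 32768

struct object {
        atomic_int refcnt;
};

/* Take a reference, or fail with -EBUSY once the hard limit is reached. */
static int object_get(struct object *obj)
{
        /* atomic_fetch_add() returns the old value; +1 gives the new count,
         * matching the kernel's atomic_inc_return() semantics.
         */
        if (atomic_fetch_add(&obj->refcnt, 1) + 1 > BPF_MAX_REFCNT) {
                atomic_fetch_sub(&obj->refcnt, 1);      /* undo the increment */
                return -EBUSY;
        }
        return 0;
}

static void object_put(struct object *obj)
{
        atomic_fetch_sub(&obj->refcnt, 1);
        /* a real implementation would free the object when the count drops to 0 */
}

Because 32768 is nowhere near the point where a 32-bit counter wraps, a brief overshoot by concurrent callers before the back-off decrement is harmless, which is presumably why a plain increment-then-check is enough and no compare-and-swap loop is needed.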
@@ -171,12 +171,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
 void bpf_register_map_type(struct bpf_map_type_list *tl);
 
 struct bpf_prog *bpf_prog_get(u32 ufd);
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
 void bpf_prog_put_rcu(struct bpf_prog *prog);
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
-void bpf_map_inc(struct bpf_map *map, bool uref);
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
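This header hunk is the visible part of the new contract: bpf_map_inc() changes from returning void to returning the map, and the new bpf_prog_inc() behaves the same way, so both can now fail and every caller must capture and check the result. The kernel encodes the failure in the returned pointer using the ERR_PTR()/IS_ERR()/PTR_ERR() helpers from include/linux/err.h; the simplified, self-contained userspace approximation below is only meant to make the later hunks easier to read.

/* Simplified userspace approximation of the kernel's error-pointer helpers
 * (ERR_PTR/IS_ERR/PTR_ERR).  The kernel versions live in include/linux/err.h;
 * this model only illustrates how a -EBUSY can travel inside a pointer.
 */
#include <stdbool.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
        return (void *)error;           /* small negative errno stored in the pointer */
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline bool IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

A caller of the new bpf_map_inc() therefore either gets the same map pointer back or a value that IS_ERR() recognizes, which is exactly what the hunks that follow check for.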
@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
 {
         switch (type) {
         case BPF_TYPE_PROG:
-                atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
+                raw = bpf_prog_inc(raw);
                 break;
         case BPF_TYPE_MAP:
-                bpf_map_inc(raw, true);
+                raw = bpf_map_inc(raw, true);
                 break;
         default:
                 WARN_ON_ONCE(1);
@@ -297,7 +297,8 @@ static void *bpf_obj_do_get(const struct filename *pathname,
                 goto out;
 
         raw = bpf_any_get(inode->i_private, *type);
-        touch_atime(&path);
+        if (!IS_ERR(raw))
+                touch_atime(&path);
 
         path_put(&path);
         return raw;
@@ -218,11 +218,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
         return f.file->private_data;
 }
 
-void bpf_map_inc(struct bpf_map *map, bool uref)
+/* prog's and map's refcnt limit */
+#define BPF_MAX_REFCNT 32768
+
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
 {
-        atomic_inc(&map->refcnt);
+        if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
+                atomic_dec(&map->refcnt);
+                return ERR_PTR(-EBUSY);
+        }
         if (uref)
                 atomic_inc(&map->usercnt);
+        return map;
 }
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
@@ -234,7 +241,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
         if (IS_ERR(map))
                 return map;
 
-        bpf_map_inc(map, true);
+        map = bpf_map_inc(map, true);
         fdput(f);
 
         return map;
@@ -658,6 +665,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
         return f.file->private_data;
 }
 
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+{
+        if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
+                atomic_dec(&prog->aux->refcnt);
+                return ERR_PTR(-EBUSY);
+        }
+        return prog;
+}
+
 /* called by sockets/tracing/seccomp before attaching program to an event
  * pairs with bpf_prog_put()
  */
@@ -670,7 +686,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
         if (IS_ERR(prog))
                 return prog;
 
-        atomic_inc(&prog->aux->refcnt);
+        prog = bpf_prog_inc(prog);
         fdput(f);
 
         return prog;
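From userspace the new limit surfaces as EBUSY: once a program or map already has 32768 live references, any further attempt to take one, for example another BPF_OBJ_GET of a pinned object while the earlier references are still held, now fails instead of wrapping the counter. A rough sketch of what such a caller might observe follows; the pin path is hypothetical and error handling is trimmed.

/* Rough userspace sketch: opening a pinned object once the refcnt cap is hit
 * now fails with EBUSY.  The pin path below is hypothetical.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int bpf_obj_get(const char *pathname)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.pathname = (unsigned long)pathname;
        return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
}

int main(void)
{
        int fd = bpf_obj_get("/sys/fs/bpf/example_map");   /* hypothetical pin */

        if (fd < 0 && errno == EBUSY)
                fprintf(stderr, "refcnt limit reached: %s\n", strerror(errno));
        else if (fd >= 0)
                close(fd);
        return 0;
}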
@@ -2049,15 +2049,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
                                 return -E2BIG;
                         }
 
-                        /* remember this map */
-                        env->used_maps[env->used_map_cnt++] = map;
-
                         /* hold the map. If the program is rejected by verifier,
                          * the map will be released by release_maps() or it
                          * will be used by the valid program until it's unloaded
                          * and all maps are released in free_bpf_prog_info()
                          */
-                        bpf_map_inc(map, false);
+                        map = bpf_map_inc(map, false);
+                        if (IS_ERR(map)) {
+                                fdput(f);
+                                return PTR_ERR(map);
+                        }
+                        env->used_maps[env->used_map_cnt++] = map;
+
                         fdput(f);
 next_insn:
                         insn++;
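The verifier hunk also reorders the bookkeeping: the map is stored in env->used_maps[] only after bpf_map_inc() has succeeded, so the cleanup path that puts every recorded map can never drop a reference the verifier failed to take. The small model below illustrates that "acquire first, record on success" ordering; all names in it are made up, with object_get() standing in for the fallible bpf_map_inc().

/* Illustrative model of the "take the reference first, record it only on
 * success" ordering from the verifier hunk.  Names and types are made up.
 */
#include <errno.h>
#include <stddef.h>

#define MAX_USED 64

struct object {
        int refcnt;
        int refcnt_cap;
};

/* Stand-in for a fallible take-a-reference helper such as bpf_map_inc(). */
static struct object *object_get(struct object *obj)
{
        if (obj->refcnt >= obj->refcnt_cap)
                return NULL;                    /* would exceed the cap: refuse */
        obj->refcnt++;
        return obj;
}

static void object_put(struct object *obj)
{
        obj->refcnt--;
}

struct env {
        struct object *used[MAX_USED];
        int used_cnt;
};

static int remember_object(struct env *env, struct object *obj)
{
        if (env->used_cnt >= MAX_USED)
                return -E2BIG;

        obj = object_get(obj);                  /* take the reference first...       */
        if (obj == NULL)
                return -EBUSY;

        env->used[env->used_cnt++] = obj;       /* ...record it only once it is held */
        return 0;
}

/* Cleanup puts only the references that remember_object() actually recorded. */
static void release_objects(struct env *env)
{
        for (int i = 0; i < env->used_cnt; i++)
                object_put(env->used[i]);
}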