Merge branch 'xdp-bpf-fixes'
John Fastabend says:

====================
net: Fixes for XDP/BPF

The following fixes, UAPI updates, and small improvements:

i. XDP needs to be called inside RCU with preempt disabled.

ii. Not strictly a bug fix, but we already have an attach command in the sockmap UAPI; to avoid having a single kernel release with only the attach and not the detach, I'm pushing this into the net branch. It's early in the RC cycle so I think this is OK (not ideal, but better than supporting a UAPI with a missing detach forever).

iii. The final patch replaces cpu_relax with cond_resched in devmap.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit a7bc57749f
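For point (ii), a minimal userspace sketch of the attach/detach flow this series completes, using the bpf_prog_attach()/bpf_prog_detach() helpers from tools/lib/bpf that the selftest hunks below also exercise. sock_map_fd, parser_fd and verdict_fd are hypothetical file descriptors the caller is assumed to have created already (a BPF_MAP_TYPE_SOCKMAP map and two loaded BPF_PROG_TYPE_SK_SKB programs):

#include <bpf/bpf.h>	/* assumed: bpf_prog_attach(), bpf_prog_detach() from tools/lib/bpf */

/* sock_map_fd: fd of a BPF_MAP_TYPE_SOCKMAP map (hypothetical)
 * parser_fd, verdict_fd: fds of loaded BPF_PROG_TYPE_SK_SKB programs (hypothetical)
 */
static int sockmap_attach_then_detach(int sock_map_fd, int parser_fd,
				      int verdict_fd)
{
	int err;

	err = bpf_prog_attach(parser_fd, sock_map_fd,
			      BPF_SK_SKB_STREAM_PARSER, 0);
	if (err)
		return err;

	err = bpf_prog_attach(verdict_fd, sock_map_fd,
			      BPF_SK_SKB_STREAM_VERDICT, 0);
	if (err)
		return err;

	/* ... add sockets to the map with bpf_map_update_elem() and use it ... */

	/* with this series the attach finally has a matching detach */
	err = bpf_prog_detach(sock_map_fd, BPF_SK_SKB_STREAM_PARSER);
	if (err)
		return err;

	return bpf_prog_detach(sock_map_fd, BPF_SK_SKB_STREAM_VERDICT);
}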
include/linux/bpf.h
@@ -385,14 +385,14 @@ static inline void __dev_map_flush(struct bpf_map *map)
 #if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL)
 struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
-int sock_map_attach_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
+int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
 #else
 static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
 {
 	return NULL;
 }
 
-static inline int sock_map_attach_prog(struct bpf_map *map,
+static inline int sock_map_prog(struct bpf_map *map,
 				       struct bpf_prog *prog,
 				       u32 type)
 {
kernel/bpf/devmap.c
@@ -159,7 +159,7 @@ static void dev_map_free(struct bpf_map *map)
 		unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu);
 
 		while (!bitmap_empty(bitmap, dtab->map.max_entries))
-			cpu_relax();
+			cond_resched();
 	}
 
 	for (i = 0; i < dtab->map.max_entries; i++) {
kernel/bpf/sockmap.c
@@ -792,7 +792,7 @@ out_progs:
 	return err;
 }
 
-int sock_map_attach_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
+int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
 {
 	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 	struct bpf_prog *orig;
kernel/bpf/syscall.c
@@ -1096,10 +1096,10 @@ static int bpf_obj_get(const union bpf_attr *attr)
 
 #define BPF_PROG_ATTACH_LAST_FIELD attach_flags
 
-static int sockmap_get_from_fd(const union bpf_attr *attr)
+static int sockmap_get_from_fd(const union bpf_attr *attr, bool attach)
 {
+	struct bpf_prog *prog = NULL;
 	int ufd = attr->target_fd;
-	struct bpf_prog *prog;
 	struct bpf_map *map;
 	struct fd f;
 	int err;
@@ -1109,15 +1109,19 @@ static int sockmap_get_from_fd(const union bpf_attr *attr)
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
-	prog = bpf_prog_get_type(attr->attach_bpf_fd, BPF_PROG_TYPE_SK_SKB);
-	if (IS_ERR(prog)) {
-		fdput(f);
-		return PTR_ERR(prog);
+	if (attach) {
+		prog = bpf_prog_get_type(attr->attach_bpf_fd,
+					 BPF_PROG_TYPE_SK_SKB);
+		if (IS_ERR(prog)) {
+			fdput(f);
+			return PTR_ERR(prog);
+		}
 	}
 
-	err = sock_map_attach_prog(map, prog, attr->attach_type);
+	err = sock_map_prog(map, prog, attr->attach_type);
 	if (err) {
 		fdput(f);
-		bpf_prog_put(prog);
+		if (prog)
+			bpf_prog_put(prog);
 		return err;
 	}
@@ -1155,7 +1159,7 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 		break;
 	case BPF_SK_SKB_STREAM_PARSER:
 	case BPF_SK_SKB_STREAM_VERDICT:
-		return sockmap_get_from_fd(attr);
+		return sockmap_get_from_fd(attr, true);
 	default:
 		return -EINVAL;
 	}
@@ -1204,7 +1208,10 @@ static int bpf_prog_detach(const union bpf_attr *attr)
 		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
 		cgroup_put(cgrp);
 		break;
-
+	case BPF_SK_SKB_STREAM_PARSER:
+	case BPF_SK_SKB_STREAM_VERDICT:
+		ret = sockmap_get_from_fd(attr, false);
+		break;
 	default:
 		return -EINVAL;
 	}
net/core/dev.c
@@ -3981,8 +3981,13 @@ static int netif_rx_internal(struct sk_buff *skb)
 	trace_netif_rx(skb);
 
 	if (static_key_false(&generic_xdp_needed)) {
-		int ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
-					 skb);
+		int ret;
+
+		preempt_disable();
+		rcu_read_lock();
+		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
+		rcu_read_unlock();
+		preempt_enable();
 
 		/* Consider XDP consuming the packet a success from
 		 * the netdev point of view we do not want to count
@@ -4500,18 +4505,20 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
 	if (skb_defer_rx_timestamp(skb))
 		return NET_RX_SUCCESS;
 
-	rcu_read_lock();
-
 	if (static_key_false(&generic_xdp_needed)) {
-		int ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
-					 skb);
+		int ret;
 
-		if (ret != XDP_PASS) {
-			rcu_read_unlock();
+		preempt_disable();
+		rcu_read_lock();
+		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
+		rcu_read_unlock();
+		preempt_enable();
+
+		if (ret != XDP_PASS)
 			return NET_RX_DROP;
-		}
 	}
 
+	rcu_read_lock();
 #ifdef CONFIG_RPS
 	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
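Restating point (i): after the two hunks in net/core/dev.c above, every generic-XDP invocation runs under rcu_read_lock() with preemption disabled, since the xdp_prog pointer is RCU-protected and the program may touch per-CPU redirect state. Below is a commented sketch of the resulting calling pattern; it is the same sequence as the hunks above, with explanatory comments added here rather than taken from the commit:

	int ret;

	preempt_disable();	/* keep per-CPU XDP state stable across the call */
	rcu_read_lock();	/* skb->dev->xdp_prog is RCU-protected */
	ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
	rcu_read_unlock();
	preempt_enable();

	if (ret != XDP_PASS)	/* XDP consumed or dropped the packet */
		return NET_RX_DROP;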
tools/testing/selftests/bpf/test_maps.c
@@ -558,7 +558,7 @@ static void test_sockmap(int tasks, void *data)
 		}
 	}
 
-	/* Test attaching bad fds */
+	/* Test attaching/detaching bad fds */
 	err = bpf_prog_attach(-1, fd, BPF_SK_SKB_STREAM_PARSER, 0);
 	if (!err) {
 		printf("Failed invalid parser prog attach\n");
@@ -571,6 +571,30 @@ static void test_sockmap(int tasks, void *data)
 		goto out_sockmap;
 	}
 
+	err = bpf_prog_attach(-1, fd, __MAX_BPF_ATTACH_TYPE, 0);
+	if (!err) {
+		printf("Failed unknown prog attach\n");
+		goto out_sockmap;
+	}
+
+	err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_PARSER);
+	if (err) {
+		printf("Failed empty parser prog detach\n");
+		goto out_sockmap;
+	}
+
+	err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_VERDICT);
+	if (err) {
+		printf("Failed empty verdict prog detach\n");
+		goto out_sockmap;
+	}
+
+	err = bpf_prog_detach(fd, __MAX_BPF_ATTACH_TYPE);
+	if (!err) {
+		printf("Detach invalid prog successful\n");
+		goto out_sockmap;
+	}
+
 	/* Load SK_SKB program and Attach */
 	err = bpf_prog_load(SOCKMAP_PARSE_PROG,
 			    BPF_PROG_TYPE_SK_SKB, &obj, &parse_prog);
@@ -643,6 +667,13 @@ static void test_sockmap(int tasks, void *data)
 		goto out_sockmap;
 	}
 
+	err = bpf_prog_attach(verdict_prog, map_fd_rx,
+			      __MAX_BPF_ATTACH_TYPE, 0);
+	if (!err) {
+		printf("Attached unknown bpf prog\n");
+		goto out_sockmap;
+	}
+
 	/* Test map update elem afterwards fd lives in fd and map_fd */
 	for (i = 0; i < 6; i++) {
 		err = bpf_map_update_elem(map_fd_rx, &i, &sfd[i], BPF_ANY);
@@ -809,6 +840,24 @@ static void test_sockmap(int tasks, void *data)
 		assert(status == 0);
 	}
 
+	err = bpf_prog_detach(map_fd_rx, __MAX_BPF_ATTACH_TYPE);
+	if (!err) {
+		printf("Detached an invalid prog type.\n");
+		goto out_sockmap;
+	}
+
+	err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
+	if (err) {
+		printf("Failed parser prog detach\n");
+		goto out_sockmap;
+	}
+
+	err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
+	if (err) {
+		printf("Failed parser prog detach\n");
+		goto out_sockmap;
+	}
+
 	/* Test map close sockets */
 	for (i = 0; i < 6; i++)
 		close(sfd[i]);