xdp: Refactor devmap allocation code for reuse
The subsequent patch to add a new devmap sub-type can re-use much of
the initialisation and allocation code, so refactor it into separate
functions.

Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Acked-by: Yonghong Song <yhs@fb.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 6dbff13ca8
commit fca16e5107
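The effect of the refactor is that the attribute sanity checks, the memlock charge and the per-CPU flush-list and netdev_map allocations all end up in dev_map_init_map(), while dev_map_alloc() keeps only the CAP_NET_ADMIN check, the kzalloc() of the map structure and the registration on dev_map_list. As a rough sketch of the intended reuse (not part of this patch: the wrapper struct, its members and the function name below are hypothetical), a later devmap sub-type living in the same file could embed struct bpf_dtab and delegate the common initialisation:

/* Sketch only: shows how a new devmap sub-type could reuse
 * dev_map_init_map() introduced by this patch. It assumes it sits in
 * kernel/bpf/devmap.c next to the definitions touched below; the
 * struct and function names are hypothetical, not taken from the tree.
 */
struct bpf_dtab_hash {
	struct bpf_dtab dtab;			/* common devmap state */
	struct hlist_head *index_heads;		/* sub-type specific state */
};

static struct bpf_map *dev_map_hash_alloc_sketch(union bpf_attr *attr)
{
	struct bpf_dtab_hash *h;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	h = kzalloc(sizeof(*h), GFP_USER);
	if (!h)
		return ERR_PTR(-ENOMEM);

	/* Shared attribute checks, memlock charge and flush-list setup;
	 * dev_map_init_map() cleans up after itself on failure, so the
	 * caller only has to free its own allocation.
	 */
	err = dev_map_init_map(&h->dtab, attr);
	if (err) {
		kfree(h);
		return ERR_PTR(err);
	}

	/* Sub-type specific initialisation (e.g. h->index_heads) would
	 * go here before the map is published.
	 */

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&h->dtab.list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &h->dtab.map;
}

Whatever shape the follow-up sub-type ends up taking, the split means the per-type alloc path only contributes what differs; everything the variants share stays behind dev_map_init_map().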
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -60,9 +60,9 @@ struct xdp_bulk_queue {
 struct bpf_dtab_netdev {
 	struct net_device *dev; /* must be first member, due to tracepoint */
 	struct bpf_dtab *dtab;
-	unsigned int bit;
 	struct xdp_bulk_queue __percpu *bulkq;
 	struct rcu_head rcu;
+	unsigned int idx; /* keep track of map index for tracepoint */
 };
 
 struct bpf_dtab {
@@ -75,28 +75,21 @@ struct bpf_dtab {
 static DEFINE_SPINLOCK(dev_map_lock);
 static LIST_HEAD(dev_map_list);
 
-static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
+static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 {
-	struct bpf_dtab *dtab;
 	int err, cpu;
 	u64 cost;
 
-	if (!capable(CAP_NET_ADMIN))
-		return ERR_PTR(-EPERM);
-
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
 	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
 	 * verifier prevents writes from the BPF side
 	 */
 	attr->map_flags |= BPF_F_RDONLY_PROG;
 
-	dtab = kzalloc(sizeof(*dtab), GFP_USER);
-	if (!dtab)
-		return ERR_PTR(-ENOMEM);
-
 	bpf_map_init_from_attr(&dtab->map, attr);
 
@@ -107,9 +100,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	/* if map size is larger than memlock limit, reject it */
 	err = bpf_map_charge_init(&dtab->map.memory, cost);
 	if (err)
-		goto free_dtab;
-
-	err = -ENOMEM;
+		return -EINVAL;
 
 	dtab->flush_list = alloc_percpu(struct list_head);
 	if (!dtab->flush_list)
@@ -124,19 +115,38 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	if (!dtab->netdev_map)
 		goto free_percpu;
 
-	spin_lock(&dev_map_lock);
-	list_add_tail_rcu(&dtab->list, &dev_map_list);
-	spin_unlock(&dev_map_lock);
-
-	return &dtab->map;
+	return 0;
 
 free_percpu:
 	free_percpu(dtab->flush_list);
 free_charge:
 	bpf_map_charge_finish(&dtab->map.memory);
-free_dtab:
-	kfree(dtab);
-	return ERR_PTR(err);
+	return -ENOMEM;
+}
+
+static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
+{
+	struct bpf_dtab *dtab;
+	int err;
+
+	if (!capable(CAP_NET_ADMIN))
+		return ERR_PTR(-EPERM);
+
+	dtab = kzalloc(sizeof(*dtab), GFP_USER);
+	if (!dtab)
+		return ERR_PTR(-ENOMEM);
+
+	err = dev_map_init_map(dtab, attr);
+	if (err) {
+		kfree(dtab);
+		return ERR_PTR(err);
+	}
+
+	spin_lock(&dev_map_lock);
+	list_add_tail_rcu(&dtab->list, &dev_map_list);
+	spin_unlock(&dev_map_lock);
+
+	return &dtab->map;
 }
 
 static void dev_map_free(struct bpf_map *map)
@@ -235,7 +245,7 @@ static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
 out:
 	bq->count = 0;
 
-	trace_xdp_devmap_xmit(&obj->dtab->map, obj->bit,
+	trace_xdp_devmap_xmit(&obj->dtab->map, obj->idx,
 			      sent, drops, bq->dev_rx, dev, err);
 	bq->dev_rx = NULL;
 	__list_del_clearprev(&bq->flush_node);
@@ -412,17 +422,52 @@ static int dev_map_delete_elem(struct bpf_map *map, void *key)
 	return 0;
 }
 
-static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
-			       u64 map_flags)
+static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
+						    struct bpf_dtab *dtab,
+						    u32 ifindex,
+						    unsigned int idx)
+{
+	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+	struct bpf_dtab_netdev *dev;
+	struct xdp_bulk_queue *bq;
+	int cpu;
+
+	dev = kmalloc_node(sizeof(*dev), gfp, dtab->map.numa_node);
+	if (!dev)
+		return ERR_PTR(-ENOMEM);
+
+	dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
+					sizeof(void *), gfp);
+	if (!dev->bulkq) {
+		kfree(dev);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	for_each_possible_cpu(cpu) {
+		bq = per_cpu_ptr(dev->bulkq, cpu);
+		bq->obj = dev;
+	}
+
+	dev->dev = dev_get_by_index(net, ifindex);
+	if (!dev->dev) {
+		free_percpu(dev->bulkq);
+		kfree(dev);
+		return ERR_PTR(-EINVAL);
+	}
+
+	dev->idx = idx;
+	dev->dtab = dtab;
+
+	return dev;
+}
+
+static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
+				 void *key, void *value, u64 map_flags)
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
-	struct net *net = current->nsproxy->net_ns;
-	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
 	struct bpf_dtab_netdev *dev, *old_dev;
 	u32 ifindex = *(u32 *)value;
-	struct xdp_bulk_queue *bq;
 	u32 i = *(u32 *)key;
-	int cpu;
 
 	if (unlikely(map_flags > BPF_EXIST))
 		return -EINVAL;
@@ -434,31 +479,9 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
 	if (!ifindex) {
 		dev = NULL;
 	} else {
-		dev = kmalloc_node(sizeof(*dev), gfp, map->numa_node);
-		if (!dev)
-			return -ENOMEM;
-
-		dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
-						sizeof(void *), gfp);
-		if (!dev->bulkq) {
-			kfree(dev);
-			return -ENOMEM;
-		}
-
-		for_each_possible_cpu(cpu) {
-			bq = per_cpu_ptr(dev->bulkq, cpu);
-			bq->obj = dev;
-		}
-
-		dev->dev = dev_get_by_index(net, ifindex);
-		if (!dev->dev) {
-			free_percpu(dev->bulkq);
-			kfree(dev);
-			return -EINVAL;
-		}
-
-		dev->bit = i;
-		dev->dtab = dtab;
+		dev = __dev_map_alloc_node(net, dtab, ifindex, i);
+		if (IS_ERR(dev))
+			return PTR_ERR(dev);
 	}
 
 	/* Use call_rcu() here to ensure rcu critical sections have completed
@@ -472,6 +495,13 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
 	return 0;
 }
 
+static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
+			       u64 map_flags)
+{
+	return __dev_map_update_elem(current->nsproxy->net_ns,
+				     map, key, value, map_flags);
+}
+
 const struct bpf_map_ops dev_map_ops = {
 	.map_alloc = dev_map_alloc,
 	.map_free = dev_map_free,