mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 22:21:40 +00:00
bpf: Reshuffle some parts of bpf/offload.c
To avoid adding forward declarations in the main patch, shuffle some code around. No functional changes. Cc: John Fastabend <john.fastabend@gmail.com> Cc: David Ahern <dsahern@gmail.com> Cc: Martin KaFai Lau <martin.lau@linux.dev> Cc: Jakub Kicinski <kuba@kernel.org> Cc: Willem de Bruijn <willemb@google.com> Cc: Jesper Dangaard Brouer <brouer@redhat.com> Cc: Anatoly Burakov <anatoly.burakov@intel.com> Cc: Alexander Lobakin <alexandr.lobakin@intel.com> Cc: Magnus Karlsson <magnus.karlsson@gmail.com> Cc: Maryam Tahhan <mtahhan@redhat.com> Cc: xdp-hints@xdp-project.net Cc: netdev@vger.kernel.org Signed-off-by: Stanislav Fomichev <sdf@google.com> Link: https://lore.kernel.org/r/20230119221536.3349901-5-sdf@google.com Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
This commit is contained in:
parent
f1fc43d039
commit
89bbc53a4d
@ -74,6 +74,121 @@ bpf_offload_find_netdev(struct net_device *netdev)
|
||||
return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
|
||||
}
|
||||
|
||||
/*
 * Bind @netdev to the offload device @offdev so BPF programs/maps can be
 * offloaded to it.
 *
 * Allocates a struct bpf_offload_netdev tracking node, inserts it into the
 * global offdevs rhashtable (keyed by netdev) and links it onto the
 * offdev's netdev list, all under bpf_devs_lock held for write.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * rhashtable_insert_fast() (e.g. if the netdev is already registered).
 */
static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
					     struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	down_write(&bpf_devs_lock);
	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_unlock_free;
	}

	/* Visible in the hash table; also track it per offload device. */
	list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	up_write(&bpf_devs_lock);
	return 0;

err_unlock_free:
	/* Drop the lock before freeing; ondev was never published. */
	up_write(&bpf_devs_lock);
	kfree(ondev);
	return err;
}
|
||||
|
||||
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
|
||||
{
|
||||
struct bpf_prog_offload *offload = prog->aux->offload;
|
||||
|
||||
if (offload->dev_state)
|
||||
offload->offdev->ops->destroy(prog);
|
||||
|
||||
/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
|
||||
bpf_prog_free_id(prog, true);
|
||||
|
||||
list_del_init(&offload->offloads);
|
||||
kfree(offload);
|
||||
prog->aux->offload = NULL;
|
||||
}
|
||||
|
||||
static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
|
||||
enum bpf_netdev_command cmd)
|
||||
{
|
||||
struct netdev_bpf data = {};
|
||||
struct net_device *netdev;
|
||||
|
||||
ASSERT_RTNL();
|
||||
|
||||
data.command = cmd;
|
||||
data.offmap = offmap;
|
||||
/* Caller must make sure netdev is valid */
|
||||
netdev = offmap->netdev;
|
||||
|
||||
return netdev->netdev_ops->ndo_bpf(netdev, &data);
|
||||
}
|
||||
|
||||
static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
|
||||
{
|
||||
WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
|
||||
/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
|
||||
bpf_map_free_id(&offmap->map, true);
|
||||
list_del_init(&offmap->offloads);
|
||||
offmap->netdev = NULL;
|
||||
}
|
||||
|
||||
/*
 * Undo __bpf_offload_dev_netdev_register() for @netdev on @offdev.
 *
 * Removes the tracking node from the offdevs rhashtable and the offdev's
 * netdev list.  Any programs/maps still bound to this netdev are migrated
 * to another netdev of the same offload device when one exists; otherwise
 * they are destroyed.  Runs under RTNL and takes bpf_devs_lock for write.
 */
static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
						struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	/* Unregister without a prior register is a caller bug. */
	if (WARN_ON(!ondev))
		goto unlock;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
	list_del(&ondev->offdev_netdevs);

	/* Try to move the objects to another netdev of the device */
	altdev = list_first_entry_or_null(&offdev->netdevs,
					  struct bpf_offload_netdev,
					  offdev_netdevs);
	if (altdev) {
		/* Repoint each program/map at the surviving netdev, then
		 * transfer the whole lists in one go.
		 */
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		/* No other netdev to migrate to; tear everything down.
		 * _safe iteration: destroy unlinks entries as it goes.
		 */
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
unlock:
	up_write(&bpf_devs_lock);
}
|
||||
|
||||
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
|
||||
{
|
||||
struct bpf_offload_netdev *ondev;
|
||||
@ -206,21 +321,6 @@ bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
|
||||
up_read(&bpf_devs_lock);
|
||||
}
|
||||
|
||||
/*
 * Release all offload state attached to @prog.  Caller holds bpf_devs_lock.
 */
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;

	/* Driver tears down its per-program state first, if it created any. */
	if (offload->dev_state)
		offload->offdev->ops->destroy(prog);

	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
	bpf_prog_free_id(prog, true);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}
|
||||
|
||||
void bpf_prog_offload_destroy(struct bpf_prog *prog)
|
||||
{
|
||||
down_write(&bpf_devs_lock);
|
||||
@ -340,22 +440,6 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
|
||||
/* Offloaded programs run on the device, so no host-side prog ops needed. */
const struct bpf_prog_ops bpf_offload_prog_ops = {
};
|
||||
|
||||
/*
 * Forward a map offload command (@cmd) for @offmap to the owning netdev's
 * ndo_bpf() callback.  Must run under RTNL.
 */
static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}
|
||||
|
||||
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
|
||||
{
|
||||
struct net *net = current->nsproxy->net_ns;
|
||||
@ -405,15 +489,6 @@ err_unlock:
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
/*
 * Detach @offmap from its netdev and drop its user-visible map ID.
 * Caller holds bpf_devs_lock.
 */
static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	/* Tell the driver to free its copy; a failure here is a driver bug. */
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}
|
||||
|
||||
void bpf_map_offload_map_free(struct bpf_map *map)
|
||||
{
|
||||
struct bpf_offloaded_map *offmap = map_to_offmap(map);
|
||||
@ -592,77 +667,14 @@ bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
|
||||
/*
 * Public entry point for drivers to register @netdev for BPF offload on
 * @offdev.  Returns 0 on success or a negative errno.
 *
 * The block as scraped contained the old inline registration body followed
 * by an unreachable call to __bpf_offload_dev_netdev_register() after
 * "return err;" — dead, duplicated logic.  The registration work lives in
 * the static helper; this wrapper only delegates to it.
 */
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	return __bpf_offload_dev_netdev_register(offdev, netdev);
}
|
||||
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);
|
||||
|
||||
/*
 * Public entry point for drivers to unregister @netdev from @offdev.
 * Must be called under RTNL.
 *
 * The block as scraped performed the full unregistration inline and THEN
 * called __bpf_offload_dev_netdev_unregister() again — the second call
 * would look up an already-removed tracking node and, worse, the inline
 * body duplicates the helper's rhashtable removal and kfree(), risking a
 * double free.  All the work belongs in the static helper; this wrapper
 * only delegates to it.
 */
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	__bpf_offload_dev_netdev_unregister(offdev, netdev);
}
|
||||
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user