forked from Minki/linux
bpf: Convert bpf_prog refcnt to atomic64_t
Similarly to bpf_map's refcnt/usercnt, convert bpf_prog's refcnt to atomic64 and remove the artificial 32k limit. This allows bpf_prog's refcounting to be made non-failing, simplifying the logic of users of bpf_prog_add/bpf_prog_inc. Validated compilation by running an allyesconfig kernel build. Suggested-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: Andrii Nakryiko <andriin@fb.com> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Link: https://lore.kernel.org/bpf/20191117172806.2195367-3-andriin@fb.com
This commit is contained in:
parent
1e0bd5a091
commit
85192dbf4d
@ -3171,13 +3171,8 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
|
||||
bnxt_init_rxbd_pages(ring, type);
|
||||
|
||||
if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
|
||||
rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
|
||||
if (IS_ERR(rxr->xdp_prog)) {
|
||||
int rc = PTR_ERR(rxr->xdp_prog);
|
||||
|
||||
rxr->xdp_prog = NULL;
|
||||
return rc;
|
||||
}
|
||||
bpf_prog_add(bp->xdp_prog, 1);
|
||||
rxr->xdp_prog = bp->xdp_prog;
|
||||
}
|
||||
prod = rxr->rx_prod;
|
||||
for (i = 0; i < bp->rx_ring_size; i++) {
|
||||
|
@ -1876,13 +1876,8 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
|
||||
|
||||
if (nic->xdp_prog) {
|
||||
/* Attach BPF program */
|
||||
nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
|
||||
if (!IS_ERR(nic->xdp_prog)) {
|
||||
bpf_attached = true;
|
||||
} else {
|
||||
ret = PTR_ERR(nic->xdp_prog);
|
||||
nic->xdp_prog = NULL;
|
||||
}
|
||||
bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
|
||||
bpf_attached = true;
|
||||
}
|
||||
|
||||
/* Calculate Tx queues needed for XDP and network stack */
|
||||
|
@ -1807,11 +1807,8 @@ static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
|
||||
if (prog && !xdp_mtu_valid(priv, dev->mtu))
|
||||
return -EINVAL;
|
||||
|
||||
if (prog) {
|
||||
prog = bpf_prog_add(prog, priv->num_channels);
|
||||
if (IS_ERR(prog))
|
||||
return PTR_ERR(prog);
|
||||
}
|
||||
if (prog)
|
||||
bpf_prog_add(prog, priv->num_channels);
|
||||
|
||||
up = netif_running(dev);
|
||||
need_update = (!!priv->xdp_prog != !!prog);
|
||||
|
@ -2286,11 +2286,7 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
|
||||
lockdep_is_held(&priv->mdev->state_lock));
|
||||
|
||||
if (xdp_prog && carry_xdp_prog) {
|
||||
xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
|
||||
if (IS_ERR(xdp_prog)) {
|
||||
mlx4_en_free_resources(tmp);
|
||||
return PTR_ERR(xdp_prog);
|
||||
}
|
||||
bpf_prog_add(xdp_prog, tmp->rx_ring_num);
|
||||
for (i = 0; i < tmp->rx_ring_num; i++)
|
||||
rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
|
||||
xdp_prog);
|
||||
@ -2782,11 +2778,9 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
|
||||
* program for a new one.
|
||||
*/
|
||||
if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
|
||||
if (prog) {
|
||||
prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
|
||||
if (IS_ERR(prog))
|
||||
return PTR_ERR(prog);
|
||||
}
|
||||
if (prog)
|
||||
bpf_prog_add(prog, priv->rx_ring_num - 1);
|
||||
|
||||
mutex_lock(&mdev->state_lock);
|
||||
for (i = 0; i < priv->rx_ring_num; i++) {
|
||||
old_prog = rcu_dereference_protected(
|
||||
@ -2807,13 +2801,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
|
||||
if (!tmp)
|
||||
return -ENOMEM;
|
||||
|
||||
if (prog) {
|
||||
prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
|
||||
if (IS_ERR(prog)) {
|
||||
err = PTR_ERR(prog);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
if (prog)
|
||||
bpf_prog_add(prog, priv->rx_ring_num - 1);
|
||||
|
||||
mutex_lock(&mdev->state_lock);
|
||||
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
|
||||
@ -2862,7 +2851,6 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
|
||||
|
||||
unlock_out:
|
||||
mutex_unlock(&mdev->state_lock);
|
||||
out:
|
||||
kfree(tmp);
|
||||
return err;
|
||||
}
|
||||
|
@ -408,12 +408,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
|
||||
rq->stats = &c->priv->channel_stats[c->ix].rq;
|
||||
INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
|
||||
|
||||
rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
|
||||
if (IS_ERR(rq->xdp_prog)) {
|
||||
err = PTR_ERR(rq->xdp_prog);
|
||||
rq->xdp_prog = NULL;
|
||||
goto err_rq_wq_destroy;
|
||||
}
|
||||
if (params->xdp_prog)
|
||||
bpf_prog_inc(params->xdp_prog);
|
||||
rq->xdp_prog = params->xdp_prog;
|
||||
|
||||
rq_xdp_ix = rq->ix;
|
||||
if (xsk)
|
||||
@ -4406,16 +4403,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
|
||||
/* no need for full reset when exchanging programs */
|
||||
reset = (!priv->channels.params.xdp_prog || !prog);
|
||||
|
||||
if (was_opened && !reset) {
|
||||
if (was_opened && !reset)
|
||||
/* num_channels is invariant here, so we can take the
|
||||
* batched reference right upfront.
|
||||
*/
|
||||
prog = bpf_prog_add(prog, priv->channels.num);
|
||||
if (IS_ERR(prog)) {
|
||||
err = PTR_ERR(prog);
|
||||
goto unlock;
|
||||
}
|
||||
}
|
||||
bpf_prog_add(prog, priv->channels.num);
|
||||
|
||||
if (was_opened && reset) {
|
||||
struct mlx5e_channels new_channels = {};
|
||||
|
@ -2107,12 +2107,8 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
|
||||
if (IS_ERR(fp->rxq->xdp_prog)) {
|
||||
rc = PTR_ERR(fp->rxq->xdp_prog);
|
||||
fp->rxq->xdp_prog = NULL;
|
||||
goto out;
|
||||
}
|
||||
bpf_prog_add(edev->xdp_prog, 1);
|
||||
fp->rxq->xdp_prog = edev->xdp_prog;
|
||||
}
|
||||
|
||||
if (fp->type & QEDE_FASTPATH_TX) {
|
||||
|
@ -2445,11 +2445,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
|
||||
if (!prog && !old_prog)
|
||||
return 0;
|
||||
|
||||
if (prog) {
|
||||
prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
|
||||
if (IS_ERR(prog))
|
||||
return PTR_ERR(prog);
|
||||
}
|
||||
if (prog)
|
||||
bpf_prog_add(prog, vi->max_queue_pairs - 1);
|
||||
|
||||
/* Make sure NAPI is not using any XDP TX queues for RX. */
|
||||
if (netif_running(dev)) {
|
||||
|
@ -485,7 +485,7 @@ struct bpf_func_info_aux {
|
||||
};
|
||||
|
||||
struct bpf_prog_aux {
|
||||
atomic_t refcnt;
|
||||
atomic64_t refcnt;
|
||||
u32 used_map_cnt;
|
||||
u32 max_ctx_offset;
|
||||
u32 max_pkt_offset;
|
||||
@ -770,9 +770,9 @@ extern const struct bpf_verifier_ops xdp_analyzer_ops;
|
||||
struct bpf_prog *bpf_prog_get(u32 ufd);
|
||||
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
|
||||
bool attach_drv);
|
||||
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
|
||||
void bpf_prog_add(struct bpf_prog *prog, int i);
|
||||
void bpf_prog_sub(struct bpf_prog *prog, int i);
|
||||
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
|
||||
void bpf_prog_inc(struct bpf_prog *prog);
|
||||
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
|
||||
void bpf_prog_put(struct bpf_prog *prog);
|
||||
int __bpf_prog_charge(struct user_struct *user, u32 pages);
|
||||
@ -912,10 +912,8 @@ static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
}
|
||||
|
||||
static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
|
||||
int i)
|
||||
static inline void bpf_prog_add(struct bpf_prog *prog, int i)
|
||||
{
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
}
|
||||
|
||||
static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
|
||||
@ -926,9 +924,8 @@ static inline void bpf_prog_put(struct bpf_prog *prog)
|
||||
{
|
||||
}
|
||||
|
||||
static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
|
||||
static inline void bpf_prog_inc(struct bpf_prog *prog)
|
||||
{
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
}
|
||||
|
||||
static inline struct bpf_prog *__must_check
|
||||
|
@ -31,7 +31,7 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
|
||||
{
|
||||
switch (type) {
|
||||
case BPF_TYPE_PROG:
|
||||
raw = bpf_prog_inc(raw);
|
||||
bpf_prog_inc(raw);
|
||||
break;
|
||||
case BPF_TYPE_MAP:
|
||||
bpf_map_inc_with_uref(raw);
|
||||
@ -534,7 +534,8 @@ static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type
|
||||
if (!bpf_prog_get_ok(prog, &type, false))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
return bpf_prog_inc(prog);
|
||||
bpf_prog_inc(prog);
|
||||
return prog;
|
||||
}
|
||||
|
||||
struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
|
||||
|
@ -1339,7 +1339,7 @@ static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
|
||||
|
||||
static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
|
||||
{
|
||||
if (atomic_dec_and_test(&prog->aux->refcnt)) {
|
||||
if (atomic64_dec_and_test(&prog->aux->refcnt)) {
|
||||
perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
|
||||
/* bpf_prog_free_id() must be called first */
|
||||
bpf_prog_free_id(prog, do_idr_lock);
|
||||
@ -1445,16 +1445,9 @@ static struct bpf_prog *____bpf_prog_get(struct fd f)
|
||||
return f.file->private_data;
|
||||
}
|
||||
|
||||
/* prog's refcnt limit */
|
||||
#define BPF_MAX_REFCNT 32768
|
||||
|
||||
struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
|
||||
void bpf_prog_add(struct bpf_prog *prog, int i)
|
||||
{
|
||||
if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
|
||||
atomic_sub(i, &prog->aux->refcnt);
|
||||
return ERR_PTR(-EBUSY);
|
||||
}
|
||||
return prog;
|
||||
atomic64_add(i, &prog->aux->refcnt);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bpf_prog_add);
|
||||
|
||||
@ -1465,13 +1458,13 @@ void bpf_prog_sub(struct bpf_prog *prog, int i)
|
||||
* path holds a reference to the program, thus atomic_sub() can
|
||||
* be safely used in such cases!
|
||||
*/
|
||||
WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
|
||||
WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bpf_prog_sub);
|
||||
|
||||
struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
|
||||
void bpf_prog_inc(struct bpf_prog *prog)
|
||||
{
|
||||
return bpf_prog_add(prog, 1);
|
||||
atomic64_inc(&prog->aux->refcnt);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bpf_prog_inc);
|
||||
|
||||
@ -1480,12 +1473,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
|
||||
{
|
||||
int refold;
|
||||
|
||||
refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0);
|
||||
|
||||
if (refold >= BPF_MAX_REFCNT) {
|
||||
__bpf_prog_put(prog, false);
|
||||
return ERR_PTR(-EBUSY);
|
||||
}
|
||||
refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
|
||||
|
||||
if (!refold)
|
||||
return ERR_PTR(-ENOENT);
|
||||
@ -1523,7 +1511,7 @@ static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
|
||||
goto out;
|
||||
}
|
||||
|
||||
prog = bpf_prog_inc(prog);
|
||||
bpf_prog_inc(prog);
|
||||
out:
|
||||
fdput(f);
|
||||
return prog;
|
||||
@ -1714,7 +1702,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
|
||||
prog->orig_prog = NULL;
|
||||
prog->jited = 0;
|
||||
|
||||
atomic_set(&prog->aux->refcnt, 1);
|
||||
atomic64_set(&prog->aux->refcnt, 1);
|
||||
prog->gpl_compatible = is_gpl ? 1 : 0;
|
||||
|
||||
if (bpf_prog_is_dev_bound(prog->aux)) {
|
||||
|
@ -10477,12 +10477,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
|
||||
context = parent_event->overflow_handler_context;
|
||||
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
|
||||
if (overflow_handler == bpf_overflow_handler) {
|
||||
struct bpf_prog *prog = bpf_prog_inc(parent_event->prog);
|
||||
struct bpf_prog *prog = parent_event->prog;
|
||||
|
||||
if (IS_ERR(prog)) {
|
||||
err = PTR_ERR(prog);
|
||||
goto err_ns;
|
||||
}
|
||||
bpf_prog_inc(prog);
|
||||
event->prog = prog;
|
||||
event->orig_overflow_handler =
|
||||
parent_event->orig_overflow_handler;
|
||||
|
Loading…
Reference in New Issue
Block a user