Merge branch 'bpf-fixes'

Daniel Borkmann says:

====================
bpf: couple of fixes

These are two fixes for BPF: one introduces an xmit recursion limiter for
tc bpf programs, and the other rejects malformed (NULL or wrong-sized)
classic filters a bit earlier. For more details please see the individual
patches. I have no strong opinion on which tree they should go to; they
apply to both, but net-next seems fine to me.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller <davem@davemloft.net> on 2016-06-10 18:00:57 -07:00
commit 92595aea8a
3 files changed, 53 insertions(+), 33 deletions(-)
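For context on the first fix: a tc bpf program attached at egress can redirect a packet back out of the device it is already transmitting on, so every dev_queue_xmit() re-enters the same program. The new __bpf_tx_skb() helper below increments a per-cpu xmit_recursion counter around each nested transmit and drops the packet once XMIT_RECURSION_LIMIT is exceeded. A minimal sketch of such a buggy classifier is shown here for illustration only; the section name, ifindex 2 and the "bpf_helpers.h" stub header are assumptions, not part of this series:

/* Hypothetical buggy tc classifier (restricted C, clang -target bpf).
 * Cloning the packet back out of its own egress device re-enters the
 * tc hook on every transmit; the per-cpu recursion limit added in this
 * series is what breaks the loop. */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"	/* assumed: SEC() and helper prototypes */

SEC("classifier")
int loopy_redirect(struct __sk_buff *skb)
{
	/* ifindex 2 is assumed to be the device this program is
	 * attached to, which is what makes the redirect recursive. */
	bpf_clone_redirect(skb, 2, 0);
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";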

include/linux/netdevice.h

@@ -2389,6 +2389,8 @@ void synchronize_net(void);
 int init_dummy_netdev(struct net_device *dev);
 
 DECLARE_PER_CPU(int, xmit_recursion);
+#define XMIT_RECURSION_LIMIT	10
+
 static inline int dev_recursion_level(void)
 {
 	return this_cpu_read(xmit_recursion);

net/core/dev.c

@@ -3144,8 +3144,6 @@ static void skb_update_prio(struct sk_buff *skb)
 DEFINE_PER_CPU(int, xmit_recursion);
 EXPORT_SYMBOL(xmit_recursion);
 
-#define RECURSION_LIMIT 10
-
 /**
  *	dev_loopback_xmit - loop back @skb
  *	@net: network namespace this loopback is happening in
@@ -3388,8 +3386,8 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 		int cpu = smp_processor_id(); /* ok because BHs are off */
 
 		if (txq->xmit_lock_owner != cpu) {
-
-			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
+			if (unlikely(__this_cpu_read(xmit_recursion) >
+				     XMIT_RECURSION_LIMIT))
 				goto recursion_alert;
 
 			skb = validate_xmit_skb(skb, dev);
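The hunk above only renames the limit used by the check; for reference, the counter it reads is incremented a few lines further down in the same function, around the actual transmit. The snippet below is approximate surrounding context quoted from memory, not part of this diff:

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}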

net/core/filter.c

@@ -748,6 +748,17 @@ static bool chk_code_allowed(u16 code_to_probe)
 	return codes[code_to_probe];
 }
 
+static bool bpf_check_basics_ok(const struct sock_filter *filter,
+				unsigned int flen)
+{
+	if (filter == NULL)
+		return false;
+	if (flen == 0 || flen > BPF_MAXINSNS)
+		return false;
+
+	return true;
+}
+
 /**
  *	bpf_check_classic - verify socket filter code
  *	@filter: filter to verify
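The second fix funnels the NULL-pointer and length checks into bpf_check_basics_ok(), so malformed classic filters are rejected before any program memory is allocated. A minimal userspace illustration using the standard SO_ATTACH_FILTER socket option (the socket type is arbitrary):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/socket.h>
#include <linux/filter.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sock_filter insns[1] = { };	/* placeholder storage */
	/* len == 0 used to travel all the way into bpf_check_classic()
	 * after a program allocation; it is now caught up front by
	 * bpf_check_basics_ok() and fails with -EINVAL. */
	struct sock_fprog fprog = { .len = 0, .filter = insns };

	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
		       &fprog, sizeof(fprog)) < 0)
		printf("filter rejected: %s\n", strerror(errno));
	return 0;
}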
@@ -768,9 +779,6 @@ static int bpf_check_classic(const struct sock_filter *filter,
 	bool anc_found;
 	int pc;
 
-	if (flen == 0 || flen > BPF_MAXINSNS)
-		return -EINVAL;
-
 	/* Check the filter code now */
 	for (pc = 0; pc < flen; pc++) {
 		const struct sock_filter *ftest = &filter[pc];
@@ -1065,7 +1073,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
 	struct bpf_prog *fp;
 
 	/* Make sure new filter is there and in the right amounts. */
-	if (fprog->filter == NULL)
+	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
 		return -EINVAL;
 
 	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
@@ -1112,7 +1120,7 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
 	int err;
 
 	/* Make sure new filter is there and in the right amounts. */
-	if (fprog->filter == NULL)
+	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
 		return -EINVAL;
 
 	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
@@ -1207,7 +1215,6 @@ static
 struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
 {
 	unsigned int fsize = bpf_classic_proglen(fprog);
-	unsigned int bpf_fsize = bpf_prog_size(fprog->len);
 	struct bpf_prog *prog;
 	int err;
 
@@ -1215,10 +1222,10 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
 		return ERR_PTR(-EPERM);
 
 	/* Make sure new filter is there and in the right amounts. */
-	if (fprog->filter == NULL)
+	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
 		return ERR_PTR(-EINVAL);
 
-	prog = bpf_prog_alloc(bpf_fsize, 0);
+	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
 	if (!prog)
 		return ERR_PTR(-ENOMEM);
 
@@ -1603,9 +1610,36 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
 	.arg5_type	= ARG_ANYTHING,
 };
 
+static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
+{
+	if (skb_at_tc_ingress(skb))
+		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
+
+	return dev_forward_skb(dev, skb);
+}
+
+static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
+{
+	int ret;
+
+	if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
+		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
+		kfree_skb(skb);
+		return -ENETDOWN;
+	}
+
+	skb->dev = dev;
+
+	__this_cpu_inc(xmit_recursion);
+	ret = dev_queue_xmit(skb);
+	__this_cpu_dec(xmit_recursion);
+
+	return ret;
+}
+
 static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
 {
-	struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2;
+	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	struct net_device *dev;
 
 	if (unlikely(flags & ~(BPF_F_INGRESS)))
@@ -1615,19 +1649,12 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
 	if (unlikely(!dev))
 		return -EINVAL;
 
-	skb2 = skb_clone(skb, GFP_ATOMIC);
-	if (unlikely(!skb2))
+	skb = skb_clone(skb, GFP_ATOMIC);
+	if (unlikely(!skb))
 		return -ENOMEM;
 
-	if (flags & BPF_F_INGRESS) {
-		if (skb_at_tc_ingress(skb2))
-			skb_postpush_rcsum(skb2, skb_mac_header(skb2),
-					   skb2->mac_len);
-		return dev_forward_skb(dev, skb2);
-	}
-
-	skb2->dev = dev;
-	return dev_queue_xmit(skb2);
+	return flags & BPF_F_INGRESS ?
+	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
 
 static const struct bpf_func_proto bpf_clone_redirect_proto = {
@@ -1671,15 +1698,8 @@ int skb_do_redirect(struct sk_buff *skb)
 		return -EINVAL;
 	}
 
-	if (ri->flags & BPF_F_INGRESS) {
-		if (skb_at_tc_ingress(skb))
-			skb_postpush_rcsum(skb, skb_mac_header(skb),
-					   skb->mac_len);
-		return dev_forward_skb(dev, skb);
-	}
-
-	skb->dev = dev;
-	return dev_queue_xmit(skb);
+	return ri->flags & BPF_F_INGRESS ?
+	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
 
 static const struct bpf_func_proto bpf_redirect_proto = {
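Both redirect paths now share __bpf_rx_skb()/__bpf_tx_skb(): bpf_clone_redirect() clones and transmits inside the helper call itself, while bpf_redirect() only records the target and the transmit is carried out by skb_do_redirect() after the program returns TC_ACT_REDIRECT. Either way an egress target goes through __bpf_tx_skb() and is therefore subject to the recursion limit. A rough sketch from the program side; the section name and ifindexes 3 and 4 are placeholders:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"	/* assumed helper prototypes, as above */

SEC("classifier")
int mirror_and_redirect(struct __sk_buff *skb)
{
	/* Clone the packet and transmit the clone immediately;
	 * the original skb continues through the stack. */
	bpf_clone_redirect(skb, 3, 0);

	/* Only record ifindex 4 as the target here; skb_do_redirect()
	 * performs the transmit after this program returns. */
	return bpf_redirect(4, 0);
}

char _license[] SEC("license") = "GPL";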