net: ipv4: fix drop handling in ip_list_rcv() and ip_list_rcv_finish()

Since callees (ip_rcv_core() and ip_rcv_finish_core()) might free or steal
the skb, we can't use the list_cut_before() method; we can't even do a
list_del(&skb->list) in the drop case, because skb might have already been
freed and reused. So instead, take each skb off the source list before
processing, and add it to the sublist afterwards if it wasn't freed or
stolen.

Fixes: 5fa12739a5 ("net: ipv4: listify ip_rcv_finish")
Fixes: 17266ee939 ("net: ipv4: listified version of ip_rcv")
Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit a4ca8b7df7
parent 0eaec62a91
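To illustrate the pattern the message describes, here is a minimal userspace sketch, not the kernel code itself: "item", process_item() and dispatch_sublist() are hypothetical stand-ins for the skb, for ip_rcv_core()/ip_rcv_finish_core() (which may free their argument), and for ip_sublist_rcv_finish(); the list helpers reimplement just enough of the <linux/list.h> semantics to compile on their own. Each entry is unlinked before it is processed, so a dropped (freed) entry is never touched again, and only survivors are queued onto the sublist that is dispatched as a batch.

/*
 * Minimal sketch of the drop-safe batching pattern described above.
 * All names here are illustrative stand-ins, not kernel APIs.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->next = e->prev = e;		/* leave the node self-linked */
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

static int list_empty(const struct list_head *h) { return h->next == h; }

struct item {				/* stand-in for struct sk_buff */
	struct list_head list;
	int value;
};

#define list_entry(ptr) ((struct item *)((char *)(ptr) - offsetof(struct item, list)))

/* Stand-in for ip_rcv_finish_core(): may free the item and signal a drop. */
static int process_item(struct item *it)
{
	if (it->value % 3 == 0) {	/* pretend every third packet is bad */
		free(it);		/* callee frees it; caller must not touch it */
		return -1;		/* drop indication */
	}
	return 0;
}

/* Stand-in for ip_sublist_rcv_finish(): deliver the batch of survivors. */
static void dispatch_sublist(struct list_head *sub)
{
	while (!list_empty(sub)) {
		struct item *it = list_entry(sub->next);

		list_del(&it->list);
		printf("delivered %d\n", it->value);
		free(it);
	}
}

int main(void)
{
	struct list_head head, sublist;
	struct list_head *pos, *next;
	int i;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&sublist);
	for (i = 1; i <= 6; i++) {
		struct item *it = malloc(sizeof(*it));

		if (!it)
			break;
		it->value = i;
		list_add_tail(&it->list, &head);
	}

	/* The pattern from the patch: unlink each entry *before* processing,
	 * so a freed entry is never dereferenced again, and re-queue only the
	 * survivors onto the sublist. */
	for (pos = head.next, next = pos->next; pos != &head;
	     pos = next, next = pos->next) {
		struct item *it = list_entry(pos);

		list_del(&it->list);
		if (process_item(it))	/* on drop, 'it' is already gone */
			continue;
		list_add_tail(&it->list, &sublist);
	}
	dispatch_sublist(&sublist);
	return 0;
}

Compared with the previous list_cut_before() approach, the per-entry list_del()/list_add_tail() costs a few extra pointer writes per packet, but it never touches skb->list after the callee has had a chance to free or steal the skb.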
@@ -540,24 +540,27 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
 	struct sk_buff *skb, *next;
 	struct list_head sublist;
 
+	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
 		struct dst_entry *dst;
 
+		list_del(&skb->list);
 		if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP)
 			continue;
 
 		dst = skb_dst(skb);
 		if (curr_dst != dst) {
 			/* dispatch old sublist */
-			list_cut_before(&sublist, head, &skb->list);
 			if (!list_empty(&sublist))
 				ip_sublist_rcv_finish(&sublist);
 			/* start new sublist */
+			INIT_LIST_HEAD(&sublist);
 			curr_dst = dst;
 		}
+		list_add_tail(&skb->list, &sublist);
 	}
 	/* dispatch final sublist */
-	ip_sublist_rcv_finish(head);
+	ip_sublist_rcv_finish(&sublist);
 }
 
 static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
@@ -577,24 +580,27 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
 	struct sk_buff *skb, *next;
 	struct list_head sublist;
 
+	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
 		struct net_device *dev = skb->dev;
 		struct net *net = dev_net(dev);
 
+		list_del(&skb->list);
 		skb = ip_rcv_core(skb, net);
 		if (skb == NULL)
 			continue;
 
 		if (curr_dev != dev || curr_net != net) {
 			/* dispatch old sublist */
-			list_cut_before(&sublist, head, &skb->list);
 			if (!list_empty(&sublist))
-				ip_sublist_rcv(&sublist, dev, net);
+				ip_sublist_rcv(&sublist, curr_dev, curr_net);
 			/* start new sublist */
+			INIT_LIST_HEAD(&sublist);
 			curr_dev = dev;
 			curr_net = net;
 		}
+		list_add_tail(&skb->list, &sublist);
 	}
 	/* dispatch final sublist */
-	ip_sublist_rcv(head, curr_dev, curr_net);
+	ip_sublist_rcv(&sublist, curr_dev, curr_net);
 }