Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [VLAN]: Avoid a 4-order allocation.
  [HDLC] Fix dev->header_cache_update having a random value.
  [NetLabel]: Verify sensitivity level has a valid CIPSO mapping
  [PPPOE]: Key connections properly on local device.
  [AF_UNIX]: Test against sk_max_ack_backlog properly.
  [NET]: Fix bugs in "Whether sock accept queue is full" checking
commit 6d04e3b04b
Linus Torvalds, 2007-03-04 13:16:49 -08:00
32 changed files with 165 additions and 130 deletions
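For context on the "[VLAN]: Avoid a 4-order allocation" change below: the old struct vlan_group embedded one flat array of 4096 net_device pointers, and the patch splits it into 8 separately allocated parts plus vlan_group_get_device()/vlan_group_set_device() accessors. The following sketch is not part of the commit; it is a minimal user-space illustration of the size arithmetic only, assuming 8-byte pointers and 4 KiB pages as on a typical 64-bit build.

/* Illustration only (not kernel code): why a flat array of 4096
 * net_device pointers forces a high-order allocation, while each of
 * the 8 split parts fits in a single page.
 * Assumptions: 8-byte pointers, 4 KiB pages.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long array_len = 4096;	/* VLAN_GROUP_ARRAY_LEN */
	const unsigned long parts = 8;		/* VLAN_GROUP_ARRAY_SPLIT_PARTS */
	const unsigned long ptr = 8;		/* assumed sizeof(struct net_device *) */
	const unsigned long page = 4096;

	unsigned long flat = array_len * ptr;		/* 32768 bytes */
	unsigned long part = (array_len / parts) * ptr;	/* 4096 bytes  */

	/* The flat array alone spans 8 pages; with the other vlan_group
	 * members the allocation spills past 32 KiB and a power-of-two
	 * kmalloc size class rounds it up to 64 KiB, i.e. a 16-page,
	 * order-4 allocation.  Each split part is one page (order 0).
	 */
	printf("flat array : %lu bytes (%lu pages)\n", flat, flat / page);
	printf("split part : %lu bytes (%lu page)\n", part, part / page);
	return 0;
}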

View File

@@ -448,8 +448,7 @@ static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	spin_lock_irqsave(&cp->lock, flags);
 	cp->cpcmd &= ~RxVlanOn;
 	cpw16(CpCmd, cp->cpcmd);
-	if (cp->vlgrp)
-		cp->vlgrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(cp->vlgrp, vid, NULL);
 	spin_unlock_irqrestore(&cp->lock, flags);
 }
 #endif /* CP_VLAN_TAG_USED */

View File

@@ -2293,10 +2293,7 @@ static void ace_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	local_irq_save(flags);
 	ace_mask_irq(dev);
-	if (ap->vlgrp)
-		ap->vlgrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(ap->vlgrp, vid, NULL);
 	ace_unmask_irq(dev);
 	local_irq_restore(flags);
 }

View File

@ -1737,8 +1737,7 @@ static void amd8111e_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid
{ {
struct amd8111e_priv *lp = netdev_priv(dev); struct amd8111e_priv *lp = netdev_priv(dev);
spin_lock_irq(&lp->lock); spin_lock_irq(&lp->lock);
if (lp->vlgrp) vlan_group_set_device(lp->vlgrp, vid, NULL);
lp->vlgrp->vlan_devices[vid] = NULL;
spin_unlock_irq(&lp->lock); spin_unlock_irq(&lp->lock);
} }
#endif #endif

View File

@@ -1252,8 +1252,7 @@ static void atl1_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	spin_lock_irqsave(&adapter->lock, flags);
 	/* atl1_irq_disable(adapter); */
-	if (adapter->vlgrp)
-		adapter->vlgrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(adapter->vlgrp, vid, NULL);
 	/* atl1_irq_enable(adapter); */
 	spin_unlock_irqrestore(&adapter->lock, flags);
 	/* We don't do Vlan filtering */
@@ -1266,7 +1265,7 @@ static void atl1_restore_vlan(struct atl1_adapter *adapter)
 	if (adapter->vlgrp) {
 		u16 vid;
 		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
-			if (!adapter->vlgrp->vlan_devices[vid])
+			if (!vlan_group_get_device(adapter->vlgrp, vid))
 				continue;
 			atl1_vlan_rx_add_vid(adapter->netdev, vid);
 		}

View File

@@ -4467,9 +4467,7 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
 	struct bnx2 *bp = netdev_priv(dev);
 	bnx2_netif_stop(bp);
-	if (bp->vlgrp)
-		bp->vlgrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(bp->vlgrp, vid, NULL);
 	bnx2_set_rx_mode(dev);
 	bnx2_netif_start(bp);

View File

@@ -488,9 +488,9 @@ static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
 			/* Save and then restore vlan_dev in the grp array,
 			 * since the slave's driver might clear it.
 			 */
-			vlan_dev = bond->vlgrp->vlan_devices[vid];
+			vlan_dev = vlan_group_get_device(bond->vlgrp, vid);
 			slave_dev->vlan_rx_kill_vid(slave_dev, vid);
-			bond->vlgrp->vlan_devices[vid] = vlan_dev;
+			vlan_group_set_device(bond->vlgrp, vid, vlan_dev);
 		}
 	}
@@ -550,9 +550,9 @@ static void bond_del_vlans_from_slave(struct bonding *bond, struct net_device *s
 		/* Save and then restore vlan_dev in the grp array,
 		 * since the slave's driver might clear it.
 		 */
-		vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+		vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
 		slave_dev->vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
-		bond->vlgrp->vlan_devices[vlan->vlan_id] = vlan_dev;
+		vlan_group_set_device(bond->vlgrp, vlan->vlan_id, vlan_dev);
 	}
 unreg:
@@ -2397,7 +2397,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 	vlan_id = 0;
 	list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
 				 vlan_list) {
-		vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+		vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
 		if (vlan_dev == rt->u.dst.dev) {
 			vlan_id = vlan->vlan_id;
 			dprintk("basa: vlan match on %s %d\n",
@@ -2444,7 +2444,7 @@ static void bond_send_gratuitous_arp(struct bonding *bond)
 	}
 	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-		vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+		vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
 		if (vlan->vlan_ip) {
 			bond_arp_send(slave->dev, ARPOP_REPLY, vlan->vlan_ip,
 				      vlan->vlan_ip, vlan->vlan_id);
@@ -3371,7 +3371,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
 	list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
 				 vlan_list) {
-		vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+		vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
 		if (vlan_dev == event_dev) {
 			switch (event) {
 			case NETDEV_UP:

View File

@@ -889,8 +889,7 @@ static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	struct adapter *adapter = dev->priv;
 	spin_lock_irq(&adapter->async_lock);
-	if (adapter->vlan_grp)
-		adapter->vlan_grp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
 	spin_unlock_irq(&adapter->async_lock);
 }
 #endif

View File

@@ -160,14 +160,16 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
 	int i;
 	for_each_port(adapter, i) {
-		const struct vlan_group *grp;
+		struct vlan_group *grp;
 		struct net_device *dev = adapter->port[i];
 		const struct port_info *p = netdev_priv(dev);
 		if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
 			if (vlan && vlan != VLAN_VID_MASK) {
 				grp = p->vlan_grp;
-				dev = grp ? grp->vlan_devices[vlan] : NULL;
+				dev = NULL;
+				if (grp)
+					dev = vlan_group_get_device(grp, vlan);
 			} else
 				while (dev->master)
 					dev = dev->master;

View File

@@ -376,7 +376,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
 	uint16_t vid = adapter->hw.mng_cookie.vlan_id;
 	uint16_t old_vid = adapter->mng_vlan_id;
 	if (adapter->vlgrp) {
-		if (!adapter->vlgrp->vlan_devices[vid]) {
+		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
 			if (adapter->hw.mng_cookie.status &
 				E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
 				e1000_vlan_rx_add_vid(netdev, vid);
@@ -386,7 +386,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
 			if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
 					(vid != old_vid) &&
-					!adapter->vlgrp->vlan_devices[old_vid])
+					!vlan_group_get_device(adapter->vlgrp, old_vid))
 				e1000_vlan_rx_kill_vid(netdev, old_vid);
 		} else
 			adapter->mng_vlan_id = vid;
@@ -1482,7 +1482,7 @@ e1000_close(struct net_device *netdev)
 	if ((adapter->hw.mng_cookie.status &
 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
 	     !(adapter->vlgrp &&
-			  adapter->vlgrp->vlan_devices[adapter->mng_vlan_id])) {
+	       vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
 		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
 	}
@@ -4998,10 +4998,7 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
 	uint32_t vfta, index;
 	e1000_irq_disable(adapter);
-	if (adapter->vlgrp)
-		adapter->vlgrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(adapter->vlgrp, vid, NULL);
 	e1000_irq_enable(adapter);
 	if ((adapter->hw.mng_cookie.status &
@@ -5027,7 +5024,7 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
 	if (adapter->vlgrp) {
 		uint16_t vid;
 		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
-			if (!adapter->vlgrp->vlan_devices[vid])
+			if (!vlan_group_get_device(adapter->vlgrp, vid))
 				continue;
 			e1000_vlan_rx_add_vid(adapter->netdev, vid);
 		}

View File

@@ -1939,8 +1939,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	int index;
 	u64 hret;
-	if (port->vgrp)
-		port->vgrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(port->vgrp, vid, NULL);
 	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!cb1) {

View File

@@ -1132,8 +1132,7 @@ static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
 	spin_lock_irqsave(&priv->rxlock, flags);
-	if (priv->vlgrp)
-		priv->vlgrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(priv->vlgrp, vid, NULL);
 	spin_unlock_irqrestore(&priv->rxlock, flags);
 }

View File

@@ -2213,8 +2213,7 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
 	ixgb_irq_disable(adapter);
-	if(adapter->vlgrp)
-		adapter->vlgrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(adapter->vlgrp, vid, NULL);
 	ixgb_irq_enable(adapter);
@@ -2234,7 +2233,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
 	if(adapter->vlgrp) {
 		uint16_t vid;
 		for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
-			if(!adapter->vlgrp->vlan_devices[vid])
+			if(!vlan_group_get_device(adapter->vlgrp, vid))
 				continue;
 			ixgb_vlan_rx_add_vid(adapter->netdev, vid);
 		}

View File

@@ -514,8 +514,7 @@ static void ns83820_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid
 	spin_lock_irq(&dev->misc_lock);
 	spin_lock(&dev->tx_lock);
-	if (dev->vlgrp)
-		dev->vlgrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(dev->vlgrp, vid, NULL);
 	spin_unlock(&dev->tx_lock);
 	spin_unlock_irq(&dev->misc_lock);
 }

View File

@@ -7,6 +7,12 @@
  *
  * Version:	0.7.0
  *
+ * 070228 :	Fix to allow multiple sessions with same remote MAC and same
+ *		session id by including the local device ifindex in the
+ *		tuple identifying a session. This also ensures packets can't
+ *		be injected into a session from interfaces other than the one
+ *		specified by userspace. Florian Zumbiehl <florz@florz.de>
+ *		(Oh, BTW, this one is YYMMDD, in case you were wondering ...)
 * 220102 :	Fix module use count on failure in pppoe_create, pppox_sk -acme
 * 030700 :	Fixed connect logic to allow for disconnect.
 * 270700 :	Fixed potential SMP problems; we must protect against
@@ -127,14 +133,14 @@ static struct pppox_sock *item_hash_table[PPPOE_HASH_SIZE];
  *  Set/get/delete/rehash items  (internal versions)
  *
  **********************************************************************/
-static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr)
+static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr, int ifindex)
 {
 	int hash = hash_item(sid, addr);
 	struct pppox_sock *ret;
 	ret = item_hash_table[hash];
-	while (ret && !cmp_addr(&ret->pppoe_pa, sid, addr))
+	while (ret && !(cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_dev->ifindex == ifindex))
 		ret = ret->next;
 	return ret;
@@ -147,21 +153,19 @@ static int __set_item(struct pppox_sock *po)
 	ret = item_hash_table[hash];
 	while (ret) {
-		if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa))
+		if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_dev->ifindex == po->pppoe_dev->ifindex)
 			return -EALREADY;
 		ret = ret->next;
 	}
-	if (!ret) {
-		po->next = item_hash_table[hash];
-		item_hash_table[hash] = po;
-	}
+	po->next = item_hash_table[hash];
+	item_hash_table[hash] = po;
 	return 0;
 }
-static struct pppox_sock *__delete_item(unsigned long sid, char *addr)
+static struct pppox_sock *__delete_item(unsigned long sid, char *addr, int ifindex)
 {
 	int hash = hash_item(sid, addr);
 	struct pppox_sock *ret, **src;
@@ -170,7 +174,7 @@ static struct pppox_sock *__delete_item(unsigned long sid, char *addr)
 	src = &item_hash_table[hash];
 	while (ret) {
-		if (cmp_addr(&ret->pppoe_pa, sid, addr)) {
+		if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_dev->ifindex == ifindex) {
 			*src = ret->next;
 			break;
 		}
@@ -188,12 +192,12 @@ static struct pppox_sock *__delete_item(unsigned long sid, char *addr)
  *
  **********************************************************************/
 static inline struct pppox_sock *get_item(unsigned long sid,
-					  unsigned char *addr)
+					  unsigned char *addr, int ifindex)
 {
 	struct pppox_sock *po;
 	read_lock_bh(&pppoe_hash_lock);
-	po = __get_item(sid, addr);
+	po = __get_item(sid, addr, ifindex);
 	if (po)
 		sock_hold(sk_pppox(po));
 	read_unlock_bh(&pppoe_hash_lock);
@@ -203,7 +207,15 @@ static inline struct pppox_sock *get_item(unsigned long sid,
 static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp)
 {
-	return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote);
+	struct net_device *dev = NULL;
+	int ifindex;
+	dev = dev_get_by_name(sp->sa_addr.pppoe.dev);
+	if(!dev)
+		return NULL;
+	ifindex = dev->ifindex;
+	dev_put(dev);
+	return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex);
 }
 static inline int set_item(struct pppox_sock *po)
@@ -220,12 +232,12 @@ static inline int set_item(struct pppox_sock *po)
 	return i;
 }
-static inline struct pppox_sock *delete_item(unsigned long sid, char *addr)
+static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int ifindex)
 {
 	struct pppox_sock *ret;
 	write_lock_bh(&pppoe_hash_lock);
-	ret = __delete_item(sid, addr);
+	ret = __delete_item(sid, addr, ifindex);
 	write_unlock_bh(&pppoe_hash_lock);
 	return ret;
@@ -391,7 +403,7 @@ static int pppoe_rcv(struct sk_buff *skb,
 	ph = (struct pppoe_hdr *) skb->nh.raw;
-	po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source);
+	po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
 	if (po != NULL)
 		return sk_receive_skb(sk_pppox(po), skb, 0);
 drop:
@@ -425,7 +437,7 @@ static int pppoe_disc_rcv(struct sk_buff *skb,
 	if (ph->code != PADT_CODE)
 		goto abort;
-	po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source);
+	po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
 	if (po) {
 		struct sock *sk = sk_pppox(po);
@@ -517,7 +529,7 @@ static int pppoe_release(struct socket *sock)
 	po = pppox_sk(sk);
 	if (po->pppoe_pa.sid) {
-		delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
+		delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_dev->ifindex);
 	}
 	if (po->pppoe_dev)
@@ -539,7 +551,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
 		  int sockaddr_len, int flags)
 {
 	struct sock *sk = sock->sk;
-	struct net_device *dev = NULL;
+	struct net_device *dev;
 	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
 	struct pppox_sock *po = pppox_sk(sk);
 	int error;
@@ -565,7 +577,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
 		pppox_unbind_sock(sk);
 		/* Delete the old binding */
-		delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote);
+		delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote,po->pppoe_dev->ifindex);
 		if(po->pppoe_dev)
 			dev_put(po->pppoe_dev);
@@ -705,7 +717,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
 			break;
 		/* PPPoE address from the user specifies an outbound
-		   PPPoE address to which frames are forwarded to */
+		   PPPoE address which frames are forwarded to */
 		err = -EFAULT;
 		if (copy_from_user(&po->pppoe_relay,
 				   (void __user *)arg,

View File

@@ -890,8 +890,7 @@ static void rtl8169_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	unsigned long flags;
 	spin_lock_irqsave(&tp->lock, flags);
-	if (tp->vlgrp)
-		tp->vlgrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(tp->vlgrp, vid, NULL);
 	spin_unlock_irqrestore(&tp->lock, flags);
 }

View File

@@ -325,8 +325,7 @@ static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
 	unsigned long flags;
 	spin_lock_irqsave(&nic->tx_lock, flags);
-	if (nic->vlgrp)
-		nic->vlgrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(nic->vlgrp, vid, NULL);
 	spin_unlock_irqrestore(&nic->tx_lock, flags);
 }

View File

@@ -1053,8 +1053,7 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
 	sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
-	if (sky2->vlgrp)
-		sky2->vlgrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(sky2->vlgrp, vid, NULL);
 	netif_tx_unlock_bh(dev);
 }

View File

@@ -677,8 +677,7 @@ static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	spin_lock(&np->lock);
 	if (debug > 1)
 		printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
-	if (np->vlgrp)
-		np->vlgrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(np->vlgrp, vid, NULL);
 	set_rx_mode(dev);
 	spin_unlock(&np->lock);
 }
@@ -1738,7 +1737,7 @@ static void set_rx_mode(struct net_device *dev)
 		int vlan_count = 0;
 		void __iomem *filter_addr = ioaddr + HashTable + 8;
 		for (i = 0; i < VLAN_VID_MASK; i++) {
-			if (np->vlgrp->vlan_devices[i]) {
+			if (vlan_group_get_device(np->vlgrp, i)) {
 				if (vlan_count >= 32)
 					break;
 				writew(cpu_to_be16(i), filter_addr);

View File

@@ -9114,8 +9114,7 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	tg3_netif_stop(tp);
 	tg3_full_lock(tp, 0);
-	if (tp->vlgrp)
-		tp->vlgrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(tp->vlgrp, vid, NULL);
 	tg3_full_unlock(tp);
 	if (netif_running(dev))

View File

@@ -746,8 +746,7 @@ typhoon_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
 	struct typhoon *tp = netdev_priv(dev);
 	spin_lock_bh(&tp->state_lock);
-	if(tp->vlgrp)
-		tp->vlgrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(tp->vlgrp, vid, NULL);
 	spin_unlock_bh(&tp->state_lock);
 }

View File

@@ -38,7 +38,7 @@
 #include <linux/hdlc.h>
-static const char* version = "HDLC support module revision 1.20";
+static const char* version = "HDLC support module revision 1.21";
 #undef DEBUG_LINK
@@ -222,19 +222,31 @@ int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		return -EINVAL;
 }
+static void hdlc_setup_dev(struct net_device *dev)
+{
+	/* Re-init all variables changed by HDLC protocol drivers,
+	 * including ether_setup() called from hdlc_raw_eth.c.
+	 */
+	dev->get_stats = hdlc_get_stats;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+	dev->mtu = HDLC_MAX_MTU;
+	dev->type = ARPHRD_RAWHDLC;
+	dev->hard_header_len = 16;
+	dev->addr_len = 0;
+	dev->hard_header = NULL;
+	dev->rebuild_header = NULL;
+	dev->set_mac_address = NULL;
+	dev->hard_header_cache = NULL;
+	dev->header_cache_update = NULL;
+	dev->change_mtu = hdlc_change_mtu;
+	dev->hard_header_parse = NULL;
+}
 static void hdlc_setup(struct net_device *dev)
 {
 	hdlc_device *hdlc = dev_to_hdlc(dev);
-	dev->get_stats = hdlc_get_stats;
-	dev->change_mtu = hdlc_change_mtu;
-	dev->mtu = HDLC_MAX_MTU;
-	dev->type = ARPHRD_RAWHDLC;
-	dev->hard_header_len = 16;
-	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+	hdlc_setup_dev(dev);
 	hdlc->carrier = 1;
 	hdlc->open = 0;
 	spin_lock_init(&hdlc->state_lock);
@@ -294,6 +306,7 @@ void detach_hdlc_protocol(struct net_device *dev)
 	}
 	kfree(hdlc->state);
 	hdlc->state = NULL;
+	hdlc_setup_dev(dev);
 }

View File

@@ -365,10 +365,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
 		memcpy(&state(hdlc)->settings, &new_settings, size);
 		dev->hard_start_xmit = hdlc->xmit;
 		dev->hard_header = cisco_hard_header;
-		dev->hard_header_cache = NULL;
 		dev->type = ARPHRD_CISCO;
-		dev->flags = IFF_POINTOPOINT | IFF_NOARP;
-		dev->addr_len = 0;
 		netif_dormant_on(dev);
 		return 0;
 	}

View File

@@ -1289,10 +1289,7 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
 		memcpy(&state(hdlc)->settings, &new_settings, size);
 		dev->hard_start_xmit = hdlc->xmit;
-		dev->hard_header = NULL;
 		dev->type = ARPHRD_FRAD;
-		dev->flags = IFF_POINTOPOINT | IFF_NOARP;
-		dev->addr_len = 0;
 		return 0;
 	case IF_PROTO_FR_ADD_PVC:

View File

@@ -127,9 +127,7 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
 		if (result)
 			return result;
 		dev->hard_start_xmit = hdlc->xmit;
-		dev->hard_header = NULL;
 		dev->type = ARPHRD_PPP;
-		dev->addr_len = 0;
 		netif_dormant_off(dev);
 		return 0;
 	}

View File

@@ -88,10 +88,7 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
 			return result;
 		memcpy(hdlc->state, &new_settings, size);
 		dev->hard_start_xmit = hdlc->xmit;
-		dev->hard_header = NULL;
 		dev->type = ARPHRD_RAWHDLC;
-		dev->flags = IFF_POINTOPOINT | IFF_NOARP;
-		dev->addr_len = 0;
 		netif_dormant_off(dev);
 		return 0;
 	}

View File

@@ -215,9 +215,7 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
 				       x25_rx, 0)) != 0)
 			return result;
 		dev->hard_start_xmit = x25_xmit;
-		dev->hard_header = NULL;
 		dev->type = ARPHRD_X25;
-		dev->addr_len = 0;
 		netif_dormant_off(dev);
 		return 0;
 	}

View File

@@ -3654,7 +3654,7 @@ qeth_verify_vlan_dev(struct net_device *dev, struct qeth_card *card)
 		return rc;
 	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){
-		if (vg->vlan_devices[i] == dev){
+		if (vlan_group_get_device(vg, i) == dev){
 			rc = QETH_VLAN_CARD;
 			break;
 		}
@@ -5261,7 +5261,7 @@ qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
 	QETH_DBF_TEXT(trace, 4, "frvaddr4");
 	rcu_read_lock();
-	in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]);
+	in_dev = __in_dev_get_rcu(vlan_group_get_device(card->vlangrp, vid));
 	if (!in_dev)
 		goto out;
 	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
@@ -5288,7 +5288,7 @@ qeth_free_vlan_addresses6(struct qeth_card *card, unsigned short vid)
 	QETH_DBF_TEXT(trace, 4, "frvaddr6");
-	in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]);
+	in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
 	if (!in6_dev)
 		return;
 	for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){
@@ -5360,7 +5360,7 @@ qeth_layer2_process_vlans(struct qeth_card *card, int clear)
 	if (!card->vlangrp)
 		return;
 	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
-		if (card->vlangrp->vlan_devices[i] == NULL)
+		if (vlan_group_get_device(card->vlangrp, i) == NULL)
 			continue;
 		if (clear)
 			qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN);
@@ -5398,8 +5398,7 @@ qeth_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	spin_lock_irqsave(&card->vlanlock, flags);
 	/* unregister IP addresses of vlan device */
 	qeth_free_vlan_addresses(card, vid);
-	if (card->vlangrp)
-		card->vlangrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(card->vlangrp, vid, NULL);
 	spin_unlock_irqrestore(&card->vlanlock, flags);
 	if (card->options.layer2)
 		qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
@@ -5662,10 +5661,11 @@ qeth_add_vlan_mc(struct qeth_card *card)
 	vg = card->vlangrp;
 	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
-		if (vg->vlan_devices[i] == NULL ||
-		    !(vg->vlan_devices[i]->flags & IFF_UP))
+		struct net_device *netdev = vlan_group_get_device(vg, i);
+		if (netdev == NULL ||
+		    !(netdev->flags & IFF_UP))
 			continue;
-		in_dev = in_dev_get(vg->vlan_devices[i]);
+		in_dev = in_dev_get(netdev);
 		if (!in_dev)
 			continue;
 		read_lock(&in_dev->mc_list_lock);
@@ -5749,10 +5749,11 @@ qeth_add_vlan_mc6(struct qeth_card *card)
 	vg = card->vlangrp;
 	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
-		if (vg->vlan_devices[i] == NULL ||
-		    !(vg->vlan_devices[i]->flags & IFF_UP))
+		struct net_device *netdev = vlan_group_get_device(vg, i);
+		if (netdev == NULL ||
+		    !(netdev->flags & IFF_UP))
 			continue;
-		in_dev = in6_dev_get(vg->vlan_devices[i]);
+		in_dev = in6_dev_get(netdev);
 		if (!in_dev)
 			continue;
 		read_lock(&in_dev->lock);

View File

@@ -70,15 +70,34 @@ extern void vlan_ioctl_set(int (*hook)(void __user *));
  * depends on completely exhausting the VLAN identifier space.  Thus
  * it gives constant time look-up, but in many cases it wastes memory.
  */
 #define VLAN_GROUP_ARRAY_LEN 4096
+#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
+#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_GROUP_ARRAY_LEN/VLAN_GROUP_ARRAY_SPLIT_PARTS)
 struct vlan_group {
 	int real_dev_ifindex; /* The ifindex of the ethernet(like) device the vlan is attached to. */
 	struct hlist_node	hlist;	/* linked list */
-	struct net_device *vlan_devices[VLAN_GROUP_ARRAY_LEN];
+	struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
 	struct rcu_head		rcu;
 };
+static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, int vlan_id)
+{
+	struct net_device **array;
+	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+	return array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN];
+}
+static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
+					 struct net_device *dev)
+{
+	struct net_device **array;
+	if (!vg)
+		return;
+	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+	array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
+}
 struct vlan_priority_tci_mapping {
 	unsigned long priority;
 	unsigned short vlan_qos; /* This should be shifted when first set, so we only do it
@@ -160,7 +179,7 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
 		return NET_RX_DROP;
 	}
-	skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK];
+	skb->dev = vlan_group_get_device(grp, vlan_tag & VLAN_VID_MASK);
 	if (skb->dev == NULL) {
 		dev_kfree_skb_any(skb);

View File

@@ -426,7 +426,7 @@ static inline void sk_acceptq_added(struct sock *sk)
 static inline int sk_acceptq_is_full(struct sock *sk)
 {
-	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
+	return sk->sk_ack_backlog >= sk->sk_max_ack_backlog;
 }
 /*
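The one-character change above ("[NET]: Fix bugs in 'Whether sock accept queue is full' checking") fixes an off-by-one: with the old ">" test, a listener whose sk_max_ack_backlog is N was not reported full until N+1 connections were already queued. Below is a minimal user-space illustration of the comparison; the helper names are hypothetical and not from the kernel.

/* Illustration only: the old ">" comparison lets the queue grow one
 * entry past the configured backlog before reporting "full".
 */
#include <stdio.h>

static int is_full_old(unsigned int queued, unsigned int backlog)
{
	return queued > backlog;	/* pre-patch behaviour */
}

static int is_full_new(unsigned int queued, unsigned int backlog)
{
	return queued >= backlog;	/* post-patch behaviour */
}

int main(void)
{
	const unsigned int backlog = 2;
	unsigned int queued;

	for (queued = 0; queued <= 3; queued++)
		printf("queued=%u  old_full=%d  new_full=%d\n", queued,
		       is_full_old(queued, backlog),
		       is_full_new(queued, backlog));
	return 0;
}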

View File

@@ -184,14 +184,23 @@ struct net_device *__find_vlan_dev(struct net_device *real_dev,
 	struct vlan_group *grp = __vlan_find_group(real_dev->ifindex);
 	if (grp)
-		return grp->vlan_devices[VID];
+		return vlan_group_get_device(grp, VID);
 	return NULL;
 }
+static void vlan_group_free(struct vlan_group *grp)
+{
+	int i;
+	for (i=0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
+		kfree(grp->vlan_devices_arrays[i]);
+	kfree(grp);
+}
 static void vlan_rcu_free(struct rcu_head *rcu)
 {
-	kfree(container_of(rcu, struct vlan_group, rcu));
+	vlan_group_free(container_of(rcu, struct vlan_group, rcu));
 }
@@ -223,7 +232,7 @@ static int unregister_vlan_dev(struct net_device *real_dev,
 	ret = 0;
 	if (grp) {
-		dev = grp->vlan_devices[vlan_id];
+		dev = vlan_group_get_device(grp, vlan_id);
 		if (dev) {
 			/* Remove proc entry */
 			vlan_proc_rem_dev(dev);
@@ -237,7 +246,7 @@ static int unregister_vlan_dev(struct net_device *real_dev,
 				real_dev->vlan_rx_kill_vid(real_dev, vlan_id);
 			}
-			grp->vlan_devices[vlan_id] = NULL;
+			vlan_group_set_device(grp, vlan_id, NULL);
 			synchronize_net();
@@ -251,7 +260,7 @@ static int unregister_vlan_dev(struct net_device *real_dev,
 			 * group.
 			 */
 			for (i = 0; i < VLAN_VID_MASK; i++)
-				if (grp->vlan_devices[i])
+				if (vlan_group_get_device(grp, i))
 					break;
 			if (i == VLAN_VID_MASK) {
@@ -379,6 +388,7 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
 	struct net_device *new_dev;
 	struct net_device *real_dev; /* the ethernet device */
 	char name[IFNAMSIZ];
+	int i;
 #ifdef VLAN_DEBUG
 	printk(VLAN_DBG "%s: if_name -:%s:-	vid: %i\n",
@@ -544,6 +554,15 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
 		if (!grp)
 			goto out_free_unregister;
+		for (i=0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) {
+			grp->vlan_devices_arrays[i] = kzalloc(
+				sizeof(struct net_device *)*VLAN_GROUP_ARRAY_PART_LEN,
+				GFP_KERNEL);
+			if (!grp->vlan_devices_arrays[i])
+				goto out_free_arrays;
+		}
 		/* printk(KERN_ALERT "VLAN REGISTER:  Allocated new group.\n"); */
 		grp->real_dev_ifindex = real_dev->ifindex;
@@ -554,7 +573,7 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
 			real_dev->vlan_rx_register(real_dev, grp);
 		}
-		grp->vlan_devices[VLAN_ID] = new_dev;
+		vlan_group_set_device(grp, VLAN_ID, new_dev);
 		if (vlan_proc_add_dev(new_dev)<0)/* create it's proc entry */
 			printk(KERN_WARNING "VLAN: failed to add proc entry for %s\n",
@@ -571,6 +590,9 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
 #endif
 	return new_dev;
+out_free_arrays:
+	vlan_group_free(grp);
 out_free_unregister:
 	unregister_netdev(new_dev);
 	goto out_unlock;
@@ -606,7 +628,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	case NETDEV_CHANGE:
 		/* Propagate real device state to vlan devices */
 		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
-			vlandev = grp->vlan_devices[i];
+			vlandev = vlan_group_get_device(grp, i);
 			if (!vlandev)
 				continue;
@@ -617,7 +639,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	case NETDEV_DOWN:
 		/* Put all VLANs for this dev in the down state too. */
 		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
-			vlandev = grp->vlan_devices[i];
+			vlandev = vlan_group_get_device(grp, i);
 			if (!vlandev)
 				continue;
@@ -632,7 +654,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	case NETDEV_UP:
 		/* Put all VLANs for this dev in the up state too. */
 		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
-			vlandev = grp->vlan_devices[i];
+			vlandev = vlan_group_get_device(grp, i);
 			if (!vlandev)
 				continue;
@@ -649,7 +671,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
 			int ret;
-			vlandev = grp->vlan_devices[i];
+			vlandev = vlan_group_get_device(grp, i);
 			if (!vlandev)
 				continue;

View File

@@ -732,11 +732,12 @@ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def,
 		*net_lvl = host_lvl;
 		return 0;
 	case CIPSO_V4_MAP_STD:
-		if (host_lvl < doi_def->map.std->lvl.local_size) {
+		if (host_lvl < doi_def->map.std->lvl.local_size &&
+		    doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) {
 			*net_lvl = doi_def->map.std->lvl.local[host_lvl];
 			return 0;
 		}
-		break;
+		return -EPERM;
 	}
 	return -EINVAL;
@@ -771,7 +772,7 @@ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def,
 			*host_lvl = doi_def->map.std->lvl.cipso[net_lvl];
 			return 0;
 		}
-		break;
+		return -EPERM;
 	}
 	return -EINVAL;

View File

@@ -934,7 +934,7 @@ static long unix_wait_for_peer(struct sock *other, long timeo)
 	sched = !sock_flag(other, SOCK_DEAD) &&
 		!(other->sk_shutdown & RCV_SHUTDOWN) &&
-		(skb_queue_len(&other->sk_receive_queue) >
+		(skb_queue_len(&other->sk_receive_queue) >=
 		 other->sk_max_ack_backlog);
 	unix_state_runlock(other);
@@ -1008,7 +1008,7 @@ restart:
 	if (other->sk_state != TCP_LISTEN)
 		goto out_unlock;
-	if (skb_queue_len(&other->sk_receive_queue) >
+	if (skb_queue_len(&other->sk_receive_queue) >=
 	    other->sk_max_ack_backlog) {
 		err = -EAGAIN;
 		if (!timeo)
@@ -1381,7 +1381,7 @@ restart:
 	}
 	if (unix_peer(other) != sk &&
-	    (skb_queue_len(&other->sk_receive_queue) >
+	    (skb_queue_len(&other->sk_receive_queue) >=
 	     other->sk_max_ack_backlog)) {
 		if (!timeo) {
 			err = -EAGAIN;