rapidio/rionet: fix multicast packet transmit logic
Fix multicast packet transmit logic to account for repetitive transmission
of single skb:

 - correct check for available buffers (this bug may produce NULL pointer
   crash dump in case of heavy traffic);

 - update skb user count (incorrect user counter causes a warning dump
   from net_tx_action routine during multicast transfers in systems with
   three or more rionet participants).

Signed-off-by: Alexandre Bounine <alexandre.bounine@idt.com>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent de4ec99c32
commit 7c4a6106d6
@@ -79,6 +79,7 @@ static int rionet_capable = 1;
  * on system trade-offs.
  */
 static struct rio_dev **rionet_active;
+static int nact;        /* total number of active rionet peers */
 
 #define is_rionet_capable(src_ops, dst_ops) \
                 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
@@ -175,6 +176,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         struct ethhdr *eth = (struct ethhdr *)skb->data;
         u16 destid;
         unsigned long flags;
+        int add_num = 1;
 
         local_irq_save(flags);
         if (!spin_trylock(&rnet->tx_lock)) {
@@ -182,7 +184,10 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                 return NETDEV_TX_LOCKED;
         }
 
-        if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
+        if (is_multicast_ether_addr(eth->h_dest))
+                add_num = nact;
+
+        if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
                 netif_stop_queue(ndev);
                 spin_unlock_irqrestore(&rnet->tx_lock, flags);
                 printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
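The hunk above is the buffer-availability fix: a multicast frame is queued to the outbound mailbox once per active peer, so the transmit ring must have nact free slots rather than one. A minimal user-space sketch of that check follows (RING_SIZE, ring_has_room() and the peer count are illustrative stand-ins, not driver code):

#include <stdio.h>

#define RING_SIZE 8

static int tx_cnt;              /* ring slots currently in flight */
static int nact = 3;            /* active peers (hypothetical value) */

/* Return 1 if a frame that will occupy 'slots' ring entries may be queued. */
static int ring_has_room(int slots)
{
        return (tx_cnt + slots) <= RING_SIZE;
}

static int send_frame(int is_multicast)
{
        /* A multicast frame is queued once per active peer. */
        int add_num = is_multicast ? nact : 1;

        if (!ring_has_room(add_num))
                return -1;      /* stop the queue instead of overrunning */

        tx_cnt += add_num;
        return 0;
}

int main(void)
{
        tx_cnt = RING_SIZE - 1; /* one free slot left */

        printf("multicast frame accepted: %s\n",
               send_frame(1) == 0 ? "yes" : "no (queue would be stopped)");
        return 0;
}

With the old "tx_cnt + 1" test, a frame in this situation would have been accepted and the fan-out loop would have run past the available ring entries, which is consistent with the NULL pointer crash mentioned in the commit message.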
@@ -191,11 +196,16 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         }
 
         if (is_multicast_ether_addr(eth->h_dest)) {
+                int count = 0;
                 for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
                                 i++)
-                        if (rionet_active[i])
+                        if (rionet_active[i]) {
                                 rionet_queue_tx_msg(skb, ndev,
                                                     rionet_active[i]);
+                                if (count)
+                                        atomic_inc(&skb->users);
+                                count++;
+                        }
         } else if (RIONET_MAC_MATCH(eth->h_dest)) {
                 destid = RIONET_GET_DESTID(eth->h_dest);
                 if (rionet_active[destid])
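This hunk is the skb user-count fix: the same skb is handed to the outbound mailbox once per active peer, and each completed transmit drops one reference, so every transmit beyond the first has to take an extra reference up front. A rough user-space model of that accounting (struct buf and buf_put() are stand-ins for struct sk_buff and its release path; the peer count is hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct buf {
        int users;              /* stands in for skb->users */
};

static void buf_put(struct buf *b)
{
        /* Each completed transmit drops one reference; free at zero. */
        if (--b->users == 0) {
                printf("buffer freed\n");
                free(b);
        }
}

/* Queue the same buffer to every active peer, as the multicast path does. */
static void multicast_xmit(struct buf *b, int npeers)
{
        int count = 0;
        int i;

        for (i = 0; i < npeers; i++) {
                /* The caller's reference covers the first transmit; every
                 * further transmit needs its own, or the buffer is released
                 * while copies are still queued. */
                if (count)
                        b->users++;
                count++;
                /* ... hand 'b' to the outbound mailbox here ... */
        }

        /* Completion handling runs once per queued transmit. */
        for (i = 0; i < count; i++)
                buf_put(b);
}

int main(void)
{
        struct buf *b = malloc(sizeof(*b));

        if (!b)
                return 1;
        b->users = 1;           /* reference owned by the sender */
        multicast_xmit(b, 3);   /* three active peers (hypothetical) */
        return 0;
}

Without the extra reference for the second and later transmits, the count reaches zero while copies are still queued, which matches the warning dump from net_tx_action reported for systems with three or more rionet participants.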
@@ -220,14 +230,17 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
         if (info == RIONET_DOORBELL_JOIN) {
                 if (!rionet_active[sid]) {
                         list_for_each_entry(peer, &rionet_peers, node) {
-                                if (peer->rdev->destid == sid)
+                                if (peer->rdev->destid == sid) {
                                         rionet_active[sid] = peer->rdev;
+                                        nact++;
+                                }
                         }
                         rio_mport_send_doorbell(mport, sid,
                                                 RIONET_DOORBELL_JOIN);
                 }
         } else if (info == RIONET_DOORBELL_LEAVE) {
                 rionet_active[sid] = NULL;
+                nact--;
         } else {
                 if (netif_msg_intr(rnet))
                         printk(KERN_WARNING "%s: unhandled doorbell\n",
@@ -523,6 +536,7 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
 
                 rc = rionet_setup_netdev(rdev->net->hport, ndev);
                 rionet_check = 1;
+                nact = 0;
         }
 
         /*