[NET]: Hide the queue_mapping field inside netif_subqueue_stopped
Many places read the queue_mapping field from the skb just to pass it to netif_subqueue_stopped(), even though it will be 0 in the common (non-multiqueue) case. Add a helper that works on the sk_buff directly and rename the queue-index variant to __netif_subqueue_stopped().

Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4e3ab47a54
commit 668f895a85
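
For illustration only (this snippet is not part of the commit): a minimal sketch of how a driver transmit path might check a subqueue after this change, passing the skb itself instead of reading skb->queue_mapping by hand. The function name example_hard_start_xmit is hypothetical; only netif_queue_stopped(), netif_subqueue_stopped(), skb_get_queue_mapping() and the NETDEV_TX_* return codes come from the API touched by this patch.

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Hypothetical driver xmit path (illustration, not part of this commit). */
    static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
    	/*
    	 * Before this patch a caller read the index itself:
    	 *	netif_subqueue_stopped(dev, skb->queue_mapping)
    	 * After it, the helper hides queue_mapping behind
    	 * skb_get_queue_mapping().
    	 */
    	if (netif_queue_stopped(dev) || netif_subqueue_stopped(dev, skb))
    		return NETDEV_TX_BUSY;

    	/* ... hand the packet to the hardware here ... */
    	return NETDEV_TX_OK;
    }
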
@@ -996,7 +996,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
  *
  *	Check individual transmit queue of a device with multiple transmit queues.
  */
-static inline int netif_subqueue_stopped(const struct net_device *dev,
+static inline int __netif_subqueue_stopped(const struct net_device *dev,
 					 u16 queue_index)
 {
 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
@@ -1007,6 +1007,11 @@ static inline int netif_subqueue_stopped(const struct net_device *dev,
 #endif
 }
 
+static inline int netif_subqueue_stopped(const struct net_device *dev,
+					 struct sk_buff *skb)
+{
+	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
+}
 
 /**
  *	netif_wake_subqueue - allow sending packets on subqueue

@@ -1553,7 +1553,7 @@ gso:
 				return rc;
 			}
 			if (unlikely((netif_queue_stopped(dev) ||
-				     netif_subqueue_stopped(dev, skb->queue_mapping)) &&
+				     netif_subqueue_stopped(dev, skb)) &&
 				     skb->next))
 				return NETDEV_TX_BUSY;
 		} while (skb->next);
@@ -1692,7 +1692,7 @@ gso:
 			HARD_TX_LOCK(dev, cpu);
 
 			if (!netif_queue_stopped(dev) &&
-			    !netif_subqueue_stopped(dev, skb->queue_mapping)) {
+			    !netif_subqueue_stopped(dev, skb)) {
 				rc = 0;
 				if (!dev_hard_start_xmit(skb, dev)) {
 					HARD_TX_UNLOCK(dev);

@@ -67,7 +67,7 @@ static void queue_process(struct work_struct *work)
 		local_irq_save(flags);
 		netif_tx_lock(dev);
 		if ((netif_queue_stopped(dev) ||
-		     netif_subqueue_stopped(dev, skb->queue_mapping)) ||
+		     netif_subqueue_stopped(dev, skb)) ||
 		     dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
 			netif_tx_unlock(dev);
@@ -269,7 +269,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	     tries > 0; --tries) {
 		if (netif_tx_trylock(dev)) {
 			if (!netif_queue_stopped(dev) &&
-			    !netif_subqueue_stopped(dev, skb->queue_mapping))
+			    !netif_subqueue_stopped(dev, skb))
 				status = dev->hard_start_xmit(skb, dev);
 			netif_tx_unlock(dev);
 

@@ -3383,7 +3383,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 	if ((netif_queue_stopped(odev) ||
 	     (pkt_dev->skb &&
-	      netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) ||
+	      netif_subqueue_stopped(odev, pkt_dev->skb))) ||
 	    need_resched()) {
 		idle_start = getCurUs();
 
@@ -3400,7 +3400,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->idle_acc += getCurUs() - idle_start;
 
 		if (netif_queue_stopped(odev) ||
-		    netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) {
+		    netif_subqueue_stopped(odev, pkt_dev->skb)) {
 			pkt_dev->next_tx_us = getCurUs();	/* TODO */
 			pkt_dev->next_tx_ns = 0;
 			goto out;	/* Try the next interface */
@@ -3429,7 +3429,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 	netif_tx_lock_bh(odev);
 	if (!netif_queue_stopped(odev) &&
-	    !netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) {
+	    !netif_subqueue_stopped(odev, pkt_dev->skb)) {
 
 		atomic_inc(&(pkt_dev->skb->users));
 retry_now:

@@ -284,7 +284,7 @@ restart:
 		if (slave->qdisc_sleeping != q)
 			continue;
 		if (netif_queue_stopped(slave) ||
-		    netif_subqueue_stopped(slave, subq) ||
+		    __netif_subqueue_stopped(slave, subq) ||
 		    !netif_running(slave)) {
 			busy = 1;
 			continue;
@@ -294,7 +294,7 @@ restart:
 		case 0:
 			if (netif_tx_trylock(slave)) {
 				if (!netif_queue_stopped(slave) &&
-				    !netif_subqueue_stopped(slave, subq) &&
+				    !__netif_subqueue_stopped(slave, subq) &&
 				    slave->hard_start_xmit(skb, slave) == 0) {
 					netif_tx_unlock(slave);
 					master->slaves = NEXT_SLAVE(q);
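
As a follow-up note (not part of the patch): when a caller already holds a queue index rather than an skb, as in the last two hunks above where subq is computed by the caller, the renamed __netif_subqueue_stopped() keeps the old index-based calling convention. A minimal sketch under that assumption; the wrapper name example_slave_busy is hypothetical, the three helpers it calls are the real ones used above.

    #include <linux/netdevice.h>

    /* Illustration only: index-based check mirroring the last two hunks above. */
    static int example_slave_busy(struct net_device *slave, u16 subq)
    {
    	return netif_queue_stopped(slave) ||
    	       __netif_subqueue_stopped(slave, subq) ||
    	       !netif_running(slave);
    }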