pkt_sched: Schedule qdiscs instead of netdev_queue.
When we have shared qdiscs, packets come out of the qdiscs for multiple
transmit queues.  Therefore it doesn't make any sense to schedule the
transmit queue when logically we cannot know ahead of time the TX queue
of the SKB that the qdisc->dequeue() will give us.

Just for sanity I added a BUG check to make sure we never get into a
state where the noop_qdisc is scheduled.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 37437bb2e1
parent 7698b4fcab
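Editor's note: the diff below moves the unit of TX scheduling from the netdev_queue to the Qdisc. The per-CPU softnet_data.output_queue becomes a list of qdiscs, linked through a new Qdisc::next_sched field and guarded by a new __QDISC_STATE_SCHED bit. As an orientation aid, here is a minimal userspace sketch (plain C, not kernel code; locking, per-CPU data, and softirqs are simplified away, and all names merely mirror the kernel's):

/* Model of the new scheme: the thing linked onto the per-CPU
 * output_queue and later run from the TX softirq is a qdisc. */
#include <stdio.h>
#include <stdbool.h>

struct Qdisc {
	const char   *name;
	bool          sched;       /* models the __QDISC_STATE_SCHED bit */
	struct Qdisc *next_sched;  /* models Qdisc::next_sched           */
};

static struct Qdisc *output_queue; /* models softnet_data::output_queue */

/* Models __netif_schedule(): link the qdisc onto the list at most once. */
static void netif_schedule(struct Qdisc *q)
{
	if (!q->sched) {           /* test_and_set_bit(__QDISC_STATE_SCHED) */
		q->sched = true;
		q->next_sched = output_queue;
		output_queue = q;
		/* the kernel raises NET_TX_SOFTIRQ here */
	}
}

/* Models the output_queue half of net_tx_action(). */
static void net_tx_action(void)
{
	struct Qdisc *head = output_queue;

	output_queue = NULL;
	while (head) {
		struct Qdisc *q = head;

		head = head->next_sched;
		q->sched = false;                        /* clear_bit() */
		printf("running qdisc %s\n", q->name);   /* qdisc_run(q) */
	}
}

int main(void)
{
	struct Qdisc a = { .name = "root-eth0" }, b = { .name = "root-eth1" };

	netif_schedule(&a);
	netif_schedule(&a); /* second schedule is a no-op: bit already set */
	netif_schedule(&b);
	net_tx_action();    /* runs eth1 then eth0 (the list is LIFO) */
	return 0;
}

The real __netif_schedule() and net_tx_action() in the hunks below add the BUG_ON(q == &noop_qdisc) sanity check, interrupt masking, and a trylock on the qdisc root lock.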
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -275,7 +275,6 @@ enum netdev_state_t
 {
 	__LINK_STATE_START,
 	__LINK_STATE_PRESENT,
-	__LINK_STATE_SCHED,
 	__LINK_STATE_NOCARRIER,
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
@@ -452,7 +451,6 @@ struct netdev_queue {
 	int			xmit_lock_owner;
 	struct Qdisc		*qdisc_sleeping;
 	struct list_head	qdisc_list;
-	struct netdev_queue	*next_sched;
 } ____cacheline_aligned_in_smp;
 
 /*
@@ -969,7 +967,7 @@ static inline int unregister_gifconf(unsigned int family)
  */
 struct softnet_data
 {
-	struct netdev_queue	*output_queue;
+	struct Qdisc		*output_queue;
 	struct sk_buff_head	input_pkt_queue;
 	struct list_head	poll_list;
 	struct sk_buff		*completion_queue;
@@ -984,12 +982,12 @@ DECLARE_PER_CPU(struct softnet_data,softnet_data);
 
 #define HAVE_NETIF_QUEUE
 
-extern void __netif_schedule(struct netdev_queue *txq);
+extern void __netif_schedule(struct Qdisc *q);
 
 static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
 	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
-		__netif_schedule(txq);
+		__netif_schedule(txq->qdisc);
 }
 
 static inline void netif_tx_schedule_all(struct net_device *dev)
@@ -1042,7 +1040,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 	}
 #endif
 	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
-		__netif_schedule(dev_queue);
+		__netif_schedule(dev_queue->qdisc);
 }
 
 static inline void netif_wake_queue(struct net_device *dev)
@@ -1186,7 +1184,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 		return;
 #endif
 	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
-		__netif_schedule(txq);
+		__netif_schedule(txq->qdisc);
 }
 
 /**
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -84,15 +84,12 @@ extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
 					       struct nlattr *tab);
 extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
 
-extern void __qdisc_run(struct netdev_queue *txq);
+extern void __qdisc_run(struct Qdisc *q);
 
-static inline void qdisc_run(struct netdev_queue *txq)
+static inline void qdisc_run(struct Qdisc *q)
 {
-	struct Qdisc *q = txq->qdisc;
-
-	if (!netif_tx_queue_stopped(txq) &&
-	    !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
-		__qdisc_run(txq);
+	if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
+		__qdisc_run(q);
 }
 
 extern int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
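Editor's note: qdisc_run() now keys the __QDISC_STATE_RUNNING guard off the qdisc alone. The old netif_tx_queue_stopped(txq) test disappears from the inline because, with shared qdiscs, there is no single TX queue to test before dequeue; that check moves into qdisc_restart() (see the sch_generic.c hunks below). A small C11 sketch of the guard, with an atomic_flag standing in for the state bit (userspace model, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

struct Qdisc {
	atomic_flag running;	/* models the __QDISC_STATE_RUNNING bit */
};

static void __qdisc_run(struct Qdisc *q)
{
	printf("draining qdisc %p\n", (void *)q);
	atomic_flag_clear(&q->running);	/* done: let the next caller in */
}

static void qdisc_run(struct Qdisc *q)
{
	/* test-and-set returns the previous value: only the caller that
	 * finds the flag clear may enter __qdisc_run(); everyone else
	 * knows a runner is already active on this qdisc. */
	if (!atomic_flag_test_and_set(&q->running))
		__qdisc_run(q);
}

int main(void)
{
	struct Qdisc q = { .running = ATOMIC_FLAG_INIT };

	qdisc_run(&q);	/* wins the bit and drains */
	qdisc_run(&q);	/* would be a no-op if a runner were still active */
	return 0;
}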
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -26,6 +26,7 @@ struct qdisc_rate_table
 enum qdisc_state_t
 {
 	__QDISC_STATE_RUNNING,
+	__QDISC_STATE_SCHED,
 };
 
 struct Qdisc
@@ -45,6 +46,7 @@ struct Qdisc
 	struct sk_buff		*gso_skb;
 	struct sk_buff_head	q;
 	struct netdev_queue	*dev_queue;
+	struct Qdisc		*next_sched;
 	struct list_head	list;
 
 	struct gnet_stats_basic	bstats;
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1323,18 +1323,18 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 }
 
 
-void __netif_schedule(struct netdev_queue *txq)
+void __netif_schedule(struct Qdisc *q)
 {
-	struct net_device *dev = txq->dev;
+	BUG_ON(q == &noop_qdisc);
 
-	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
+	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
 		struct softnet_data *sd;
 		unsigned long flags;
 
 		local_irq_save(flags);
 		sd = &__get_cpu_var(softnet_data);
-		txq->next_sched = sd->output_queue;
-		sd->output_queue = txq;
+		q->next_sched = sd->output_queue;
+		sd->output_queue = q;
 		raise_softirq_irqoff(NET_TX_SOFTIRQ);
 		local_irq_restore(flags);
 	}
@@ -1771,37 +1771,23 @@ gso:
 	rcu_read_lock_bh();
 
 	txq = dev_pick_tx(dev, skb);
-	spin_lock_prefetch(&txq->lock);
-
-	/* Updates of qdisc are serialized by queue->lock.
-	 * The struct Qdisc which is pointed to by qdisc is now a
-	 * rcu structure - it may be accessed without acquiring
-	 * a lock (but the structure may be stale.) The freeing of the
-	 * qdisc will be deferred until it's known that there are no
-	 * more references to it.
-	 *
-	 * If the qdisc has an enqueue function, we still need to
-	 * hold the queue->lock before calling it, since queue->lock
-	 * also serializes access to the device queue.
-	 */
 
 	q = rcu_dereference(txq->qdisc);
 
 #ifdef CONFIG_NET_CLS_ACT
 	skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
 #endif
 	if (q->enqueue) {
-		/* Grab device queue */
-		spin_lock(&txq->lock);
-		q = txq->qdisc;
-		if (q->enqueue) {
-			rc = q->enqueue(skb, q);
-			qdisc_run(txq);
-			spin_unlock(&txq->lock);
-
-			rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
-			goto out;
-		}
-		spin_unlock(&txq->lock);
+		spinlock_t *root_lock = qdisc_root_lock(q);
+
+		spin_lock(root_lock);
+
+		rc = q->enqueue(skb, q);
+		qdisc_run(q);
+
+		spin_unlock(root_lock);
+
+		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
+		goto out;
 	}
 
 	/* The device has no queue. Common case for software devices:
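Editor's note: in dev_queue_xmit(), the qdisc root lock obtained via qdisc_root_lock() replaces txq->lock as what serializes the enqueue and the qdisc_run() kick, since one qdisc may now feed several TX queues. A hedged pthread sketch of the resulting critical section (the function and struct here are stand-ins, not the kernel's):

#include <pthread.h>
#include <stdio.h>

struct Qdisc {
	pthread_mutex_t root_lock;	/* stands in for qdisc_root_lock(q) */
	int qlen;
};

static int qdisc_enqueue(struct Qdisc *q)
{
	q->qlen++;			/* models q->enqueue(skb, q) */
	return 0;			/* NET_XMIT_SUCCESS */
}

static void qdisc_run(struct Qdisc *q)
{
	printf("kick: draining qdisc, qlen=%d\n", q->qlen);
}

static int dev_queue_xmit_model(struct Qdisc *q)
{
	int rc;

	pthread_mutex_lock(&q->root_lock);	/* spin_lock(root_lock)   */
	rc = qdisc_enqueue(q);			/* queue the packet       */
	qdisc_run(q);				/* try to start draining  */
	pthread_mutex_unlock(&q->root_lock);	/* spin_unlock(root_lock) */
	return rc;
}

int main(void)
{
	struct Qdisc q = { PTHREAD_MUTEX_INITIALIZER, 0 };

	return dev_queue_xmit_model(&q);
}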
@@ -1974,7 +1960,7 @@ static void net_tx_action(struct softirq_action *h)
 	}
 
 	if (sd->output_queue) {
-		struct netdev_queue *head;
+		struct Qdisc *head;
 
 		local_irq_disable();
 		head = sd->output_queue;
@@ -1982,18 +1968,20 @@ static void net_tx_action(struct softirq_action *h)
 		local_irq_enable();
 
 		while (head) {
-			struct netdev_queue *txq = head;
-			struct net_device *dev = txq->dev;
+			struct Qdisc *q = head;
+			spinlock_t *root_lock;
 
 			head = head->next_sched;
 
 			smp_mb__before_clear_bit();
-			clear_bit(__LINK_STATE_SCHED, &dev->state);
+			clear_bit(__QDISC_STATE_SCHED, &q->state);
 
-			if (spin_trylock(&txq->lock)) {
-				qdisc_run(txq);
-				spin_unlock(&txq->lock);
+			root_lock = qdisc_root_lock(q);
+			if (spin_trylock(root_lock)) {
+				qdisc_run(q);
+				spin_unlock(root_lock);
 			} else {
-				netif_schedule_queue(txq);
+				__netif_schedule(q);
 			}
 		}
 	}
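Editor's note: net_tx_action() keeps its old trylock-or-requeue shape, just against the qdisc root lock: if another CPU already holds the lock, the qdisc is pushed back onto the scheduled list with __netif_schedule(q) rather than spun on. A userspace pthread model of the pattern (names are illustrative only):

#include <pthread.h>
#include <stdio.h>

struct Qdisc {
	pthread_mutex_t root_lock;
	const char *name;
};

static void __netif_schedule(struct Qdisc *q)
{
	printf("%s: contended, rescheduling for a later softirq\n", q->name);
}

static void qdisc_run(struct Qdisc *q)
{
	printf("%s: draining\n", q->name);
}

/* Models servicing one qdisc from the softirq's scheduled list. */
static void service(struct Qdisc *q)
{
	if (pthread_mutex_trylock(&q->root_lock) == 0) {  /* spin_trylock */
		qdisc_run(q);
		pthread_mutex_unlock(&q->root_lock);
	} else {
		__netif_schedule(q);	/* try again next softirq */
	}
}

int main(void)
{
	struct Qdisc q = { PTHREAD_MUTEX_INITIALIZER, "root-eth0" };

	service(&q);			  /* uncontended: runs inline   */
	pthread_mutex_lock(&q.root_lock); /* simulate another CPU owning it */
	service(&q);			  /* contended: reschedules     */
	pthread_mutex_unlock(&q.root_lock);
	return 0;
}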
@@ -4459,7 +4447,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 			    void *ocpu)
 {
 	struct sk_buff **list_skb;
-	struct netdev_queue **list_net;
+	struct Qdisc **list_net;
 	struct sk_buff *skb;
 	unsigned int cpu, oldcpu = (unsigned long)ocpu;
 	struct softnet_data *sd, *oldsd;
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -294,11 +294,10 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 {
 	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
 						 timer);
-	struct netdev_queue *txq = wd->qdisc->dev_queue;
 
 	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
 	smp_wmb();
-	netif_schedule_queue(txq);
+	__netif_schedule(wd->qdisc);
 
 	return HRTIMER_NORESTART;
 }
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -650,7 +650,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 	}
 
 	sch->flags &= ~TCQ_F_THROTTLED;
-	netif_schedule_queue(sch->dev_queue);
+	__netif_schedule(sch);
 	return HRTIMER_NORESTART;
 }
 
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -72,16 +72,14 @@ static inline int qdisc_qlen(struct Qdisc *q)
 	return q->q.qlen;
 }
 
-static inline int dev_requeue_skb(struct sk_buff *skb,
-				  struct netdev_queue *dev_queue,
-				  struct Qdisc *q)
+static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
 	if (unlikely(skb->next))
 		q->gso_skb = skb;
 	else
 		q->ops->requeue(skb, q);
 
-	netif_schedule_queue(dev_queue);
+	__netif_schedule(q);
 	return 0;
 }
 
@@ -121,7 +119,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 		 * some time.
 		 */
 		__get_cpu_var(netdev_rx_stat).cpu_collision++;
-		ret = dev_requeue_skb(skb, dev_queue, q);
+		ret = dev_requeue_skb(skb, q);
 	}
 
 	return ret;
@@ -146,9 +144,9 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
  *				>0 - queue is not empty.
  *
  */
-static inline int qdisc_restart(struct netdev_queue *txq,
-				struct Qdisc *q)
+static inline int qdisc_restart(struct Qdisc *q)
 {
+	struct netdev_queue *txq;
 	int ret = NETDEV_TX_BUSY;
 	struct net_device *dev;
 	spinlock_t *root_lock;
@@ -163,7 +161,8 @@ static inline int qdisc_restart(struct netdev_queue *txq,
 	/* And release qdisc */
 	spin_unlock(root_lock);
 
-	dev = txq->dev;
+	dev = qdisc_dev(q);
+	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
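Editor's note: this hunk is the heart of the commit message. The TX queue is only knowable after the dequeue, so qdisc_restart() now resolves it from the skb via skb_get_queue_mapping() instead of receiving it as an argument. A minimal sketch of that skb-to-queue resolution (stand-in structs, not kernel ABI):

#include <stdio.h>

#define NUM_TX_QUEUES 4

struct sk_buff { unsigned int queue_mapping; };
struct netdev_queue { int id; };
struct net_device { struct netdev_queue txq[NUM_TX_QUEUES]; };

static struct netdev_queue *netdev_get_tx_queue(struct net_device *dev,
						unsigned int index)
{
	return &dev->txq[index];
}

/* Models the new flow: first the qdisc hands us an skb, and only then
 * do we learn which hardware queue that skb is bound to. */
static void qdisc_restart_model(struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb->queue_mapping);

	printf("skb -> txq %d\n", txq->id);
}

int main(void)
{
	struct net_device dev = { .txq = { {0}, {1}, {2}, {3} } };
	struct sk_buff skb = { .queue_mapping = 2 };

	qdisc_restart_model(&dev, &skb);	/* prints: skb -> txq 2 */
	return 0;
}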
@@ -189,29 +188,28 @@
 		printk(KERN_WARNING "BUG %s code %d qlen %d\n",
 		       dev->name, ret, q->q.qlen);
 
-		ret = dev_requeue_skb(skb, txq, q);
+		ret = dev_requeue_skb(skb, q);
 		break;
 	}
 
+	if (ret && netif_tx_queue_stopped(txq))
+		ret = 0;
+
 	return ret;
 }
 
-void __qdisc_run(struct netdev_queue *txq)
+void __qdisc_run(struct Qdisc *q)
 {
 	unsigned long start_time = jiffies;
-	struct Qdisc *q = txq->qdisc;
 
-	while (qdisc_restart(txq, q)) {
-		if (netif_tx_queue_stopped(txq))
-			break;
-
+	while (qdisc_restart(q)) {
 		/*
 		 * Postpone processing if
 		 * 1. another process needs the CPU;
 		 * 2. we've been doing it for too long.
 		 */
 		if (need_resched() || jiffies != start_time) {
-			netif_schedule_queue(txq);
+			__netif_schedule(q);
 			break;
 		}
 	}
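Editor's note: __qdisc_run() keeps its time-slice policy: keep calling qdisc_restart() while it makes progress, but once need_resched() fires or jiffies advances, punt the qdisc back to the softirq with __netif_schedule(q) instead of hogging the CPU. A userspace sketch with a packet budget standing in for the jiffies test (model only, not kernel code):

#include <stdio.h>
#include <stdbool.h>

struct Qdisc { int qlen; };

static bool qdisc_restart_model(struct Qdisc *q)
{
	if (q->qlen == 0)
		return false;	/* queue drained: stop */
	q->qlen--;		/* "transmitted" one packet */
	return true;
}

static void __netif_schedule_model(struct Qdisc *q)
{
	printf("time slice over, %d packets left: rescheduling\n", q->qlen);
}

static void __qdisc_run_model(struct Qdisc *q, int budget)
{
	while (qdisc_restart_model(q)) {
		if (--budget <= 0) {	/* need_resched() || jiffies moved */
			__netif_schedule_model(q);
			break;
		}
	}
}

int main(void)
{
	struct Qdisc q = { .qlen = 10 };

	__qdisc_run_model(&q, 4);	/* drains 4 packets, punts the rest */
	return 0;
}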