author		Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>	2007-11-21 20:32:57 +0800
committer	Greg Kroah-Hartman <gregkh@suse.de>	2007-12-14 09:50:52 -0800
commit		96219ec9d6b461e7587a51280ab5336a78234f86 (patch)
tree		e253feb4d790edc0181dcbd914a7d041b098bf25
parent		ca21e46bf2d766d22be57595a96a7683870fe125 (diff)
PKT_SCHED: Check subqueue status before calling hard_start_xmit
[PKT_SCHED]: Check subqueue status before calling hard_start_xmit

[ Upstream commit: 5f1a485d5905aa641f33009019b3699076666a4c ]

The only qdiscs that check subqueue state before dequeueing are PRIO and RR. The other qdiscs, including the default pfifo_fast qdisc, will allow traffic bound for subqueue 0 through to hard_start_xmit.

The check for netif_queue_stopped() is done above in pkt_sched.h, so it is unnecessary in qdisc_restart(). However, if the underlying driver is multiqueue capable and only sets queue states on subqueues, packets can enter the driver while it is unable to process them, resulting in expensive requeues and driver entries.

This patch re-adds the check of the subqueue status before calling hard_start_xmit, so the driver entry can be avoided while the queue is stopped.

Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
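For context, the qdisc-side check added here pairs with driver-side subqueue flow control. The following is an illustrative sketch only: the mydrv_* functions and the per-queue free-descriptor counters are hypothetical, while netif_stop_subqueue(), netif_wake_subqueue() and netif_subqueue_stopped() are the kernel's multiqueue helpers from this era (CONFIG_NETDEVICES_MULTIQUEUE).

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	#define MYDRV_NUM_TX_QUEUES	4

	struct mydrv_priv {
		int tx_free[MYDRV_NUM_TX_QUEUES];	/* free descriptors per subqueue */
	};

	static int mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct mydrv_priv *priv = netdev_priv(dev);
		u16 queue = skb->queue_mapping;

		/* ... post skb to the hardware TX ring for this subqueue ... */

		if (--priv->tx_free[queue] == 0)
			/*
			 * Stop only this subqueue; the device as a whole stays
			 * up, so netif_queue_stopped() still returns false.
			 * This is exactly why qdisc_restart() needs the
			 * per-subqueue check added by this patch.
			 */
			netif_stop_subqueue(dev, queue);

		return NETDEV_TX_OK;
	}

	static void mydrv_tx_complete(struct net_device *dev, u16 queue, int cleaned)
	{
		struct mydrv_priv *priv = netdev_priv(dev);

		/* TX interrupt cleaned some descriptors; reopen the subqueue. */
		priv->tx_free[queue] += cleaned;
		if (netif_subqueue_stopped(dev, queue))
			netif_wake_subqueue(dev, queue);
	}

Because only the individual subqueue is marked stopped, a non-multiqueue-aware qdisc such as pfifo_fast would still hand packets for that subqueue to qdisc_restart(), which is the situation the hunks below guard against.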
 net/sched/sch_generic.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index c81649cf0b9e..a35d7cec3fd9 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -135,7 +135,7 @@ static inline int qdisc_restart(struct net_device *dev)
 	struct Qdisc *q = dev->qdisc;
 	struct sk_buff *skb;
 	unsigned lockless;
-	int ret;
+	int ret = NETDEV_TX_BUSY;
 
 	/* Dequeue packet */
 	if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
@@ -158,7 +158,8 @@ static inline int qdisc_restart(struct net_device *dev)
 	/* And release queue */
 	spin_unlock(&dev->queue_lock);
 
-	ret = dev_hard_start_xmit(skb, dev);
+	if (!netif_subqueue_stopped(dev, skb->queue_mapping))
+		ret = dev_hard_start_xmit(skb, dev);
 
 	if (!lockless)
 		netif_tx_unlock(dev);
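Initializing ret to NETDEV_TX_BUSY ties the two hunks together: when the subqueue is stopped and dev_hard_start_xmit() is skipped, control falls through to the same handling a busy driver would trigger, so the skb is requeued rather than lost. Roughly, the remainder of qdisc_restart() in this kernel looks like the following (a paraphrase for illustration, not the verbatim source):

	spin_lock(&dev->queue_lock);
	q = dev->qdisc;

	switch (ret) {
	case NETDEV_TX_OK:
		/* Driver sent out the skb successfully */
		ret = qdisc_qlen(q);
		break;

	case NETDEV_TX_LOCKED:
		/* Driver try-lock failed */
		ret = handle_dev_cpu_collision(skb, dev, q);
		break;

	default:
		/*
		 * NETDEV_TX_BUSY, including the skipped-transmit case
		 * above: put the skb back on the qdisc to retry later.
		 */
		ret = dev_requeue_skb(skb, dev, q);
		break;
	}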