author     Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>   2007-07-06 13:36:20 -0700
committer  David S. Miller <davem@sunset.davemloft.net>              2007-07-10 22:16:21 -0700
commit     f25f4e44808f0f6c9875d94ef1c41ef86c288eb2 (patch)
tree       d7809dd5e957f1626185326d0c3438ff9a04d350 /net/core/pktgen.c
parent     a093bf006e09a305e95ff0938c0a18b7520aef67 (diff)
[CORE] Stack changes to add multiqueue hardware support API
Add the multiqueue hardware device support API to the core network stack. Allow drivers to allocate multiple queues and manage them at the netdev level if they choose to do so. Added a new field to sk_buff, namely queue_mapping, for drivers to know which tx_ring to select based on OS classification of the flow.

Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
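A minimal driver-side sketch (not part of this commit) of how the new API could be adopted, assuming the interfaces introduced by this series (alloc_etherdev_mq(), NETIF_F_MULTI_QUEUE, netif_stop_subqueue() and the new skb->queue_mapping field); the foo_* names and types are hypothetical:

/*
 * Hypothetical driver sketch -- not from the patch.  Assumes the API
 * added by this series: alloc_etherdev_mq(), NETIF_F_MULTI_QUEUE,
 * netif_stop_subqueue() and skb->queue_mapping.
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

#define FOO_TX_QUEUES 4

struct foo_priv { /* driver private state (hypothetical) */ };

static bool foo_ring_full(struct net_device *dev, u16 ring);
static void foo_post_to_ring(struct net_device *dev, u16 ring,
			     struct sk_buff *skb);

static int foo_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* The stack's classification of the flow arrives in queue_mapping;
	 * the driver uses it to pick the matching hardware tx ring. */
	u16 ring = skb->queue_mapping;

	if (foo_ring_full(dev, ring)) {
		/* Stop only this subqueue; the other rings keep going. */
		netif_stop_subqueue(dev, ring);
		return NETDEV_TX_BUSY;
	}

	foo_post_to_ring(dev, ring, skb);
	return NETDEV_TX_OK;
}

static int foo_probe(struct pci_dev *pdev)
{
	/* Allocate one netdev subqueue per hardware tx ring and advertise
	 * multiqueue support to the stack. */
	struct net_device *dev = alloc_etherdev_mq(sizeof(struct foo_priv),
						   FOO_TX_QUEUES);
	if (!dev)
		return -ENOMEM;
	dev->features |= NETIF_F_MULTI_QUEUE;
	dev->hard_start_xmit = foo_hard_start_xmit;
	/* ... register the device, set up rings, etc. ... */
	return 0;
}

Because the stack fills in queue_mapping before the packet reaches the driver, stopping a single subqueue only throttles the flows classified onto that ring rather than the whole device.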
Diffstat (limited to 'net/core/pktgen.c')
-rw-r--r--  net/core/pktgen.c  10
1 file changed, 7 insertions(+), 3 deletions(-)
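For orientation only (not from the patch): each hunk below widens the same single-queue check, so pktgen only treats the device as usable when neither the legacy queue nor the subqueue selected by the skb's queue_mapping is stopped. Expressed as a hypothetical helper:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Same predicate the hunks below add inline at each call site. */
static inline int foo_dev_usable(struct net_device *odev, struct sk_buff *skb)
{
	return !netif_queue_stopped(odev) &&
	       !netif_subqueue_stopped(odev, skb->queue_mapping);
}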
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 9cd3a1cb60ef..dffe067e7a7b 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3139,7 +3139,9 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
}
}
- if (netif_queue_stopped(odev) || need_resched()) {
+ if ((netif_queue_stopped(odev) ||
+ netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) ||
+ need_resched()) {
idle_start = getCurUs();
if (!netif_running(odev)) {
@@ -3154,7 +3156,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->idle_acc += getCurUs() - idle_start;
- if (netif_queue_stopped(odev)) {
+ if (netif_queue_stopped(odev) ||
+ netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) {
pkt_dev->next_tx_us = getCurUs(); /* TODO */
pkt_dev->next_tx_ns = 0;
goto out; /* Try the next interface */
@@ -3181,7 +3184,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
}
netif_tx_lock_bh(odev);
- if (!netif_queue_stopped(odev)) {
+ if (!netif_queue_stopped(odev) &&
+ !netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) {
atomic_inc(&(pkt_dev->skb->users));
retry_now: