author	Eric Dumazet <edumazet@google.com>	2012-12-10 12:32:03 +0000
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-01-11 09:18:15 -0800
commit	e66e367bf75bb2e153f286a76592c2e23afc660d (patch)
tree	8e6360bb60b8a7ed4ef8bd43f345a23f067b76e9 /include
parent	cc8605070a58f12b79c62f369a0446d89a7ca337 (diff)
net: fix a race in gro_cell_poll()
[ Upstream commit f8e8f97c11d5ff3cc47d85b97c7c35e443dcf490 ]

Dmitry Kravkov reported packet drops for GRE packets since GRO support was added.

There is a race in gro_cell_poll() because we call napi_complete() without any synchronization with a concurrent gro_cells_receive(). Once the bug was triggered, we queued packets but did not schedule a NAPI poll.

We can fix this issue using the spinlock protecting the napi_skbs queue, as we have to hold it to perform the skb dequeue anyway. As we open-code skb_dequeue(), we no longer need to mask IRQs, since both producer and consumer run under BH context.

Bug added in commit c9e6bc644e ("net: add gro_cells infrastructure").

Reported-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Tested-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
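For reference, here is the pre-patch poll loop, condensed from the removed ("-") and context lines of the diff below, with the race window annotated. This is a reconstruction for illustration, not a verbatim copy of the old header.

/* Pre-patch gro_cell_poll(): skb_dequeue() takes the queue lock per
 * packet, but napi_complete() runs with no lock held at all.
 */
static inline int gro_cell_poll(struct napi_struct *napi, int budget)
{
	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget) {
		skb = skb_dequeue(&cell->napi_skbs);	/* locks internally */
		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

	/* Race window: a concurrent gro_cells_receive() can enqueue an skb
	 * right here; it sees a queue length of 1 and calls napi_schedule(),
	 * which does nothing because this poll still owns NAPI_STATE_SCHED...
	 */
	if (work_done < budget)
		napi_complete(napi);	/* ...and napi_complete() then clears
					 * SCHED, leaving the skb queued with
					 * no poll scheduled to drain it.
					 */
	return work_done;
}

The patched version below closes that window by holding cell->napi_skbs.lock across the final emptiness check and napi_complete(), so a producer can never slip an skb in between the two.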
Diffstat (limited to 'include')
-rw-r--r--	include/net/gro_cells.h	14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
index 4fd8a4b4b7ee..e5062c955ea6 100644
--- a/include/net/gro_cells.h
+++ b/include/net/gro_cells.h
@@ -17,7 +17,6 @@ struct gro_cells {
static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
- unsigned long flags;
struct gro_cell *cell = gcells->cells;
struct net_device *dev = skb->dev;
@@ -35,32 +34,37 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
return;
}
- spin_lock_irqsave(&cell->napi_skbs.lock, flags);
+ /* We run in BH context */
+ spin_lock(&cell->napi_skbs.lock);
__skb_queue_tail(&cell->napi_skbs, skb);
if (skb_queue_len(&cell->napi_skbs) == 1)
napi_schedule(&cell->napi);
- spin_unlock_irqrestore(&cell->napi_skbs.lock, flags);
+ spin_unlock(&cell->napi_skbs.lock);
}
+/* called under BH context */
static inline int gro_cell_poll(struct napi_struct *napi, int budget)
{
struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
struct sk_buff *skb;
int work_done = 0;
+ spin_lock(&cell->napi_skbs.lock);
while (work_done < budget) {
- skb = skb_dequeue(&cell->napi_skbs);
+ skb = __skb_dequeue(&cell->napi_skbs);
if (!skb)
break;
-
+ spin_unlock(&cell->napi_skbs.lock);
napi_gro_receive(napi, skb);
work_done++;
+ spin_lock(&cell->napi_skbs.lock);
}
if (work_done < budget)
napi_complete(napi);
+ spin_unlock(&cell->napi_skbs.lock);
return work_done;
}
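For context, a hedged sketch of how a tunnel driver typically feeds packets through these helpers. The "my_tunnel" structure and functions are illustrative names, not part of this patch; gro_cells_init() and gro_cells_receive() are the real helpers declared in include/net/gro_cells.h of this era.

#include <linux/netdevice.h>
#include <net/gro_cells.h>

/* Illustrative driver-private state; only the gro_cells member matters. */
struct my_tunnel {
	struct gro_cells	gro_cells;
	/* ... other driver-private fields ... */
};

static int my_tunnel_open(struct net_device *dev)
{
	struct my_tunnel *t = netdev_priv(dev);

	/* Sets up the gro_cell array: each cell gets its own napi_skbs
	 * queue and a NAPI instance (gro_cell_poll) registered on dev.
	 */
	return gro_cells_init(&t->gro_cells, dev);
}

static void my_tunnel_rx(struct my_tunnel *t, struct sk_buff *skb)
{
	/* Called from BH context (e.g. the tunnel's encap receive path):
	 * queues skb on one cell and schedules its NAPI, so gro_cell_poll()
	 * later hands the packet to napi_gro_receive().
	 */
	gro_cells_receive(&t->gro_cells, skb);
}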