author    Igor Druzhinin <igor.druzhinin@citrix.com>  2019-02-28 12:48:03 +0000
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2019-03-23 08:44:22 +0100
commit    51d27870ecf1a5f9fed9846d337874bc89e25234 (patch)
tree      ab826fb81657639dc61ef1074760ae3d17a442ed /drivers/net/xen-netback/netback.c
parent    6e7339d5ee302fcbfe8ef29c058cc85c360849b9 (diff)
xen-netback: fix occasional leak of grant ref mappings under memory pressure
[ Upstream commit 99e87f56b48f490fb16b6e0f74691c1e664dea95 ]

Zero-copy callback flag is not yet set on frag list skb at the moment
xenvif_handle_frag_list() returns -ENOMEM. This eventually results in
leaking grant ref mappings since xenvif_zerocopy_callback() is never
called for these fragments. Those eventually build up and cause Xen
to kill Dom0 as the slots get reused for new mappings:

"d0v0 Attempt to implicitly unmap a granted PTE c010000329fce005"

That behavior is observed under certain workloads where sudden spikes
of page cache writes coexist with active atomic skb allocations from
network traffic. Additionally, rework the logic to deal with frag_list
deallocation in a single place.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: Igor Druzhinin <igor.druzhinin@citrix.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
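To make the ordering issue concrete, below is a minimal, self-contained user-space C sketch of the same failure mode. All names here (frag_list, zerocopy_prepared, handle_frag_list, free_frag_list) are hypothetical stand-ins, not the netback code: the point is only that the release callback runs solely when the "prepared" flag is set, so the flag must be set before the fallible copy step, which is what the patch does by calling xenvif_skb_zerocopy_prepare() ahead of xenvif_handle_frag_list().

/* Hypothetical illustration only -- not kernel code. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct frag_list {
	bool zerocopy_prepared;	/* release callback runs only if set */
	int grant_refs;		/* stand-in for mapped grant references */
};

/* Stand-in for xenvif_zerocopy_callback(): releases the mappings. */
static void zerocopy_callback(struct frag_list *fl)
{
	printf("released %d grant refs\n", fl->grant_refs);
	fl->grant_refs = 0;
}

static void free_frag_list(struct frag_list *fl)
{
	if (fl->zerocopy_prepared)
		zerocopy_callback(fl);
	else if (fl->grant_refs)
		printf("LEAK: %d grant refs never released\n", fl->grant_refs);
	free(fl);
}

/* Stand-in for xenvif_handle_frag_list(); may fail under memory pressure. */
static int handle_frag_list(struct frag_list *fl, bool simulate_enomem)
{
	if (simulate_enomem)
		return -ENOMEM;	/* nothing copied; caller must still clean up */
	/* ...copy fragments into freshly allocated local pages... */
	return 0;
}

int main(void)
{
	struct frag_list *fl = calloc(1, sizeof(*fl));

	if (!fl)
		return 1;
	fl->grant_refs = 3;

	/*
	 * Fixed ordering (what the patch enforces): flag the frag list for
	 * the release callback *before* attempting the copy, so the refs
	 * are freed even when the copy fails with -ENOMEM.
	 */
	fl->zerocopy_prepared = true;
	if (handle_frag_list(fl, true))
		fprintf(stderr, "copy failed, dropping skb\n");
	free_frag_list(fl);
	return 0;
}

With the pre-patch ordering (setting zerocopy_prepared only after a successful copy), the -ENOMEM path would reach free_frag_list() with the flag still clear and hit the LEAK branch; that is the user-space analogue of the grant ref mappings that accumulate until Xen kills Dom0.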
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--  drivers/net/xen-netback/netback.c  10
1 files changed, 5 insertions, 5 deletions
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 02db20b26749..d324ac308e6d 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1538,11 +1538,6 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 		skb_frag_size_set(&frags[i], len);
 	}
 
-	/* Copied all the bits from the frag list -- free it. */
-	skb_frag_list_init(skb);
-	xenvif_skb_zerocopy_prepare(queue, nskb);
-	kfree_skb(nskb);
-
 	/* Release all the original (foreign) frags. */
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 		skb_frag_unref(skb, f);
@@ -1611,6 +1606,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 		xenvif_fill_frags(queue, skb);
 
 		if (unlikely(skb_has_frag_list(skb))) {
+			struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+			xenvif_skb_zerocopy_prepare(queue, nskb);
 			if (xenvif_handle_frag_list(queue, skb)) {
 				if (net_ratelimit())
 					netdev_err(queue->vif->dev,
@@ -1619,6 +1616,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 				kfree_skb(skb);
 				continue;
 			}
+			/* Copied all the bits from the frag list -- free it. */
+			skb_frag_list_init(skb);
+			kfree_skb(nskb);
 		}
 
 		skb->dev = queue->vif->dev;