Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--  drivers/net/xen-netback/netback.c  852
1 file changed, 576 insertions(+), 276 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index e5284bca2d90..3f021e054ba1 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -37,6 +37,7 @@
#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
+#include <linux/highmem.h>
#include <net/tcp.h>
@@ -54,6 +55,13 @@
bool separate_tx_rx_irq = 1;
module_param(separate_tx_rx_irq, bool, 0644);
+/* When the guest ring is filled up, qdisc queues the packets for us, but we
+ * have to time them out, otherwise other guests' packets can get stuck there
+ */
+unsigned int rx_drain_timeout_msecs = 10000;
+module_param(rx_drain_timeout_msecs, uint, 0444);
+unsigned int rx_drain_timeout_jiffies;
+
/*
* This is the maximum slots a skb can have. If a guest sends a skb
* which exceeds this limit it is considered malicious.
@@ -62,24 +70,6 @@ module_param(separate_tx_rx_irq, bool, 0644);
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);
-/*
- * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
- * the maximum slots a valid packet can use. Now this value is defined
- * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
- * all backend.
- */
-#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
-
-/*
- * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
- * one or more merged tx requests, otherwise it is the continuation of
- * previous tx request.
- */
-static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
-{
- return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
-}
-
static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
u8 status);
@@ -109,6 +99,21 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
}
+#define callback_param(vif, pending_idx) \
+ (vif->pending_tx_info[pending_idx].callback_struct)
+
+/* Find the containing VIF's structure from a pointer in pending_tx_info array
+ */
+static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+{
+ u16 pending_idx = ubuf->desc;
+ struct pending_tx_info *temp =
+ container_of(ubuf, struct pending_tx_info, callback_struct);
+ return container_of(temp - pending_idx,
+ struct xenvif,
+ pending_tx_info[0]);
+}
+
/* This is a miniumum size for the linear area to avoid lots of
* calls to __pskb_pull_tail() as we set up checksum offsets. The
* value 128 was chosen as it covers all IPv4 and most likely
@@ -131,12 +136,6 @@ static inline pending_ring_idx_t pending_index(unsigned i)
return i & (MAX_PENDING_REQS-1);
}
-static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
-{
- return MAX_PENDING_REQS -
- vif->pending_prod + vif->pending_cons;
-}
-
bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
{
RING_IDX prod, cons;
@@ -192,8 +191,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* into multiple copies tend to give large frags their
* own buffers as before.
*/
- if ((offset + size > MAX_BUFFER_OFFSET) &&
- (size <= MAX_BUFFER_OFFSET) && offset && !head)
+ BUG_ON(size > MAX_BUFFER_OFFSET);
+ if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
return true;
return false;
@@ -235,12 +234,14 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
struct netrx_pending_operations *npo,
struct page *page, unsigned long size,
- unsigned long offset, int *head)
+ unsigned long offset, int *head,
+ struct xenvif *foreign_vif,
+ grant_ref_t foreign_gref)
{
struct gnttab_copy *copy_gop;
struct xenvif_rx_meta *meta;
unsigned long bytes;
- int gso_type;
+ int gso_type = XEN_NETIF_GSO_TYPE_NONE;
/* Data must not cross a page boundary. */
BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -277,8 +278,15 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
copy_gop->flags = GNTCOPY_dest_gref;
copy_gop->len = bytes;
- copy_gop->source.domid = DOMID_SELF;
- copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
+ if (foreign_vif) {
+ copy_gop->source.domid = foreign_vif->domid;
+ copy_gop->source.u.ref = foreign_gref;
+ copy_gop->flags |= GNTCOPY_source_gref;
+ } else {
+ copy_gop->source.domid = DOMID_SELF;
+ copy_gop->source.u.gmfn =
+ virt_to_mfn(page_address(page));
+ }
copy_gop->source.offset = offset;
copy_gop->dest.domid = vif->domid;
@@ -299,12 +307,12 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
}
/* Leave a gap for the GSO descriptor. */
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
- gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
- else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
- gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
- else
- gso_type = XEN_NETIF_GSO_TYPE_NONE;
+ if (skb_is_gso(skb)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+ }
if (*head && ((1 << gso_type) & vif->gso_mask))
vif->rx.req_cons++;
@@ -338,19 +346,18 @@ static int xenvif_gop_skb(struct sk_buff *skb,
int head = 1;
int old_meta_prod;
int gso_type;
- int gso_size;
+ struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+ grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
+ struct xenvif *foreign_vif = NULL;
old_meta_prod = npo->meta_prod;
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
- gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
- gso_size = skb_shinfo(skb)->gso_size;
- } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
- gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
- gso_size = skb_shinfo(skb)->gso_size;
- } else {
- gso_type = XEN_NETIF_GSO_TYPE_NONE;
- gso_size = 0;
+ gso_type = XEN_NETIF_GSO_TYPE_NONE;
+ if (skb_is_gso(skb)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
}
/* Set up a GSO prefix descriptor, if necessary */
@@ -358,7 +365,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = gso_type;
- meta->gso_size = gso_size;
+ meta->gso_size = skb_shinfo(skb)->gso_size;
meta->size = 0;
meta->id = req->id;
}
@@ -368,7 +375,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
if ((1 << gso_type) & vif->gso_mask) {
meta->gso_type = gso_type;
- meta->gso_size = gso_size;
+ meta->gso_size = skb_shinfo(skb)->gso_size;
} else {
meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
meta->gso_size = 0;
@@ -379,6 +386,19 @@ static int xenvif_gop_skb(struct sk_buff *skb,
npo->copy_off = 0;
npo->copy_gref = req->gref;
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+ (ubuf->callback == &xenvif_zerocopy_callback)) {
+ int i = 0;
+ foreign_vif = ubuf_to_vif(ubuf);
+
+ do {
+ u16 pending_idx = ubuf->desc;
+ foreign_grefs[i++] =
+ foreign_vif->pending_tx_info[pending_idx].req.gref;
+ ubuf = (struct ubuf_info *) ubuf->ctx;
+ } while (ubuf);
+ }
+
data = skb->data;
while (data < skb_tail_pointer(skb)) {
unsigned int offset = offset_in_page(data);
@@ -388,7 +408,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
len = skb_tail_pointer(skb) - data;
xenvif_gop_frag_copy(vif, skb, npo,
- virt_to_page(data), len, offset, &head);
+ virt_to_page(data), len, offset, &head,
+ NULL,
+ 0);
data += len;
}
@@ -397,7 +419,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
skb_frag_page(&skb_shinfo(skb)->frags[i]),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].page_offset,
- &head);
+ &head,
+ foreign_vif,
+ foreign_grefs[i]);
}
return npo->meta_prod - old_meta_prod;
@@ -455,10 +479,12 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
}
}
-struct skb_cb_overlay {
+struct xenvif_rx_cb {
int meta_slots_used;
};
+#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
+
void xenvif_kick_thread(struct xenvif *vif)
{
wake_up(&vif->wq);
@@ -474,7 +500,6 @@ static void xenvif_rx_action(struct xenvif *vif)
LIST_HEAD(notify);
int ret;
unsigned long offset;
- struct skb_cb_overlay *sco;
bool need_to_notify = false;
struct netrx_pending_operations npo = {
@@ -486,6 +511,8 @@ static void xenvif_rx_action(struct xenvif *vif)
while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
RING_IDX max_slots_needed;
+ RING_IDX old_req_cons;
+ RING_IDX ring_slots_used;
int i;
/* We need a cheap worse case estimate for the number of
@@ -497,11 +524,31 @@ static void xenvif_rx_action(struct xenvif *vif)
PAGE_SIZE);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
unsigned int size;
+ unsigned int offset;
+
size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
- max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+ offset = skb_shinfo(skb)->frags[i].page_offset;
+
+ /* For a worse-case estimate we need to factor in
+ * the fragment page offset as this will affect the
+ * number of times xenvif_gop_frag_copy() will
+ * call start_new_rx_buffer().
+ */
+ max_slots_needed += DIV_ROUND_UP(offset + size,
+ PAGE_SIZE);
}
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
- skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+
+ /* To avoid the estimate becoming too pessimal for some
+ * frontends that limit posted rx requests, cap the estimate
+ * at MAX_SKB_FRAGS.
+ */
+ if (max_slots_needed > MAX_SKB_FRAGS)
+ max_slots_needed = MAX_SKB_FRAGS;
+
+ /* We may need one more slot for GSO metadata */
+ if (skb_is_gso(skb) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+ skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
max_slots_needed++;
/* If the skb may not fit then bail out now */
@@ -513,9 +560,11 @@ static void xenvif_rx_action(struct xenvif *vif)
} else
vif->rx_last_skb_slots = 0;
- sco = (struct skb_cb_overlay *)skb->cb;
- sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
- BUG_ON(sco->meta_slots_used > max_slots_needed);
+ old_req_cons = vif->rx.req_cons;
+ XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
+ ring_slots_used = vif->rx.req_cons - old_req_cons;
+
+ BUG_ON(ring_slots_used > max_slots_needed);
__skb_queue_tail(&rxq, skb);
}
@@ -529,7 +578,6 @@ static void xenvif_rx_action(struct xenvif *vif)
gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
- sco = (struct skb_cb_overlay *)skb->cb;
if ((1 << vif->meta[npo.meta_cons].gso_type) &
vif->gso_prefix_mask) {
@@ -540,19 +588,21 @@ static void xenvif_rx_action(struct xenvif *vif)
resp->offset = vif->meta[npo.meta_cons].gso_size;
resp->id = vif->meta[npo.meta_cons].id;
- resp->status = sco->meta_slots_used;
+ resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
npo.meta_cons++;
- sco->meta_slots_used--;
+ XENVIF_RX_CB(skb)->meta_slots_used--;
}
vif->dev->stats.tx_bytes += skb->len;
vif->dev->stats.tx_packets++;
- status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
+ status = xenvif_check_gop(vif,
+ XENVIF_RX_CB(skb)->meta_slots_used,
+ &npo);
- if (sco->meta_slots_used == 1)
+ if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
flags = 0;
else
flags = XEN_NETRXF_more_data;
@@ -589,13 +639,13 @@ static void xenvif_rx_action(struct xenvif *vif)
xenvif_add_frag_responses(vif, status,
vif->meta + npo.meta_cons + 1,
- sco->meta_slots_used);
+ XENVIF_RX_CB(skb)->meta_slots_used);
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
need_to_notify |= !!ret;
- npo.meta_cons += sco->meta_slots_used;
+ npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
dev_kfree_skb(skb);
}
@@ -645,9 +695,12 @@ static void xenvif_tx_err(struct xenvif *vif,
struct xen_netif_tx_request *txp, RING_IDX end)
{
RING_IDX cons = vif->tx.req_cons;
+ unsigned long flags;
do {
+ spin_lock_irqsave(&vif->response_lock, flags);
make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+ spin_unlock_irqrestore(&vif->response_lock, flags);
if (cons == end)
break;
txp = RING_GET_REQUEST(&vif->tx, cons++);
@@ -658,7 +711,8 @@ static void xenvif_tx_err(struct xenvif *vif,
static void xenvif_fatal_tx_err(struct xenvif *vif)
{
netdev_err(vif->dev, "fatal error; disabling device\n");
- xenvif_carrier_off(vif);
+ vif->disabled = true;
+ xenvif_kick_thread(vif);
}
static int xenvif_count_requests(struct xenvif *vif,
@@ -759,180 +813,168 @@ static int xenvif_count_requests(struct xenvif *vif,
return slots;
}
-static struct page *xenvif_alloc_page(struct xenvif *vif,
- u16 pending_idx)
+
+struct xenvif_tx_cb {
+ u16 pending_idx;
+};
+
+#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+
+static inline void xenvif_tx_create_gop(struct xenvif *vif,
+ u16 pending_idx,
+ struct xen_netif_tx_request *txp,
+ struct gnttab_map_grant_ref *gop)
{
- struct page *page;
+ vif->pages_to_map[gop-vif->tx_map_ops] = vif->mmap_pages[pending_idx];
+ gnttab_set_map_op(gop, idx_to_kaddr(vif, pending_idx),
+ GNTMAP_host_map | GNTMAP_readonly,
+ txp->gref, vif->domid);
- page = alloc_page(GFP_ATOMIC|__GFP_COLD);
- if (!page)
+ memcpy(&vif->pending_tx_info[pending_idx].req, txp,
+ sizeof(*txp));
+}
+
+static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
+{
+ struct sk_buff *skb =
+ alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(skb == NULL))
return NULL;
- vif->mmap_pages[pending_idx] = page;
- return page;
+ /* Packets passed to netif_rx() must have some headroom. */
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+ /* Initialize it here to avoid later surprises */
+ skb_shinfo(skb)->destructor_arg = NULL;
+
+ return skb;
}
-static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
- struct sk_buff *skb,
- struct xen_netif_tx_request *txp,
- struct gnttab_copy *gop)
+static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
+ struct sk_buff *skb,
+ struct xen_netif_tx_request *txp,
+ struct gnttab_map_grant_ref *gop)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
- u16 pending_idx = *((u16 *)skb->data);
- u16 head_idx = 0;
- int slot, start;
- struct page *page;
- pending_ring_idx_t index, start_idx = 0;
- uint16_t dst_offset;
- unsigned int nr_slots;
- struct pending_tx_info *first = NULL;
+ u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+ int start;
+ pending_ring_idx_t index;
+ unsigned int nr_slots, frag_overflow = 0;
/* At this point shinfo->nr_frags is in fact the number of
* slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
*/
+ if (shinfo->nr_frags > MAX_SKB_FRAGS) {
+ frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
+ BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+ shinfo->nr_frags = MAX_SKB_FRAGS;
+ }
nr_slots = shinfo->nr_frags;
/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
- /* Coalesce tx requests, at this point the packet passed in
- * should be <= 64K. Any packets larger than 64K have been
- * handled in xenvif_count_requests().
- */
- for (shinfo->nr_frags = slot = start; slot < nr_slots;
- shinfo->nr_frags++) {
- struct pending_tx_info *pending_tx_info =
- vif->pending_tx_info;
+ for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
+ shinfo->nr_frags++, txp++, gop++) {
+ index = pending_index(vif->pending_cons++);
+ pending_idx = vif->pending_ring[index];
+ xenvif_tx_create_gop(vif, pending_idx, txp, gop);
+ frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+ }
- page = alloc_page(GFP_ATOMIC|__GFP_COLD);
- if (!page)
- goto err;
-
- dst_offset = 0;
- first = NULL;
- while (dst_offset < PAGE_SIZE && slot < nr_slots) {
- gop->flags = GNTCOPY_source_gref;
-
- gop->source.u.ref = txp->gref;
- gop->source.domid = vif->domid;
- gop->source.offset = txp->offset;
-
- gop->dest.domid = DOMID_SELF;
-
- gop->dest.offset = dst_offset;
- gop->dest.u.gmfn = virt_to_mfn(page_address(page));
-
- if (dst_offset + txp->size > PAGE_SIZE) {
- /* This page can only merge a portion
- * of tx request. Do not increment any
- * pointer / counter here. The txp
- * will be dealt with in future
- * rounds, eventually hitting the
- * `else` branch.
- */
- gop->len = PAGE_SIZE - dst_offset;
- txp->offset += gop->len;
- txp->size -= gop->len;
- dst_offset += gop->len; /* quit loop */
- } else {
- /* This tx request can be merged in the page */
- gop->len = txp->size;
- dst_offset += gop->len;
-
- index = pending_index(vif->pending_cons++);
-
- pending_idx = vif->pending_ring[index];
-
- memcpy(&pending_tx_info[pending_idx].req, txp,
- sizeof(*txp));
-
- /* Poison these fields, corresponding
- * fields for head tx req will be set
- * to correct values after the loop.
- */
- vif->mmap_pages[pending_idx] = (void *)(~0UL);
- pending_tx_info[pending_idx].head =
- INVALID_PENDING_RING_IDX;
-
- if (!first) {
- first = &pending_tx_info[pending_idx];
- start_idx = index;
- head_idx = pending_idx;
- }
-
- txp++;
- slot++;
- }
+ if (frag_overflow) {
+ struct sk_buff *nskb = xenvif_alloc_skb(0);
+ if (unlikely(nskb == NULL)) {
+ if (net_ratelimit())
+ netdev_err(vif->dev,
+ "Can't allocate the frag_list skb.\n");
+ return NULL;
+ }
+
+ shinfo = skb_shinfo(nskb);
+ frags = shinfo->frags;
- gop++;
+ for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
+ shinfo->nr_frags++, txp++, gop++) {
+ index = pending_index(vif->pending_cons++);
+ pending_idx = vif->pending_ring[index];
+ xenvif_tx_create_gop(vif, pending_idx, txp, gop);
+ frag_set_pending_idx(&frags[shinfo->nr_frags],
+ pending_idx);
}
- first->req.offset = 0;
- first->req.size = dst_offset;
- first->head = start_idx;
- vif->mmap_pages[head_idx] = page;
- frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
+ skb_shinfo(skb)->frag_list = nskb;
}
- BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
-
return gop;
-err:
- /* Unwind, freeing all pages and sending error responses. */
- while (shinfo->nr_frags-- > start) {
- xenvif_idx_release(vif,
- frag_get_pending_idx(&frags[shinfo->nr_frags]),
- XEN_NETIF_RSP_ERROR);
+}
+
+static inline void xenvif_grant_handle_set(struct xenvif *vif,
+ u16 pending_idx,
+ grant_handle_t handle)
+{
+ if (unlikely(vif->grant_tx_handle[pending_idx] !=
+ NETBACK_INVALID_HANDLE)) {
+ netdev_err(vif->dev,
+ "Trying to overwrite active handle! pending_idx: %x\n",
+ pending_idx);
+ BUG();
}
- /* The head too, if necessary. */
- if (start)
- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+ vif->grant_tx_handle[pending_idx] = handle;
+}
- return NULL;
+static inline void xenvif_grant_handle_reset(struct xenvif *vif,
+ u16 pending_idx)
+{
+ if (unlikely(vif->grant_tx_handle[pending_idx] ==
+ NETBACK_INVALID_HANDLE)) {
+ netdev_err(vif->dev,
+ "Trying to unmap invalid handle! pending_idx: %x\n",
+ pending_idx);
+ BUG();
+ }
+ vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}
static int xenvif_tx_check_gop(struct xenvif *vif,
struct sk_buff *skb,
- struct gnttab_copy **gopp)
+ struct gnttab_map_grant_ref **gopp)
{
- struct gnttab_copy *gop = *gopp;
- u16 pending_idx = *((u16 *)skb->data);
+ struct gnttab_map_grant_ref *gop = *gopp;
+ u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
struct skb_shared_info *shinfo = skb_shinfo(skb);
struct pending_tx_info *tx_info;
int nr_frags = shinfo->nr_frags;
int i, err, start;
- u16 peek; /* peek into next tx request */
+ struct sk_buff *first_skb = NULL;
/* Check status of header. */
err = gop->status;
if (unlikely(err))
xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+ else
+ xenvif_grant_handle_set(vif, pending_idx, gop->handle);
/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+check_frags:
for (i = start; i < nr_frags; i++) {
int j, newerr;
- pending_ring_idx_t head;
pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
tx_info = &vif->pending_tx_info[pending_idx];
- head = tx_info->head;
/* Check error status: if okay then remember grant handle. */
- do {
- newerr = (++gop)->status;
- if (newerr)
- break;
- peek = vif->pending_ring[pending_index(++head)];
- } while (!pending_tx_is_head(vif, peek));
+ newerr = (++gop)->status;
if (likely(!newerr)) {
+ xenvif_grant_handle_set(vif, pending_idx, gop->handle);
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err))
- xenvif_idx_release(vif, pending_idx,
- XEN_NETIF_RSP_OKAY);
+ xenvif_idx_unmap(vif, pending_idx);
continue;
}
@@ -942,20 +984,45 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
/* Not the first error? Preceding frags already invalidated. */
if (err)
continue;
-
/* First error: invalidate header and preceding fragments. */
- pending_idx = *((u16 *)skb->data);
- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+ if (!first_skb)
+ pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+ else
+ pending_idx = XENVIF_TX_CB(first_skb)->pending_idx;
+ xenvif_idx_unmap(vif, pending_idx);
for (j = start; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xenvif_idx_release(vif, pending_idx,
- XEN_NETIF_RSP_OKAY);
+ xenvif_idx_unmap(vif, pending_idx);
}
/* Remember the error: invalidate all subsequent fragments. */
err = newerr;
}
+ if (skb_has_frag_list(skb)) {
+ first_skb = skb;
+ skb = shinfo->frag_list;
+ shinfo = skb_shinfo(skb);
+ nr_frags = shinfo->nr_frags;
+ start = 0;
+
+ goto check_frags;
+ }
+
+ /* There was a mapping error in the frag_list skb. We have to unmap
+ * the first skb's frags
+ */
+ if (first_skb && err) {
+ int j;
+ shinfo = skb_shinfo(first_skb);
+ pending_idx = XENVIF_TX_CB(first_skb)->pending_idx;
+ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+ for (j = start; j < shinfo->nr_frags; j++) {
+ pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
+ xenvif_idx_unmap(vif, pending_idx);
+ }
+ }
+
*gopp = gop + 1;
return err;
}
@@ -965,6 +1032,10 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
int i;
+ u16 prev_pending_idx = INVALID_PENDING_IDX;
+
+ if (skb_shinfo(skb)->destructor_arg)
+ prev_pending_idx = XENVIF_TX_CB(skb)->pending_idx;
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = shinfo->frags + i;
@@ -974,6 +1045,17 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
pending_idx = frag_get_pending_idx(frag);
+ /* If this is not the first frag, chain it to the previous */
+ if (unlikely(prev_pending_idx == INVALID_PENDING_IDX))
+ skb_shinfo(skb)->destructor_arg =
+ &callback_param(vif, pending_idx);
+ else if (likely(pending_idx != prev_pending_idx))
+ callback_param(vif, prev_pending_idx).ctx =
+ &callback_param(vif, pending_idx);
+
+ callback_param(vif, pending_idx).ctx = NULL;
+ prev_pending_idx = pending_idx;
+
txp = &vif->pending_tx_info[pending_idx].req;
page = virt_to_page(idx_to_kaddr(vif, pending_idx));
__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
@@ -981,10 +1063,15 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
skb->data_len += txp->size;
skb->truesize += txp->size;
- /* Take an extra reference to offset xenvif_idx_release */
+ /* Take an extra reference to offset network stack's put_page */
get_page(vif->mmap_pages[pending_idx]);
- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
}
+ /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
+ * overlaps with "index", and "mapping" is not set. I think mapping
+ * should be set. If delivered to local stack, it would drop this
+ * skb in sk_filter unless the socket has the right to use it.
+ */
+ skb->pfmemalloc = false;
}
static int xenvif_get_extras(struct xenvif *vif,
@@ -1104,16 +1191,13 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
{
- struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
+ struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop;
struct sk_buff *skb;
int ret;
- while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
- < MAX_PENDING_REQS) &&
- (skb_queue_len(&vif->tx_queue) < budget)) {
+ while (skb_queue_len(&vif->tx_queue) < budget) {
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
- struct page *page;
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
u16 pending_idx;
RING_IDX idx;
@@ -1129,7 +1213,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
vif->tx.sring->req_prod, vif->tx.req_cons,
XEN_NETIF_TX_RING_SIZE);
xenvif_fatal_tx_err(vif);
- continue;
+ break;
}
work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
@@ -1189,8 +1273,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
PKT_PROT_LEN : txreq.size;
- skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = xenvif_alloc_skb(data_len);
if (unlikely(skb == NULL)) {
netdev_dbg(vif->dev,
"Can't allocate a skb in start_xmit.\n");
@@ -1198,9 +1281,6 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
break;
}
- /* Packets passed to netif_rx() must have some headroom. */
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-
if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1212,31 +1292,11 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
}
}
- /* XXX could copy straight to head */
- page = xenvif_alloc_page(vif, pending_idx);
- if (!page) {
- kfree_skb(skb);
- xenvif_tx_err(vif, &txreq, idx);
- break;
- }
-
- gop->source.u.ref = txreq.gref;
- gop->source.domid = vif->domid;
- gop->source.offset = txreq.offset;
-
- gop->dest.u.gmfn = virt_to_mfn(page_address(page));
- gop->dest.domid = DOMID_SELF;
- gop->dest.offset = txreq.offset;
-
- gop->len = txreq.size;
- gop->flags = GNTCOPY_source_gref;
+ xenvif_tx_create_gop(vif, pending_idx, &txreq, gop);
gop++;
- memcpy(&vif->pending_tx_info[pending_idx].req,
- &txreq, sizeof(txreq));
- vif->pending_tx_info[pending_idx].head = index;
- *((u16 *)skb->data) = pending_idx;
+ XENVIF_TX_CB(skb)->pending_idx = pending_idx;
__skb_put(skb, data_len);
@@ -1264,17 +1324,82 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
vif->tx.req_cons = idx;
- if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
+ if ((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops))
break;
}
- return gop - vif->tx_copy_ops;
+ return gop - vif->tx_map_ops;
}
+/* Consolidate skb with a frag_list into a brand new one with local pages on
+ * frags. Returns 0 or -ENOMEM if it can't allocate new pages.
+ */
+static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
+{
+ unsigned int offset = skb_headlen(skb);
+ skb_frag_t frags[MAX_SKB_FRAGS];
+ int i;
+ struct ubuf_info *uarg;
+ struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+
+ vif->tx_zerocopy_sent += 2;
+ vif->tx_frag_overflow++;
+
+ xenvif_fill_frags(vif, nskb);
+ /* Subtract frags size, we will correct it later */
+ skb->truesize -= skb->data_len;
+ skb->len += nskb->len;
+ skb->data_len += nskb->len;
+
+ /* create a brand new frags array and coalesce there */
+ for (i = 0; offset < skb->len; i++) {
+ struct page *page;
+ unsigned int len;
+
+ BUG_ON(i >= MAX_SKB_FRAGS);
+ page = alloc_page(GFP_ATOMIC|__GFP_COLD);
+ if (!page) {
+ int j;
+ skb->truesize += skb->data_len;
+ for (j = 0; j < i; j++)
+ put_page(frags[j].page.p);
+ return -ENOMEM;
+ }
+
+ if (offset + PAGE_SIZE < skb->len)
+ len = PAGE_SIZE;
+ else
+ len = skb->len - offset;
+ if (skb_copy_bits(skb, offset, page_address(page), len))
+ BUG();
+
+ offset += len;
+ frags[i].page.p = page;
+ frags[i].page_offset = 0;
+ skb_frag_size_set(&frags[i], len);
+ }
+ /* swap out with old one */
+ memcpy(skb_shinfo(skb)->frags,
+ frags,
+ i * sizeof(skb_frag_t));
+ skb_shinfo(skb)->nr_frags = i;
+ skb->truesize += i * PAGE_SIZE;
+
+ /* remove traces of mapped pages and frag_list */
+ skb_frag_list_init(skb);
+ uarg = skb_shinfo(skb)->destructor_arg;
+ uarg->callback(uarg, true);
+ skb_shinfo(skb)->destructor_arg = NULL;
+
+ skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ kfree_skb(nskb);
+
+ return 0;
+}
static int xenvif_tx_submit(struct xenvif *vif)
{
- struct gnttab_copy *gop = vif->tx_copy_ops;
+ struct gnttab_map_grant_ref *gop = vif->tx_map_ops;
struct sk_buff *skb;
int work_done = 0;
@@ -1283,7 +1408,7 @@ static int xenvif_tx_submit(struct xenvif *vif)
u16 pending_idx;
unsigned data_len;
- pending_idx = *((u16 *)skb->data);
+ pending_idx = XENVIF_TX_CB(skb)->pending_idx;
txp = &vif->pending_tx_info[pending_idx].req;
/* Check the remap error code. */
@@ -1298,14 +1423,16 @@ static int xenvif_tx_submit(struct xenvif *vif)
memcpy(skb->data,
(void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
data_len);
+ callback_param(vif, pending_idx).ctx = NULL;
if (data_len < txp->size) {
/* Append the packet payload as a fragment. */
txp->offset += data_len;
txp->size -= data_len;
+ skb_shinfo(skb)->destructor_arg =
+ &callback_param(vif, pending_idx);
} else {
/* Schedule a response immediately. */
- xenvif_idx_release(vif, pending_idx,
- XEN_NETIF_RSP_OKAY);
+ xenvif_idx_unmap(vif, pending_idx);
}
if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1315,6 +1442,17 @@ static int xenvif_tx_submit(struct xenvif *vif)
xenvif_fill_frags(vif, skb);
+ if (unlikely(skb_has_frag_list(skb))) {
+ if (xenvif_handle_frag_list(vif, skb)) {
+ if (net_ratelimit())
+ netdev_err(vif->dev,
+ "Not enough memory to consolidate frag_list!\n");
+ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ kfree_skb(skb);
+ continue;
+ }
+ }
+
if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
int target = min_t(int, skb->len, PKT_PROT_LEN);
__pskb_pull_tail(skb, target - skb_headlen(skb));
@@ -1327,6 +1465,9 @@ static int xenvif_tx_submit(struct xenvif *vif)
if (checksum_setup(vif, skb)) {
netdev_dbg(vif->dev,
"Can't setup checksum in net_tx_action\n");
+ /* We have to set this flag to trigger the callback */
+ if (skb_shinfo(skb)->destructor_arg)
+ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
kfree_skb(skb);
continue;
}
@@ -1352,17 +1493,126 @@ static int xenvif_tx_submit(struct xenvif *vif)
work_done++;
+ /* Set this flag right before netif_receive_skb, otherwise
+ * someone might think this packet has already left netback, and
+ * do a skb_copy_ubufs while we are still in control of the
+ * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
+ */
+ if (skb_shinfo(skb)->destructor_arg) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ vif->tx_zerocopy_sent++;
+ }
+
netif_receive_skb(skb);
}
return work_done;
}
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
+{
+ unsigned long flags;
+ pending_ring_idx_t index;
+ struct xenvif *vif = ubuf_to_vif(ubuf);
+
+ /* This is the only place where we grab this lock, to protect callbacks
+ * from each other.
+ */
+ spin_lock_irqsave(&vif->callback_lock, flags);
+ do {
+ u16 pending_idx = ubuf->desc;
+ ubuf = (struct ubuf_info *) ubuf->ctx;
+ BUG_ON(vif->dealloc_prod - vif->dealloc_cons >=
+ MAX_PENDING_REQS);
+ index = pending_index(vif->dealloc_prod);
+ vif->dealloc_ring[index] = pending_idx;
+ /* Sync with xenvif_tx_dealloc_action:
+ * insert idx then incr producer.
+ */
+ smp_wmb();
+ vif->dealloc_prod++;
+ } while (ubuf);
+ wake_up(&vif->dealloc_wq);
+ spin_unlock_irqrestore(&vif->callback_lock, flags);
+
+ if (likely(zerocopy_success))
+ vif->tx_zerocopy_success++;
+ else
+ vif->tx_zerocopy_fail++;
+}
+
+static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
+{
+ struct gnttab_unmap_grant_ref *gop;
+ pending_ring_idx_t dc, dp;
+ u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
+ unsigned int i = 0;
+
+ dc = vif->dealloc_cons;
+ gop = vif->tx_unmap_ops;
+
+ /* Free up any grants we have finished using */
+ do {
+ dp = vif->dealloc_prod;
+
+ /* Ensure we see all indices enqueued by all
+ * xenvif_zerocopy_callback().
+ */
+ smp_rmb();
+
+ while (dc != dp) {
+ BUG_ON(gop - vif->tx_unmap_ops > MAX_PENDING_REQS);
+ pending_idx =
+ vif->dealloc_ring[pending_index(dc++)];
+
+ pending_idx_release[gop-vif->tx_unmap_ops] =
+ pending_idx;
+ vif->pages_to_unmap[gop-vif->tx_unmap_ops] =
+ vif->mmap_pages[pending_idx];
+ gnttab_set_unmap_op(gop,
+ idx_to_kaddr(vif, pending_idx),
+ GNTMAP_host_map,
+ vif->grant_tx_handle[pending_idx]);
+ xenvif_grant_handle_reset(vif, pending_idx);
+ ++gop;
+ }
+
+ } while (dp != vif->dealloc_prod);
+
+ vif->dealloc_cons = dc;
+
+ if (gop - vif->tx_unmap_ops > 0) {
+ int ret;
+ ret = gnttab_unmap_refs(vif->tx_unmap_ops,
+ NULL,
+ vif->pages_to_unmap,
+ gop - vif->tx_unmap_ops);
+ if (ret) {
+ netdev_err(vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
+ gop - vif->tx_unmap_ops, ret);
+ for (i = 0; i < gop - vif->tx_unmap_ops; ++i) {
+ if (gop[i].status != GNTST_okay)
+ netdev_err(vif->dev,
+ " host_addr: %llx handle: %x status: %d\n",
+ gop[i].host_addr,
+ gop[i].handle,
+ gop[i].status);
+ }
+ BUG();
+ }
+ }
+
+ for (i = 0; i < gop - vif->tx_unmap_ops; ++i)
+ xenvif_idx_release(vif, pending_idx_release[i],
+ XEN_NETIF_RSP_OKAY);
+}
+
+
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif *vif, int budget)
{
unsigned nr_gops;
- int work_done;
+ int work_done, ret;
if (unlikely(!tx_work_todo(vif)))
return 0;
@@ -1372,7 +1622,11 @@ int xenvif_tx_action(struct xenvif *vif, int budget)
if (nr_gops == 0)
return 0;
- gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
+ ret = gnttab_map_refs(vif->tx_map_ops,
+ NULL,
+ vif->pages_to_map,
+ nr_gops);
+ BUG_ON(ret);
work_done = xenvif_tx_submit(vif);
@@ -1383,45 +1637,18 @@ static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
u8 status)
{
struct pending_tx_info *pending_tx_info;
- pending_ring_idx_t head;
- u16 peek; /* peek into next tx request */
-
- BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));
-
- /* Already complete? */
- if (vif->mmap_pages[pending_idx] == NULL)
- return;
+ pending_ring_idx_t index;
+ unsigned long flags;
pending_tx_info = &vif->pending_tx_info[pending_idx];
-
- head = pending_tx_info->head;
-
- BUG_ON(!pending_tx_is_head(vif, head));
- BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);
-
- do {
- pending_ring_idx_t index;
- pending_ring_idx_t idx = pending_index(head);
- u16 info_idx = vif->pending_ring[idx];
-
- pending_tx_info = &vif->pending_tx_info[info_idx];
- make_tx_response(vif, &pending_tx_info->req, status);
-
- /* Setting any number other than
- * INVALID_PENDING_RING_IDX indicates this slot is
- * starting a new packet / ending a previous packet.
- */
- pending_tx_info->head = 0;
-
- index = pending_index(vif->pending_prod++);
- vif->pending_ring[index] = vif->pending_ring[info_idx];
-
- peek = vif->pending_ring[pending_index(++head)];
-
- } while (!pending_tx_is_head(vif, peek));
-
- put_page(vif->mmap_pages[pending_idx]);
- vif->mmap_pages[pending_idx] = NULL;
+ spin_lock_irqsave(&vif->response_lock, flags);
+ make_tx_response(vif, &pending_tx_info->req, status);
+ index = pending_index(vif->pending_prod);
+ vif->pending_ring[index] = pending_idx;
+ /* TX shouldn't use the index before we give it back here */
+ mb();
+ vif->pending_prod++;
+ spin_unlock_irqrestore(&vif->response_lock, flags);
}
@@ -1469,23 +1696,54 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
return resp;
}
+void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
+{
+ int ret;
+ struct gnttab_unmap_grant_ref tx_unmap_op;
+
+ gnttab_set_unmap_op(&tx_unmap_op,
+ idx_to_kaddr(vif, pending_idx),
+ GNTMAP_host_map,
+ vif->grant_tx_handle[pending_idx]);
+ xenvif_grant_handle_reset(vif, pending_idx);
+
+ ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
+ &vif->mmap_pages[pending_idx], 1);
+ if (ret) {
+ netdev_err(vif->dev,
+ "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
+ ret,
+ pending_idx,
+ tx_unmap_op.host_addr,
+ tx_unmap_op.handle,
+ tx_unmap_op.status);
+ BUG();
+ }
+
+ xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+}
+
static inline int rx_work_todo(struct xenvif *vif)
{
- return !skb_queue_empty(&vif->rx_queue) &&
- xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
+ return (!skb_queue_empty(&vif->rx_queue) &&
+ xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) ||
+ vif->rx_queue_purge;
}
static inline int tx_work_todo(struct xenvif *vif)
{
- if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
- (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
- < MAX_PENDING_REQS))
+ if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
return 1;
return 0;
}
+static inline bool tx_dealloc_work_todo(struct xenvif *vif)
+{
+ return vif->dealloc_cons != vif->dealloc_prod;
+}
+
void xenvif_unmap_frontend_rings(struct xenvif *vif)
{
if (vif->tx.sring)
@@ -1543,7 +1801,7 @@ static void xenvif_start_queue(struct xenvif *vif)
netif_wake_queue(vif->dev);
}
-int xenvif_kthread(void *data)
+int xenvif_kthread_guest_rx(void *data)
{
struct xenvif *vif = data;
struct sk_buff *skb;
@@ -1551,16 +1809,34 @@ int xenvif_kthread(void *data)
while (!kthread_should_stop()) {
wait_event_interruptible(vif->wq,
rx_work_todo(vif) ||
+ vif->disabled ||
kthread_should_stop());
+
+ /* This frontend was found to be rogue; disable it in
+ * kthread context. Currently this is only set when
+ * netback finds out the frontend sends a malformed
+ * packet, but we cannot disable the interface in
+ * softirq context, so we defer it here.
+ */
+ if (unlikely(vif->disabled && netif_carrier_ok(vif->dev)))
+ xenvif_carrier_off(vif);
+
if (kthread_should_stop())
break;
+ if (vif->rx_queue_purge) {
+ skb_queue_purge(&vif->rx_queue);
+ vif->rx_queue_purge = false;
+ }
+
if (!skb_queue_empty(&vif->rx_queue))
xenvif_rx_action(vif);
if (skb_queue_empty(&vif->rx_queue) &&
- netif_queue_stopped(vif->dev))
+ netif_queue_stopped(vif->dev)) {
+ del_timer_sync(&vif->wake_queue);
xenvif_start_queue(vif);
+ }
cond_resched();
}
@@ -1572,6 +1848,28 @@ int xenvif_kthread(void *data)
return 0;
}
+int xenvif_dealloc_kthread(void *data)
+{
+ struct xenvif *vif = data;
+
+ while (!kthread_should_stop()) {
+ wait_event_interruptible(vif->dealloc_wq,
+ tx_dealloc_work_todo(vif) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+
+ xenvif_tx_dealloc_action(vif);
+ cond_resched();
+ }
+
+ /* Unmap anything remaining */
+ if (tx_dealloc_work_todo(vif))
+ xenvif_tx_dealloc_action(vif);
+
+ return 0;
+}
+
static int __init netback_init(void)
{
int rc = 0;
@@ -1589,6 +1887,8 @@ static int __init netback_init(void)
if (rc)
goto failed_init;
+ rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
+
return 0;
failed_init: