author    Thomas Falcon <tlfalcon@linux.vnet.ibm.com>    2016-12-08 16:40:03 -0600
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2017-04-18 07:14:38 +0200
commit    7d170f270a95639192cfd53dcb15e6d8530b4577 (patch)
tree      83034685c4fadca85e413606960a5eef2e334e7f
parent    ac0cbfbb1e4b84d426f210849492afadbc4b6bb9 (diff)
ibmveth: set correct gso_size and gso_type
commit 7b5967389f5a8dfb9d32843830f5e2717e20995d upstream.

This patch is based on an earlier one submitted by Jon Maxwell with the
following commit message:

"We recently encountered a bug where a few customers using ibmveth on the
same LPAR hit an issue where a TCP session hung when large receive was
enabled. Closer analysis revealed that the session was stuck because one
side was advertising a zero window repeatedly.

We narrowed this down to the fact that the ibmveth driver did not set
gso_size, which is translated by TCP into the MSS later up the stack. The
MSS is used to calculate the TCP window size, and as that was abnormally
large, it was calculating a zero window even though the socket's receive
buffer was completely empty."

We rely on the Virtual I/O Server partition in a pseries environment to
provide the MSS through the TCP header checksum field. The stipulation is
that users should not disable checksum offloading if rx packet aggregation
is enabled through VIOS.

Some firmware offerings provide the MSS in the RX buffer. This is signalled
by a bit in the RX queue descriptor.

Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
Reviewed-by: Pradeep Satyanarayana <pradeeps@linux.vnet.ibm.com>
Reviewed-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Reviewed-by: Jonathan Maxwell <jmaxwell37@gmail.com>
Reviewed-by: David Dai <zdai@us.ibm.com>
Signed-off-by: Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
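For illustration only, here is a minimal userspace sketch (not part of the patch,
and not the driver's API) of the "MSS in the TCP checksum field" convention the
commit message describes: walk past the IPv4 or IPv6 header of a received
aggregated segment and read the 16-bit checksum field as the MSS. The function
name, buffer layout, and length checks here are assumptions made for the example.

    /* Illustrative sketch, not ibmveth driver code. */
    #include <stdint.h>
    #include <stddef.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <netinet/ip.h>
    #include <netinet/ip6.h>
    #include <netinet/tcp.h>

    /* Return the MSS carried in the TCP checksum field of an aggregated
     * packet, or 0 if the packet is not TCP (only TCP is aggregated). */
    static uint16_t mss_from_tcp_csum(const uint8_t *data, size_t len, int is_ipv6)
    {
            size_t offset;

            if (is_ipv6) {
                    const struct ip6_hdr *ip6 = (const struct ip6_hdr *)data;

                    if (len < sizeof(*ip6) || ip6->ip6_nxt != IPPROTO_TCP)
                            return 0;
                    offset = sizeof(*ip6);
            } else {
                    const struct iphdr *iph = (const struct iphdr *)data;

                    if (len < sizeof(*iph) || iph->protocol != IPPROTO_TCP)
                            return 0;
                    offset = iph->ihl * 4;
            }

            if (offset + sizeof(struct tcphdr) > len)
                    return 0;

            /* the checksum field is repurposed to carry the MSS */
            const struct tcphdr *tcph = (const struct tcphdr *)(data + offset);
            return ntohs(tcph->check);
    }

The patch below performs the same extraction in-kernel in ibmveth_rx_mss_helper(),
and additionally zeroes the checksum field after reading it so the repurposed
value is not mistaken for a real TCP checksum further up the stack.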
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 65
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.h | 1
2 files changed, 64 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 7af870a3c549..855c43d8f7e0 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -58,7 +58,7 @@ static struct kobj_type ktype_veth_pool;
static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
-#define ibmveth_driver_version "1.05"
+#define ibmveth_driver_version "1.06"
MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
@@ -137,6 +137,11 @@ static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}
+static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
+{
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
+}
+
static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
@@ -1172,6 +1177,45 @@ map_failed:
goto retry_bounce;
}
+static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
+{
+ int offset = 0;
+
+ /* only TCP packets will be aggregated */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_TCP) {
+ offset = iph->ihl * 4;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ } else {
+ return;
+ }
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data;
+
+ if (iph6->nexthdr == IPPROTO_TCP) {
+ offset = sizeof(struct ipv6hdr);
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ } else {
+ return;
+ }
+ } else {
+ return;
+ }
+ /* if mss is not set through Large Packet bit/mss in rx buffer,
+ * expect that the mss will be written to the tcp header checksum.
+ */
+ if (lrg_pkt) {
+ skb_shinfo(skb)->gso_size = mss;
+ } else if (offset) {
+ struct tcphdr *tcph = (struct tcphdr *)(skb->data + offset);
+
+ skb_shinfo(skb)->gso_size = ntohs(tcph->check);
+ tcph->check = 0;
+ }
+}
+
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
struct ibmveth_adapter *adapter =
@@ -1180,6 +1224,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
int frames_processed = 0;
unsigned long lpar_rc;
struct iphdr *iph;
+ u16 mss = 0;
restart_poll:
while (frames_processed < budget) {
@@ -1197,9 +1242,21 @@ restart_poll:
int length = ibmveth_rxq_frame_length(adapter);
int offset = ibmveth_rxq_frame_offset(adapter);
int csum_good = ibmveth_rxq_csum_good(adapter);
+ int lrg_pkt = ibmveth_rxq_large_packet(adapter);
skb = ibmveth_rxq_get_buffer(adapter);
+ /* if the large packet bit is set in the rx queue
+ * descriptor, the mss will be written by PHYP eight
+ * bytes from the start of the rx buffer, which is
+ * skb->data at this stage
+ */
+ if (lrg_pkt) {
+ __be64 *rxmss = (__be64 *)(skb->data + 8);
+
+ mss = (u16)be64_to_cpu(*rxmss);
+ }
+
new_skb = NULL;
if (length < rx_copybreak)
new_skb = netdev_alloc_skb(netdev, length);
@@ -1233,11 +1290,15 @@ restart_poll:
if (iph->check == 0xffff) {
iph->check = 0;
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
- adapter->rx_large_packets++;
}
}
}
+ if (length > netdev->mtu + ETH_HLEN) {
+ ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
+ adapter->rx_large_packets++;
+ }
+
napi_gro_receive(napi, skb); /* send it up */
netdev->stats.rx_packets++;
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 4eade67fe30c..7acda04d034e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -209,6 +209,7 @@ struct ibmveth_rx_q_entry {
#define IBMVETH_RXQ_TOGGLE 0x80000000
#define IBMVETH_RXQ_TOGGLE_SHIFT 31
#define IBMVETH_RXQ_VALID 0x40000000
+#define IBMVETH_RXQ_LRG_PKT 0x04000000
#define IBMVETH_RXQ_NO_CSUM 0x02000000
#define IBMVETH_RXQ_CSUM_GOOD 0x01000000
#define IBMVETH_RXQ_OFF_MASK 0x0000FFFF