author     Ben Hutchings <ben@decadent.org.uk>    2015-02-10 00:05:18 +0000
committer  Ben Hutchings <ben@decadent.org.uk>    2015-02-20 00:49:33 +0000
commit     649917478f4c580f6c0a46c99ebff7381581530b (patch)
tree       58777d362c8297297de6ce2a6c44c67e961a84c9 /net
parent     9fefe0d3228bb6a874bf3bf7628dd198347abf64 (diff)
Revert "tcp: Apply device TSO segment limit earlier"
This reverts commit 9f871e883277cc22c6217db806376dce52401a31, which was
commit 1485348d2424e1131ea42efc033cbd9366462b01 upstream.

It can cause connections to stall when a PMTU event occurs.  This was
fixed by commit 843925f33fcc ("tcp: Do not apply TSO segment limit to
non-TSO packets") upstream, but that depends on other changes to TSO.

The original issue this fixed was a performance regression for the sfc
driver in extreme cases of TSO (an skb with > 100 segments).  That is
not very important, and it seems best to revert it rather than try to
fix it up.

Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: netdev@vger.kernel.org
Cc: linux-net-drivers@solarflare.com
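For reference, the core of the reverted change was to clamp the TCP size
goal and related transmit-path limits to the device's segment limit,
sk->sk_gso_max_segs, as the hunks below show.  Here is a minimal
user-space sketch of that clamp, with simplified types; the helper name
and the example values are made up for illustration, and this is not
kernel code:

	#include <stdio.h>

	typedef unsigned short u16;
	typedef unsigned int u32;

	/* Sketch of the clamp in tcp_xmit_size_goal() that this revert
	 * removes: the size goal, in segments, was bounded by the
	 * device's gso_max_segs before being converted back to bytes. */
	static u32 size_goal_segs(u32 xmit_size_goal, u32 mss_now,
				  u16 gso_max_segs)
	{
		u32 segs = xmit_size_goal / mss_now;

		if (segs > gso_max_segs)	/* the clamp being reverted */
			segs = gso_max_segs;
		return segs;
	}

	int main(void)
	{
		u32 mss = 1448;		/* typical MSS with TCP timestamps */
		u32 goal = 64 * 1024;	/* 64 KiB size goal -> 45 segments */
		u16 dev_limit = 32;	/* hypothetical device TSO limit */

		printf("clamped:   %u segs\n",
		       size_goal_segs(goal, mss, dev_limit));
		printf("unclamped: %u segs\n", goal / mss);
		return 0;
	}

With the revert, tp->xmit_size_goal_segs is simply xmit_size_goal /
mss_now, and a device's segment limit is once again dealt with lower in
the stack, as it was before the reverted commit.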
Diffstat (limited to 'net')
-rw-r--r--  net/core/sock.c        |  1 -
-rw-r--r--  net/ipv4/tcp.c         |  4 +---
-rw-r--r--  net/ipv4/tcp_cong.c    |  3 +--
-rw-r--r--  net/ipv4/tcp_output.c  | 21 +++++++++++------------
4 files changed, 11 insertions(+), 18 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index 8a2c2dd8b5b8..e0935282945f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1311,7 +1311,6 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 		} else {
 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 			sk->sk_gso_max_size = dst->dev->gso_max_size;
-			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
 		}
 	}
 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 32c9e83f44a6..9a7c01e5cb8d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -738,9 +738,7 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 			   old_size_goal + mss_now > xmit_size_goal)) {
 			xmit_size_goal = old_size_goal;
 		} else {
-			tp->xmit_size_goal_segs =
-				min_t(u16, xmit_size_goal / mss_now,
-				      sk->sk_gso_max_segs);
+			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
 			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
 		}
 	}
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 6cebfd2df615..850c737e08e2 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -290,8 +290,7 @@ int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
 	left = tp->snd_cwnd - in_flight;
 	if (sk_can_gso(sk) &&
 	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
-	    left * tp->mss_cache < sk->sk_gso_max_size &&
-	    left < sk->sk_gso_max_segs)
+	    left * tp->mss_cache < sk->sk_gso_max_size)
 		return 1;
 	return left <= tcp_max_burst(tp);
 }
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0d5a11834d91..3a37f54c6651 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1320,21 +1320,21 @@ static void tcp_cwnd_validate(struct sock *sk)
  * when we would be allowed to send the split-due-to-Nagle skb fully.
  */
 static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
-					unsigned int mss_now, unsigned int max_segs)
+					unsigned int mss_now, unsigned int cwnd)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
-	u32 needed, window, max_len;
+	u32 needed, window, cwnd_len;
 
 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
-	max_len = mss_now * max_segs;
+	cwnd_len = mss_now * cwnd;
 
-	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
-		return max_len;
+	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
+		return cwnd_len;
 
 	needed = min(skb->len, window);
 
-	if (max_len <= needed)
-		return max_len;
+	if (cwnd_len <= needed)
+		return cwnd_len;
 
 	return needed - needed % mss_now;
 }
@@ -1562,8 +1562,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 	limit = min(send_win, cong_win);
 
 	/* If a full-sized TSO skb can be sent, do it. */
-	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
-			   sk->sk_gso_max_segs * tp->mss_cache))
+	if (limit >= sk->sk_gso_max_size)
 		goto send_now;
 
 	/* Middle in queue won't get any more data, full sendable already? */
@@ -1792,9 +1791,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		limit = mss_now;
 		if (tso_segs > 1 && !tcp_urg_mode(tp))
 			limit = tcp_mss_split_point(sk, skb, mss_now,
-						    min_t(unsigned int,
-							  cwnd_quota,
-							  sk->sk_gso_max_segs));
+						    cwnd_quota);
 
 		if (skb->len > limit &&
 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))