-rw-r--r--  include/net/sock.h  12
-rw-r--r--  net/ipv4/tcp.c       3
2 files changed, 9 insertions, 6 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index 312cb25cbd18..e51e626e9af1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -709,6 +709,12 @@ static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
 		sk_stream_mem_schedule(sk, skb->truesize, 1);
 }
 
+static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
+{
+	return size <= sk->sk_forward_alloc ||
+	       sk_stream_mem_schedule(sk, size, 0);
+}
+
 /* Used by processes to "lock" a socket state, so that
  * interrupts and bottom half handlers won't change it
  * from under us. It essentially blocks any incoming
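
The new helper folds the common writer-side check into one place: a request is satisfied from the socket's own reserve (sk_forward_alloc) when it fits, and only otherwise falls back to the slower global accounting in sk_stream_mem_schedule(). Below is a minimal userspace sketch of that short-circuit pattern; struct sock, mem_schedule() and the limits here are mock stand-ins for illustration, not the kernel's definitions.

/* Minimal userspace sketch of the sk_stream_wmem_schedule() pattern.
 * "struct sock", mem_schedule() and the limits below are mock
 * stand-ins, not the kernel's real definitions. */
#include <stdio.h>

struct sock {
	int sk_forward_alloc;	/* bytes already reserved for this socket */
};

static int total_allocated;		/* mock global memory accounting */
static const int total_limit = 4096;	/* mock global limit */

/* Slow path: try to reserve "size" bytes against the global limit. */
static int mem_schedule(struct sock *sk, int size)
{
	if (total_allocated + size > total_limit)
		return 0;		/* over the limit: refuse */
	total_allocated += size;
	sk->sk_forward_alloc += size;
	return 1;
}

/* Fast path first: the per-socket reserve satisfies most requests
 * without touching the global counters at all. */
static int wmem_schedule(struct sock *sk, int size)
{
	return size <= sk->sk_forward_alloc || mem_schedule(sk, size);
}

int main(void)
{
	struct sock sk = { .sk_forward_alloc = 1500 };

	printf("1000 bytes: %s\n", wmem_schedule(&sk, 1000) ? "ok" : "fail");
	printf("8192 bytes: %s\n", wmem_schedule(&sk, 8192) ? "ok" : "fail");
	return 0;
}
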
@@ -1203,8 +1209,7 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
 	skb = alloc_skb_fclone(size + hdr_len, gfp);
 	if (skb) {
 		skb->truesize += mem;
-		if (sk->sk_forward_alloc >= (int)skb->truesize ||
-		    sk_stream_mem_schedule(sk, skb->truesize, 0)) {
+		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
 			skb_reserve(skb, hdr_len);
 			return skb;
 		}
@@ -1227,8 +1232,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
 {
 	struct page *page = NULL;
 
-	if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
-	    sk_stream_mem_schedule(sk, PAGE_SIZE, 0))
+	if (sk_stream_wmem_schedule(sk, PAGE_SIZE))
 		page = alloc_pages(sk->sk_allocation, 0);
 	else {
 		sk->sk_prot->enter_memory_pressure();
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 02fdda68718d..854f6d0c4bb3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -552,8 +552,7 @@ new_segment:
 			tcp_mark_push(tp, skb);
 			goto new_segment;
 		}
-		if (sk->sk_forward_alloc < copy &&
-		    !sk_stream_mem_schedule(sk, copy, 0))
+		if (!sk_stream_wmem_schedule(sk, copy))
 			goto wait_for_memory;
 
 		if (can_coalesce) {
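
Taken together, the call-site hunks show the same two-step pattern: schedule write memory first, and only allocate if the accounting agrees; sk_stream_alloc_page() additionally signals memory pressure on failure, while the tcp.c sender jumps to wait_for_memory. Below is a compressed userspace sketch of that caller-side flow; every name in it (wmem_schedule, enter_memory_pressure, alloc_send_page, PAGE_SZ) is an illustrative stand-in, and the global-limit fallback is omitted.

/* Userspace sketch of the call-site pattern from the hunks above:
 * schedule write memory before allocating; on failure, signal memory
 * pressure and back off rather than allocating anyway. All names here
 * are illustrative stand-ins, not kernel APIs. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096

static int forward_alloc = 4096;	/* mock per-socket reserve */

static int wmem_schedule(int size)
{
	return size <= forward_alloc;	/* global fallback omitted here */
}

static void enter_memory_pressure(void)
{
	fprintf(stderr, "memory pressure: backing off\n");
}

/* Mirrors the shape of sk_stream_alloc_page(): allocate only if the
 * accounting says we may; otherwise report pressure and return NULL
 * so the caller can wait for memory. */
static void *alloc_send_page(void)
{
	void *page = NULL;

	if (wmem_schedule(PAGE_SZ)) {
		page = malloc(PAGE_SZ);
		forward_alloc -= PAGE_SZ;	/* charge the reserve */
	} else {
		enter_memory_pressure();
	}
	return page;
}

int main(void)
{
	void *p;

	while ((p = alloc_send_page()) != NULL) {
		printf("got a page, %d bytes of reserve left\n", forward_alloc);
		free(p);	/* sketch only; real code would fill and queue it */
	}
	return 0;	/* second attempt fails: reserve exhausted */
}
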