Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--  net/ipv4/tcp.c | 55
1 file changed, 35 insertions(+), 20 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0f8caf64caa3..6596b4feeddc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -378,7 +378,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
struct sock *sk = sock->sk;
struct tcp_sock *tp = tcp_sk(sk);
- sock_poll_wait(file, sk->sk_sleep, wait);
+ sock_poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == TCP_LISTEN)
return inet_csk_listen_poll(sk);
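Note: the poll path now reaches the socket wait queue through the
sk_sleep() accessor instead of dereferencing sk->sk_sleep directly.
A minimal sketch of such an accessor, assuming it simply wraps the
field as it did when first introduced:

        static inline wait_queue_head_t *sk_sleep(struct sock *sk)
        {
                /* single point of access; the backing storage can
                 * change later without touching every call site */
                return sk->sk_sleep;
        }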
@@ -2215,7 +2215,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
default:
/* fallthru */
break;
- };
+ }
if (optlen < sizeof(int))
return -EINVAL;
@@ -2298,7 +2298,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
if (sock_flag(sk, SOCK_KEEPOPEN) &&
!((1 << sk->sk_state) &
(TCPF_CLOSE | TCPF_LISTEN))) {
- __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
+ u32 elapsed = keepalive_time_elapsed(tp);
if (tp->keepalive_time > elapsed)
elapsed = tp->keepalive_time - elapsed;
else
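Note: the open-coded elapsed-time computation is replaced by a
keepalive_time_elapsed() helper. A simplified sketch of what it
computes, equivalent to the line removed above (the real helper in
include/net/tcp.h may also take the last received ACK into account):

        static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
        {
                /* jiffies-based time since we last heard from the peer */
                return tcp_time_stamp - tp->rcv_tstamp;
        }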
@@ -2721,7 +2721,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
struct tcphdr *th2;
unsigned int len;
unsigned int thlen;
- unsigned int flags;
+ __be32 flags;
unsigned int mss = 1;
unsigned int hlen;
unsigned int off;
@@ -2771,10 +2771,10 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
found:
flush = NAPI_GRO_CB(p)->flush;
- flush |= flags & TCP_FLAG_CWR;
- flush |= (flags ^ tcp_flag_word(th2)) &
- ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
- flush |= th->ack_seq ^ th2->ack_seq;
+ flush |= (__force int)(flags & TCP_FLAG_CWR);
+ flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
+ ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
+ flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
for (i = sizeof(*th); i < thlen; i += 4)
flush |= *(u32 *)((u8 *)th + i) ^
*(u32 *)((u8 *)th2 + i);
@@ -2795,8 +2795,9 @@ found:
out_check_final:
flush = len < mss;
- flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
- TCP_FLAG_SYN | TCP_FLAG_FIN);
+ flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
+ TCP_FLAG_RST | TCP_FLAG_SYN |
+ TCP_FLAG_FIN));
if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
pp = head;
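Note: tcp_flag_word() returns the TCP flag bits in network byte order,
so flags is now declared __be32 and each use is funneled through a
(__force int) cast. The casts cost nothing at run time; they only tell
sparse that mixing the endian-annotated type with plain int is
intentional here. The pattern in isolation (th being a struct tcphdr *
as in the hunks above):

        __be32 flags = tcp_flag_word(th);   /* network byte order */
        int flush = 0;

        /* the bits are only tested for equality, never used as an
         * integer value, so dropping the annotation is safe */
        flush |= (__force int)(flags & TCP_FLAG_CWR);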
@@ -2839,7 +2840,6 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
if (p->md5_desc.tfm)
crypto_free_hash(p->md5_desc.tfm);
kfree(p);
- p = NULL;
}
}
free_percpu(pool);
@@ -2937,25 +2937,40 @@ retry:
EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
-struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
+
+/**
+ * tcp_get_md5sig_pool - get md5sig_pool for this user
+ *
+ * We use a per-cpu structure, so if we succeed, we exit with preemption
+ * and BH disabled, to make sure another thread or softirq handler
+ * won't try to get the same context.
+ */
+struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
struct tcp_md5sig_pool * __percpu *p;
- spin_lock_bh(&tcp_md5sig_pool_lock);
+
+ local_bh_disable();
+
+ spin_lock(&tcp_md5sig_pool_lock);
p = tcp_md5sig_pool;
if (p)
tcp_md5sig_users++;
- spin_unlock_bh(&tcp_md5sig_pool_lock);
- return (p ? *per_cpu_ptr(p, cpu) : NULL);
-}
+ spin_unlock(&tcp_md5sig_pool_lock);
+
+ if (p)
+ return *per_cpu_ptr(p, smp_processor_id());
-EXPORT_SYMBOL(__tcp_get_md5sig_pool);
+ local_bh_enable();
+ return NULL;
+}
+EXPORT_SYMBOL(tcp_get_md5sig_pool);
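Note: callers must pair the two entry points, because
tcp_get_md5sig_pool() returns with BH disabled on success. A
hypothetical usage sketch, mirroring how the MD5 hash helpers later
in this file use the pool:

        struct tcp_md5sig_pool *hp;

        hp = tcp_get_md5sig_pool();
        if (!hp)
                return 1;           /* pool not allocated */
        /* ... feed data into hp->md5_desc ... */
        tcp_put_md5sig_pool();      /* re-enables BH */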
-void __tcp_put_md5sig_pool(void)
+void tcp_put_md5sig_pool(void)
{
+ local_bh_enable();
tcp_free_md5sig_pool();
}
-
-EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+EXPORT_SYMBOL(tcp_put_md5sig_pool);
int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
struct tcphdr *th)