author     Herbert Xu <herbert@gondor.apana.org.au>  2008-09-15 11:48:46 -0700
committer  Greg Kroah-Hartman <gregkh@suse.de>       2008-10-08 19:44:45 -0700
commit     8c4d2d9f936a353f3336fd521341826a70e0a9fd (patch)
tree       a24f1ced96f022a837884181db0d0ef58bc7dea5
parent     bb5168ca75d488ee10e4653ddbf4cd51a57707f3 (diff)
udp: Fix rcv socket locking
[ Upstream commits d97106ea52aa57e63ff40d04479016836bbb5a4e and
  93821778def10ec1e69aa3ac10adee975dad4ff3 ]

The previous patch in response to the recursive locking on IPsec
reception is broken as it tries to drop the BH socket lock while in
user context.

This patch fixes it by shrinking the section protected by the socket
lock to sock_queue_rcv_skb only.  The only reason we added the lock
is for the accounting which happens in that function.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
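In short, the new locking discipline looks like the following condensed
sketch, distilled from the hunks below (checksum, encapsulation and the
UDP-Lite bookkeeping paths are trimmed for brevity; this is an
illustration of the pattern, not the literal patched source):

/* Inner function: does the queueing and the accounting that is the
 * sole reason the caller holds the socket lock. */
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc = sock_queue_rcv_skb(sk, skb);

	if (rc < 0) {
		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
		UDP_INC_STATS_BH(UDP_MIB_INERRORS, IS_UDPLITE(sk));
		kfree_skb(skb);
		return -1;
	}
	return 0;
}

int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	/* ... IPsec/encap/checksum checks run without the lock ... */

	bh_lock_sock(sk);		/* lock only the queueing step */
	if (!sock_owned_by_user(sk))
		rc = __udp_queue_rcv_skb(sk, skb);
	else
		sk_add_backlog(sk, skb);	/* replayed via .backlog_rcv */
	bh_unlock_sock(sk);

	return rc;
}

Because .backlog_rcv now points at __udp_queue_rcv_skb() rather than
udp_queue_rcv_skb(), the backlog replay never re-enters the path that
takes the lock, so the handler never has to drop it, and the callers in
__udp4_lib_rcv() and __udp4_lib_mcast_deliver() no longer need their own
lock/backlog boilerplate.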
-rw-r--r--  net/ipv4/udp.c  57
-rw-r--r--  net/ipv6/udp.c   6
2 files changed, 36 insertions(+), 27 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9703c870f1df..b924502b5e1e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -956,6 +956,27 @@ int udp_disconnect(struct sock *sk, int flags)
 	return 0;
 }
 
+static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	int is_udplite = IS_UDPLITE(sk);
+	int rc;
+
+	if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
+		/* Note that an ENOMEM error is charged twice */
+		if (rc == -ENOMEM)
+			UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS,
+					 is_udplite);
+		goto drop;
+	}
+
+	return 0;
+
+drop:
+	UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
+	kfree_skb(skb);
+	return -1;
+}
+
 /* returns:
  *  -1: error
  *   0: success
@@ -1046,14 +1067,16 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 		goto drop;
 	}
 
-	if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
-		/* Note that an ENOMEM error is charged twice */
-		if (rc == -ENOMEM)
-			UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite);
-		goto drop;
-	}
+	rc = 0;
 
-	return 0;
+	bh_lock_sock(sk);
+	if (!sock_owned_by_user(sk))
+		rc = __udp_queue_rcv_skb(sk, skb);
+	else
+		sk_add_backlog(sk, skb);
+	bh_unlock_sock(sk);
+
+	return rc;
 
 drop:
 	UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
@@ -1091,15 +1114,7 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
 			skb1 = skb_clone(skb, GFP_ATOMIC);
 
 		if (skb1) {
-			int ret = 0;
-
-			bh_lock_sock_nested(sk);
-			if (!sock_owned_by_user(sk))
-				ret = udp_queue_rcv_skb(sk, skb1);
-			else
-				sk_add_backlog(sk, skb1);
-			bh_unlock_sock(sk);
-
+			int ret = udp_queue_rcv_skb(sk, skb1);
 			if (ret > 0)
 				/* we should probably re-process instead
 				 * of dropping packets here. */
@@ -1192,13 +1207,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 			       uh->dest, inet_iif(skb), udptable);
 
 	if (sk != NULL) {
-		int ret = 0;
-		bh_lock_sock_nested(sk);
-		if (!sock_owned_by_user(sk))
-			ret = udp_queue_rcv_skb(sk, skb);
-		else
-			sk_add_backlog(sk, skb);
-		bh_unlock_sock(sk);
+		int ret = udp_queue_rcv_skb(sk, skb);
 		sock_put(sk);
 
 		/* a return value > 0 means to resubmit the input, but
@@ -1493,7 +1502,7 @@ struct proto udp_prot = {
 	.sendmsg	= udp_sendmsg,
 	.recvmsg	= udp_recvmsg,
 	.sendpage	= udp_sendpage,
-	.backlog_rcv	= udp_queue_rcv_skb,
+	.backlog_rcv	= __udp_queue_rcv_skb,
 	.hash		= udp_lib_hash,
 	.unhash		= udp_lib_unhash,
 	.get_port	= udp_v4_get_port,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 53739de829db..4e36c57dc242 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -373,7 +373,7 @@ static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr,
 					uh->source, saddr, dif))) {
 		struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
 		if (buff) {
-			bh_lock_sock_nested(sk2);
+			bh_lock_sock(sk2);
 			if (!sock_owned_by_user(sk2))
 				udpv6_queue_rcv_skb(sk2, buff);
 			else
@@ -381,7 +381,7 @@ static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr,
 			bh_unlock_sock(sk2);
 		}
 	}
-	bh_lock_sock_nested(sk);
+	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
 	else
@@ -499,7 +499,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 	/* deliver */
-	bh_lock_sock_nested(sk);
+	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
 	else