author    Eric Dumazet <edumazet@google.com>    2015-02-17 03:19:24 -0800
committer David S. Miller <davem@davemloft.net>    2015-02-20 15:24:04 -0500
commit    959d10f6bbf6ab5b8813c4e37540a2e43ca2ae96 (patch)
tree      e6175dbec05d9cbc0cdcb4c7c6fa10030d25877f /net/ipv4/igmp.c
parent    64bea46e3ff28701aa34be48b93c7907ebbdb31e (diff)
igmp: add __ip_mc_{join|leave}_group()
There is a need to perform igmp join/leave operations while RTNL is held. Make ip_mc_{join|leave}_group() wrappers around __ip_mc_{join|leave}_group() to avoid the proliferation of work queues. For example, vxlan_igmp_join() could possibly be removed.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
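With this split, a code path that already runs under RTNL can call __ip_mc_join_group() directly instead of deferring the join to a work queue (the vxlan_igmp_join() case mentioned above). Below is a minimal sketch of such a caller; the helper name and its calling context are illustrative only, and the declarations are assumed to come from include/linux/igmp.h, whose change is not shown in this file-limited diff.

#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/rtnetlink.h>
#include <net/sock.h>

/* Hypothetical helper: join @group on @ifindex for @sk from a context
 * that already holds RTNL (e.g. a notifier or ndo_open handler).
 */
static int example_join_under_rtnl(struct sock *sk, __be32 group, int ifindex)
{
        struct ip_mreqn imr = {
                .imr_multiaddr.s_addr = group,
                .imr_ifindex          = ifindex,
        };

        ASSERT_RTNL();  /* caller must hold RTNL; __ip_mc_join_group() asserts it */
        return __ip_mc_join_group(sk, &imr);
}

Callers that do not hold RTNL keep using ip_mc_join_group(), which simply takes rtnl_lock() around the same core function, as the diff below shows.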
Diffstat (limited to 'net/ipv4/igmp.c')
-rw-r--r--    net/ipv4/igmp.c    52
1 file changed, 34 insertions(+), 18 deletions(-)
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 666cf364df86..4b1172d73e03 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1849,30 +1849,25 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc)
pmc->sfcount[MCAST_EXCLUDE] = 1;
}
-
-/*
- * Join a multicast group
- */
-int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
+int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
{
- int err;
__be32 addr = imr->imr_multiaddr.s_addr;
- struct ip_mc_socklist *iml = NULL, *i;
+ struct ip_mc_socklist *iml, *i;
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
int ifindex;
int count = 0;
+ int err;
+
+ ASSERT_RTNL();
if (!ipv4_is_multicast(addr))
return -EINVAL;
- rtnl_lock();
-
in_dev = ip_mc_find_dev(net, imr);
if (!in_dev) {
- iml = NULL;
err = -ENODEV;
goto done;
}
@@ -1900,9 +1895,22 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
ip_mc_inc_group(in_dev, addr);
err = 0;
done:
- rtnl_unlock();
return err;
}
+EXPORT_SYMBOL(__ip_mc_join_group);
+
+/* Join a multicast group
+ */
+int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
+{
+ int ret;
+
+ rtnl_lock();
+ ret = __ip_mc_join_group(sk, imr);
+ rtnl_unlock();
+
+ return ret;
+}
EXPORT_SYMBOL(ip_mc_join_group);
static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
@@ -1925,11 +1933,7 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
return err;
}
-/*
- * Ask a socket to leave a group.
- */
-
-int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
+int __ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
{
struct inet_sock *inet = inet_sk(sk);
struct ip_mc_socklist *iml;
@@ -1940,7 +1944,8 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
u32 ifindex;
int ret = -EADDRNOTAVAIL;
- rtnl_lock();
+ ASSERT_RTNL();
+
in_dev = ip_mc_find_dev(net, imr);
if (!in_dev) {
ret = -ENODEV;
@@ -1964,14 +1969,25 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
*imlp = iml->next_rcu;
ip_mc_dec_group(in_dev, group);
- rtnl_unlock();
+
/* decrease mem now to avoid the memleak warning */
atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
kfree_rcu(iml, rcu);
return 0;
}
out:
+ return ret;
+}
+EXPORT_SYMBOL(__ip_mc_leave_group);
+
+int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
+{
+ int ret;
+
+ rtnl_lock();
+ ret = __ip_mc_leave_group(sk, imr);
rtnl_unlock();
+
return ret;
}
EXPORT_SYMBOL(ip_mc_leave_group);