path: root/net
author     Jon Paul Maloy <jon.maloy@ericsson.com>  2016-02-10 16:14:57 -0500
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2016-03-03 15:07:06 -0800
commit     692925fe2d42092a99d3532cb03932c8fda57786
tree       2d364f3bc9d10a9e6165b56d5b09a25fd655138c /net
parent     1bd367857b7f884db302703c624f88adfd8eb171
tipc: fix premature addition of node to lookup table
[ Upstream commit d5c91fb72f1652ea3026925240a0998a42ddb16b ]

In commit 5266698661401a ("tipc: let broadcast packet reception
use new link receive function") we introduced a new per-node
broadcast reception link instance. This link is created at the
moment the node itself is created. Unfortunately, the allocation
is done after the node instance has already been added to the node
lookup hash table. This creates a potential race condition, where
arriving broadcast packets are able to find and access the node
before it has been fully initialized, and before the above mentioned
link has been created. The result is occasional crashes in the
function tipc_bcast_rcv(), which is trying to access the not-yet
existing link.

We fix this by deferring the addition of the node instance until
after it has been fully initialized in the function tipc_node_create().

Acked-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
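For readers outside the TIPC code, the ordering rule the patch restores can be shown with a minimal userspace sketch: an object must be fully initialized before it is published to a lock-free lookup structure, otherwise a concurrent reader can find it half-built. The struct node, node_table, and the C11 atomics/pthreads scaffolding below are illustrative assumptions and not the kernel code; in the real patch, hlist_add_head_rcu() at the end of tipc_node_create() plays the role of the release store.

/* Sketch of the initialize-then-publish rule; simplified, not TIPC code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned int addr;
	void *bc_link;          /* stands in for the per-node broadcast link */
};

/* Single-slot "lookup table": readers load it without taking any lock. */
static _Atomic(struct node *) node_table;

static void *reader(void *arg)
{
	(void)arg;
	for (;;) {              /* busy-waits only to keep the sketch short */
		/* acquire pairs with the release store in main() */
		struct node *n = atomic_load_explicit(&node_table,
						      memory_order_acquire);
		if (n) {
			/* Safe only because the node was fully initialized
			 * before it was made visible to lookups. */
			printf("found node %u, bc_link=%p\n", n->addr, n->bc_link);
			return NULL;
		}
	}
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, reader, NULL);

	struct node *n = calloc(1, sizeof(*n));
	n->addr = 0x1001;
	n->bc_link = malloc(64);        /* initialize everything first ...   */

	/* ... and only then publish the node, which is what moving the
	 * hash-table/list insertion to the end of tipc_node_create() does. */
	atomic_store_explicit(&node_table, n, memory_order_release);

	pthread_join(t, NULL);
	return 0;
}

Building with, e.g., cc -std=c11 -pthread and moving the atomic_store_explicit() above the bc_link assignment reproduces, in miniature, the window the commit closes: the reader can then observe the node with bc_link still unset.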
Diffstat (limited to 'net')
-rw-r--r--  net/tipc/node.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 20cddec0a43c..3926b561f873 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -168,12 +168,6 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
skb_queue_head_init(&n_ptr->bc_entry.inputq1);
__skb_queue_head_init(&n_ptr->bc_entry.arrvq);
skb_queue_head_init(&n_ptr->bc_entry.inputq2);
- hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
- list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
- if (n_ptr->addr < temp_node->addr)
- break;
- }
- list_add_tail_rcu(&n_ptr->list, &temp_node->list);
n_ptr->state = SELF_DOWN_PEER_LEAVING;
n_ptr->signature = INVALID_NODE_SIG;
n_ptr->active_links[0] = INVALID_BEARER_ID;
@@ -193,6 +187,12 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
tipc_node_get(n_ptr);
setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
n_ptr->keepalive_intv = U32_MAX;
+ hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ if (n_ptr->addr < temp_node->addr)
+ break;
+ }
+ list_add_tail_rcu(&n_ptr->list, &temp_node->list);
exit:
spin_unlock_bh(&tn->node_list_lock);
return n_ptr;