From f84e71a94cb5f88d86ab50c251e09379925b80b9 Mon Sep 17 00:00:00 2001
From: Pavel Emelyanov
Date: Tue, 6 May 2008 18:46:36 +0400
Subject: Fix GFP_KERNEL allocation under read lock.

The mesh_path_add() read-locks the pathtbl_resize_lock and calls
kmalloc with GFP_KERNEL mask. Fix it and move the endadd2 label
lower. It should be _before_ the if() beyond, but it makes no sense
for it being there, so I move it right after this if().

Signed-off-by: Pavel Emelyanov
Signed-off-by: John W. Linville
---
 net/mac80211/mesh_pathtbl.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'net/mac80211/mesh_pathtbl.c')

diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 5845dc21ce85..727aa528c831 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -158,14 +158,14 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
 	if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0)
 		return -ENOSPC;
 
-	read_lock(&pathtbl_resize_lock);
-
 	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
 	if (!new_mpath) {
 		atomic_dec(&sdata->u.sta.mpaths);
 		err = -ENOMEM;
 		goto endadd2;
 	}
+
+	read_lock(&pathtbl_resize_lock);
 	memcpy(new_mpath->dst, dst, ETH_ALEN);
 	new_mpath->dev = dev;
 	new_mpath->flags = 0;
@@ -202,7 +202,6 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
 
 endadd:
 	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
-endadd2:
 	read_unlock(&pathtbl_resize_lock);
 	if (!err && grow) {
 		struct mesh_table *oldtbl, *newtbl;
@@ -219,6 +218,7 @@ endadd2:
 		mesh_table_free(oldtbl, false);
 		write_unlock(&pathtbl_resize_lock);
 	}
+endadd2:
 	return err;
 }
--
cgit v1.2.3

From 0eb03d5a14377eecf6ed0ebf3cc2c9f48c12c7c6 Mon Sep 17 00:00:00 2001
From: Pavel Emelyanov
Date: Tue, 6 May 2008 18:49:02 +0400
Subject: Fix not checked kmalloc() result.

The new_node kmallocation is not checked for success, so add this
check. BTW, it also happens under the read_lock.

Signed-off-by: Pavel Emelyanov
Signed-off-by: John W. Linville
---
 net/mac80211/mesh_pathtbl.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

(limited to 'net/mac80211/mesh_pathtbl.c')

diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 727aa528c831..1d2d051e5976 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -164,13 +164,19 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
 		err = -ENOMEM;
 		goto endadd2;
 	}
+	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
+	if (!new_node) {
+		kfree(new_mpath);
+		atomic_dec(&sdata->u.sta.mpaths);
+		err = -ENOMEM;
+		goto endadd2;
+	}
 
 	read_lock(&pathtbl_resize_lock);
 	memcpy(new_mpath->dst, dst, ETH_ALEN);
 	new_mpath->dev = dev;
 	new_mpath->flags = 0;
 	skb_queue_head_init(&new_mpath->frame_queue);
-	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
 	new_node->mpath = new_mpath;
 	new_mpath->timer.data = (unsigned long) new_mpath;
 	new_mpath->timer.function = mesh_path_timer;
--
cgit v1.2.3
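Taken together, the first two patches establish one ordering rule: every
allocation that may sleep (GFP_KERNEL) is performed and checked before the
non-sleeping pathtbl_resize_lock critical section begins. The sketch below
restates that pattern in isolation; the example_* names are hypothetical
stand-ins for the mac80211 ones, and a kernel build context is assumed.

/*
 * Illustrative sketch only -- not the mac80211 code.  GFP_KERNEL
 * allocations may sleep, so both are done, and checked, before the
 * rwlock is taken; inside the lock only GFP_ATOMIC would be allowed.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

static DEFINE_RWLOCK(example_resize_lock);	/* stands in for pathtbl_resize_lock */

struct example_path { int id; };
struct example_node { struct example_path *path; };

static int example_path_add(int id)
{
	struct example_path *p;
	struct example_node *n;

	/* Sleeping allocations first, each one checked. */
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	n = kmalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(p);
		return -ENOMEM;
	}

	/* Only now enter the non-sleeping critical section. */
	read_lock(&example_resize_lock);
	p->id = id;
	n->path = p;
	/* ... link n into the hash table here ... */
	read_unlock(&example_resize_lock);

	return 0;
}

An alternative would have been to leave the allocations inside the lock and
switch them to GFP_ATOMIC, but hoisting them out keeps the more reliable
GFP_KERNEL and shortens the critical section.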
From 6d6936e2ea82ebcbdd12d489b7b5ccf430de52f1 Mon Sep 17 00:00:00 2001
From: Pavel Emelyanov
Date: Tue, 6 May 2008 18:51:31 +0400
Subject: Fix potential scheduling while atomic in mesh_path_add.

Calling synchronize_rcu() under write-lock-ed pathtbl_resize_lock may
result in this warning (and other side effects). It looks safe just
dropping this lock before calling synchronize_rcu.

Signed-off-by: Pavel Emelyanov
Signed-off-by: John W. Linville
---
 net/mac80211/mesh_pathtbl.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'net/mac80211/mesh_pathtbl.c')

diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 1d2d051e5976..99c2d360888e 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -220,9 +220,10 @@ endadd:
 			return -ENOMEM;
 		}
 		rcu_assign_pointer(mesh_paths, newtbl);
+		write_unlock(&pathtbl_resize_lock);
+
 		synchronize_rcu();
 		mesh_table_free(oldtbl, false);
-		write_unlock(&pathtbl_resize_lock);
 	}
 endadd2:
 	return err;
--
cgit v1.2.3
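The last patch follows the usual RCU replace-then-reclaim ordering: the new
table is published with rcu_assign_pointer() while the resize lock is held,
the lock is dropped, and only then does the writer wait for a grace period
and free the old table, because synchronize_rcu() may sleep. A minimal
sketch of that ordering, again with hypothetical example_* names rather
than the mac80211 ones:

/*
 * Illustrative sketch only -- not the mac80211 code.  The write lock
 * serializes writers and protects the pointer switch; the grace-period
 * wait and the free happen after the lock is dropped, since
 * synchronize_rcu() may sleep.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_table { int size; };

static struct example_table *cur_table;
static DEFINE_RWLOCK(example_resize_lock);

static void example_table_replace(struct example_table *newtbl)
{
	struct example_table *oldtbl;

	write_lock(&example_resize_lock);
	oldtbl = cur_table;			/* writers are serialized by the lock */
	rcu_assign_pointer(cur_table, newtbl);	/* publish the new table */
	write_unlock(&example_resize_lock);	/* drop the lock before sleeping */

	synchronize_rcu();			/* wait for current readers; may sleep */
	kfree(oldtbl);				/* no reader can still reference oldtbl */
}

static int example_table_size(void)
{
	struct example_table *t;
	int size;

	rcu_read_lock();
	t = rcu_dereference(cur_table);		/* readers never take the rwlock */
	size = t ? t->size : 0;
	rcu_read_unlock();
	return size;
}

The resize lock only serializes writers; readers go through RCU, so the
free of the old table only needs the grace period to have elapsed, which
is the ordering the patch moves mesh_table_free() to.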