author     Daniel J Blueman <daniel.blueman@gmail.com>   2010-06-01 14:06:13 +0100
committer  Greg Kroah-Hartman <gregkh@suse.de>           2010-09-20 13:18:10 -0700
commit     0b88f2ba7caa2d0ff6e3521481a82be084ecdc7b (patch)
tree       fe6f9294720b377cd708955e66c56fc5fe35f394
parent     58e9934fe305f5c2caeb252cfc478c9111841d6f (diff)
sched: apply RCU protection to wake_affine()
commit f3b577dec1f2ce32d2db6d2ca6badff7002512af upstream

The task_group() function returns a pointer that must be protected by
either RCU, the ->alloc_lock, or the cgroup lock (see the
rcu_dereference_check() in task_subsys_state(), which is invoked by
task_group()). The wake_affine() function currently does none of these,
which means that a concurrent update would be within its rights to free
the structure returned by task_group(). Because wake_affine() uses this
structure only to compute load-balancing heuristics, there is no reason
to acquire either of the two locks.

Therefore, this commit introduces an RCU read-side critical section that
starts before the first call to task_group() and ends after the last use
of the "tg" pointer returned from task_group(). Thanks to Li Zefan for
pointing out the need to extend the RCU read-side critical section from
that proposed by the original patch.

Signed-off-by: Daniel J Blueman <daniel.blueman@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
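For readers unfamiliar with the idiom, the fix applies the standard RCU
read-side critical section: every dereference of an RCU-protected pointer
must happen between rcu_read_lock() and rcu_read_unlock(), so a concurrent
updater cannot free the object while a reader is still using it. Below is
a minimal schematic sketch of that idiom in kernel-style C. The struct and
function names (example_group, example_read_load, example_update) are
illustrative only and do not appear in this patch; the RCU primitives
themselves are real kernel API, but the sketch will not build outside a
kernel tree.

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    /* Hypothetical RCU-protected structure (not from this patch). */
    struct example_group {
            unsigned long load;
    };

    static struct example_group __rcu *example_ptr;

    /*
     * Reader side: the critical section must cover every use of the
     * pointer, just as the patch extends it over every use of "tg".
     */
    static unsigned long example_read_load(void)
    {
            struct example_group *g;
            unsigned long load = 0;

            rcu_read_lock();
            g = rcu_dereference(example_ptr); /* safe only inside the lock */
            if (g)
                    load = g->load;
            rcu_read_unlock();                /* g must not be used past here */

            return load;
    }

    /*
     * Updater side (serialization against other updaters assumed):
     * publish the new version, wait for pre-existing readers to
     * finish, then free the old one.
     */
    static void example_update(struct example_group *new_g)
    {
            struct example_group *old;

            old = rcu_dereference_protected(example_ptr, 1);
            rcu_assign_pointer(example_ptr, new_g);
            synchronize_rcu();  /* all readers that saw "old" are done */
            kfree(old);
    }

wake_affine() only reads via task_group(), so the patch needs just the
reader half of this pattern; note in the diff below how the unlock is
placed after the last use of "tg" in the balanced computation, per
Li Zefan's review.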
-rw-r--r--  kernel/sched_fair.c  |  2 ++
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index d2c46e761fcd..15a3182baeff 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1250,6 +1250,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * effect of the currently running task from the load
 	 * of the current CPU:
 	 */
+	rcu_read_lock();
 	if (sync) {
 		tg = task_group(current);
 		weight = current->se.load.weight;
@@ -1275,6 +1276,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	balanced = !this_load ||
 		100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
 		imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
+	rcu_read_unlock();
 
 	/*
 	 * If the currently running task will sleep within