path: root/kernel/sched.c
author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-04-18 11:24:34 +0200
committer  Ingo Molnar <mingo@elte.hu>                2011-04-19 10:56:54 +0200
commit     057f3fadb347e9c51b07e1b277bbdda79f976768 (patch)
tree       12f637ad30129245ef51836df1d134257d827dd5 /kernel/sched.c
parent     2f36825b176f67e5c5228aa33d828bc39718811f (diff)
sched: Fix sched_domain iterations vs. RCU
Valdis Kletnieks reported a new RCU debug warning in the scheduler.

Since commit dce840a08702b ("sched: Dynamically allocate sched_domain/
sched_group data-structures") the sched_domain trees are protected by RCU
instead of RCU-sched. This means that we need to include rcu_read_lock()
protection when we iterate them, since disabling preemption doesn't
suffice anymore.

Reported-by: Valdis.Kletnieks@vt.edu
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1302882741.2388.241.camel@twins
Signed-off-by: Ingo Molnar <mingo@elte.hu>
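The reader-side pattern the changelog describes, as a minimal sketch (the helper name find_busy_cpu() and its locals are illustrative, not taken from the patch; for_each_domain(), sched_domain_span(), idle_cpu() and the RCU primitives are the kernel interfaces the hunks below touch):

/*
 * Since dce840a08702b the domain tree is freed under plain RCU, so the walk
 * must sit inside rcu_read_lock()/rcu_read_unlock(); a preemption-disabled
 * region no longer delays the free.  That also means no early return from
 * inside the walk: exit through a label so the read-side critical section
 * is always closed.
 */
static int find_busy_cpu(int cpu)
{
        struct sched_domain *sd;
        int i, target = cpu;

        rcu_read_lock();
        for_each_domain(cpu, sd) {
                for_each_cpu(i, sched_domain_span(sd)) {
                        if (!idle_cpu(i)) {
                                target = i;
                                goto unlock;
                        }
                }
        }
unlock:
        rcu_read_unlock();
        return target;
}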
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--    kernel/sched.c    14
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 0cfe0310ed5d..27d3e73a2af6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1208,11 +1208,17 @@ int get_nohz_timer_target(void)
         int i;
         struct sched_domain *sd;
 
+        rcu_read_lock();
         for_each_domain(cpu, sd) {
-                for_each_cpu(i, sched_domain_span(sd))
-                        if (!idle_cpu(i))
-                                return i;
+                for_each_cpu(i, sched_domain_span(sd)) {
+                        if (!idle_cpu(i)) {
+                                cpu = i;
+                                goto unlock;
+                        }
+                }
         }
+unlock:
+        rcu_read_unlock();
         return cpu;
 }
 /*
@@ -2415,12 +2421,14 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
                 struct sched_domain *sd;
 
                 schedstat_inc(p, se.statistics.nr_wakeups_remote);
+                rcu_read_lock();
                 for_each_domain(this_cpu, sd) {
                         if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
                                 schedstat_inc(sd, ttwu_wake_remote);
                                 break;
                         }
                 }
+                rcu_read_unlock();
         }
 
 #endif /* CONFIG_SMP */
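Why a preemption-disabled region stopped being sufficient, sketched from the updater side (free_sd_rcu() and retire_domain() are illustrative names, not code from this patch; the rcu_head member and the call_rcu() usage follow the dce840a08702b scheme referenced in the changelog, as understood here):

/*
 * The old domain tree is retired with call_rcu(), so only a plain-RCU grace
 * period separates unlinking a sched_domain from freeing it.  call_rcu()
 * waits for rcu_read_lock() readers; it does not wait for code that merely
 * has preemption disabled (that is what call_rcu_sched() would guarantee),
 * which is why the iterations above must take rcu_read_lock().
 */
static void free_sd_rcu(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct sched_domain, rcu));
}

static void retire_domain(struct sched_domain *old_sd)
{
        call_rcu(&old_sd->rcu, free_sd_rcu);
}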