author	Anton Blanchard <anton@samba.org>	2006-03-23 02:59:20 -0800
committer	Greg Kroah-Hartman <gregkh@suse.de>	2006-03-27 22:47:31 -0800
commit	4ceb2fc75f15f1a8d4d791e4398b2e06f4f34f47 (patch)
tree	81b5f1a2cf582325903b01c565959b12eee03cf3 /kernel
parent	375dcda41ce22c756ae9535c133875495c859be3 (diff)
[PATCH] fix scheduler deadlock
We have noticed lockups during boot when stress testing kexec on ppc64.
Two cpus would deadlock in scheduler code trying to grab already taken
spinlocks.

The double_rq_lock code uses the address of the runqueue to order the
taking of multiple locks. This address is a per cpu variable:

	if (rq1 < rq2) {
		spin_lock(&rq1->lock);
		spin_lock(&rq2->lock);
	} else {
		spin_lock(&rq2->lock);
		spin_lock(&rq1->lock);
	}

On the other hand, the code in wake_sleeping_dependent uses the cpu id
order to grab locks:

	for_each_cpu_mask(i, sibling_map)
		spin_lock(&cpu_rq(i)->lock);

This means we rely on the address of per cpu data increasing as cpu ids
increase. While this will be true for the generic percpu implementation,
it may not be true for arch specific implementations.

One way to solve this is to always take runqueues in cpu id order. To do
this we add a cpu variable to the runqueue and check it in the double
runqueue locking functions.

Signed-off-by: Anton Blanchard <anton@samba.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
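For intuition, here is a minimal user-space sketch of the discipline the
patch adopts: every path that takes two runqueue locks agrees on one
global order (a stable cpu id), so the ABBA interleaving cannot occur.
This is illustrative only; struct rq, lock_pair() and worker() are
hypothetical names, not the kernel's API.

	/* Minimal sketch, not kernel code: two threads each need both
	 * locks, requested in opposite argument order.  Acquiring them
	 * by a stable id (mirroring the patch's rq->cpu comparison)
	 * prevents the ABBA deadlock. */
	#include <pthread.h>
	#include <stdio.h>

	struct rq {
		pthread_mutex_t lock;
		int cpu;	/* stable id used for lock ordering */
	};

	static struct rq rqs[2] = {
		{ PTHREAD_MUTEX_INITIALIZER, 0 },
		{ PTHREAD_MUTEX_INITIALIZER, 1 },
	};

	/* Take both locks in cpu-id order, whatever order the caller
	 * passed them in. */
	static void lock_pair(struct rq *a, struct rq *b)
	{
		if (a->cpu < b->cpu) {
			pthread_mutex_lock(&a->lock);
			pthread_mutex_lock(&b->lock);
		} else {
			pthread_mutex_lock(&b->lock);
			pthread_mutex_lock(&a->lock);
		}
	}

	static void unlock_pair(struct rq *a, struct rq *b)
	{
		pthread_mutex_unlock(&a->lock);
		pthread_mutex_unlock(&b->lock);
	}

	static void *worker(void *arg)
	{
		int self = *(int *)arg;

		/* Each thread names the locks in a different order; the
		 * rule inside lock_pair() makes that safe. */
		lock_pair(&rqs[self], &rqs[1 - self]);
		printf("thread %d holds both locks\n", self);
		unlock_pair(&rqs[self], &rqs[1 - self]);
		return NULL;
	}

	int main(void)
	{
		pthread_t t[2];
		int ids[2] = { 0, 1 };
		int i;

		for (i = 0; i < 2; i++)
			pthread_create(&t[i], NULL, worker, &ids[i]);
		for (i = 0; i < 2; i++)
			pthread_join(t[i], NULL);
		return 0;
	}

Compile with -lpthread; if the ordering rule inside lock_pair() is
removed (always lock a then b), the same two threads can deadlock.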
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 4d46e90f59c3..4e7efac7b1ec 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -237,6 +237,7 @@ struct runqueue {
 	task_t *migration_thread;
 	struct list_head migration_queue;
+	int cpu;
 #endif
 #ifdef CONFIG_SCHEDSTATS
@@ -1660,6 +1661,9 @@ unsigned long nr_iowait(void)
 /*
  * double_rq_lock - safely lock two runqueues
  *
+ * We must take them in cpu order to match code in
+ * dependent_sleeper and wake_dependent_sleeper.
+ *
  * Note this does not disable interrupts like task_rq_lock,
  * you need to do so manually before calling.
  */
@@ -1671,7 +1675,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
 		spin_lock(&rq1->lock);
 		__acquire(rq2->lock);	/* Fake it out ;) */
 	} else {
-		if (rq1 < rq2) {
+		if (rq1->cpu < rq2->cpu) {
 			spin_lock(&rq1->lock);
 			spin_lock(&rq2->lock);
 		} else {
@@ -1707,7 +1711,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
 	__acquires(this_rq->lock)
 {
 	if (unlikely(!spin_trylock(&busiest->lock))) {
-		if (busiest < this_rq) {
+		if (busiest->cpu < this_rq->cpu) {
 			spin_unlock(&this_rq->lock);
 			spin_lock(&busiest->lock);
 			spin_lock(&this_rq->lock);
@@ -6035,6 +6039,7 @@ void __init sched_init(void)
 		rq->push_cpu = 0;
 		rq->migration_thread = NULL;
 		INIT_LIST_HEAD(&rq->migration_queue);
+		rq->cpu = i;
 #endif
 		atomic_set(&rq->nr_iowait, 0);
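As a closing illustration of why the old pointer comparison was fragile:
nothing requires per cpu areas to be laid out in ascending cpu order, so
on an arch whose allocator places them differently, address order and
cpu-id order disagree. A contrived sketch, purely illustrative and not
kernel code:

	#include <stdio.h>

	struct runqueue { int cpu; };

	/* Pretend the arch's per cpu allocator placed cpu 1's runqueue
	 * at a LOWER address than cpu 0's -- nothing forbids that. */
	static struct runqueue area[2];

	int main(void)
	{
		struct runqueue *rq_cpu0 = &area[1];	/* higher address */
		struct runqueue *rq_cpu1 = &area[0];	/* lower address  */

		rq_cpu0->cpu = 0;
		rq_cpu1->cpu = 1;

		/* The old double_rq_lock ordered by address (cpu 1 first
		 * here); wake_sleeping_dependent ordered by cpu id (cpu 0
		 * first).  Two concurrent paths using different orders is
		 * the classic ABBA deadlock. */
		printf("address order would lock cpu %d first\n",
		       rq_cpu0 < rq_cpu1 ? 0 : 1);
		printf("cpu-id order locks cpu 0 first\n");
		return 0;
	}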