author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-04-05 17:23:50 +0200
committer  Ingo Molnar <mingo@elte.hu>                2011-04-14 08:52:37 +0200
commit     2acca55ed98ad9b9aa25e7e587ebe306c0313dc7 (patch)
tree       b06fcc57dc284ccf8dcbda90370fd08250887421 /kernel/sched.c
parent     a8e4f2eaecc9bfa4954adf79a04f4f22fddd829c (diff)
sched: Also serialize ttwu_local() with p->pi_lock

Since we now serialize ttwu() using p->pi_lock, we also need to
serialize ttwu_local() using that; otherwise, once we drop the
rq->lock from ttwu(), it can race with ttwu_local().

Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110405152729.192366907@chello.nl
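
The interesting detail is the locking order: try_to_wake_up_local() runs with
rq->lock already held, but the established ordering takes p->pi_lock before
rq->lock, so the patch cannot simply acquire pi_lock directly. Below is a
minimal userspace sketch of the trylock-and-back-off pattern the hunk uses,
with pthread mutexes standing in for the kernel's raw spinlocks; the function
lock_pi_under_rq() and the lock names are illustrative, not kernel API.

#include <pthread.h>

static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER; /* outer lock */
static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER; /* inner lock */

/*
 * Called with rq_lock held.  Taking pi_lock directly here could
 * deadlock against a thread that holds pi_lock and is waiting for
 * rq_lock, so try it opportunistically first; on failure, back out
 * of rq_lock and reacquire both in the documented order.
 */
static void lock_pi_under_rq(void)
{
        if (pthread_mutex_trylock(&pi_lock) != 0) {
                pthread_mutex_unlock(&rq_lock);
                pthread_mutex_lock(&pi_lock);
                pthread_mutex_lock(&rq_lock);
        }
        /* Both locks are now held, acquired deadlock-free. */
}

int main(void)
{
        pthread_mutex_lock(&rq_lock);
        lock_pi_under_rq();
        /* ... critical section under both locks ... */
        pthread_mutex_unlock(&rq_lock);
        pthread_mutex_unlock(&pi_lock);
        return 0;
}

Note that rq_lock may be dropped and retaken inside the helper, so any state
observed under it beforehand must be revalidated afterwards; that is
consistent with the patch testing p->state only after the trylock dance.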
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 31 +++++++++++++++++++------------
 1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index fd32b78c123c..6b269b79c52c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2566,9 +2566,9 @@ out:
* try_to_wake_up_local - try to wake up a local task with rq lock held
* @p: the thread to be awakened
*
- * Put @p on the run-queue if it's not already there. The caller must
+ * Put @p on the run-queue if it's not already there. The caller must
* ensure that this_rq() is locked, @p is bound to this_rq() and not
- * the current task. this_rq() stays locked over invocation.
+ * the current task.
*/
static void try_to_wake_up_local(struct task_struct *p)
{
@@ -2578,14 +2578,22 @@ static void try_to_wake_up_local(struct task_struct *p)
BUG_ON(p == current);
lockdep_assert_held(&rq->lock);
+ if (!raw_spin_trylock(&p->pi_lock)) {
+ raw_spin_unlock(&rq->lock);
+ raw_spin_lock(&p->pi_lock);
+ raw_spin_lock(&rq->lock);
+ }
+
if (!(p->state & TASK_NORMAL))
- return;
+ goto out;
if (!p->on_rq)
ttwu_activate(rq, p, ENQUEUE_WAKEUP);
ttwu_post_activation(p, rq, 0);
ttwu_stat(rq, p, smp_processor_id(), 0);
+out:
+ raw_spin_unlock(&p->pi_lock);
}
/**
@@ -4114,11 +4122,13 @@ need_resched:
if (unlikely(signal_pending_state(prev->state, prev))) {
prev->state = TASK_RUNNING;
} else {
+ deactivate_task(rq, prev, DEQUEUE_SLEEP);
+ prev->on_rq = 0;
+
/*
- * If a worker is going to sleep, notify and
- * ask workqueue whether it wants to wake up a
- * task to maintain concurrency. If so, wake
- * up the task.
+ * If a worker went to sleep, notify and ask workqueue
+ * whether it wants to wake up a task to maintain
+ * concurrency.
*/
if (prev->flags & PF_WQ_WORKER) {
struct task_struct *to_wakeup;
@@ -4128,12 +4138,9 @@ need_resched:
try_to_wake_up_local(to_wakeup);
}
- deactivate_task(rq, prev, DEQUEUE_SLEEP);
- prev->on_rq = 0;
-
/*
- * If we are going to sleep and we have plugged IO queued, make
- * sure to submit it to avoid deadlocks.
+ * If we are going to sleep and we have plugged IO
+ * queued, make sure to submit it to avoid deadlocks.
*/
if (blk_needs_flush_plug(prev)) {
raw_spin_unlock(&rq->lock);