author    Steven Rostedt <rostedt@goodmis.org>	2012-03-02 10:36:57 -0500
committer Clark Williams <williams@redhat.com>	2012-03-02 11:52:34 -0600
commit    908b82659598cb1e96abc5f28539a66dfd427643 (patch)
tree      d5f8298b73468444b1d50cedf6e9cf592ae1c0c9
parent    cc605a8b6bdbbf4b9b54ced270126112cac375dc (diff)
cpu: Make hotplug.lock a "sleeping" spinlock on RT
Tasks can block on hotplug.lock in pin_current_cpu(), but their state might
be != RUNNING. So the mutex wakeup will set the state unconditionally to
RUNNING. That might cause spurious unexpected wakeups. We could provide a
state preserving mutex_lock() function, but this is semantically backwards.
So instead we convert the hotplug.lock() to a spinlock for RT, which has
the state preserving semantics already.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Carsten Emde <C.Emde@osadl.org>
Cc: John Kacur <jkacur@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <clark.williams@gmail.com>
Cc: stable-rt@vger.kernel.org
Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 kernel/cpu.c | 31 ++++++++++++++++++++++---------
 1 file changed, 22 insertions(+), 9 deletions(-)
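Why the lock's wakeup semantics matter: a task may already have set a
non-RUNNING state before a path ends up blocking on hotplug.lock (e.g. via
pin_current_cpu()). Being woken out of mutex_lock() leaves it TASK_RUNNING,
while the RT "sleeping" spinlock restores the saved state. A minimal sketch
of that difference follows; it is illustrative only, not part of the patch,
and caller_that_waits() is a hypothetical caller, not a kernel function.

/*
 * Illustrative sketch only -- not part of the patch.  Shows why the
 * caller's task state matters when a path blocks on hotplug.lock.
 */
static void caller_that_waits(void)
{
	/* The caller prepares to sleep for its own reason. */
	set_current_state(TASK_UNINTERRUPTIBLE);

	/* ... a path that ends up in pin_current_cpu() ... */
	hotplug_lock();		/* may block while a hotplug writer holds it */
	hotplug_unlock();

	/*
	 * With a mutex: being woken inside mutex_lock() left the task
	 * in TASK_RUNNING, so this schedule() returns immediately --
	 * a spurious wakeup for the wait prepared above.
	 *
	 * With the RT "sleeping" spinlock: the lock saves and restores
	 * the task state, so the task is still TASK_UNINTERRUPTIBLE
	 * here and schedule() sleeps until the intended wakeup.
	 */
	schedule();
}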
diff --git a/kernel/cpu.c b/kernel/cpu.c
index fa4083478d75..c25b5ffb63e2 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -46,7 +46,12 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /* Makes the lock keep the task's state */
+ spinlock_t lock;
+#else
struct mutex lock; /* Synchronizes accesses to refcount, */
+#endif
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
@@ -58,6 +63,14 @@ static struct {
.refcount = 0,
};
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define hotplug_lock() spin_lock(&cpu_hotplug.lock)
+# define hotplug_unlock() spin_unlock(&cpu_hotplug.lock)
+#else
+# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
+# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
+#endif
+
struct hotplug_pcp {
struct task_struct *unplug;
int refcount;
@@ -87,8 +100,8 @@ retry:
return;
}
preempt_enable();
- mutex_lock(&cpu_hotplug.lock);
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_lock();
+ hotplug_unlock();
preempt_disable();
goto retry;
}
@@ -161,9 +174,9 @@ void get_online_cpus(void)
might_sleep();
if (cpu_hotplug.active_writer == current)
return;
- mutex_lock(&cpu_hotplug.lock);
+ hotplug_lock();
cpu_hotplug.refcount++;
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
}
EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -172,10 +185,10 @@ void put_online_cpus(void)
{
if (cpu_hotplug.active_writer == current)
return;
- mutex_lock(&cpu_hotplug.lock);
+ hotplug_lock();
if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
wake_up_process(cpu_hotplug.active_writer);
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
@@ -207,11 +220,11 @@ static void cpu_hotplug_begin(void)
cpu_hotplug.active_writer = current;
for (;;) {
- mutex_lock(&cpu_hotplug.lock);
+ hotplug_lock();
if (likely(!cpu_hotplug.refcount))
break;
__set_current_state(TASK_UNINTERRUPTIBLE);
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
schedule();
}
}
@@ -219,7 +232,7 @@ static void cpu_hotplug_begin(void)
static void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
}
#else /* #if CONFIG_HOTPLUG_CPU */