Diffstat (limited to 'kernel/stop_machine.c')
-rw-r--r--  kernel/stop_machine.c | 79
1 file changed, 57 insertions(+), 22 deletions(-)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 2f194e965715..561ba3a7b8cf 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -29,12 +29,12 @@ struct cpu_stop_done {
atomic_t nr_todo; /* nr left to execute */
bool executed; /* actually executed? */
int ret; /* collected return value */
- struct completion completion; /* fired if nr_todo reaches 0 */
+ struct task_struct *waiter; /* woken when nr_todo reaches 0 */
};
/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
- spinlock_t lock;
+ raw_spinlock_t lock;
bool enabled; /* is this stopper enabled? */
struct list_head works; /* list of pending works */
struct task_struct *thread; /* stopper thread */
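The lock conversion matters because on PREEMPT_RT a plain spinlock_t becomes a sleeping rtmutex-based lock, while the stopper machinery runs in contexts that must never sleep; raw_spinlock_t preserves the traditional spin-with-irqs-off behaviour. A minimal usage sketch under that assumption (the lock and helper names are illustrative, not part of the patch):

    static DEFINE_RAW_SPINLOCK(example_lock);   /* hypothetical lock */

    static void touch_stopper_state(void)       /* hypothetical helper */
    {
        unsigned long flags;

        /* spins even on PREEMPT_RT; never schedules */
        raw_spin_lock_irqsave(&example_lock, flags);
        /* ... manipulate state shared with a stopper thread ... */
        raw_spin_unlock_irqrestore(&example_lock, flags);
    }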
@@ -47,7 +47,7 @@ static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
memset(done, 0, sizeof(*done));
atomic_set(&done->nr_todo, nr_todo);
- init_completion(&done->completion);
+ done->waiter = current;
}
/* signal completion unless @done is NULL */
@@ -56,8 +56,10 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
if (done) {
if (executed)
done->executed = true;
- if (atomic_dec_and_test(&done->nr_todo))
- complete(&done->completion);
+ if (atomic_dec_and_test(&done->nr_todo)) {
+ wake_up_process(done->waiter);
+ done->waiter = NULL;
+ }
}
}
@@ -67,7 +69,7 @@ static void cpu_stop_queue_work(struct cpu_stopper *stopper,
{
unsigned long flags;
- spin_lock_irqsave(&stopper->lock, flags);
+ raw_spin_lock_irqsave(&stopper->lock, flags);
if (stopper->enabled) {
list_add_tail(&work->list, &stopper->works);
@@ -75,7 +77,23 @@ static void cpu_stop_queue_work(struct cpu_stopper *stopper,
} else
cpu_stop_signal_done(work->done, false);
- spin_unlock_irqrestore(&stopper->lock, flags);
+ raw_spin_unlock_irqrestore(&stopper->lock, flags);
+}
+
+static void wait_for_stop_done(struct cpu_stop_done *done)
+{
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ while (atomic_read(&done->nr_todo)) {
+ schedule();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ }
+ /*
+ * We need to wait until cpu_stop_signal_done() has cleared
+ * done->waiter.
+ */
+ while (done->waiter)
+ cpu_relax();
+ set_current_state(TASK_RUNNING);
}
/**
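wait_for_stop_done() above replaces wait_for_completion() with the classic lost-wakeup-safe sleep loop: the waiter marks itself TASK_UNINTERRUPTIBLE before testing the condition, so a wake_up_process() that slips in between simply makes the following schedule() return immediately instead of being lost. The final spin on done->waiter is needed because 'done' normally lives on the waiter's stack and must stay valid until cpu_stop_signal_done() has finished touching it. A condensed sketch of the two sides of the handshake, paraphrased from the patch:

    /* waiter side, cf. wait_for_stop_done() */
    set_current_state(TASK_UNINTERRUPTIBLE);    /* mark sleeping first ... */
    while (atomic_read(&done->nr_todo)) {       /* ... then test the condition */
        schedule();
        set_current_state(TASK_UNINTERRUPTIBLE);
    }

    /* waker side, cf. cpu_stop_signal_done() */
    if (atomic_dec_and_test(&done->nr_todo)) {
        wake_up_process(done->waiter);  /* wakes, or cancels the sleep mark */
        done->waiter = NULL;            /* releases the waiter's final spin */
    }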
@@ -109,7 +127,7 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
cpu_stop_init_done(&done, 1);
cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
- wait_for_completion(&done.completion);
+ wait_for_stop_done(&done);
return done.executed ? done.ret : -ENOENT;
}
@@ -135,6 +153,7 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
+static DEFINE_MUTEX(stopper_lock);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
static void queue_stop_cpus_work(const struct cpumask *cpumask,
@@ -153,15 +172,14 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,
}
/*
- * Disable preemption while queueing to avoid getting
- * preempted by a stopper which might wait for other stoppers
- * to enter @fn which can lead to deadlock.
+ * Make sure that all work is queued on all cpus before any
+ * of the cpus starts executing it.
*/
- preempt_disable();
+ mutex_lock(&stopper_lock);
for_each_cpu(cpu, cpumask)
cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
&per_cpu(stop_cpus_work, cpu));
- preempt_enable();
+ mutex_unlock(&stopper_lock);
}
static int __stop_cpus(const struct cpumask *cpumask,
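Together with the hunk in the stopper thread further down, stopper_lock turns the queueing phase into a barrier: the initiator holds the mutex while it queues work on every cpu, and each stopper thread acquires and immediately releases the same mutex before invoking its callback. A stopper that gets going early therefore blocks until every cpu has its work queued, giving the same no-deadlock guarantee the old preempt_disable() section provided while keeping the code preemptible, which RT requires. Condensed sketch of the ordering, paraphrased from the two hunks:

    /* initiator: queue everywhere while holding the mutex */
    mutex_lock(&stopper_lock);
    for_each_cpu(cpu, cpumask)
        cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
                            &per_cpu(stop_cpus_work, cpu));
    mutex_unlock(&stopper_lock);

    /* each stopper thread, before running its callback fn() */
    mutex_lock(&stopper_lock);      /* blocks until queueing is complete */
    mutex_unlock(&stopper_lock);    /* and lets the other stoppers proceed */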
@@ -171,7 +189,7 @@ static int __stop_cpus(const struct cpumask *cpumask,
cpu_stop_init_done(&done, cpumask_weight(cpumask));
queue_stop_cpus_work(cpumask, fn, arg, &done);
- wait_for_completion(&done.completion);
+ wait_for_stop_done(&done);
return done.executed ? done.ret : -ENOENT;
}
@@ -259,13 +277,13 @@ repeat:
}
work = NULL;
- spin_lock_irq(&stopper->lock);
+ raw_spin_lock_irq(&stopper->lock);
if (!list_empty(&stopper->works)) {
work = list_first_entry(&stopper->works,
struct cpu_stop_work, list);
list_del_init(&work->list);
}
- spin_unlock_irq(&stopper->lock);
+ raw_spin_unlock_irq(&stopper->lock);
if (work) {
cpu_stop_fn_t fn = work->fn;
@@ -275,6 +293,16 @@ repeat:
__set_current_state(TASK_RUNNING);
+ /*
+ * Wait until the work has been queued on all cpus:
+ * stopper_lock is held by queue_stop_cpus_work() for
+ * the duration of the queueing.
+ */
+ mutex_lock(&stopper_lock);
+ /*
+ * Let the other stopper threads continue as well.
+ */
+ mutex_unlock(&stopper_lock);
+
/* cpu stop callbacks are not allowed to sleep */
preempt_disable();
@@ -289,7 +317,13 @@ repeat:
kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
ksym_buf), arg);
+ /*
+ * Make sure that the wakeup and the setting of
+ * done->waiter to NULL happen atomically.
+ */
+ local_irq_disable();
cpu_stop_signal_done(done, true);
+ local_irq_enable();
} else
schedule();
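The irq-off section here bounds the window between the wakeup and the clearing of done->waiter: once woken, the initiator busy-spins on done->waiter before letting the on-stack cpu_stop_done go out of scope, so the store should follow the wakeup without interruption. A sketch of the pairing (assuming the waiter-spin reading of wait_for_stop_done() above):

    /* waker (irqs off)                     waiter
     * ----------------                     ------
     * wake_up_process(done->waiter);       wakes, sees nr_todo == 0
     * done->waiter = NULL;                 while (done->waiter)
     *                                          cpu_relax();   <- exits here
     * local_irq_enable();                  'done' may now go out of scope
     */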
@@ -317,6 +351,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
if (IS_ERR(p))
return notifier_from_errno(PTR_ERR(p));
get_task_struct(p);
+ p->flags |= PF_STOMPER;
kthread_bind(p, cpu);
sched_set_stop_task(cpu, p);
stopper->thread = p;
@@ -326,9 +361,9 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
/* strictly unnecessary, as first user will wake it */
wake_up_process(stopper->thread);
/* mark enabled */
- spin_lock_irq(&stopper->lock);
+ raw_spin_lock_irq(&stopper->lock);
stopper->enabled = true;
- spin_unlock_irq(&stopper->lock);
+ raw_spin_unlock_irq(&stopper->lock);
break;
#ifdef CONFIG_HOTPLUG_CPU
@@ -341,11 +376,11 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
/* kill the stopper */
kthread_stop(stopper->thread);
/* drain remaining works */
- spin_lock_irq(&stopper->lock);
+ raw_spin_lock_irq(&stopper->lock);
list_for_each_entry(work, &stopper->works, list)
cpu_stop_signal_done(work->done, false);
stopper->enabled = false;
- spin_unlock_irq(&stopper->lock);
+ raw_spin_unlock_irq(&stopper->lock);
/* release the stopper */
put_task_struct(stopper->thread);
stopper->thread = NULL;
@@ -376,7 +411,7 @@ static int __init cpu_stop_init(void)
for_each_possible_cpu(cpu) {
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
- spin_lock_init(&stopper->lock);
+ raw_spin_lock_init(&stopper->lock);
INIT_LIST_HEAD(&stopper->works);
}
@@ -570,7 +605,7 @@ int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
ret = stop_machine_cpu_stop(&smdata);
/* Busy wait for completion. */
- while (!completion_done(&done.completion))
+ while (atomic_read(&done.nr_todo))
cpu_relax();
mutex_unlock(&stop_cpus_mutex);
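Since struct cpu_stop_done no longer contains a completion, completion_done() is gone too; stop_machine_from_inactive_cpu() runs on a cpu that cannot schedule, so it polls nr_todo directly. A hedged sketch of that poll as a helper (hypothetical name, not in the patch):

    /* Hypothetical spin variant of wait_for_stop_done() for contexts
     * that must not sleep, e.g. a not-yet-active cpu during resume. */
    static void spin_for_stop_done(struct cpu_stop_done *done)
    {
        while (atomic_read(&done->nr_todo))
            cpu_relax();    /* nr_todo reaches 0 in cpu_stop_signal_done() */
    }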