author     Thomas Gleixner <tglx@linutronix.de>    2011-06-16 13:26:08 +0200
committer  Clark Williams <williams@redhat.com>    2012-02-15 10:32:49 -0600
commit     3da7d6bb0414b1eeec63cfb6f8721edf70e3ab96 (patch)
tree       fe0ca74cdccfd1a1d7b805c3cbcb1812e730e157 /kernel
parent     103215078a333a3d51bf46d1396f62f5d3affbce (diff)
sched-migrate-disable.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c | 88
1 file changed, 83 insertions(+), 5 deletions(-)
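Note (not part of the patch): migrate_disable() and migrate_enable() let a preemptible task pin itself to its current CPU, and the calls nest through the per-task migrate_disable counter, so only the outermost migrate_enable() drops the pin. Below is a minimal usage sketch, assuming an -rt tree where the helpers are declared in linux/sched.h and usable from modular code (as the EXPORT_SYMBOL_GPL lines suggest); the per-CPU variable and function here are hypothetical, for illustration only.

/* Hypothetical caller, not part of kernel/sched.c: keep two per-CPU
 * accesses on the same CPU without disabling preemption. */
#include <linux/percpu.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(unsigned long, example_count);

static void example_touch_this_cpu(void)
{
	migrate_disable();		/* stay on this CPU; still preemptible */
	this_cpu_inc(example_count);	/* both increments hit the same ... */
	this_cpu_inc(example_count);	/* ... CPU's counter, since we cannot migrate */
	migrate_enable();		/* outermost call unpins the CPU */
}

Because preemption stays enabled between the two calls, this is cheaper under PREEMPT_RT than wrapping the same region in preempt_disable()/preempt_enable(), at the cost of only preventing migration, not preemption.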
diff --git a/kernel/sched.c b/kernel/sched.c
index a5a4ddc7ff7d..8ceb0af4d11e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6224,11 +6224,12 @@ static inline void sched_init_granularity(void)
 #ifdef CONFIG_SMP
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
-	if (p->sched_class && p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, new_mask);
-
+	if (!p->migrate_disable) {
+		if (p->sched_class && p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, new_mask);
+		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+	}
 	cpumask_copy(&p->cpus_allowed, new_mask);
-	p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
 /*
@@ -6279,7 +6280,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	do_set_cpus_allowed(p, new_mask);
 
 	/* Can the task run on the task's current CPU? If so, we're done */
-	if (cpumask_test_cpu(task_cpu(p), new_mask))
+	if (cpumask_test_cpu(task_cpu(p), new_mask) || p->migrate_disable)
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
@@ -6298,6 +6299,83 @@ out:
 }
 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
+void migrate_disable(void)
+{
+	struct task_struct *p = current;
+	const struct cpumask *mask;
+	unsigned long flags;
+	struct rq *rq;
+
+	preempt_disable();
+	if (p->migrate_disable) {
+		p->migrate_disable++;
+		preempt_enable();
+		return;
+	}
+
+	pin_current_cpu();
+	if (unlikely(!scheduler_running)) {
+		p->migrate_disable = 1;
+		preempt_enable();
+		return;
+	}
+	rq = task_rq_lock(p, &flags);
+	p->migrate_disable = 1;
+	mask = tsk_cpus_allowed(p);
+
+	WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+
+	if (!cpumask_equal(&p->cpus_allowed, mask)) {
+		if (p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, mask);
+		p->rt.nr_cpus_allowed = cpumask_weight(mask);
+	}
+	task_rq_unlock(rq, p, &flags);
+	preempt_enable();
+}
+EXPORT_SYMBOL_GPL(migrate_disable);
+
+void migrate_enable(void)
+{
+	struct task_struct *p = current;
+	const struct cpumask *mask;
+	unsigned long flags;
+	struct rq *rq;
+
+	WARN_ON_ONCE(p->migrate_disable <= 0);
+
+	preempt_disable();
+	if (p->migrate_disable > 1) {
+		p->migrate_disable--;
+		preempt_enable();
+		return;
+	}
+
+	if (unlikely(!scheduler_running)) {
+		p->migrate_disable = 0;
+		unpin_current_cpu();
+		preempt_enable();
+		return;
+	}
+
+	rq = task_rq_lock(p, &flags);
+	p->migrate_disable = 0;
+	mask = tsk_cpus_allowed(p);
+
+	WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+
+	if (!cpumask_equal(&p->cpus_allowed, mask)) {
+		if (p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, mask);
+		p->rt.nr_cpus_allowed = cpumask_weight(mask);
+	}
+
+	task_rq_unlock(rq, p, &flags);
+	unpin_current_cpu();
+	preempt_enable();
+}
+EXPORT_SYMBOL_GPL(migrate_enable);
+
 /*
  * Move (not current) task off this cpu, onto dest cpu. We're doing
  * this because either it can't run here any more (set_cpus_allowed()