author     Thomas Gleixner <tglx@linutronix.de>      2011-06-06 12:20:33 +0200
committer  Clark Williams <williams@redhat.com>      2011-12-28 16:25:36 -0600
commit     ed0a5b1f42e162b61cdaf01c825219023c0da406 (patch)
tree       4c1c0a95db4d04c1135e8983d6abba392a0191b2
parent     03a3ef2a7ae0dc733a4a8273c6cdb1f72de62da5 (diff)
sched-mmdrop-delayed.patch
Needs thread context (pgd_lock) -> ifdeffed. Workqueues won't work with RT.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  include/linux/mm_types.h    4
-rw-r--r--  include/linux/sched.h      12
-rw-r--r--  kernel/fork.c              15
-rw-r--r--  kernel/sched.c             21
4 files changed, 49 insertions, 3 deletions
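In short: __mmdrop() needs thread context (it takes pgd_lock), but the final mmdrop() in finish_task_switch() happens at the atomic tail of a context switch, and on RT punting the work to a workqueue is not an option either. The patch therefore defers the last mm_count drop to an RCU callback when CONFIG_PREEMPT_RT_BASE is set. A condensed sketch of the pattern, assembled from the hunks below (not a standalone program; all identifiers are the ones the patch adds):

#ifdef CONFIG_PREEMPT_RT_BASE
/* The final __mmdrop() runs from the RCU callback, i.e. in thread
 * context on RT (what pgd_lock needs), instead of from the atomic
 * end of a context switch. */
void __mmdrop_delayed(struct rcu_head *rhp)
{
	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);

	__mmdrop(mm);
}

static inline void mmdrop_delayed(struct mm_struct *mm)
{
	/* Last reference: hand the teardown to RCU instead of doing it here. */
	if (atomic_dec_and_test(&mm->mm_count))
		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
#else
/* Without RT, the immediate mmdrop() stays as-is. */
# define mmdrop_delayed(mm)	mmdrop(mm)
#endif

finish_task_switch() then calls mmdrop_delayed() instead of mmdrop(), and idle_task_exit() (where on RT neither variant may be called) parks the mm in the new per-cpu idle_last_mm pointer so that a surviving cpu can mmdrop() it from the CPU_DEAD notifier.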
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 1ec126f23eaa..c303a27981fe 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -12,6 +12,7 @@
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-debug-flags.h>
+#include <linux/rcupdate.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -393,6 +394,9 @@ struct mm_struct {
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;
#endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+	struct rcu_head delayed_drop;
+#endif
};
static inline void mm_init_cpumask(struct mm_struct *mm)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bf637e27d129..866ddae3a98d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2264,12 +2264,24 @@ extern struct mm_struct * mm_alloc(void);
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
+
static inline void mmdrop(struct mm_struct * mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
+#ifdef CONFIG_PREEMPT_RT_BASE
+extern void __mmdrop_delayed(struct rcu_head *rhp);
+static inline void mmdrop_delayed(struct mm_struct *mm)
+{
+	if (atomic_dec_and_test(&mm->mm_count))
+		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
+}
+#else
+# define mmdrop_delayed(mm) mmdrop(mm)
+#endif
+
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
diff --git a/kernel/fork.c b/kernel/fork.c
index 6679093b31e2..56188b0c9d21 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -201,7 +201,7 @@ EXPORT_SYMBOL_GPL(__put_task_struct);
#else
void __put_task_struct_cb(struct rcu_head *rhp)
{
-	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
+	struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
	__put_task_struct(tsk);
@@ -552,6 +552,19 @@ void __mmdrop(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(__mmdrop);
+#ifdef CONFIG_PREEMPT_RT_BASE
+/*
+ * RCU callback for delayed mm drop. Not strictly rcu, but we don't
+ * want another facility to make this work.
+ */
+void __mmdrop_delayed(struct rcu_head *rhp)
+{
+	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
+
+	__mmdrop(mm);
+}
+#endif
+
/*
 * Decrement the use count and release all resources for an mm.
 */
diff --git a/kernel/sched.c b/kernel/sched.c
index 6e2bf9e29ca5..3ff2d0f1fcfe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3174,8 +3174,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
	finish_lock_switch(rq, prev);
	fire_sched_in_preempt_notifiers(current);
+	/*
+	 * We use mmdrop_delayed() here so we don't have to do the
+	 * full __mmdrop() when we are the last user.
+	 */
	if (mm)
-		mmdrop(mm);
+		mmdrop_delayed(mm);
	if (unlikely(prev_state == TASK_DEAD)) {
		/*
		 * Remove function-return probe instances associated with this
@@ -6303,6 +6307,8 @@ static int migration_cpu_stop(void *data)
#ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
+
/*
 * Ensures that the idle task is using init_mm right before its cpu goes
 * offline.
@@ -6315,7 +6321,12 @@ void idle_task_exit(void)
	if (mm != &init_mm)
		switch_mm(mm, &init_mm, current);
-	mmdrop(mm);
+
+	/*
+	 * Defer the cleanup to an alive cpu. On RT we can neither
+	 * call mmdrop() nor mmdrop_delayed() from here.
+	 */
+	per_cpu(idle_last_mm, smp_processor_id()) = mm;
}
/*
@@ -6660,6 +6671,12 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
		migrate_nr_uninterruptible(rq);
		calc_global_load_remove(rq);
		break;
+	case CPU_DEAD:
+		if (per_cpu(idle_last_mm, cpu)) {
+			mmdrop(per_cpu(idle_last_mm, cpu));
+			per_cpu(idle_last_mm, cpu) = NULL;
+		}
+		break;
#endif
}