author     Peter Zijlstra <peterz@infradead.org>   2009-07-03 08:44:54 -0500
committer  Clark Williams <williams@redhat.com>    2012-02-15 10:32:43 -0600
commit     d3a2559cc7b47c284756097b0068390c5af34e77 (patch)
tree       7806abd4d45a4c5921448198a23d14e3a6e172bd /mm
parent     29b152b9f3e7d4ac70925d371969c9a80541e3b8 (diff)
mm: shrink the page frame to !-rt size
The below is a boot-tested hack to shrink the page frame size back to
normal. Should be a net win, since there should be many fewer PTE pages
than page frames.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
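As an illustration of the size argument above, here is a minimal userspace sketch (not part of this patch; the struct layouts and the 48-byte lock are made-up stand-ins for an RT spinlock_t): embedding the split-PTL lock costs every page frame, while a pointer costs only the comparatively few pages that actually back PTE tables.

/*
 * Illustrative userspace sketch, not kernel code: why moving the
 * split-PTL lock out of line shrinks the page frame.  The sizes are
 * invented stand-ins; on PREEMPT_RT_FULL a spinlock_t is a sleeping
 * lock and considerably larger than a raw lock.
 */
#include <stdio.h>

struct fake_rt_lock { char blob[48]; };          /* stand-in for an RT spinlock_t */

struct page_embedded { unsigned long flags; struct fake_rt_lock ptl; };
struct page_pointer  { unsigned long flags; struct fake_rt_lock *ptl; };

int main(void)
{
	/* Every physical page frame pays for an embedded lock ... */
	printf("embedded ptl: %zu bytes per page frame\n", sizeof(struct page_embedded));
	/* ... while a pointer only costs the PTE pages one extra allocation. */
	printf("pointer  ptl: %zu bytes per page frame\n", sizeof(struct page_pointer));
	return 0;
}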
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c  32
1 file changed, 32 insertions, 0 deletions
diff --git a/mm/memory.c b/mm/memory.c
index e09ca9b19492..0d248c51a411 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4008,3 +4008,35 @@ void copy_user_huge_page(struct page *dst, struct page *src,
}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+
+#if defined(CONFIG_PREEMPT_RT_FULL) && (USE_SPLIT_PTLOCKS > 0)
+/*
+ * Heinous hack, relies on the caller doing something like:
+ *
+ * pte = alloc_pages(PGALLOC_GFP, 0);
+ * if (pte)
+ * pgtable_page_ctor(pte);
+ * return pte;
+ *
+ * This ensures we release the page and return NULL when the
+ * lock allocation fails.
+ */
+struct page *pte_lock_init(struct page *page)
+{
+ page->ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
+ if (page->ptl) {
+ spin_lock_init(__pte_lockptr(page));
+ } else {
+ __free_page(page);
+ page = NULL;
+ }
+ return page;
+}
+
+void pte_lock_deinit(struct page *page)
+{
+ kfree(page->ptl);
+ page->mapping = NULL;
+}
+
+#endif
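For readers outside the kernel tree, the ownership contract described in the "heinous hack" comment can be mocked in plain userspace C. Everything below is an illustrative stand-in (the mock_* names are invented, alloc_pages()/kmalloc() are simulated with malloc()), and it assumes the companion, non-mm parts of the patch let pgtable_page_ctor() hand the page returned by pte_lock_init() back to the caller so the NULL actually propagates:

#include <stdio.h>
#include <stdlib.h>

struct mock_page { void *ptl; };

/* Mirrors pte_lock_init() above: on lock-allocation failure, free the page and return NULL. */
static struct mock_page *mock_pte_lock_init(struct mock_page *page)
{
	page->ptl = malloc(64);			/* stand-in for kmalloc(sizeof(spinlock_t)) */
	if (!page->ptl) {
		free(page);			/* release the page ...                */
		page = NULL;			/* ... and report failure to the caller */
	}
	return page;
}

/* Assumed shape of the constructor: pass through whatever pte_lock_init() returns. */
static struct mock_page *mock_pgtable_page_ctor(struct mock_page *page)
{
	return mock_pte_lock_init(page);
}

/* Caller shaped like the pte_alloc_one() pattern quoted in the comment. */
static struct mock_page *mock_pte_alloc_one(void)
{
	struct mock_page *pte = malloc(sizeof(*pte));	/* stand-in for alloc_pages() */

	if (pte)
		pte = mock_pgtable_page_ctor(pte);
	return pte;
}

int main(void)
{
	struct mock_page *pte = mock_pte_alloc_one();

	printf("pte %s\n", pte ? "allocated with out-of-line lock" : "is NULL");
	if (pte) {
		free(pte->ptl);			/* mirrors pte_lock_deinit() plus freeing the page */
		free(pte);
	}
	return 0;
}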