author		Ingo Molnar <mingo@elte.hu>	2009-07-03 08:29:37 -0500
committer	Clark Williams <williams@redhat.com>	2012-03-13 12:39:57 -0500
commit		cb80b8556517cf91756626986a91efe0eae20f98 (patch)
tree		dc041db9d6fd36137e0728e3c0c2e073dae017d7 /mm/page_alloc.c
parent		1f7c0907a73d98639dec5af65c527db9610786bd (diff)
mm: page_alloc: rt-friendly per-cpu pages
rt-friendly per-cpu pages: convert the irqs-off per-cpu locking method
into a preemptible, explicit-per-cpu-locks method.

Contains fixes from:

	Peter Zijlstra <a.p.zijlstra@chello.nl>
	Thomas Gleixner <tglx@linutronix.de>

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
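For readers without an RT tree handy: <linux/locallock.h> exists only in the
PREEMPT_RT patch set of this era, not in mainline. A simplified sketch of the
primitives the patch leans on (names follow the RT header, but this is
illustrative only; the real implementation additionally handles CPU migration,
nesting and lock recursion):

	/* Illustrative sketch -- not the actual RT <linux/locallock.h>. */
	#ifdef CONFIG_PREEMPT_RT_BASE

	/* RT: one lock per CPU.  spinlock_t is rtmutex-based here, so the
	 * guarded section stays preemptible and IRQs are never hard-disabled. */
	struct local_irq_lock {
		spinlock_t lock;
	};

	# define DEFINE_LOCAL_IRQ_LOCK(lvar)				\
		DEFINE_PER_CPU(struct local_irq_lock, lvar) = {		\
			.lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }

	# define local_irq_lock_init(lvar)				\
		do {							\
			int __cpu;					\
			for_each_possible_cpu(__cpu)			\
				spin_lock_init(&per_cpu(lvar, __cpu).lock); \
		} while (0)

	/* Simplified: the real version also pins the task to its CPU. */
	# define local_lock_irqsave(lvar, flags)			\
		spin_lock_irqsave(this_cpu_ptr(&(lvar).lock), flags)
	# define local_unlock_irqrestore(lvar, flags)			\
		spin_unlock_irqrestore(this_cpu_ptr(&(lvar).lock), flags)

	#else /* !CONFIG_PREEMPT_RT_BASE */

	/* !RT: everything reduces to the irqs-off code it replaces, so the
	 * patch is a behavioural no-op on non-RT kernels.  The placeholder
	 * keeps 'static DEFINE_LOCAL_IRQ_LOCK(pa_lock);' compiling. */
	# define DEFINE_LOCAL_IRQ_LOCK(lvar)	__typeof__(const int) lvar
	# define local_irq_lock_init(lvar)	do { (void)(lvar); } while (0)
	# define local_lock_irqsave(lvar, flags)	local_irq_save(flags)
	# define local_unlock_irqrestore(lvar, flags)	local_irq_restore(flags)

	#endif

Two consequences are visible in the diff below. First, drain_all_pages() loops
over for_each_online_cpu() on RT because drain_pages() may now sleep, which is
forbidden in the IPI context that on_each_cpu() runs its callback in; taking
the target CPU's pa_lock remotely via cpu_lock_irqsave() is what makes the
remote drain safe. Second, in the order > 0 allocation path,
local_spin_lock_irqsave(pa_lock, &zone->lock, flags) takes pa_lock along with
zone->lock, so the shared local_unlock_irqrestore() exit path works for both
the per-cpu-pages case and the buddy case.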
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	55
1 file changed, 39 insertions(+), 16 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3344154a8a36..27865c9d3b48 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -57,6 +57,7 @@
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
+#include <linux/locallock.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -222,6 +223,18 @@ EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
+static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define cpu_lock_irqsave(cpu, flags) \
+ spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags)
+# define cpu_unlock_irqrestore(cpu, flags) \
+ spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags)
+#else
+# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
+# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
+#endif
+
int page_group_by_mobility_disabled __read_mostly;
static void set_pageblock_migratetype(struct page *page, int migratetype)
@@ -683,13 +696,13 @@ static void __free_pages_ok(struct page *page, unsigned int order)
if (!free_pages_prepare(page, order))
return;
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
if (unlikely(wasMlocked))
free_page_mlock(page);
__count_vm_events(PGFREE, 1 << order);
free_one_page(page_zone(page), page, order,
get_pageblock_migratetype(page));
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
}
/*
@@ -1067,14 +1080,14 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
unsigned long flags;
int to_drain;
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
if (pcp->count >= pcp->batch)
to_drain = pcp->batch;
else
to_drain = pcp->count;
free_pcppages_bulk(zone, to_drain, pcp);
pcp->count -= to_drain;
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
}
#endif
@@ -1094,7 +1107,7 @@ static void drain_pages(unsigned int cpu)
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
- local_irq_save(flags);
+ cpu_lock_irqsave(cpu, flags);
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
@@ -1102,7 +1115,7 @@ static void drain_pages(unsigned int cpu)
free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
}
- local_irq_restore(flags);
+ cpu_unlock_irqrestore(cpu, flags);
}
}
@@ -1119,7 +1132,14 @@ void drain_local_pages(void *arg)
*/
void drain_all_pages(void)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
on_each_cpu(drain_local_pages, NULL, 1);
+#else
+ int i;
+
+ for_each_online_cpu(i)
+ drain_pages(i);
+#endif
}
#ifdef CONFIG_HIBERNATION
@@ -1175,7 +1195,7 @@ void free_hot_cold_page(struct page *page, int cold)
migratetype = get_pageblock_migratetype(page);
set_page_private(page, migratetype);
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
if (unlikely(wasMlocked))
free_page_mlock(page);
__count_vm_event(PGFREE);
@@ -1207,7 +1227,7 @@ void free_hot_cold_page(struct page *page, int cold)
}
out:
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
}
/*
@@ -1302,7 +1322,7 @@ again:
struct per_cpu_pages *pcp;
struct list_head *list;
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
if (list_empty(list)) {
@@ -1334,17 +1354,19 @@ again:
*/
WARN_ON_ONCE(order > 1);
}
- spin_lock_irqsave(&zone->lock, flags);
+ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
page = __rmqueue(zone, order, migratetype);
- spin_unlock(&zone->lock);
- if (!page)
+ if (!page) {
+ spin_unlock(&zone->lock);
goto failed;
+ }
__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
+ spin_unlock(&zone->lock);
}
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
VM_BUG_ON(bad_range(zone, page));
if (prep_new_page(page, order, gfp_flags))
@@ -1352,7 +1374,7 @@ again:
return page;
failed:
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
return NULL;
}
@@ -3684,10 +3706,10 @@ static int __zone_pcp_update(void *data)
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
- local_irq_save(flags);
+ cpu_lock_irqsave(cpu, flags);
free_pcppages_bulk(zone, pcp->count, pcp);
setup_pageset(pset, batch);
- local_irq_restore(flags);
+ cpu_unlock_irqrestore(cpu, flags);
}
return 0;
}
@@ -5053,6 +5075,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
void __init page_alloc_init(void)
{
hotcpu_notifier(page_alloc_cpu_notify, 0);
+ local_irq_lock_init(pa_lock);
}
/*