-rw-r--r--  include/linux/gfp.h      2
-rw-r--r--  include/linux/suspend.h  1
-rw-r--r--  kernel/power/snapshot.c  4
-rw-r--r--  mm/memory_hotplug.c      6
-rw-r--r--  mm/page_alloc.c         79
5 files changed, 48 insertions, 44 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 7e93a9ae7064..0c6ce515185d 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -228,5 +228,7 @@ extern void FASTCALL(free_cold_page(struct page *page));
void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
+void drain_all_pages(void);
+void drain_local_pages(void *dummy);
#endif /* __LINUX_GFP_H */
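With this header change, the draining interface is two functions: drain_local_pages() takes a void * argument purely so that it can double as an on_each_cpu() callback, and drain_all_pages() is the cross-CPU variant built on top of it. A minimal caller sketch under the 2.6.25-era API; example_flush_pcp_lists() is hypothetical:

#include <linux/gfp.h>

/* Hypothetical caller, not part of this patch. */
static void example_flush_pcp_lists(void)
{
	/*
	 * drain_all_pages() IPIs every online CPU and waits for the
	 * handlers to finish, so it must not be called with interrupts
	 * disabled or from interrupt context.
	 */
	drain_all_pages();
}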
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 646ce2d068d4..1d7d4c5797ee 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -130,7 +130,6 @@ struct pbe {
};
/* mm/page_alloc.c */
-extern void drain_local_pages(void);
extern void mark_free_pages(struct zone *zone);
/**
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index f6a5df934f8d..95250d7c8d91 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1203,7 +1203,7 @@ asmlinkage int swsusp_save(void)
printk(KERN_INFO "PM: Creating hibernation image: \n");
- drain_local_pages();
+ drain_local_pages(NULL);
nr_pages = count_data_pages();
nr_highmem = count_highmem_pages();
printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
@@ -1221,7 +1221,7 @@ asmlinkage int swsusp_save(void)
/* During allocation of the suspend pagedir, new cold pages may appear.
* Kill them.
*/
- drain_local_pages();
+ drain_local_pages(NULL);
copy_data_pages(&copy_bm, &orig_bm);
/*
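The NULL argument here is just the unused on_each_cpu() cookie; drain_local_pages() still drains smp_processor_id(), which is safe in swsusp_save() because the nonboot CPUs are already offline at that point. A sketch of the general constraint for other callers, with a hypothetical example_local_drain():

#include <linux/gfp.h>
#include <linux/preempt.h>

static void example_local_drain(void)
{
	preempt_disable();		/* pin the thread to this CPU */
	drain_local_pages(NULL);	/* NULL: unused IPI cookie */
	preempt_enable();
}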
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9512a544d044..7469c503580d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -481,8 +481,6 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
return offlined;
}
-extern void drain_all_local_pages(void);
-
int offline_pages(unsigned long start_pfn,
unsigned long end_pfn, unsigned long timeout)
{
@@ -540,7 +538,7 @@ repeat:
lru_add_drain_all();
flush_scheduled_work();
cond_resched();
- drain_all_local_pages();
+ drain_all_pages();
}
pfn = scan_lru_pages(start_pfn, end_pfn);
@@ -563,7 +561,7 @@ repeat:
flush_scheduled_work();
yield();
/* drain pcp pages, this is synchronous. */
- drain_all_local_pages();
+ drain_all_pages();
/* check again */
offlined_pages = check_pages_isolated(start_pfn, end_pfn);
if (offlined_pages < 0) {
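Both call sites above follow the same pattern: pages parked on per-cpu lists are on neither the LRU nor the buddy free lists, so both caches must be flushed before check_pages_isolated() can trust what it sees. A condensed sketch of that pattern; example_settle_range() is hypothetical:

#include <linux/gfp.h>
#include <linux/swap.h>

static void example_settle_range(void)
{
	lru_add_drain_all();	/* move pagevec pages onto the LRU */
	drain_all_pages();	/* spill pcp pages back to the buddy lists */
}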
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b2838c24e582..5c7de8e959fc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -890,7 +890,14 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
}
#endif
-static void __drain_pages(unsigned int cpu)
+/*
+ * Drain pages of the indicated processor.
+ *
+ * The processor must either be the current processor, with the
+ * calling thread pinned to it, or a processor that is not
+ * online.
+ */
+static void drain_pages(unsigned int cpu)
{
unsigned long flags;
struct zone *zone;
@@ -915,6 +922,22 @@ static void __drain_pages(unsigned int cpu)
}
}
+/*
+ * Spill all of this CPU's per-cpu pages back into the buddy allocator.
+ */
+void drain_local_pages(void *arg)
+{
+ drain_pages(smp_processor_id());
+}
+
+/*
+ * Spill all the per-cpu pages from all CPUs back into the buddy allocator
+ */
+void drain_all_pages(void)
+{
+ on_each_cpu(drain_local_pages, NULL, 0, 1);
+}
+
#ifdef CONFIG_HIBERNATION
void mark_free_pages(struct zone *zone)
@@ -952,37 +975,6 @@ void mark_free_pages(struct zone *zone)
#endif /* CONFIG_HIBERNATION */
/*
- * Spill all of this CPU's per-cpu pages back into the buddy allocator.
- */
-void drain_local_pages(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __drain_pages(smp_processor_id());
- local_irq_restore(flags);
-}
-
-void smp_drain_local_pages(void *arg)
-{
- drain_local_pages();
-}
-
-/*
- * Spill all the per-cpu pages from all CPUs back into the buddy allocator
- */
-void drain_all_local_pages(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __drain_pages(smp_processor_id());
- local_irq_restore(flags);
-
- smp_call_function(smp_drain_local_pages, NULL, 0, 1);
-}
-
-/*
* Free a 0-order page
*/
static void fastcall free_hot_cold_page(struct page *page, int cold)
@@ -1569,7 +1561,7 @@ nofail_alloc:
cond_resched();
if (order != 0)
- drain_all_local_pages();
+ drain_all_pages();
if (likely(did_some_progress)) {
page = get_page_from_freelist(gfp_mask, order,
@@ -3978,10 +3970,23 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
int cpu = (unsigned long)hcpu;
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
- local_irq_disable();
- __drain_pages(cpu);
+ drain_pages(cpu);
+
+	/*
+	 * Spill the event counters of the dead processor
+	 * into the current processor's event counters.
+	 * This artificially elevates the count of the current
+	 * processor.
+	 */
vm_events_fold_cpu(cpu);
- local_irq_enable();
+
+ /*
+ * Zero the differential counters of the dead processor
+ * so that the vm statistics are consistent.
+ *
+ * This is only okay since the processor is dead and cannot
+ * race with what we are doing.
+ */
refresh_cpu_vm_stats(cpu);
}
return NOTIFY_OK;
@@ -4480,7 +4485,7 @@ int set_migratetype_isolate(struct page *page)
out:
spin_unlock_irqrestore(&zone->lock, flags);
if (!ret)
- drain_all_local_pages();
+ drain_all_pages();
return ret;
}
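For comparison, the removed drain_all_local_pages() needed two steps because smp_call_function() runs the handler only on the other CPUs, while on_each_cpu() covers the calling CPU as well; that is the whole simplification. An illustrative reconstruction of the old shape using the new names (not code from this patch):

#include <linux/gfp.h>
#include <linux/smp.h>

static void old_style_drain_all(void)
{
	drain_local_pages(NULL);	/* the calling CPU, by hand */
	/* 2.6.25-era signature: (func, info, nonatomic, wait) */
	smp_call_function(drain_local_pages, NULL, 0, 1);
}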