Diffstat (limited to 'mm/swap.c')
 mm/swap.c | 33 +++++++++++++++++++++-------------
 1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 5c13f1338972..2051da9c48e2 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -30,6 +30,7 @@
 #include <linux/backing-dev.h>
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
+#include <linux/locallock.h>
 
 #include "internal.h"
 
@@ -40,6 +41,9 @@ static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 
+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
+static DEFINE_LOCAL_IRQ_LOCK(swap_lock);
+
 /*
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs.  But it gets used by networking.
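
Two separate locks are introduced rather than one: rotate_lock protects lru_rotate_pvecs, which is also reached from interrupt context when page writeback completes, while swap_lock serializes the remaining per-CPU pagevec caches. For reference (not part of the diff), on a non-RT build the locallock API is designed to fall back to exactly the primitives it replaces, making this conversion a functional no-op there. A minimal sketch of that fallback, assuming the RT tree's <linux/locallock.h> of this vintage:

	/* Assumed !PREEMPT_RT_BASE fallback: local locks degrade to the
	 * plain IRQ/preemption primitives they replaced. */
	#define DEFINE_LOCAL_IRQ_LOCK(lvar)		__typeof__(const int) lvar
	static inline void local_irq_lock_init(int lvar) { }
	#define local_lock_irqsave(lvar, flags)		local_irq_save(flags)
	#define local_unlock_irqrestore(lvar, flags)	local_irq_restore(flags)
	#define get_locked_var(lvar, var)		get_cpu_var(var)
	#define put_locked_var(lvar, var)		put_cpu_var(var)
	#define local_lock_cpu(lvar)			get_cpu()
	#define local_unlock_cpu(lvar)			put_cpu()
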
@@ -268,11 +272,11 @@ void rotate_reclaimable_page(struct page *page)
 		unsigned long flags;
 
 		page_cache_get(page);
-		local_irq_save(flags);
+		local_lock_irqsave(rotate_lock, flags);
 		pvec = &__get_cpu_var(lru_rotate_pvecs);
 		if (!pagevec_add(pvec, page))
 			pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(rotate_lock, flags);
 	}
 }
 
@@ -328,12 +332,13 @@ static void activate_page_drain(int cpu)
 void activate_page(struct page *page)
 {
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+		struct pagevec *pvec = &get_locked_var(swap_lock,
+						       activate_page_pvecs);
 
 		page_cache_get(page);
 		if (!pagevec_add(pvec, page))
 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
-		put_cpu_var(activate_page_pvecs);
+		put_locked_var(swap_lock, activate_page_pvecs);
 	}
 }
 
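
On an RT kernel, get_locked_var() pins the task to the per-CPU pagevec by taking the named local lock (a per-CPU sleeping spinlock) instead of relying on the implicit preempt_disable() of get_cpu_var(), so the section can be preempted without another task corrupting the pagevec. A sketch of the assumed RT-side expansion:

	/* Assumed RT expansion: take this CPU's instance of the local
	 * lock, then hand out the per-CPU variable it protects. */
	#define get_locked_var(lvar, var)		\
		(*({					\
			local_lock(lvar);		\
			&__get_cpu_var(var);		\
		}))
	#define put_locked_var(lvar, var)	local_unlock(lvar)
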
@@ -373,12 +378,12 @@ EXPORT_SYMBOL(mark_page_accessed);
 
 void __lru_cache_add(struct page *page, enum lru_list lru)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
+	struct pagevec *pvec = &get_locked_var(swap_lock, lru_add_pvecs)[lru];
 
 	page_cache_get(page);
 	if (!pagevec_add(pvec, page))
 		__pagevec_lru_add(pvec, lru);
-	put_cpu_var(lru_add_pvecs);
+	put_locked_var(swap_lock, lru_add_pvecs);
 }
 EXPORT_SYMBOL(__lru_cache_add);
 
@@ -513,9 +518,9 @@ void lru_add_drain_cpu(int cpu)
 		unsigned long flags;
 
 		/* No harm done if a racing interrupt already did this */
-		local_irq_save(flags);
+		local_lock_irqsave(rotate_lock, flags);
 		pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(rotate_lock, flags);
 	}
 
 	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
@@ -543,18 +548,19 @@ void deactivate_page(struct page *page)
 		return;
 
 	if (likely(get_page_unless_zero(page))) {
-		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+		struct pagevec *pvec = &get_locked_var(swap_lock,
+						       lru_deactivate_pvecs);
 
 		if (!pagevec_add(pvec, page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
-		put_cpu_var(lru_deactivate_pvecs);
+		put_locked_var(swap_lock, lru_deactivate_pvecs);
 	}
 }
 
 void lru_add_drain(void)
 {
-	lru_add_drain_cpu(get_cpu());
-	put_cpu();
+	lru_add_drain_cpu(local_lock_cpu(swap_lock));
+	local_unlock_cpu(swap_lock);
 }
 
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
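
lru_add_drain() can no longer use bare get_cpu()/put_cpu(): the CPU it drains must be the one whose swap_lock it holds, or a preempting task on RT could modify the pagevecs mid-drain. local_lock_cpu() provides that pairing by returning the CPU id only after the lock is taken; roughly (assumed):

	/* Assumed RT definition: the CPU id is only meaningful while
	 * the local lock is held. */
	#define local_lock_cpu(lvar)			\
		({					\
			local_lock(lvar);		\
			smp_processor_id();		\
		})
	#define local_unlock_cpu(lvar)	local_unlock(lvar)
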
@@ -768,6 +774,9 @@ void __init swap_setup(void)
 {
 	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
 
+	local_irq_lock_init(rotate_lock);
+	local_irq_lock_init(swap_lock);
+
 #ifdef CONFIG_SWAP
 	bdi_init(swapper_space.backing_dev_info);
 #endif
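
The two local_irq_lock_init() calls are needed because on RT each local lock is a real per-CPU spinlock that must be initialized at runtime; on non-RT builds the call compiles away. A sketch of the assumed RT-side initializer:

	/* Assumed RT initializer: set up the lock instance on every
	 * possible CPU before the first user runs. */
	#define local_irq_lock_init(lvar)				\
		do {							\
			int __cpu;					\
			for_each_possible_cpu(__cpu)			\
				spin_lock_init(&per_cpu(lvar, __cpu).lock); \
		} while (0)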