author    Varun Wadekar <vwadekar@nvidia.com>  2012-01-24 16:52:10 +0530
committer Varun Wadekar <vwadekar@nvidia.com>  2012-01-24 17:05:20 +0530
commit    058cf848b62154a6ac13e276899fbb6650df0b74 (patch)
tree      980f3caa279997fe141738ed15b4211036f3c7ab /mm
parent    ac64f7716cc7afbb57a3c7c70aa9214e2279624e (diff)
parent    9bb1282f6a7754955c18be912fbc2b55d133f1b9 (diff)

Merge branch 'linux-3.1.y' into android-tegra-nv-3.1

Linux 3.1.10

Change-Id: I465d184c492e8041dd0cd90f2cb70fde17ba7118
Signed-off-by: Varun Wadekar <vwadekar@nvidia.com>
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c    | 18
-rw-r--r--  mm/memcontrol.c | 44
-rw-r--r--  mm/slub.c       |  5
3 files changed, 51 insertions, 16 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index b91f3aa627fc..0eedbf85062d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -393,24 +393,11 @@ EXPORT_SYMBOL(filemap_write_and_wait_range);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
int error;
- struct mem_cgroup *memcg = NULL;
VM_BUG_ON(!PageLocked(old));
VM_BUG_ON(!PageLocked(new));
VM_BUG_ON(new->mapping);
- /*
- * This is not page migration, but prepare_migration and
- * end_migration does enough work for charge replacement.
- *
- * In the longer term we probably want a specialized function
- * for moving the charge from old to new in a more efficient
- * manner.
- */
- error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
- if (error)
- return error;
-
error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
if (!error) {
struct address_space *mapping = old->mapping;
@@ -432,13 +419,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
if (PageSwapBacked(new))
__inc_zone_page_state(new, NR_SHMEM);
spin_unlock_irq(&mapping->tree_lock);
+ /* mem_cgroup codes must not be called under tree_lock */
+ mem_cgroup_replace_page_cache(old, new);
radix_tree_preload_end();
if (freepage)
freepage(old);
page_cache_release(old);
- mem_cgroup_end_migration(memcg, old, new, true);
- } else {
- mem_cgroup_end_migration(memcg, old, new, false);
}
return error;
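
The filemap.c change keeps only the radix-tree and zone-statistics updates under mapping->tree_lock and calls the memcg replacement helper after spin_unlock_irq(), as the new comment notes. A minimal sketch of that shape, with invented names (not kernel APIs) and a mutex standing in for the spinlock:

    #include <pthread.h>

    static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Work that genuinely needs tree_lock (index + counters in the real patch). */
    static void update_index_and_counters(void) { }

    /* Another subsystem's bookkeeping (memcg in the real patch); it takes its
     * own locks, so it must never run nested inside tree_lock. */
    static void replace_accounting(void) { }

    void replace_entry(void)
    {
        pthread_mutex_lock(&tree_lock);
        update_index_and_counters();   /* only tree_lock-protected state here */
        pthread_mutex_unlock(&tree_lock);

        /* Deferred until after the unlock, mirroring the patched ordering. */
        replace_accounting();
    }
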
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index afde618f9895..dd81ddc64b4d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3355,6 +3355,50 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
cgroup_release_and_wakeup_rmdir(&mem->css);
}
+/*
+ * At replace page cache, newpage is not under any memcg but it's on
+ * LRU. So, this function doesn't touch res_counter but handles LRU
+ * in correct way. Both pages are locked so we cannot race with uncharge.
+ */
+void mem_cgroup_replace_page_cache(struct page *oldpage,
+ struct page *newpage)
+{
+ struct mem_cgroup *memcg;
+ struct page_cgroup *pc;
+ struct zone *zone;
+ enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
+ unsigned long flags;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ pc = lookup_page_cgroup(oldpage);
+ /* fix accounting on old pages */
+ lock_page_cgroup(pc);
+ memcg = pc->mem_cgroup;
+ mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
+ ClearPageCgroupUsed(pc);
+ unlock_page_cgroup(pc);
+
+ if (PageSwapBacked(oldpage))
+ type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
+
+ zone = page_zone(newpage);
+ pc = lookup_page_cgroup(newpage);
+ /*
+ * Even if newpage->mapping was NULL before starting replacement,
+ * the newpage may be on LRU(or pagevec for LRU) already. We lock
+ * LRU while we overwrite pc->mem_cgroup.
+ */
+ spin_lock_irqsave(&zone->lru_lock, flags);
+ if (PageLRU(newpage))
+ del_page_from_lru_list(zone, newpage, page_lru(newpage));
+ __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
+ if (PageLRU(newpage))
+ add_page_to_lru_list(zone, newpage, page_lru(newpage));
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
+}
+
#ifdef CONFIG_DEBUG_VM
static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
{
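
The added mem_cgroup_replace_page_cache() overwrites pc->mem_cgroup for a page that may already sit on an LRU list, so it holds zone->lru_lock and removes/re-adds the page around the commit. The same shape in a self-contained sketch, with invented names and a plain doubly linked list in place of the kernel LRU (not kernel APIs):

    #include <pthread.h>
    #include <stdbool.h>

    struct node {
        struct node *prev, *next;
        bool on_list;
        void *owner;                       /* stands in for pc->mem_cgroup */
    };

    static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

    static void list_del_node(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    static void list_add_node(struct node *head, struct node *n)
    {
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
    }

    /* Reassign ownership while the node may be visible on a shared list:
     * take it off under the list lock, change the owner, put it back. */
    void reassign_owner(struct node *head, struct node *n, void *new_owner)
    {
        pthread_mutex_lock(&lru_lock);
        if (n->on_list)
            list_del_node(n);
        n->owner = new_owner;              /* list walkers never see this in flight */
        if (n->on_list)
            list_add_node(head, n);
        pthread_mutex_unlock(&lru_lock);
    }
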
diff --git a/mm/slub.c b/mm/slub.c
index 7c54fe83a90c..f73234db904d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2077,6 +2077,11 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
goto new_slab;
}
+ /* must check again c->freelist in case of cpu migration or IRQ */
+ object = c->freelist;
+ if (object)
+ goto load_freelist;
+
stat(s, ALLOC_SLOWPATH);
do {
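
The slub.c hunk is the classic re-check pattern: between the lock-free fast path and entry into the slow path with interrupts disabled, an IRQ or a CPU migration may have refilled c->freelist, so it must be read again before falling back to a new slab. A userspace sketch of the same idea, with a mutex in place of IRQ disabling and invented names (not kernel APIs):

    #include <pthread.h>
    #include <stddef.h>

    struct obj {
        struct obj *next;
    };

    struct cache {
        pthread_mutex_t lock;
        struct obj *freelist;
    };

    /* Slow path, entered after a lock-free fast path saw an empty freelist. */
    struct obj *alloc_slowpath(struct cache *c)
    {
        struct obj *o;

        pthread_mutex_lock(&c->lock);

        /* Re-check: the freelist may have been refilled between the fast-path
         * check and acquiring exclusion (an IRQ or migration in the kernel,
         * another thread here).  Skipping this would allocate a new slab
         * needlessly. */
        o = c->freelist;
        if (o) {
            c->freelist = o->next;
            pthread_mutex_unlock(&c->lock);
            return o;
        }

        /* ... otherwise fall back to acquiring a fresh slab here ... */
        pthread_mutex_unlock(&c->lock);
        return NULL;
    }
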