author		Hugh Dickins <hughd@google.com>			2012-05-29 15:07:09 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-29 16:22:28 -0700
commit		fa9add641b1b1c564db916accac1db346e7a2759 (patch)
tree		875e74ec4d7fed0018fdbc134ad899949c5e3384 /include/linux/mm_inline.h
parent		75b00af77ed5b5a3d55549f9e0c33f3969b9330c (diff)
mm/memcg: apply add/del_page to lruvec
Take lruvec further: pass it instead of zone to add_page_to_lru_list() and del_page_from_lru_list(), and have pagevec_lru_move_fn() pass lruvec down to its target functions.

This cleanup eliminates a swathe of cruft in memcontrol.c, including mem_cgroup_lru_add_list(), mem_cgroup_lru_del_list() and mem_cgroup_lru_move_lists() - which never actually touched the lists.

In their place, mem_cgroup_page_lruvec() decides the lruvec, previously a side-effect of add, and mem_cgroup_update_lru_size() maintains the lru_size stats.

Whilst these are simplifications in their own right, the goal is to bring the evaluation of lruvec next to the spin_locking of the lrus, in preparation for a future patch.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
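For context, a caller-side sketch of the new calling convention (not part of this diff): the lruvec is now resolved explicitly, next to taking the lru lock, and then handed to add_page_to_lru_list(). The wrapper name example_add_page_to_lru() is a made-up helper for illustration; mem_cgroup_page_lruvec(), add_page_to_lru_list(), SetPageLRU() and zone->lru_lock are the interfaces as they stand at this point in the tree.

#include <linux/mm.h>
#include <linux/mm_inline.h>	/* add_page_to_lru_list() */
#include <linux/memcontrol.h>	/* mem_cgroup_page_lruvec() */
#include <linux/page-flags.h>	/* SetPageLRU() */

/* Illustrative sketch only: put one page on an LRU list under the new API. */
static void example_add_page_to_lru(struct zone *zone, struct page *page,
				    enum lru_list lru)
{
	struct lruvec *lruvec;

	spin_lock_irq(&zone->lru_lock);
	/* decide the lruvec up front - no longer a side-effect of the add */
	lruvec = mem_cgroup_page_lruvec(page, zone);
	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, lru);
	spin_unlock_irq(&zone->lru_lock);
}

As the commit message notes, evaluating the lruvec right beside the lru_lock is the point of the change: the lookup and the locking now sit together, which is the groundwork for the follow-up patch.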
Diffstat (limited to 'include/linux/mm_inline.h')
-rw-r--r--	include/linux/mm_inline.h	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 16d45d9c31a4..1397ccf81e91 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -21,22 +21,22 @@ static inline int page_is_file_cache(struct page *page)
 	return !PageSwapBacked(page);
 }
 
-static __always_inline void
-add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void add_page_to_lru_list(struct page *page,
+				struct lruvec *lruvec, enum lru_list lru)
 {
-	struct lruvec *lruvec;
-
-	lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+	int nr_pages = hpage_nr_pages(page);
+	mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
 	list_add(&page->lru, &lruvec->lists[lru]);
-	__mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
+	__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
 }
 
-static __always_inline void
-del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void del_page_from_lru_list(struct page *page,
+				struct lruvec *lruvec, enum lru_list lru)
 {
-	mem_cgroup_lru_del_list(page, lru);
+	int nr_pages = hpage_nr_pages(page);
+	mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
 	list_del(&page->lru);
-	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
+	__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);
 }
 
 /**