author		Johannes Weiner <hannes@cmpxchg.org>	2017-05-03 14:55:13 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-03 15:52:11 -0700
commit		71cd31135d4cf030a057ed7079a75a40c0a4a796 (patch)
tree		c95c8da1e70c21e412d9ab7328fc414ecfb80451 /include/linux/memcontrol.h
parent		df0e53d0619e83b465e363c088bf4eeb2848273b (diff)
mm: memcontrol: re-use node VM page state enum
The current duplication is a high-maintenance mess, and it's painful to add new items or query memcg state from the rest of the VM.

This increases the size of the stat array marginally, but we should aim to track all these stats on a per-cgroup level anyway.

Link: http://lkml.kernel.org/r/20170404220148.28338-3-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
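The patch removes the duplication by stacking the cgroup-only counters on top of the universal node stat enum, so both share one index space and one counter array. A minimal sketch of the pattern, with illustrative names rather than the kernel's:

	/* universal per-node stats (stand-in for enum node_stat_item) */
	enum example_node_stat_item {
		EX_NR_FILE_PAGES,
		EX_NR_ANON_PAGES,
		EX_NR_VM_NODE_STAT_ITEMS,	/* count of universal items */
	};

	/* cgroup-only stats continue the same index space */
	enum example_memcg_stat_item {
		EX_MEMCG_SWAP = EX_NR_VM_NODE_STAT_ITEMS,
		EX_MEMCG_SOCK,
		EX_MEMCG_NR_STAT,		/* total size of the stat array */
	};

	/* one array is indexable by either enum, no translation needed */
	static long example_stat[EX_MEMCG_NR_STAT];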
Diffstat (limited to 'include/linux/memcontrol.h')
-rw-r--r--	include/linux/memcontrol.h	100
1 file changed, 43 insertions(+), 57 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0bb5f055bd26..0fa1f5de6841 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -35,40 +35,45 @@ struct page;
struct mm_struct;
struct kmem_cache;
-/*
- * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c,
- * These two lists should keep in accord with each other.
- */
-enum mem_cgroup_stat_index {
- /*
- * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
- */
- MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
- MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
- MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */
- MEM_CGROUP_STAT_SHMEM, /* # of pages charged as shmem */
- MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
- MEM_CGROUP_STAT_DIRTY, /* # of dirty pages in page cache */
- MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */
- MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
- MEM_CGROUP_STAT_NSTATS,
- /* default hierarchy stats */
- MEMCG_KERNEL_STACK_KB = MEM_CGROUP_STAT_NSTATS,
+/* Cgroup-specific page state, on top of universal node page state */
+enum memcg_stat_item {
+ MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
+ MEMCG_RSS,
+ MEMCG_RSS_HUGE,
+ MEMCG_SWAP,
+ MEMCG_SOCK,
+ /* XXX: why are these zone and not node counters? */
+ MEMCG_KERNEL_STACK_KB,
MEMCG_SLAB_RECLAIMABLE,
MEMCG_SLAB_UNRECLAIMABLE,
- MEMCG_SOCK,
- MEMCG_WORKINGSET_REFAULT,
- MEMCG_WORKINGSET_ACTIVATE,
- MEMCG_WORKINGSET_NODERECLAIM,
MEMCG_NR_STAT,
};
+/* Cgroup-specific events, on top of universal VM events */
+enum memcg_event_item {
+ MEMCG_LOW = NR_VM_EVENT_ITEMS,
+ MEMCG_HIGH,
+ MEMCG_MAX,
+ MEMCG_OOM,
+ MEMCG_NR_EVENTS,
+};
+
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
int priority;
unsigned int generation;
};
+#ifdef CONFIG_MEMCG
+
+#define MEM_CGROUP_ID_SHIFT 16
+#define MEM_CGROUP_ID_MAX USHRT_MAX
+
+struct mem_cgroup_id {
+ int id;
+ atomic_t ref;
+};
+
/*
* Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used for
@@ -82,25 +87,6 @@ enum mem_cgroup_events_target {
MEM_CGROUP_NTARGETS,
};
-#ifdef CONFIG_MEMCG
-
-#define MEM_CGROUP_ID_SHIFT 16
-#define MEM_CGROUP_ID_MAX USHRT_MAX
-
-struct mem_cgroup_id {
- int id;
- atomic_t ref;
-};
-
-/* Cgroup-specific events, on top of universal VM events */
-enum memcg_event_item {
- MEMCG_LOW = NR_VM_EVENT_ITEMS,
- MEMCG_HIGH,
- MEMCG_MAX,
- MEMCG_OOM,
- MEMCG_NR_EVENTS,
-};
-
struct mem_cgroup_stat_cpu {
long count[MEMCG_NR_STAT];
unsigned long events[MEMCG_NR_EVENTS];
@@ -487,7 +473,7 @@ void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);
static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
- enum mem_cgroup_stat_index idx)
+ enum memcg_stat_item idx)
{
long val = 0;
int cpu;
@@ -502,20 +488,20 @@ static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
}
static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg,
- enum mem_cgroup_stat_index idx, int val)
+ enum memcg_stat_item idx, int val)
{
if (!mem_cgroup_disabled())
this_cpu_add(memcg->stat->count[idx], val);
}
static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg,
- enum mem_cgroup_stat_index idx)
+ enum memcg_stat_item idx)
{
mem_cgroup_update_stat(memcg, idx, 1);
}
static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
- enum mem_cgroup_stat_index idx)
+ enum memcg_stat_item idx)
{
mem_cgroup_update_stat(memcg, idx, -1);
}
@@ -538,20 +524,20 @@ static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
* Kernel pages are an exception to this, since they'll never move.
*/
static inline void mem_cgroup_update_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx, int val)
+ enum memcg_stat_item idx, int val)
{
if (page->mem_cgroup)
mem_cgroup_update_stat(page->mem_cgroup, idx, val);
}
static inline void mem_cgroup_inc_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx)
+ enum memcg_stat_item idx)
{
mem_cgroup_update_page_stat(page, idx, 1);
}
static inline void mem_cgroup_dec_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx)
+ enum memcg_stat_item idx)
{
mem_cgroup_update_page_stat(page, idx, -1);
}
@@ -760,33 +746,33 @@ static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
}
static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg,
- enum mem_cgroup_stat_index idx, int val)
+ enum memcg_stat_item idx, int val)
{
}
static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg,
- enum mem_cgroup_stat_index idx)
+ enum memcg_stat_item idx)
{
}
static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
- enum mem_cgroup_stat_index idx)
+ enum memcg_stat_item idx)
{
}
static inline void mem_cgroup_update_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx,
+ enum memcg_stat_item idx,
int nr)
{
}
static inline void mem_cgroup_inc_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx)
+ enum memcg_stat_item idx)
{
}
static inline void mem_cgroup_dec_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx)
+ enum memcg_stat_item idx)
{
}
@@ -906,7 +892,7 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
* @val: number of pages (positive or negative)
*/
static inline void memcg_kmem_update_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx, int val)
+ enum memcg_stat_item idx, int val)
{
if (memcg_kmem_enabled() && page->mem_cgroup)
this_cpu_add(page->mem_cgroup->stat->count[idx], val);
@@ -935,7 +921,7 @@ static inline void memcg_put_cache_ids(void)
}
static inline void memcg_kmem_update_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx, int val)
+ enum memcg_stat_item idx, int val)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
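Because memcg_stat_item now continues node_stat_item's index space, VM code can pass a universal node stat index straight into the memcg helpers declared above. A hedged usage sketch, not part of the patch (NR_FILE_MAPPED is from the kernel's enum node_stat_item; the wrapper function name is invented for illustration):

	/* account a page's mapped state to its cgroup via the shared index */
	static inline void example_account_mapped(struct page *page, bool mapped)
	{
		if (mapped)
			mem_cgroup_inc_page_stat(page, NR_FILE_MAPPED);
		else
			mem_cgroup_dec_page_stat(page, NR_FILE_MAPPED);
	}

Since the memcg stat array reserves the first NR_VM_NODE_STAT_ITEMS slots for the universal counters, an index like NR_FILE_MAPPED lands in range without any remapping; the cgroup-only items simply occupy the tail of the array.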