author    Mel Gorman <mgorman@techsingularity.net>           2016-07-28 15:46:20 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>     2016-07-28 16:07:41 -0700
commit    11fb998986a72aa7e997d96d63d52582a01228c5 (patch)
tree      fd9db095081c4fe2212db7de2757bfdf4645dc04 /mm/vmscan.c
parent    4b9d0fab7166c9323f06d708518a35cf3a90426c (diff)
mm: move most file-based accounting to the node
There are now a number of accounting oddities such as mapped file pages
being accounted for on the node while the total number of file pages are
accounted on the zone. This can be coped with to some extent but it's
confusing so this patch moves the relevant file-based accounting to the
node. Due to throttling logic in the page allocator for reliable OOM
detection, it is still necessary to track dirty and writeback pages on a
per-zone basis.

[mgorman@techsingularity.net: fix NR_ZONE_WRITE_PENDING accounting]
Link: http://lkml.kernel.org/r/1468404004-5085-5-git-send-email-mgorman@techsingularity.net
Link: http://lkml.kernel.org/r/1467970510-21195-20-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
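To see the shape of the change outside kernel context: file-based counters now
live on the per-node structure, while a write-pending counter survives per-zone
for the allocator's dirty throttling. The following is a minimal userspace
sketch of that split; the struct layout, field names, and values are
illustrative stand-ins, not the kernel's pglist_data/zone definitions.

    /* Hypothetical userspace model of the accounting split (not kernel code). */
    #include <stdio.h>

    struct zone_model {
            unsigned long nr_zone_write_pending;    /* stays per-zone for OOM throttling */
    };

    struct node_model {
            struct zone_model zones[4];             /* e.g. DMA, DMA32, NORMAL, MOVABLE */
            unsigned long nr_file_mapped;           /* file-based counters now per-node */
            unsigned long nr_inactive_file;
            unsigned long nr_active_file;
    };

    /* Analogue of node_unmapped_file_pages() after the patch: estimate how much
     * of the file LRU is not mapped, clamping to zero because mapped pages can
     * exceed the LRU count. */
    static unsigned long unmapped_file_pages(const struct node_model *node)
    {
            unsigned long file_lru = node->nr_inactive_file + node->nr_active_file;

            return (file_lru > node->nr_file_mapped) ?
                    file_lru - node->nr_file_mapped : 0;
    }

    int main(void)
    {
            struct node_model node = {
                    .zones = { { 10 }, { 20 }, { 30 }, { 0 } },
                    .nr_file_mapped = 250,
                    .nr_inactive_file = 600,
                    .nr_active_file = 400,
            };

            printf("unmapped file pages on node: %lu\n", unmapped_file_pages(&node));
            return 0;
    }

The point of the split is visible in the types: the unmapped-file estimate needs
only node-wide counters, so it takes the node, not a zone.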
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 90b46651d158..b797afec3057 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3587,11 +3587,11 @@ int sysctl_min_unmapped_ratio = 1;
  */
 int sysctl_min_slab_ratio = 5;
 
-static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
+static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
 {
-	unsigned long file_mapped = node_page_state(zone->zone_pgdat, NR_FILE_MAPPED);
-	unsigned long file_lru = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
-		node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE);
+	unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
+	unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
+		node_page_state(pgdat, NR_ACTIVE_FILE);
 
 	/*
 	 * It's possible for there to be more file mapped pages than
@@ -3610,17 +3610,17 @@ static unsigned long zone_pagecache_reclaimable(struct zone *zone)
 	/*
 	 * If RECLAIM_UNMAP is set, then all file pages are considered
 	 * potentially reclaimable. Otherwise, we have to worry about
-	 * pages like swapcache and zone_unmapped_file_pages() provides
+	 * pages like swapcache and node_unmapped_file_pages() provides
 	 * a better estimate
 	 */
 	if (zone_reclaim_mode & RECLAIM_UNMAP)
-		nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
+		nr_pagecache_reclaimable = node_page_state(zone->zone_pgdat, NR_FILE_PAGES);
 	else
-		nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
+		nr_pagecache_reclaimable = node_unmapped_file_pages(zone->zone_pgdat);
 
 	/* If we can't clean pages, remove dirty pages from consideration */
 	if (!(zone_reclaim_mode & RECLAIM_WRITE))
-		delta += zone_page_state(zone, NR_FILE_DIRTY);
+		delta += node_page_state(zone->zone_pgdat, NR_FILE_DIRTY);
 
 	/* Watch for any possible underflows due to delta */
 	if (unlikely(delta > nr_pagecache_reclaimable))
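For readers following the logic rather than the line-by-line changes, the
estimate in zone_pagecache_reclaimable() boils down to: take a base count of
reclaimable pagecache, subtract what this reclaim mode is not allowed to touch,
and clamp against underflow because the counters are sampled independently. A
hedged standalone sketch of that pattern follows; it is plain C with made-up
flag values and counter inputs, not the kernel function.

    /* Standalone model of the clamp-against-underflow estimate (illustrative). */
    #include <stdio.h>

    #define RECLAIM_WRITE (1u << 1)    /* stand-in flag values */
    #define RECLAIM_UNMAP (1u << 2)

    static unsigned long pagecache_reclaimable(unsigned int reclaim_mode,
                                               unsigned long nr_file_pages,
                                               unsigned long nr_unmapped_file,
                                               unsigned long nr_file_dirty)
    {
            unsigned long nr_pagecache_reclaimable;
            unsigned long delta = 0;

            /* With RECLAIM_UNMAP all file pages count; otherwise only the
             * unmapped-file estimate does. */
            if (reclaim_mode & RECLAIM_UNMAP)
                    nr_pagecache_reclaimable = nr_file_pages;
            else
                    nr_pagecache_reclaimable = nr_unmapped_file;

            /* Without RECLAIM_WRITE, dirty pages cannot be cleaned, so
             * exclude them from the estimate. */
            if (!(reclaim_mode & RECLAIM_WRITE))
                    delta += nr_file_dirty;

            /* Counters are sampled at different times; clamp so the
             * subtraction cannot underflow. */
            if (delta > nr_pagecache_reclaimable)
                    delta = nr_pagecache_reclaimable;

            return nr_pagecache_reclaimable - delta;
    }

    int main(void)
    {
            /* 700 unmapped file pages, 60 dirty, writes not allowed: 640. */
            printf("%lu\n", pagecache_reclaimable(0, 1000, 700, 60));
            return 0;
    }

The only change this patch makes to that flow is where the inputs come from:
NR_FILE_PAGES and NR_FILE_DIRTY are now read via node_page_state() on
zone->zone_pgdat instead of via zone_page_state() on the zone itself.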