 include/linux/mm_inline.h |  2 ++
 include/linux/mmzone.h    |  6 ++++++
 mm/page_alloc.c           | 10 ++++++++++
 mm/vmscan.c               |  9 ---------
 mm/vmstat.c               |  5 +++++
 5 files changed, 23 insertions(+), 9 deletions(-)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index bcc4ed07fa90..9cc130f5feb2 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -45,6 +45,8 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec,
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 
 	__mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages);
+	__mod_zone_page_state(&pgdat->node_zones[zid],
+				NR_ZONE_LRU_BASE + lru, nr_pages);
 	acct_highmem_file_pages(zid, lru, nr_pages);
 }
 
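The node-wide NR_LRU_BASE counter and the new zone-local NR_ZONE_LRU_BASE counter now move in lockstep, with the zone id supplied by the caller. In the kernel that id is derived from the page itself via page_zonenum(). A minimal sketch of a call site, assuming a hypothetical helper (lru_add_one is not in the patch; page_zonenum() is the real accessor):

	/* hypothetical caller: account one page onto an LRU list */
	static void lru_add_one(struct lruvec *lruvec, struct page *page,
				enum lru_list lru)
	{
		/* bumps the per-node and the per-zone counter together */
		__update_lru_size(lruvec, lru, page_zonenum(page), 1);
		list_add(&page->lru, &lruvec->lists[lru]);
	}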
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a3b7f45aac56..1a813ad335f4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -110,6 +110,12 @@ struct zone_padding {
 enum zone_stat_item {
 	/* First 128 byte cacheline (assuming 64 bit words) */
 	NR_FREE_PAGES,
+	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
+	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
+	NR_ZONE_ACTIVE_ANON,
+	NR_ZONE_INACTIVE_FILE,
+	NR_ZONE_ACTIVE_FILE,
+	NR_ZONE_UNEVICTABLE,
 	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
 	NR_SLAB_RECLAIMABLE,
 	NR_SLAB_UNRECLAIMABLE,
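The five NR_ZONE_* entries are laid out in the same order as enum lru_list (defined further down in this header), so any zone-local counter can be reached as NR_ZONE_LRU_BASE + lru, the same arithmetic __update_lru_size() uses above. A sketch of that correspondence (helper name hypothetical):

	/* hypothetical helper: map an LRU list to its zone counter;
	 * relies on the two enums keeping the same order */
	static inline enum zone_stat_item zone_lru_stat(enum lru_list lru)
	{
		return NR_ZONE_LRU_BASE + lru; /* e.g. LRU_ACTIVE_FILE -> NR_ZONE_ACTIVE_FILE */
	}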
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ffd4fdbae8b5..759cfa8cbbeb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4356,6 +4356,11 @@ void show_free_areas(unsigned int filter)
 			" min:%lukB"
 			" low:%lukB"
 			" high:%lukB"
+			" active_anon:%lukB"
+			" inactive_anon:%lukB"
+			" active_file:%lukB"
+			" inactive_file:%lukB"
+			" unevictable:%lukB"
 			" present:%lukB"
 			" managed:%lukB"
 			" mlocked:%lukB"
@@ -4373,6 +4378,11 @@ void show_free_areas(unsigned int filter)
 			K(min_wmark_pages(zone)),
 			K(low_wmark_pages(zone)),
 			K(high_wmark_pages(zone)),
+			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
+			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
+			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
+			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
+			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
 			K(zone->present_pages),
 			K(zone->managed_pages),
 			K(zone_page_state(zone, NR_MLOCK)),
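The K() macro wrapping each value is page_alloc's pages-to-kilobytes conversion, defined in the same file:

	#define K(x) ((x) << (PAGE_SHIFT-10))	/* pages -> kB */

With 4KiB pages (PAGE_SHIFT == 12) this shifts by 2, so for example K(256) prints as 1024kB.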
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 22aec2bcfeec..222d5403dd4b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1359,23 +1359,14 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
 			enum lru_list lru, unsigned long *nr_zone_taken,
 			unsigned long nr_taken)
 {
-#ifdef CONFIG_HIGHMEM
 	int zid;
 
-	/*
-	 * Highmem has separate accounting for highmem pages so each zone
-	 * is updated separately.
-	 */
 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 		if (!nr_zone_taken[zid])
 			continue;
 
 		__update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
 	}
-#else
-	/* Zone ID does not matter on !HIGHMEM */
-	__update_lru_size(lruvec, lru, 0, -nr_taken);
-#endif
 
 #ifdef CONFIG_MEMCG
 	mem_cgroup_update_lru_size(lruvec, lru, -nr_taken);
 #endif
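With the zone-local counters now present on every configuration, the old !HIGHMEM shortcut would have updated only zone 0 and left the new NR_ZONE_* counters stale, hence the unconditional per-zone walk. The nr_zone_taken array is filled one slot per zone id during isolation; a simplified sketch of that bookkeeping, based on the isolate_lru_pages() accounting (the dst list name is illustrative):

	unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
	struct page *page;

	/* tally isolated pages per zone so each zone's counter can
	 * later be decremented by exactly what was taken from it */
	list_for_each_entry(page, &dst, lru)
		nr_zone_taken[page_zonenum(page)] += hpage_nr_pages(page);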
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 91ecca96dcae..053075ac67b8 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -921,6 +921,11 @@ int fragmentation_index(struct zone *zone, unsigned int order)
 const char * const vmstat_text[] = {
 	/* enum zone_stat_item counters */
 	"nr_free_pages",
+	"nr_zone_inactive_anon",
+	"nr_zone_active_anon",
+	"nr_zone_inactive_file",
+	"nr_zone_active_file",
+	"nr_zone_unevictable",
 	"nr_mlock",
 	"nr_slab_reclaimable",
 	"nr_slab_unreclaimable",