Diffstat (limited to 'include/linux/vmstat.h')
 include/linux/vmstat.h | 294 ++++++++++++++++++++++++++++++++++++--------
 1 file changed, 256 insertions(+), 38 deletions(-)
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index b3d85f30d424..3398a345bda8 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _LINUX_VMSTAT_H
 #define _LINUX_VMSTAT_H
 
@@ -6,8 +7,35 @@
 #include <linux/mmzone.h>
 #include <linux/vm_event_item.h>
 #include <linux/atomic.h>
+#include <linux/static_key.h>
+#include <linux/mmdebug.h>
 
-extern int sysctl_stat_interval;
+#ifdef CONFIG_NUMA
+DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
+#endif
+
+struct reclaim_stat {
+	unsigned nr_dirty;
+	unsigned nr_unqueued_dirty;
+	unsigned nr_congested;
+	unsigned nr_writeback;
+	unsigned nr_immediate;
+	unsigned nr_pageout;
+	unsigned nr_activate[ANON_AND_FILE];
+	unsigned nr_ref_keep;
+	unsigned nr_unmap_fail;
+	unsigned nr_lazyfree_fail;
+	unsigned nr_demoted;
+};
+
+/* Stat data for system wide items */
+enum vm_stat_item {
+	NR_DIRTY_THRESHOLD,
+	NR_DIRTY_BG_THRESHOLD,
+	NR_MEMMAP_PAGES,	/* page metadata allocated through buddy allocator */
+	NR_MEMMAP_BOOT_PAGES,	/* page metadata allocated through boot allocator */
+	NR_VM_STAT_ITEMS,
+};
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
 /*
@@ -94,10 +122,10 @@ static inline void vm_events_fold_cpu(int cpu)
 #define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
 #endif
 
-#ifdef CONFIG_DEBUG_VM_VMACACHE
-#define count_vm_vmacache_event(x) count_vm_event(x)
+#ifdef CONFIG_PER_VMA_LOCK_STATS
+#define count_vm_vma_lock_event(x) count_vm_event(x)
 #else
-#define count_vm_vmacache_event(x) do {} while (0)
+#define count_vm_vma_lock_event(x) do {} while (0)
 #endif
 
 #define __count_zid_vm_events(item, zid, delta) \
@@ -108,6 +136,28 @@ static inline void vm_events_fold_cpu(int cpu)
  */
 extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
 extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
+extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
+
+#ifdef CONFIG_NUMA
+static inline void zone_numa_event_add(long x, struct zone *zone,
+					enum numa_stat_item item)
+{
+	atomic_long_add(x, &zone->vm_numa_event[item]);
+	atomic_long_add(x, &vm_numa_event[item]);
+}
+
+static inline unsigned long zone_numa_event_state(struct zone *zone,
+					enum numa_stat_item item)
+{
+	return atomic_long_read(&zone->vm_numa_event[item]);
+}
+
+static inline unsigned long
+global_numa_event_state(enum numa_stat_item item)
+{
+	return atomic_long_read(&vm_numa_event[item]);
+}
+#endif /* CONFIG_NUMA */
 
 static inline void zone_page_state_add(long x, struct zone *zone,
 				 enum zone_stat_item item)
@@ -123,7 +173,7 @@ static inline void node_page_state_add(long x, struct pglist_data *pgdat,
 	atomic_long_add(x, &vm_node_stat[item]);
 }
 
-static inline unsigned long global_page_state(enum zone_stat_item item)
+static inline unsigned long global_zone_page_state(enum zone_stat_item item)
 {
 	long x = atomic_long_read(&vm_zone_stat[item]);
 #ifdef CONFIG_SMP
@@ -133,7 +183,8 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
 	return x;
 }
 
-static inline unsigned long global_node_page_state(enum node_stat_item item)
+static inline
+unsigned long global_node_page_state_pages(enum node_stat_item item)
 {
	long x = atomic_long_read(&vm_node_stat[item]);
 #ifdef CONFIG_SMP
@@ -143,6 +194,13 @@ static inline unsigned long global_node_page_state(enum node_stat_item item)
 	return x;
 }
 
+static inline unsigned long global_node_page_state(enum node_stat_item item)
+{
+	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+
+	return global_node_page_state_pages(item);
+}
+
 static inline unsigned long zone_page_state(struct zone *zone,
 					enum zone_stat_item item)
 {
@@ -168,7 +226,7 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
 #ifdef CONFIG_SMP
 	int cpu;
 	for_each_online_cpu(cpu)
-		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
+		x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];
 
 	if (x < 0)
 		x = 0;
@@ -176,38 +234,41 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
 	return x;
 }
 
-static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
-					enum node_stat_item item)
+#ifdef CONFIG_NUMA
+/* See __count_vm_event comment on why raw_cpu_inc is used. */
+static inline void
+__count_numa_event(struct zone *zone, enum numa_stat_item item)
 {
-	long x = atomic_long_read(&pgdat->vm_stat[item]);
+	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
 
-#ifdef CONFIG_SMP
-	int cpu;
-	for_each_online_cpu(cpu)
-		x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];
-
-	if (x < 0)
-		x = 0;
-#endif
-	return x;
+	raw_cpu_inc(pzstats->vm_numa_event[item]);
 }
 
+static inline void
+__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
+{
+	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
+
+	raw_cpu_add(pzstats->vm_numa_event[item], delta);
+}
 
-#ifdef CONFIG_NUMA
 extern unsigned long sum_zone_node_page_state(int node,
-				enum zone_stat_item item);
+					enum zone_stat_item item);
+extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
 extern unsigned long node_page_state(struct pglist_data *pgdat,
 					enum node_stat_item item);
+extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
+					enum node_stat_item item);
+extern void fold_vm_numa_events(void);
 #else
-#define sum_zone_node_page_state(node, item) global_page_state(item)
+#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
 #define node_page_state(node, item) global_node_page_state(item)
+#define node_page_state_pages(node, item) global_node_page_state_pages(item)
+static inline void fold_vm_numa_events(void)
+{
+}
 #endif /* CONFIG_NUMA */
 
-#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
-#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
-#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
-#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))
-
 #ifdef CONFIG_SMP
 void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
 void __inc_zone_page_state(struct page *, enum zone_stat_item);
@@ -236,11 +297,7 @@ void quiet_vmstat(void);
 void cpu_vm_stats_fold(int cpu);
 void refresh_zone_stat_thresholds(void);
 
-struct ctl_table;
-int vmstat_refresh(struct ctl_table *, int write,
-		   void __user *buffer, size_t *lenp, loff_t *ppos);
-
-void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
+void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);
 
 int calculate_pressure_threshold(struct zone *zone);
 int calculate_normal_threshold(struct zone *zone);
@@ -261,6 +318,17 @@ static inline void __mod_zone_page_state(struct zone *zone,
 static inline void __mod_node_page_state(struct pglist_data *pgdat,
 			enum node_stat_item item, int delta)
 {
+	if (vmstat_item_in_bytes(item)) {
+		/*
+		 * Only cgroups use subpage accounting right now; at
+		 * the global level, these items still change in
+		 * multiples of whole pages. Store them as pages
+		 * internally to keep the per-cpu counters compact.
+		 */
+		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
+		delta >>= PAGE_SHIFT;
+	}
+
 	node_page_state_add(delta, pgdat, item);
 }
 
@@ -337,17 +405,167 @@ static inline void cpu_vm_stats_fold(int cpu) { }
 static inline void quiet_vmstat(void) { }
 
 static inline void drain_zonestat(struct zone *zone,
-			struct per_cpu_pageset *pset) { }
+			struct per_cpu_zonestat *pzstats) { }
 #endif /* CONFIG_SMP */
 
-static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
-					int migratetype)
+static inline void __zone_stat_mod_folio(struct folio *folio,
+			enum zone_stat_item item, long nr)
+{
+	__mod_zone_page_state(folio_zone(folio), item, nr);
+}
+
+static inline void __zone_stat_add_folio(struct folio *folio,
+			enum zone_stat_item item)
+{
+	__mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
+}
+
+static inline void __zone_stat_sub_folio(struct folio *folio,
+			enum zone_stat_item item)
+{
+	__mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
+}
+
+static inline void zone_stat_mod_folio(struct folio *folio,
+			enum zone_stat_item item, long nr)
+{
+	mod_zone_page_state(folio_zone(folio), item, nr);
+}
+
+static inline void zone_stat_add_folio(struct folio *folio,
+			enum zone_stat_item item)
+{
+	mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
+}
+
+static inline void zone_stat_sub_folio(struct folio *folio,
+			enum zone_stat_item item)
+{
+	mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
+}
+
+static inline void __node_stat_mod_folio(struct folio *folio,
+			enum node_stat_item item, long nr)
+{
+	__mod_node_page_state(folio_pgdat(folio), item, nr);
+}
+
+static inline void __node_stat_add_folio(struct folio *folio,
+			enum node_stat_item item)
 {
-	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
-	if (is_migrate_cma(migratetype))
-		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
+	__mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
+}
+
+static inline void __node_stat_sub_folio(struct folio *folio,
+			enum node_stat_item item)
+{
+	__mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
+}
+
+static inline void node_stat_mod_folio(struct folio *folio,
+			enum node_stat_item item, long nr)
+{
+	mod_node_page_state(folio_pgdat(folio), item, nr);
+}
+
+static inline void node_stat_add_folio(struct folio *folio,
+			enum node_stat_item item)
+{
+	mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
+}
+
+static inline void node_stat_sub_folio(struct folio *folio,
+			enum node_stat_item item)
+{
+	mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
 }
 
 extern const char * const vmstat_text[];
+
+static inline const char *zone_stat_name(enum zone_stat_item item)
+{
+	return vmstat_text[item];
+}
+
+#ifdef CONFIG_NUMA
+static inline const char *numa_stat_name(enum numa_stat_item item)
+{
+	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
+			   item];
+}
+#endif /* CONFIG_NUMA */
+
+static inline const char *node_stat_name(enum node_stat_item item)
+{
+	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
+			   NR_VM_NUMA_EVENT_ITEMS +
+			   item];
+}
+
+static inline const char *lru_list_name(enum lru_list lru)
+{
+	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
+}
+
+#if defined(CONFIG_VM_EVENT_COUNTERS)
+static inline const char *vm_event_name(enum vm_event_item item)
+{
+	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
+			   NR_VM_NUMA_EVENT_ITEMS +
+			   NR_VM_NODE_STAT_ITEMS +
+			   NR_VM_STAT_ITEMS +
+			   item];
+}
+#endif /* CONFIG_VM_EVENT_COUNTERS */
+
+#ifdef CONFIG_MEMCG
+
+void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+		      int val);
+
+void lruvec_stat_mod_folio(struct folio *folio,
+			   enum node_stat_item idx, int val);
+
+static inline void mod_lruvec_page_state(struct page *page,
+					 enum node_stat_item idx, int val)
+{
+	lruvec_stat_mod_folio(page_folio(page), idx, val);
+}
+
+#else
+
+static inline void mod_lruvec_state(struct lruvec *lruvec,
+				    enum node_stat_item idx, int val)
+{
+	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+}
+
+static inline void lruvec_stat_mod_folio(struct folio *folio,
+					 enum node_stat_item idx, int val)
+{
+	mod_node_page_state(folio_pgdat(folio), idx, val);
+}
+
+static inline void mod_lruvec_page_state(struct page *page,
+					 enum node_stat_item idx, int val)
+{
+	mod_node_page_state(page_pgdat(page), idx, val);
+}
+
+#endif /* CONFIG_MEMCG */
+
+static inline void lruvec_stat_add_folio(struct folio *folio,
+					 enum node_stat_item idx)
+{
+	lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
+}
+
+static inline void lruvec_stat_sub_folio(struct folio *folio,
+					 enum node_stat_item idx)
+{
+	lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
+}
+
+void memmap_boot_pages_add(long delta);
+void memmap_pages_add(long delta);
 #endif /* _LINUX_VMSTAT_H */
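
A few illustrative notes follow; the counter plumbing in this diff is compact but subtle. These are user-space sketches, not kernel code, and every identifier ending in _model is invented for the demo.

First, zone_numa_event_add() above charges each NUMA event to both the per-zone and the global atomic, so a global reader never has to walk all zones. A minimal model of that double bookkeeping (build with cc -std=c11):

/* User-space model of the zone_numa_event_add() pattern. */
#include <stdatomic.h>
#include <stdio.h>

#define NR_ZONES_MODEL	2
#define NR_EVENTS_MODEL	3	/* stands in for NR_VM_NUMA_EVENT_ITEMS */

static atomic_long zone_event_model[NR_ZONES_MODEL][NR_EVENTS_MODEL];
static atomic_long global_event_model[NR_EVENTS_MODEL];

static void zone_numa_event_add_model(long x, int zone, int item)
{
	atomic_fetch_add(&zone_event_model[zone][item], x);
	atomic_fetch_add(&global_event_model[item], x);	/* keep global in sync */
}

int main(void)
{
	zone_numa_event_add_model(5, 0, 1);
	zone_numa_event_add_model(7, 1, 1);
	/* The global view equals the sum of the per-zone adds: prints 12. */
	printf("%ld\n", atomic_load(&global_event_model[1]));
	return 0;
}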
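Second, zone_page_state_snapshot() reads the folded atomic and then adds every CPU's not-yet-folded diff, clamping at zero because concurrent updates can make the sum transiently negative. A model of that snapshot read, with plain arrays standing in for the per-CPU data:

/* User-space model of the zone_page_state_snapshot() read. */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS_MODEL	4

static atomic_long vm_stat_model;		/* folded, authoritative value */
static long vm_stat_diff_model[NR_CPUS_MODEL];	/* per-CPU pending deltas */

static unsigned long page_state_snapshot_model(void)
{
	long x = atomic_load(&vm_stat_model);

	for (int cpu = 0; cpu < NR_CPUS_MODEL; cpu++)
		x += vm_stat_diff_model[cpu];	/* fold in unflushed diffs */

	if (x < 0)	/* racing updates can undercount; never report < 0 */
		x = 0;
	return x;
}

int main(void)
{
	atomic_store(&vm_stat_model, 10);
	vm_stat_diff_model[0] = 3;
	vm_stat_diff_model[1] = -2;
	printf("%lu\n", page_state_snapshot_model());	/* prints 11 */
	return 0;
}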
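Third, the new branch in __mod_node_page_state() insists that byte-counted items arrive in whole-page multiples at the node level, and stores them as pages so the per-cpu counters stay small. A sketch of that conversion, with assert() standing in for VM_WARN_ON_ONCE() and a 4 KiB page assumed:

/* Sketch of the bytes-to-pages conversion in __mod_node_page_state(). */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT_MODEL	12
#define PAGE_SIZE_MODEL		(1L << PAGE_SHIFT_MODEL)

static long node_stat_model;	/* stored in pages, even for byte items */

static void mod_node_state_bytes_model(long delta)
{
	/* Node-level updates of byte items must be whole pages. */
	assert((delta & (PAGE_SIZE_MODEL - 1)) == 0);
	node_stat_model += delta >> PAGE_SHIFT_MODEL;
}

int main(void)
{
	mod_node_state_bytes_model(3 * PAGE_SIZE_MODEL);
	printf("%ld pages\n", node_stat_model);	/* prints "3 pages" */
	return 0;
}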
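Fourth, the *_stat_name() helpers all index one flat vmstat_text[] array in which the zone, NUMA, node, and event name groups sit back to back; each helper skips the groups before its own, and lru_list_name() additionally skips the "nr_" prefix. A model with made-up counts and names (the real ones come from the enums in this header):

/* Model of the flat vmstat_text[] indexing scheme. */
#include <stdio.h>

enum { NR_ZONE_MODEL = 2, NR_NUMA_MODEL = 1, NR_NODE_MODEL = 2 };

static const char * const vmstat_text_model[] = {
	/* zone group */ "nr_free_pages", "nr_zone_inactive_anon",
	/* numa group */ "numa_hit",
	/* node group */ "nr_inactive_anon", "nr_active_anon",
};

static const char *node_stat_name_model(int item)
{
	/* skip the zone and numa groups that precede the node group */
	return vmstat_text_model[NR_ZONE_MODEL + NR_NUMA_MODEL + item];
}

static const char *lru_list_name_model(int item)
{
	return node_stat_name_model(item) + 3;	/* skip "nr_" */
}

int main(void)
{
	printf("%s\n", node_stat_name_model(1));	/* nr_active_anon */
	printf("%s\n", lru_list_name_model(1));		/* active_anon */
	return 0;
}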
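Finally, the CONFIG_MEMCG block at the end gives lruvec updates one call signature whether or not memory cgroups are built in; without them, the helpers collapse to plain node accounting, and the add/sub folio wrappers just pass the folio's page count, negated for the sub variant. A sketch of that fallback pattern (MEMCG_MODEL is invented, and the cgroup branch is deliberately simplified rather than a faithful copy of the memcg charging path):

/* Sketch of the CONFIG_MEMCG compile-time fallback. */
#include <stdio.h>

enum { NR_ITEMS_MODEL = 2 };

static long node_stat_model[NR_ITEMS_MODEL];

#ifdef MEMCG_MODEL
static long memcg_stat_model[NR_ITEMS_MODEL];	/* the cgroup's own counter */

static void lruvec_stat_mod_model(int idx, long val)
{
	node_stat_model[idx] += val;	/* node level is always updated */
	memcg_stat_model[idx] += val;	/* plus the owning cgroup's copy */
}
#else
/* Without MEMCG_MODEL the helper collapses to node-only accounting,
 * mirroring the !CONFIG_MEMCG stubs in the diff above. */
static void lruvec_stat_mod_model(int idx, long val)
{
	node_stat_model[idx] += val;
}
#endif

int main(void)
{
	lruvec_stat_mod_model(0, 4);	/* "add" a 4-page folio */
	lruvec_stat_mod_model(0, -1);	/* "sub" a 1-page folio */
	printf("%ld\n", node_stat_model[0]);	/* prints 3 */
	return 0;
}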
