author:    Shakeel Butt <shakeelb@google.com>  2022-08-26 23:06:42 +0000
committer: Andrew Morton <akpm@linux-foundation.org>  2022-09-26 19:46:29 -0700
commit:    e6ad640bc404eb298dd1880113131768ddf5c6a8
tree:      f72c01caf19e88d74482af862b9378f6f19893b9 /include/linux/mmzone.h
parent:    974f4367dd315acc15ad4a6453f8304aea60dfbd
mm: deduplicate cacheline padding code
There are three users (mmzone.h, memcontrol.h, page_counter.h) using
similar code for forcing cacheline padding between fields of different
structures. Dedup that code.

Link: https://lkml.kernel.org/r/20220826230642.566725-1-shakeelb@google.com
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Suggested-by: Feng Tang <feng.tang@intel.com>
Reviewed-by: Feng Tang <feng.tang@intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'include/linux/mmzone.h')
 include/linux/mmzone.h | 24 +++++-------------------
 1 file changed, 5 insertions(+), 19 deletions(-)
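Note: this page shows only the mmzone.h side of the change. The shared
helper that replaces ZONE_PADDING is introduced elsewhere in the same
patch (in include/linux/cache.h); mirroring the ZONE_PADDING definition
removed below, it presumably reads along these lines (a sketch, not the
hunk shown on this page):

	/* include/linux/cache.h (sketch, mirrors the removed ZONE_PADDING) */
	#ifdef CONFIG_SMP
	struct cacheline_padding {
		char x[0];
	} ____cacheline_internodealigned_in_smp;
	#define CACHELINE_PADDING(name)	struct cacheline_padding name
	#else
	#define CACHELINE_PADDING(name)
	#endif

Unlike the old ZONE_PADDING, the macro no longer bakes in a trailing
semicolon, which is why every CACHELINE_PADDING(_pad*_) use in the diff
below ends with one.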
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e335a492c2eb..c69c08156822 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -121,20 +121,6 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
struct pglist_data;
-/*
- * Add a wild amount of padding here to ensure data fall into separate
- * cachelines. There are very few zone structures in the machine, so space
- * consumption is not a concern here.
- */
-#if defined(CONFIG_SMP)
-struct zone_padding {
- char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define ZONE_PADDING(name) struct zone_padding name;
-#else
-#define ZONE_PADDING(name)
-#endif
-
#ifdef CONFIG_NUMA
enum numa_stat_item {
NUMA_HIT, /* allocated in intended node */
@@ -837,7 +823,7 @@ struct zone {
int initialized;
/* Write-intensive fields used from the page allocator */
- ZONE_PADDING(_pad1_)
+ CACHELINE_PADDING(_pad1_);
/* free areas of different sizes */
struct free_area free_area[MAX_ORDER];
@@ -849,7 +835,7 @@ struct zone {
spinlock_t lock;
/* Write-intensive fields used by compaction and vmstats. */
- ZONE_PADDING(_pad2_)
+ CACHELINE_PADDING(_pad2_);
/*
* When free pages are below this point, additional steps are taken
@@ -886,7 +872,7 @@ struct zone {
bool contiguous;
- ZONE_PADDING(_pad3_)
+ CACHELINE_PADDING(_pad3_);
/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
@@ -1196,7 +1182,7 @@ typedef struct pglist_data {
#endif /* CONFIG_NUMA */
/* Write-intensive fields used by page reclaim */
- ZONE_PADDING(_pad1_)
+ CACHELINE_PADDING(_pad1_);
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
@@ -1241,7 +1227,7 @@ typedef struct pglist_data {
struct lru_gen_mm_walk mm_walk;
#endif
- ZONE_PADDING(_pad2_)
+ CACHELINE_PADDING(_pad2_);
/* Per-node vmstats */
struct per_cpu_nodestat __percpu *per_cpu_nodestats;
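For readers unfamiliar with the trick these macros implement, here is a
minimal standalone sketch, assuming a 64-byte cacheline and GNU C
extensions (zero-length array, aligned attribute); struct demo and its
field names are illustrative, not kernel code:

	/* An aligned empty member pushes the next field onto a fresh
	 * cacheline, so write-intensive fields do not share a line with
	 * read-mostly ones and writers do not bounce the readers' line.
	 */
	#include <stdio.h>
	#include <stddef.h>

	#define CL 64	/* assumed cacheline size */

	struct cacheline_padding { char x[0]; } __attribute__((aligned(CL)));
	#define CACHELINE_PADDING(name) struct cacheline_padding name

	struct demo {
		long read_mostly;	/* e.g. zone watermarks */
		CACHELINE_PADDING(_pad1_);
		long write_intensive;	/* e.g. allocator counters */
	};

	int main(void)
	{
		/* _pad1_ occupies no space itself; it only aligns, so
		 * write_intensive lands at offset 64, one line past
		 * read_mostly. */
		printf("write_intensive at offset %zu\n",
		       offsetof(struct demo, write_intensive));
		return 0;
	}

Space cost is why the old comment called the padding "wild": each pad can
round the structure up by a whole cacheline, which is acceptable here
because there are very few zone and pglist_data structures per machine.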