path: root/include/linux/cache.h
blob: 0ecb17bb688373db0b46a1846df4a0d32bce773b
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CACHE_H
#define __LINUX_CACHE_H

#include <uapi/linux/kernel.h>
#include <asm/cache.h>

#ifndef L1_CACHE_ALIGN
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
#endif

#ifndef SMP_CACHE_BYTES
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif

/*
 * __read_mostly is used to keep rarely changing variables out of frequently
 * updated cachelines. Its use should be reserved for data that is used
 * frequently in hot paths. Performance traces can help decide when to use
 * this. You want __read_mostly data to be tightly packed, so that in the
 * best case multiple frequently read variables for a hot path will be next
 * to each other in order to reduce the number of cachelines needed to
 * execute a critical path. We should be mindful and selective in its use,
 * i.e. if you're going to use it, please supply a *good* justification in
 * your commit log.
 */
#ifndef __read_mostly
#define __read_mostly
#endif
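
/*
 * Usage sketch (hypothetical names, not part of this header): a tunable
 * that is written rarely but read in hot paths is a typical candidate:
 *
 *	static int sysctl_example_enabled __read_mostly = 1;
 */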

/*
 * __ro_after_init is used to mark things that are read-only after init (i.e.
 * after mark_rodata_ro() has been called). These are effectively read-only,
 * but may get written to during init, so can't live in .rodata (via "const").
 */
#ifndef __ro_after_init
#define __ro_after_init __section(".data..ro_after_init")
#endif
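
/*
 * Usage sketch (hypothetical name): data that is computed once during boot
 * and never modified afterwards:
 *
 *	static unsigned long example_boot_mask __ro_after_init;
 */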

#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif

#ifndef ____cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
#else
#define ____cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif

#ifndef __cacheline_aligned
#define __cacheline_aligned					\
  __attribute__((__aligned__(SMP_CACHE_BYTES),			\
		 __section__(".data..cacheline_aligned")))
#endif /* __cacheline_aligned */

#ifndef __cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp __cacheline_aligned
#else
#define __cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif
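
/*
 * Usage sketch (hypothetical struct): ____cacheline_aligned_in_smp starts
 * the annotated member on its own SMP cacheline, e.g. to keep a contended
 * lock away from read-mostly fields:
 *
 *	struct example_queue {
 *		u64		enqueued;
 *		u64		dropped;
 *		spinlock_t	lock ____cacheline_aligned_in_smp;
 *	};
 */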

/*
 * The maximum alignment needed for some critical structures.
 * These could be inter-node cacheline sizes, L3 cacheline
 * size, etc.  Define this in asm/cache.h for your arch.
 */
#ifndef INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
#endif

#if !defined(____cacheline_internodealigned_in_smp)
#if defined(CONFIG_SMP)
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
#else
#define ____cacheline_internodealigned_in_smp
#endif
#endif

#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
#define cache_line_size()	L1_CACHE_BYTES
#endif

#ifndef __cacheline_group_begin
#define __cacheline_group_begin(GROUP) \
	__u8 __cacheline_group_begin__##GROUP[0]
#endif

#ifndef __cacheline_group_end
#define __cacheline_group_end(GROUP) \
	__u8 __cacheline_group_end__##GROUP[0]
#endif

#ifndef CACHELINE_ASSERT_GROUP_MEMBER
#define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \
	BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \
		       offsetofend(TYPE, __cacheline_group_begin__##GROUP) && \
		       offsetofend(TYPE, MEMBER) <= \
		       offsetof(TYPE, __cacheline_group_end__##GROUP)))
#endif

#ifndef CACHELINE_ASSERT_GROUP_SIZE
#define CACHELINE_ASSERT_GROUP_SIZE(TYPE, GROUP, SIZE) \
	BUILD_BUG_ON(offsetof(TYPE, __cacheline_group_end__##GROUP) - \
		     offsetofend(TYPE, __cacheline_group_begin__##GROUP) > \
		     SIZE)
#endif
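
/*
 * Usage sketch (hypothetical struct and group names): the zero-size
 * __cacheline_group_begin/end markers delimit a named group of members,
 * and the CACHELINE_ASSERT_* helpers verify the layout at build time:
 *
 *	struct example_dev {
 *		__cacheline_group_begin(ex_read_tx);
 *		u32	mtu;
 *		u16	header_len;
 *		__cacheline_group_end(ex_read_tx);
 *	};
 *
 *	static void example_check_layout(void)
 *	{
 *		CACHELINE_ASSERT_GROUP_MEMBER(struct example_dev, ex_read_tx, mtu);
 *		CACHELINE_ASSERT_GROUP_SIZE(struct example_dev, ex_read_tx,
 *					    SMP_CACHE_BYTES);
 *	}
 */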

/*
 * Helper to add padding within a struct to ensure data falls into separate
 * cachelines.
 */
#if defined(CONFIG_SMP)
struct cacheline_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define CACHELINE_PADDING(name)		struct cacheline_padding name
#else
#define CACHELINE_PADDING(name)
#endif
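
/*
 * Usage sketch (hypothetical struct): place padding between fields that are
 * written from different CPUs so they end up in separate cachelines:
 *
 *	struct example_counters {
 *		atomic_long_t	produced;
 *		CACHELINE_PADDING(_pad1_);
 *		atomic_long_t	consumed;
 *	};
 */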

/*
 * ARCH_DMA_MINALIGN is the minimum alignment a buffer needs to be safe for
 * DMA; architectures that require it (e.g. those with non-coherent DMA)
 * define it in asm/cache.h and thereby also get ARCH_HAS_DMA_MINALIGN.
 * Otherwise fall back to the natural alignment of unsigned long long.
 */
#ifdef ARCH_DMA_MINALIGN
#define ARCH_HAS_DMA_MINALIGN
#else
#define ARCH_DMA_MINALIGN __alignof__(unsigned long long)
#endif

#endif /* __LINUX_CACHE_H */