Diffstat (limited to 'arch/arm64/include/asm/cache.h')
-rw-r--r-- | arch/arm64/include/asm/cache.h | 69
1 file changed, 33 insertions(+), 36 deletions(-)
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 806e9dc2a852..06a4670bdb0b 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -5,32 +5,9 @@
 #ifndef __ASM_CACHE_H
 #define __ASM_CACHE_H
 
-#include <asm/cputype.h>
-
-#define CTR_L1IP_SHIFT		14
-#define CTR_L1IP_MASK		3
-#define CTR_DMINLINE_SHIFT	16
-#define CTR_IMINLINE_SHIFT	0
-#define CTR_IMINLINE_MASK	0xf
-#define CTR_ERG_SHIFT		20
-#define CTR_CWG_SHIFT		24
-#define CTR_CWG_MASK		15
-#define CTR_IDC_SHIFT		28
-#define CTR_DIC_SHIFT		29
-
-#define CTR_CACHE_MINLINE_MASK	\
-	(0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT)
-
-#define CTR_L1IP(ctr)		(((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
-
-#define ICACHE_POLICY_VPIPT	0
-#define ICACHE_POLICY_VIPT	2
-#define ICACHE_POLICY_PIPT	3
-
 #define L1_CACHE_SHIFT		(6)
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
-
 #define CLIDR_LOUU_SHIFT	27
 #define CLIDR_LOC_SHIFT		24
 #define CLIDR_LOUIS_SHIFT	21
@@ -39,6 +16,15 @@
 #define CLIDR_LOC(clidr)	(((clidr) >> CLIDR_LOC_SHIFT) & 0x7)
 #define CLIDR_LOUIS(clidr)	(((clidr) >> CLIDR_LOUIS_SHIFT) & 0x7)
 
+/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
+#define CLIDR_CTYPE_SHIFT(level)	(3 * (level - 1))
+#define CLIDR_CTYPE_MASK(level)		(7 << CLIDR_CTYPE_SHIFT(level))
+#define CLIDR_CTYPE(clidr, level)	\
+	(((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))
+
+/* Ttypen, bits [2(n - 1) + 34 : 2(n - 1) + 33], for n = 1 to 7 */
+#define CLIDR_TTYPE_SHIFT(level)	(2 * ((level) - 1) + CLIDR_EL1_Ttypen_SHIFT)
+
 /*
  * Memory returned by kmalloc() may be used for DMA, so we must make
  * sure that all such allocations are cache aligned. Otherwise,
@@ -47,17 +33,31 @@
  * the CPU.
  */
 #define ARCH_DMA_MINALIGN	(128)
+#define ARCH_KMALLOC_MINALIGN	(8)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bitops.h>
+#include <linux/kasan-enabled.h>
+
+#include <asm/cputype.h>
+#include <asm/mte-def.h>
+#include <asm/sysreg.h>
 
 #ifdef CONFIG_KASAN_SW_TAGS
 #define ARCH_SLAB_MINALIGN	(1ULL << KASAN_SHADOW_SCALE_SHIFT)
+#elif defined(CONFIG_KASAN_HW_TAGS)
+static inline unsigned int arch_slab_minalign(void)
+{
+	return kasan_hw_tags_enabled() ? MTE_GRANULE_SIZE :
+					 __alignof__(unsigned long long);
+}
+#define arch_slab_minalign() arch_slab_minalign()
 #endif
 
-#ifndef __ASSEMBLY__
-
-#include <linux/bitops.h>
+#define CTR_L1IP(ctr)		SYS_FIELD_GET(CTR_EL0, L1Ip, ctr)
 
 #define ICACHEF_ALIASING	0
-#define ICACHEF_VPIPT		1
 extern unsigned long __icache_flags;
 
 /*
@@ -69,17 +69,12 @@ static inline int icache_is_aliasing(void)
 	return test_bit(ICACHEF_ALIASING, &__icache_flags);
 }
 
-static inline int icache_is_vpipt(void)
-{
-	return test_bit(ICACHEF_VPIPT, &__icache_flags);
-}
-
 static inline u32 cache_type_cwg(void)
 {
-	return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
+	return SYS_FIELD_GET(CTR_EL0, CWG, read_cpuid_cachetype());
 }
 
-#define __read_mostly		__section(.data..read_mostly)
+#define __read_mostly		__section(".data..read_mostly")
 
 static inline int cache_line_size_of_cpu(void)
 {
@@ -90,6 +85,8 @@ static inline int cache_line_size_of_cpu(void)
 
 int cache_line_size(void);
 
+#define dma_get_cache_alignment	cache_line_size
+
 /*
  * Read the effective value of CTR_EL0.
  *
@@ -110,12 +107,12 @@ static inline u32 __attribute_const__ read_cpuid_effective_cachetype(void)
 {
 	u32 ctr = read_cpuid_cachetype();
 
-	if (!(ctr & BIT(CTR_IDC_SHIFT))) {
+	if (!(ctr & BIT(CTR_EL0_IDC_SHIFT))) {
 		u64 clidr = read_sysreg(clidr_el1);
 
 		if (CLIDR_LOC(clidr) == 0 ||
 		    (CLIDR_LOUIS(clidr) == 0 && CLIDR_LOUU(clidr) == 0))
-			ctr |= BIT(CTR_IDC_SHIFT);
+			ctr |= BIT(CTR_EL0_IDC_SHIFT);
 	}
 
 	return ctr;
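
The CLIDR_CTYPE() helpers added in the second hunk are plain shift-and-mask accessors over the Ctype<n> fields of CLIDR_EL1. The following is an illustrative, standalone userspace sketch (not part of the patch): the macro definitions are copied from the hunk above, while the sample register value is made up purely for demonstration.

	/* Hypothetical standalone demo of the CLIDR_CTYPE() accessors. */
	#include <stdint.h>
	#include <stdio.h>

	#define CLIDR_CTYPE_SHIFT(level)	(3 * (level - 1))
	#define CLIDR_CTYPE_MASK(level)		(7 << CLIDR_CTYPE_SHIFT(level))
	#define CLIDR_CTYPE(clidr, level)	\
		(((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))

	int main(void)
	{
		/*
		 * Made-up CLIDR_EL1 value: L1 reports separate I and D caches
		 * (Ctype1 = 0b011), L2 reports a unified cache (Ctype2 = 0b100).
		 */
		uint64_t clidr = (0x3ULL << CLIDR_CTYPE_SHIFT(1)) |
				 (0x4ULL << CLIDR_CTYPE_SHIFT(2));

		for (int level = 1; level <= 7; level++)
			printf("L%d Ctype = %llu\n", level,
			       (unsigned long long)CLIDR_CTYPE(clidr, level));

		return 0;
	}

For levels 3 to 7 the sample value decodes to 0 (no cache at that level), which is also how the in-kernel cache-info code interprets a zero Ctype field.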