Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/linkage.h | 4 ++--
-rw-r--r--  arch/arm64/include/asm/memory.h  | 9 ++++++---
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 2 +-
3 files changed, 9 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h
index 636c1bced7d4..1b266292f0be 100644
--- a/arch/arm64/include/asm/linkage.h
+++ b/arch/arm64/include/asm/linkage.h
@@ -1,7 +1,7 @@
 #ifndef __ASM_LINKAGE_H
 #define __ASM_LINKAGE_H
 
-#define __ALIGN		.align 4
-#define __ALIGN_STR	".align 4"
+#define __ALIGN		.align 2
+#define __ALIGN_STR	".align 2"
 
 #endif
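The linkage.h change is a padding fix: on arm64 the assembler's ".align n" directive pads to 2^n bytes, so ".align 4" put every function entry on a 16-byte boundary, while ".align 2" keeps only the required 4-byte (one instruction) alignment. A hedged sketch of how these overrides are consumed, loosely paraphrased from include/linux/linkage.h (the real macros carry extra annotations such as ASM_NL):

/*
 * Sketch, not the verbatim kernel macros: the generic header only
 * supplies x86-flavoured defaults when an arch header has not defined
 * __ALIGN/__ALIGN_STR first, which is why arm64 overrides them above.
 */
#ifndef __ALIGN
#define __ALIGN		.align 4,0x90
#define __ALIGN_STR	".align 4,0x90"
#endif

#define ALIGN		__ALIGN

/* Assembly entry points are emitted as: visibility, alignment, label. */
#define ENTRY(name)	\
	.globl name;	\
	ALIGN;		\
	name: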
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 3585a5e26151..f7c4d2146aed 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -95,16 +95,19 @@
 #define KERNEL_END	_end
 
 /*
- * The size of the KASAN shadow region. This should be 1/8th of the
- * size of the entire kernel virtual address space.
+ * KASAN requires 1/8th of the kernel virtual address space for the shadow
+ * region. KASAN can bloat the stack significantly, so double the (minimum)
+ * stack size when KASAN is in use.
  */
 #ifdef CONFIG_KASAN
 #define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - 3))
+#define KASAN_THREAD_SHIFT	1
 #else
 #define KASAN_SHADOW_SIZE	(0)
+#define KASAN_THREAD_SHIFT	0
 #endif
 
-#define MIN_THREAD_SHIFT	14
+#define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)
 
 /*
  * VMAP'd stacks are allocated at page granularity, so we must ensure that such
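Two consequences of the memory.h hunk are worth spelling out. First, the shadow arithmetic: with VA_BITS = 48, KASAN_SHADOW_SIZE is 1 << 45 bytes, exactly 1/8th of the 2^48-byte kernel VA space. Second, MIN_THREAD_SHIFT feeds the stack-size derivation further down in memory.h, so enabling KASAN doubles kernel stacks. A sketch of that derivation, adapted from the definitions that follow this hunk (exact layout may differ by kernel version):

/*
 * Sketch adapted from asm/memory.h, not verbatim. VMAP'd stacks must be
 * a whole number of pages, so the shift is clamped up to PAGE_SHIFT.
 */
#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
#define THREAD_SHIFT	PAGE_SHIFT
#else
#define THREAD_SHIFT	MIN_THREAD_SHIFT
#endif

#define THREAD_SIZE	(UL(1) << THREAD_SHIFT)

/*
 * Worked example with 4K pages (PAGE_SHIFT = 12):
 *   !KASAN: MIN_THREAD_SHIFT = 14 + 0  ->  THREAD_SIZE = 16 KiB
 *    KASAN: MIN_THREAD_SHIFT = 14 + 1  ->  THREAD_SIZE = 32 KiB
 */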
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index bc4e92337d16..b46e54c2399b 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -401,7 +401,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
 /* Find an entry in the third-level page table. */
 #define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
-#define pte_offset_phys(dir,addr)	(pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
+#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
 #define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))
 
 #define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
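The pgtable.h hunk hardens the page-table walk against the compiler: a plain *(dir) dereference lets it re-read or even split the load of the pmd entry, so a concurrent update from another CPU could hand pmd_page_paddr() a torn or inconsistent value. READ_ONCE() forces a single, once-only access. A rough sketch of the idea with a hypothetical walker (walk_pte and my_read_once are illustrative names, not kernel API):

/*
 * Rough sketch only; the kernel's READ_ONCE() is more elaborate, but
 * for a scalar it boils down to a volatile access like this:
 */
#define my_read_once(x)	(*(const volatile typeof(x) *)&(x))

static pte_t *walk_pte(pmd_t *dir, unsigned long addr)
{
	pmd_t pmd = my_read_once(*dir);	/* snapshot the entry exactly once */

	/* Every later use sees the same snapshot, never a fresh reload. */
	return (pte_t *)__va(pmd_page_paddr(pmd) +
			     pte_index(addr) * sizeof(pte_t));
}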