author     Zhang Jianhua <chris.zjh@huawei.com>    2023-08-04 15:56:15 +0800
committer  Will Deacon <will@kernel.org>           2023-08-04 17:19:44 +0100
commit     4e0bacd65e72f7f9b1b60e48e85e6d4187474644 (patch)
tree       4d942f6e70525e99058be084454dfe391e41ec6a /arch/arm64/mm/init.c
parent     42501f6d4d5d4e5b69fa083193c8247918e8c393 (diff)
arm64: fix build warning for ARM64_MEMSTART_SHIFT
When building with W=1, the following warning occurs:

arch/arm64/include/asm/kernel-pgtable.h:129:41: error: "PUD_SHIFT" is not defined, evaluates to 0 [-Werror=undef]
  129 | #define ARM64_MEMSTART_SHIFT	PUD_SHIFT
      |                                 ^~~~~~~~~
arch/arm64/include/asm/kernel-pgtable.h:142:5: note: in expansion of macro ‘ARM64_MEMSTART_SHIFT’
  142 | #if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
      |     ^~~~~~~~~~~~~~~~~~~~

The generic PUD_SHIFT is defined in include/asm-generic/pgtable-nopud.h; however, the #ifndef __ASSEMBLY__ guard in that header makes it unavailable to assembly files. Whenever a .S file includes <asm/kernel-pgtable.h>, the build warning occurs.

Move the macros ARM64_MEMSTART_SHIFT and ARM64_MEMSTART_ALIGN to arch/arm64/mm/init.c, the only place they are used, to avoid this issue.

Signed-off-by: Zhang Jianhua <chris.zjh@huawei.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20230804075615.3334756-1-chris.zjh@huawei.com
Signed-off-by: Will Deacon <will@kernel.org>
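As context for the fix (an editorial sketch, not part of the patch): the pgtable-nopud.h excerpt below is paraphrased from the asm-generic header, and the point is the placement of the guard rather than the exact definition.

/* include/asm-generic/pgtable-nopud.h, roughly: */
#ifndef __ASSEMBLY__
...
#define PUD_SHIFT	P4D_SHIFT	/* only visible to C code */
...
#endif /* !__ASSEMBLY__ */

/*
 * A .S file (where __ASSEMBLY__ is defined) that includes
 * <asm/kernel-pgtable.h> therefore never sees PUD_SHIFT. The
 * preprocessor evaluates the undefined identifier as 0 inside #if,
 * which -Wundef (enabled by W=1 and promoted by -Werror) reports:
 */
#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS	/* PUD_SHIFT -> 0 here */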
Diffstat (limited to 'arch/arm64/mm/init.c')
-rw-r--r--	arch/arm64/mm/init.c	27
1 file changed, 27 insertions(+), 0 deletions(-)
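For orientation (an editorial note, not from the commit), a hedged worked example of what the moved definitions evaluate to; the shift and SECTION_SIZE_BITS values are assumed from typical mainline arm64 configurations and may differ on other kernels:

/*
 * Assumed values, illustrative only:
 *   4K pages:  ARM64_MEMSTART_SHIFT = PUD_SHIFT      = 30
 *   16K pages: ARM64_MEMSTART_SHIFT = CONT_PMD_SHIFT = 30 (32 * PMD_SIZE)
 *   64K pages: ARM64_MEMSTART_SHIFT = PMD_SHIFT      = 29
 * With SECTION_SIZE_BITS assumed as 27 (4K/16K) or 29 (64K), the
 * shift never falls below the section size, so:
 *   4K/16K pages: ARM64_MEMSTART_ALIGN = 1UL << 30 = 1 GiB
 *   64K pages:    ARM64_MEMSTART_ALIGN = 1UL << 29 = 512 MiB
 */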
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index d31c3a9290c5..4fcb88a445ef 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -73,6 +73,33 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
+/*
+ * To make optimal use of block mappings when laying out the linear
+ * mapping, round down the base of physical memory to a size that can
+ * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
+ * (64k granule), or a multiple that can be mapped using contiguous bits
+ * in the page tables: 32 * PMD_SIZE (16k granule)
+ */
+#if defined(CONFIG_ARM64_4K_PAGES)
+#define ARM64_MEMSTART_SHIFT PUD_SHIFT
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define ARM64_MEMSTART_SHIFT CONT_PMD_SHIFT
+#else
+#define ARM64_MEMSTART_SHIFT PMD_SHIFT
+#endif
+
+/*
+ * sparsemem vmemmap imposes an additional requirement on the alignment of
+ * memstart_addr, due to the fact that the base of the vmemmap region
+ * has a direct correspondence, and needs to appear sufficiently aligned
+ * in the virtual address space.
+ */
+#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
+#define ARM64_MEMSTART_ALIGN (1UL << SECTION_SIZE_BITS)
+#else
+#define ARM64_MEMSTART_ALIGN (1UL << ARM64_MEMSTART_SHIFT)
+#endif
+
static int __init reserve_crashkernel_low(unsigned long long low_size)
{
unsigned long long low_base;