author    Will Deacon <will@kernel.org>    2021-10-29 12:25:04 +0100
committer Will Deacon <will@kernel.org>    2021-10-29 12:25:04 +0100
commit    dc6bab18fb3c9dfde892cef2b1fe73565ff1f91a (patch)
tree      2d5305b31c94d0077ce9979dccd886b136fe4c6f /arch/arm64/mm
parent    2bc655ce29422b94fcb4c9710d12664195897e42 (diff)
parent    8fac67ca236b961b573355e203dbaf62a706a2e5 (diff)
Merge branch 'for-next/mm' into for-next/core
* for-next/mm:
  arm64: mm: update max_pfn after memory hotplug
  arm64/mm: Add pud_sect_supported()
  arm64: mm: Drop pointless call to set_max_mapnr()
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/hugetlbpage.c  26
-rw-r--r--  arch/arm64/mm/init.c          2
-rw-r--r--  arch/arm64/mm/mmu.c           5
3 files changed, 20 insertions(+), 13 deletions(-)
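
The hugetlbpage.c hunks below all pivot on the pud_sect_supported() helper added by the "arm64/mm: Add pud_sect_supported()" patch (in arch/arm64/include/asm/pgtable.h, outside this directory, so it does not appear in this diff). A minimal sketch of its shape, assuming it reduces to a base-page-size check, since PUD-level block mappings only exist with 4K base pages:

static inline bool pud_sect_supported(void)
{
	/* A PUD covers a 1GiB block only when the kernel uses 4K base pages. */
	return PAGE_SIZE == SZ_4K;
}

This turns build-time CONFIG_ARM64_4K_PAGES checks into runtime calls; the remaining #ifndef __PAGETABLE_PMD_FOLDED guards are still needed so the case PUD_SIZE labels compile away when the PMD level is folded and PUD_SIZE equals PMD_SIZE.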
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 23505fc35324..029cf5e42c4c 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -40,11 +40,10 @@ void __init arm64_hugetlb_cma_reserve(void)
{
int order;
-#ifdef CONFIG_ARM64_4K_PAGES
- order = PUD_SHIFT - PAGE_SHIFT;
-#else
- order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
-#endif
+ if (pud_sect_supported())
+ order = PUD_SHIFT - PAGE_SHIFT;
+ else
+ order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
/*
* HugeTLB CMA reservation is required for gigantic
* huge pages which could not be allocated via the
@@ -62,8 +61,9 @@ bool arch_hugetlb_migration_supported(struct hstate *h)
size_t pagesize = huge_page_size(h);
switch (pagesize) {
-#ifdef CONFIG_ARM64_4K_PAGES
+#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
+ return pud_sect_supported();
#endif
case PMD_SIZE:
case CONT_PMD_SIZE:
@@ -126,8 +126,11 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
*pgsize = size;
switch (size) {
-#ifdef CONFIG_ARM64_4K_PAGES
+#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
+ if (pud_sect_supported())
+ contig_ptes = 1;
+ break;
#endif
case PMD_SIZE:
contig_ptes = 1;
@@ -489,9 +492,9 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
static int __init hugetlbpage_init(void)
{
-#ifdef CONFIG_ARM64_4K_PAGES
- hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
-#endif
+ if (pud_sect_supported())
+ hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+
hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);
@@ -503,8 +506,9 @@ arch_initcall(hugetlbpage_init);
bool __init arch_hugetlb_valid_size(unsigned long size)
{
switch (size) {
-#ifdef CONFIG_ARM64_4K_PAGES
+#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
+ return pud_sect_supported();
#endif
case CONT_PMD_SIZE:
case PMD_SIZE:
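
For orientation, the hstate shifts registered in hugetlbpage_init() above correspond to the following huge page sizes on a 4K-base-page kernel (the only configuration where pud_sect_supported() is true):

/*
 * Huge page sizes with 4K base pages (PAGE_SHIFT = 12):
 *   PUD_SHIFT      (30) -> 1GiB  PUD-level block mapping
 *   CONT_PMD_SHIFT (25) -> 32MiB, 16 contiguous PMD entries
 *   PMD_SHIFT      (21) -> 2MiB  PMD-level block mapping
 *   CONT_PTE_SHIFT (16) -> 64KiB, 16 contiguous PTE entries
 */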
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 37a81754d9b6..142125749783 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -416,8 +416,6 @@ void __init mem_init(void)
else if (!xen_swiotlb_detect())
swiotlb_force = SWIOTLB_NO_FORCE;
- set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
-
/* this will put all unused low memory onto the freelists */
memblock_free_all();
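
Why dropping the set_max_mapnr() call is safe here: the helper only records max_mapnr, a limit consulted by the flat-memory (!SPARSEMEM) code paths, and arm64 uses SPARSEMEM, so the stored value was never read. A sketch of the helper being dropped, assuming its non-stub form in include/linux/mm.h:

static inline void set_max_mapnr(unsigned long limit)
{
	/* Record the highest page frame number; unread on SPARSEMEM arm64. */
	max_mapnr = limit;
}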
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index cfd9deb347c3..fd85b51b9d50 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1499,6 +1499,11 @@ int arch_add_memory(int nid, u64 start, u64 size,
if (ret)
__remove_pgd_mapping(swapper_pg_dir,
__phys_to_virt(start), size);
+ else {
+ max_pfn = PFN_UP(start + size);
+ max_low_pfn = max_pfn;
+ }
+
return ret;
}
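
A note on the arithmetic in the new else branch: PFN_UP() rounds a physical address up to the next page frame number, so PFN_UP(start + size) is one past the last hotplugged frame, which is exactly the value max_pfn is defined to hold; max_low_pfn simply mirrors it because 64-bit arm64 has no highmem. From include/linux/pfn.h:

/* Round a physical address up to a page frame number. */
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)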