From 55bd9ac468397c4f12a33b7ec714b5d0362c3aa2 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Fri, 5 Jun 2020 07:18:06 -0700
Subject: powerpc/mm: Fix typo in IS_ENABLED()

IS_ENABLED() matches names exactly, so the missing "CONFIG_" prefix
means this code would never be built.

Also fix a missing newline in the pr_warn().

Fixes: 970d54f99cea ("powerpc/book3s64/hash: Disable 16M linear mapping size if not aligned")
Signed-off-by: Joe Perches
Signed-off-by: Kees Cook
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/202006050717.A2F9809E@keescook
---
 arch/powerpc/mm/book3s64/hash_utils.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 468169e33c86..084287fb2908 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -663,11 +663,10 @@ static void __init htab_init_page_sizes(void)
 	 * Pick a size for the linear mapping. Currently, we only
 	 * support 16M, 1M and 4K which is the default
 	 */
-	if (IS_ENABLED(STRICT_KERNEL_RWX) &&
+	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) &&
 	    (unsigned long)_stext % 0x1000000) {
 		if (mmu_psize_defs[MMU_PAGE_16M].shift)
-			pr_warn("Kernel not 16M aligned, "
-				"disabling 16M linear map alignment");
+			pr_warn("Kernel not 16M aligned, disabling 16M linear map alignment\n");
 		aligned = false;
 	}
-- cgit
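Why did the misspelled option compile cleanly instead of failing the
build? IS_ENABLED() tests whether a macro with exactly the given name is
defined to 1, and kbuild only ever defines the "CONFIG_"-prefixed form,
so IS_ENABLED(STRICT_KERNEL_RWX) quietly evaluates to 0 and the guarded
block is compiled out. Below is a standalone, simplified sketch of the
trick from include/linux/kconfig.h (the kernel's real macro additionally
handles the tristate =m case):

#include <stdio.h>

/* What the build system emits into autoconf.h when the option is =y. */
#define CONFIG_STRICT_KERNEL_RWX 1

/*
 * If the argument expands to 1, the token paste forms
 * __ARG_PLACEHOLDER_1, which smuggles in an extra argument so that
 * __take_second_arg() picks up the 1; any other token leaves it at 0.
 */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define IS_ENABLED(option) ___is_defined(option)

int main(void)
{
	printf("%d\n", IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)); /* 1 */
	/* The typo: no macro named STRICT_KERNEL_RWX is ever defined,
	 * so this is silently 0 and the branch is dead code. */
	printf("%d\n", IS_ENABLED(STRICT_KERNEL_RWX));
	return 0;
}

No diagnostic is emitted because an undefined identifier is perfectly
legal input to these macros, which is why such typos survive until
someone notices the code never runs.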
From 86590e524ee834b629afc55d8e5786091fbf84cc Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Mon, 22 Jun 2020 12:10:19 +0530
Subject: powerpc/mm/book3s64: Skip 16G page reservation with radix

With hash translation, the hypervisor can hint the LPAR about a 16GB
contiguous range via ibm,expected#pages. The kernel marks the range
specified in the device tree as reserved. Avoid doing this when using
radix translation.

Radix translation only supports 1G gigantic hugepages, and the kernel
can allocate those via early memblock reservation. This works because
with radix translation pages are not required to be contiguous on the
host.

Signed-off-by: Aneesh Kumar K.V
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20200622064019.16682-1-aneesh.kumar@linux.ibm.com
---
 arch/powerpc/mm/book3s64/hash_utils.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 084287fb2908..eec6f4e5e481 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -596,7 +596,7 @@ static void __init htab_scan_page_sizes(void)
 	}

 #ifdef CONFIG_HUGETLB_PAGE
-	if (!hugetlb_disabled) {
+	if (!hugetlb_disabled && !early_radix_enabled()) {
 		/* Reserve 16G huge page memory sections for huge pages */
 		of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
 	}
-- cgit

From e0d8e991be641ba0034c67785bf86f6c097869d6 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Thu, 9 Jul 2020 08:59:42 +0530
Subject: powerpc/book3s64/kuap: Move UAMOR setup to key init function

UAMOR values are not application-specific. The kernel initializes its
value based on the reserved keys. Remove the thread-specific UAMOR
value and don't switch the UAMOR on context switch.

Move UAMOR initialization to the key initialization code and remove
thread_struct.uamor, because it is no longer used.

Before commit 4a4a5e5d2aad ("powerpc/pkeys: key allocation/deallocation
must not change pkey registers"), we used to update the UAMOR based on
key allocation and free.

Signed-off-by: Aneesh Kumar K.V
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20200709032946.881753-20-aneesh.kumar@linux.ibm.com
---
 arch/powerpc/mm/book3s64/hash_utils.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index eec6f4e5e481..9dfb0ceed5e3 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1110,6 +1110,10 @@ void hash__early_init_mmu_secondary(void)
 	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
 	    cpu_has_feature(CPU_FTR_HVMODE))
 		tlbiel_all();
+
+#ifdef CONFIG_PPC_MEM_KEYS
+	mtspr(SPRN_UAMOR, default_uamor);
+#endif
 }
 #endif /* CONFIG_SMP */
-- cgit
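For context, UAMOR is the Authority Mask Override Register: each 2-bit
field in it controls whether the matching AMR key field can be updated
from problem state, so clearing a key's UAMOR bits pins that key's
permissions. The sketch below shows how a boot-time default could be
derived from a set of reserved keys; pkeyshift() follows the kernel's
2-bits-per-key, MSB-first layout, but compute_default_uamor() and
reserved_mask are illustrative names, not the kernel's API:

#include <stdint.h>
#include <stdio.h>

#define AMR_BITS_PER_PKEY 2

/* Bit position of key k's 2-bit field: key 0 occupies bits 63:62. */
static inline unsigned int pkeyshift(unsigned int pkey)
{
	return 64 - (pkey + 1) * AMR_BITS_PER_PKEY;
}

/*
 * reserved_mask: bit k set means pkey k is reserved by firmware or the
 * kernel and must stay immutable from userspace, so its UAMOR field is
 * left at 0. Every other key becomes user-modifiable.
 */
static uint64_t compute_default_uamor(uint32_t reserved_mask,
				      unsigned int nr_pkeys)
{
	uint64_t uamor = 0;

	for (unsigned int k = 0; k < nr_pkeys; k++)
		if (!(reserved_mask & (1u << k)))
			uamor |= 3ULL << pkeyshift(k);
	return uamor;
}

int main(void)
{
	/* Say keys 0 and 1 are reserved out of 16 available keys. */
	printf("default_uamor = %016llx\n",
	       (unsigned long long)compute_default_uamor(0x3, 16));
	return 0;
}

With the value computed once at key init, hash__early_init_mmu_secondary()
only has to program it into SPRN_UAMOR on each secondary CPU, as the hunk
above shows; nothing needs to be saved or restored at context switch.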
From 5c9fa16e8abd342ce04dc830c1ebb2a03abf6c05 Mon Sep 17 00:00:00 2001
From: Nicholas Piggin
Date: Fri, 3 Jul 2020 11:19:57 +1000
Subject: powerpc/64s: Remove PROT_SAO support

ISA v3.1 does not support the SAO storage control attribute required to
implement PROT_SAO. PROT_SAO was used by specialised system software
(Lx86) that has been discontinued for about 7 years and is not thought
to be used elsewhere, so removal should not cause problems.

We remove it rather than keep support for older processors, because
live migrating guest partitions to newer processors may not be possible
if SAO is in use (or, worse, might be allowed with silent races).

- PROT_SAO stays in the uapi header, so code using it will still build.
- arch_validate_prot() is removed; the generic version rejects
  PROT_SAO, so applications will get a failure at mmap() time.

Signed-off-by: Nicholas Piggin
[mpe: Drop KVM change for the time being]
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20200703011958.1166620-3-npiggin@gmail.com
---
 arch/powerpc/mm/book3s64/hash_utils.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 9dfb0ceed5e3..6f9f346a5f65 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -232,8 +232,6 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
 		rflags |= HPTE_R_I;
 	else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
 		rflags |= (HPTE_R_I | HPTE_R_G);
-	else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
-		rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
 	else
 		/*
 		 * Add memory coherence if cache inhibited is not set
-- cgit

From 69507b984ddce803df81215cc7813825189adafa Mon Sep 17 00:00:00 2001
From: Santosh Sivaraj
Date: Tue, 21 Jul 2020 14:49:15 +0530
Subject: powerpc/mm/hash64: Remove comment that is no longer valid

hash_low_64.S was removed in commit a43c0eb8364c ("powerpc/mm: Convert
4k insert from asm to C"), and flush_hash_page() is no longer called
from any assembly routine.

Signed-off-by: Santosh Sivaraj
[mpe: Tweak comment wording]
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20200721091915.205006-1-santosh@fossix.org
---
 arch/powerpc/mm/book3s64/hash_utils.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 6f9f346a5f65..9fdabea04990 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1707,10 +1707,6 @@ unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
 	return gslot;
 }

-/*
- * WARNING: This is called from hash_low_64.S, if you change this prototype,
- * do not forget to update the assembly call site !
- */
 void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
 		     unsigned long flags)
 {
-- cgit

From 55548a86ebde2b3691b6a84baef1b02933408994 Mon Sep 17 00:00:00 2001
From: Bharata B Rao
Date: Mon, 27 Jul 2020 15:27:04 +0530
Subject: powerpc/mm: Limit resize_hpt_for_hotplug() call to hash guests only

During memory hotplug and unplug, resize_hpt_for_hotplug() gets called
for both hash and radix guests, but it should be called only for hash
guests. Though the call does nothing in the radix guest case, it is
cleaner to push it into the hash-specific memory hotplug routines.

Reported-by: Nathan Lynch
Signed-off-by: Bharata B Rao
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20200727095704.1432916-1-bharata@linux.ibm.com
---
 arch/powerpc/mm/book3s64/hash_utils.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 9fdabea04990..30a4a91d9987 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -785,7 +785,7 @@ static unsigned long __init htab_get_table_size(void)
 }

 #ifdef CONFIG_MEMORY_HOTPLUG
-int resize_hpt_for_hotplug(unsigned long new_mem_size)
+static int resize_hpt_for_hotplug(unsigned long new_mem_size)
 {
 	unsigned target_hpt_shift;

@@ -819,6 +819,8 @@ int hash__create_section_mapping(unsigned long start, unsigned long end,
 		return -1;
 	}

+	resize_hpt_for_hotplug(memblock_phys_mem_size());
+
 	rc = htab_bolt_mapping(start, end, __pa(start),
 			       pgprot_val(prot), mmu_linear_psize,
 			       mmu_kernel_ssize);
@@ -836,6 +838,10 @@ int hash__remove_section_mapping(unsigned long start, unsigned long end)
 	int rc = htab_remove_mapping(start, end, mmu_linear_psize,
 				     mmu_kernel_ssize);
 	WARN_ON(rc < 0);
+
+	if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
+		pr_warn("Hash collision while resizing HPT\n");
+
 	return rc;
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
-- cgit
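The body of resize_hpt_for_hotplug() sits outside these hunks. Its
policy is deliberately asymmetric: grow the HPT as soon as the target
size exceeds the current one, but only shrink once the target falls at
least two steps below it, so memory sizes fluctuating around a boundary
don't ping-pong the table. The standalone sketch below illustrates that
shape; current_hpt_shift and hpt_shift_for_mem_size() are illustrative
stand-ins for the kernel's ppc64_pft_size and htab_shift_for_mem_size(),
and the sizing floor is arbitrary:

#include <stdbool.h>
#include <stdio.h>

static unsigned int current_hpt_shift = 26; /* stand-in for ppc64_pft_size */

/*
 * Illustrative stand-in for htab_shift_for_mem_size(): size the HPT at
 * roughly 1/128th of memory (one 128-byte PTE group per 16K of RAM),
 * with an arbitrary floor for this sketch.
 */
static unsigned int hpt_shift_for_mem_size(unsigned long long mem_size)
{
	unsigned int shift = 18;

	while ((1ULL << (shift + 7)) < mem_size)
		shift++;
	return shift;
}

/* Grow eagerly, shrink lazily: the hysteresis avoids resize ping-pong. */
static bool hpt_resize_needed(unsigned long long new_mem_size)
{
	unsigned int target = hpt_shift_for_mem_size(new_mem_size);

	return target > current_hpt_shift || target < current_hpt_shift - 1;
}

int main(void)
{
	printf("%d\n", hpt_resize_needed(16ULL << 30)); /* grow:   1 */
	printf("%d\n", hpt_resize_needed(8ULL << 30));  /* stay:   0 */
	printf("%d\n", hpt_resize_needed(1ULL << 30));  /* shrink: 1 */
	return 0;
}

After this commit the only callers are the two hash-specific hunks
above, so radix guests never reach the resize path at all, and a failed
shrink (-ENOSPC) during section removal is warned about rather than
treated as an error.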