author	Christophe Leroy <christophe.leroy@csgroup.eu>	2024-02-16 11:17:34 +0100
committer	Michael Ellerman <mpe@ellerman.id.au>	2024-03-03 22:18:45 +1100
commit	9cbacb834b4afcb55eb8ac5115fa82fc7ede5c83 (patch)
tree	ecc655227ddf3507515e9aea2664c8605564c524
parent	3c8016e681c5e0f5f3ad15edb4569727cd32eaff (diff)
powerpc: Don't ignore errors from set_memory_{n}p() in __kernel_map_pages()
set_memory_p() and set_memory_np() can fail.

As mentioned in linux/mm.h:

	/*
	 * To support DEBUG_PAGEALLOC architecture must ensure that
	 * __kernel_map_pages() never fails
	 */

So panic in case set_memory_p() or set_memory_np() fail in
__kernel_map_pages().

Link: https://github.com/KSPP/linux/issues/7
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20ef75884aa6a636e8298736f3d1056b0793d3d9.1708078640.git.christophe.leroy@csgroup.eu
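The reason a failure cannot simply be returned is visible on the caller side: the generic DEBUG_PAGEALLOC hook is declared void, so the page allocator's map/unmap helpers have no error status to act on. Below is a simplified sketch of that caller-side contract, modelled on the debug_pagealloc_map_pages()/debug_pagealloc_unmap_pages() helpers in include/linux/mm.h (illustrative only, not the verbatim upstream code):

/*
 * Simplified sketch of the generic DEBUG_PAGEALLOC contract, modelled on
 * include/linux/mm.h.  The hook returns void, so an architecture has no
 * way to report a failure back to the page allocator -- hence the quoted
 * requirement that __kernel_map_pages() never fails, and the panic() in
 * the powerpc implementation below.
 */
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
{
	if (debug_pagealloc_enabled_static())
		__kernel_map_pages(page, numpages, 1);	/* no return value to check */
}

static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
{
	if (debug_pagealloc_enabled_static())
		__kernel_map_pages(page, numpages, 0);	/* likewise */
}

Because the hook cannot fail gracefully, silently leaving a page mapped (or unmapped) would defeat the purpose of DEBUG_PAGEALLOC and could hide memory corruption, which is why the patch panics instead.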
-rw-r--r--	arch/powerpc/mm/book3s64/hash_utils.c	3
-rw-r--r--	arch/powerpc/mm/mmu_decl.h	2
-rw-r--r--	arch/powerpc/mm/pageattr.c	10
3 files changed, 10 insertions, 5 deletions
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 0626a25b0d72..01c3b4b65241 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -2172,7 +2172,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 			     mmu_kernel_ssize, 0);
 }
 
-void hash__kernel_map_pages(struct page *page, int numpages, int enable)
+int hash__kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long flags, vaddr, lmi;
 	int i;
@@ -2189,6 +2189,7 @@ void hash__kernel_map_pages(struct page *page, int numpages, int enable)
 			kernel_unmap_linear_page(vaddr, lmi);
 	}
 	local_irq_restore(flags);
+	return 0;
 }
 #endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 4eb62bb51d49..b4b662a61485 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -187,4 +187,4 @@ int create_section_mapping(unsigned long start, unsigned long end,
 			   int nid, pgprot_t prot);
 #endif
 
-void hash__kernel_map_pages(struct page *page, int numpages, int enable);
+int hash__kernel_map_pages(struct page *page, int numpages, int enable);
diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
index 8a9d24218b74..ac22bf28086f 100644
--- a/arch/powerpc/mm/pageattr.c
+++ b/arch/powerpc/mm/pageattr.c
@@ -107,17 +107,21 @@ int change_memory_attr(unsigned long addr, int numpages, long action)
 #ifdef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
+	int err;
 	unsigned long addr = (unsigned long)page_address(page);
 
 	if (PageHighMem(page))
 		return;
 
 	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
-		hash__kernel_map_pages(page, numpages, enable);
+		err = hash__kernel_map_pages(page, numpages, enable);
 	else if (enable)
-		set_memory_p(addr, numpages);
+		err = set_memory_p(addr, numpages);
 	else
-		set_memory_np(addr, numpages);
+		err = set_memory_np(addr, numpages);
+
+	if (err)
+		panic("%s: changing memory protections failed\n", __func__);
 }
 #endif
 #endif
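For reference, the errors being checked above originate in the powerpc set_memory_*() path: set_memory_p() and set_memory_np() are thin inline wrappers around change_memory_attr() (the function visible in the hunk header of the pageattr.c diff), which walks the kernel linear mapping and can return an error when a page table entry cannot be updated. A simplified sketch of that wrapper layer follows; it is modelled on arch/powerpc/include/asm/set_memory.h, and the SET_MEMORY_* values and exact prototypes are assumptions for illustration, not verbatim upstream code.

/*
 * Simplified sketch of the powerpc set_memory_*() wrappers, modelled on
 * arch/powerpc/include/asm/set_memory.h.  Action values and prototypes
 * are assumptions for illustration only.
 */
#define SET_MEMORY_NP	4	/* mark pages not-present */
#define SET_MEMORY_P	5	/* mark pages present */

int change_memory_attr(unsigned long addr, int numpages, long action);

static inline int set_memory_np(unsigned long addr, int numpages)
{
	/* can fail, e.g. if a linear-map PTE cannot be located or updated */
	return change_memory_attr(addr, numpages, SET_MEMORY_NP);
}

static inline int set_memory_p(unsigned long addr, int numpages)
{
	return change_memory_attr(addr, numpages, SET_MEMORY_P);
}

Because both wrappers return an int, switching hash__kernel_map_pages() from void to int in the hunks above gives __kernel_map_pages() a single error value to test, which is what makes the panic-on-failure check possible.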