From a5edf9815dd739fce660b4c8658f61b7d2517042 Mon Sep 17 00:00:00 2001
From: Nicholas Miehlbradt
Date: Mon, 26 Sep 2022 07:57:26 +0000
Subject: powerpc/64s: Enable KFENCE on book3s64

KFENCE support was added for ppc32 in commit 90cbac0e995d ("powerpc:
Enable KFENCE for PPC32").

Enable KFENCE on the ppc64 architecture with hash and radix MMUs. It
uses the same mechanism as debug pagealloc to protect/unprotect pages.
All KFENCE kunit tests pass on both MMUs.

KFENCE memory is initially allocated using memblock but is later
marked as SLAB allocated. This necessitates the change to __pud_free
to ensure that the KFENCE pages are freed appropriately.

Based on previous work by Christophe Leroy and Jordan Niethe.

Signed-off-by: Nicholas Miehlbradt
Reviewed-by: Russell Currey
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20220926075726.2846-4-nicholas@linux.ibm.com
---
 arch/powerpc/include/asm/book3s/64/pgalloc.h | 6 ++++--
 arch/powerpc/include/asm/book3s/64/pgtable.h | 2 +-
 2 files changed, 5 insertions(+), 3 deletions(-)

(limited to 'arch/powerpc/include/asm/book3s')

diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index e1af0b394ceb..dd2cff53a111 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -113,9 +113,11 @@ static inline void __pud_free(pud_t *pud)
 
 	/*
 	 * Early pud pages allocated via memblock allocator
-	 * can't be directly freed to slab
+	 * can't be directly freed to slab. KFENCE pages have
+	 * both reserved and slab flags set so need to be freed
+	 * with kmem_cache_free.
 	 */
-	if (PageReserved(page))
+	if (PageReserved(page) && !PageSlab(page))
 		free_reserved_page(page);
 	else
 		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index b5f8264cc980..c436d8422654 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1106,7 +1106,7 @@ static inline void vmemmap_remove_mapping(unsigned long start,
 }
 #endif
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (radix_enabled())
--
cgit
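
A note on the mechanism: KFENCE protects and unprotects individual pages
through the per-arch hook kfence_protect_page() (asm/kfence.h). On
book3s64 this can simply reuse the debug pagealloc hook that the
pgtable.h hunk above makes available under CONFIG_KFENCE. A minimal
sketch, assuming the hook is implemented in terms of
__kernel_map_pages() as the commit message describes (the exact code
lives outside the diff shown here):

static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	struct page *page = virt_to_page((void *)addr);

	/* Unmap the page to protect it; map it back to unprotect it. */
	__kernel_map_pages(page, 1, !protect);

	return true;
}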
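
The pgtable.h hunk itself only widens the preprocessor guard; the helper
it guards (partially visible in the context lines) dispatches on the
active MMU. Roughly, with radix__kernel_map_pages() and
hash__kernel_map_pages() as the per-MMU implementations (those names are
inferred from the usual book3s64 naming convention, not shown in this
diff):

static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* Pick the implementation for the MMU the kernel booted with. */
	if (radix_enabled())
		radix__kernel_map_pages(page, numpages, enable);
	else
		hash__kernel_map_pages(page, numpages, enable);
}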
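
Why the PageReserved(page) && !PageSlab(page) test works: the KFENCE
pool is carved out of memblock at boot, so its struct pages are marked
reserved when the memmap is set up, and KFENCE then additionally tags
them as slab so they look allocator-owned. A pud page served from the
pool therefore carries both flags and must go back through
kmem_cache_free() rather than free_reserved_page(). A compressed sketch
of the relevant KFENCE-core steps (paraphrased for illustration, not
part of this patch):

	/* Boot: reserve the pool from memblock => pages become PG_reserved. */
	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	/* Init: additionally mark every pool page as slab. */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++)
		__SetPageSlab(virt_to_page(__kfence_pool + i * PAGE_SIZE));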