author | Ritesh Harjani (IBM) <ritesh.list@gmail.com> | 2024-10-18 22:59:46 +0530
committer | Michael Ellerman <mpe@ellerman.id.au> | 2024-10-23 18:53:19 +1100
commit | ff8631cdc23ad42f662a8510c57aeb0555ac3d5f (patch)
tree | fdfa88c0d674b5a2a3b64c6a4a13d396619c626e /arch/powerpc/mm/book3s64
parent | cc5734481b3c24ddee1551f9732d743453bca010 (diff)
book3s64/hash: Add hash_debug_pagealloc_alloc_slots() function
This adds a hash_debug_pagealloc_alloc_slots() function instead of open
coding that allocation in htab_initialize(). This is needed because the
kfence functionality will later be separated so that it no longer depends
upon debug_pagealloc.

Now that everything required for debug_pagealloc is under a single #ifdef
config, bring the linear_map_hash_slots and linear_map_hash_count
variables under the same config too.
Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://patch.msgid.link/d1d5aabe1e4c693a983e59ccf3de08e3c28c5161.1729271995.git.ritesh.list@gmail.com
Diffstat (limited to 'arch/powerpc/mm/book3s64')
-rw-r--r-- | arch/powerpc/mm/book3s64/hash_utils.c | 29
1 file changed, 17 insertions, 12 deletions
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 6e3860224351..030c120d1399 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(mmu_slb_size);
 #ifdef CONFIG_PPC_64K_PAGES
 int mmu_ci_restrictions;
 #endif
-static u8 *linear_map_hash_slots;
-static unsigned long linear_map_hash_count;
 struct mmu_hash_ops mmu_hash_ops __ro_after_init;
 EXPORT_SYMBOL(mmu_hash_ops);
 
@@ -274,6 +272,8 @@ void hash__tlbiel_all(unsigned int action)
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
+static u8 *linear_map_hash_slots;
+static unsigned long linear_map_hash_count;
 static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
 
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
@@ -328,6 +328,19 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 				     mmu_kernel_ssize, 0);
 }
 
+static inline void hash_debug_pagealloc_alloc_slots(void)
+{
+	if (!debug_pagealloc_enabled())
+		return;
+	linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
+	linear_map_hash_slots = memblock_alloc_try_nid(
+			linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
+			ppc64_rma_size, NUMA_NO_NODE);
+	if (!linear_map_hash_slots)
+		panic("%s: Failed to allocate %lu bytes max_addr=%pa\n",
+		      __func__, linear_map_hash_count, &ppc64_rma_size);
+}
+
 static inline void hash_debug_pagealloc_add_slot(phys_addr_t paddr, int slot)
 {
 	if (!debug_pagealloc_enabled())
@@ -361,6 +374,7 @@ int hash__kernel_map_pages(struct page *page, int numpages,
 {
 	return 0;
 }
+static inline void hash_debug_pagealloc_alloc_slots(void) {}
 static inline void hash_debug_pagealloc_add_slot(phys_addr_t paddr, int slot) {}
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
@@ -1223,16 +1237,7 @@ static void __init htab_initialize(void)
 
 	prot = pgprot_val(PAGE_KERNEL);
 
-	if (debug_pagealloc_enabled()) {
-		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
-		linear_map_hash_slots = memblock_alloc_try_nid(
-				linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
-				ppc64_rma_size, NUMA_NO_NODE);
-		if (!linear_map_hash_slots)
-			panic("%s: Failed to allocate %lu bytes max_addr=%pa\n",
-			      __func__, linear_map_hash_count, &ppc64_rma_size);
-	}
-
+	hash_debug_pagealloc_alloc_slots();
 	/* create bolted the linear mapping in the hash table */
 	for_each_mem_range(i, &base, &end) {
 		size = end - base;
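For readers skimming the change: the patch uses the common kernel pattern of
pairing a real helper under the config #ifdef with an empty static inline stub
in the #else branch, so htab_initialize() can call
hash_debug_pagealloc_alloc_slots() unconditionally. Below is a minimal
standalone C sketch of that pattern; the CONFIG_DEBUG_PAGEALLOC_DEMO macro and
the demo_* names are illustrative stand-ins, not part of the kernel patch.

/* Sketch of the ifdef-plus-stub pattern this patch applies. */
#include <stdio.h>
#include <stdlib.h>

#define CONFIG_DEBUG_PAGEALLOC_DEMO 1	/* toggle to mimic the kernel config */

#ifdef CONFIG_DEBUG_PAGEALLOC_DEMO
static unsigned char *demo_hash_slots;
static unsigned long demo_hash_count;

/* Real helper: allocates the slot array, loosely mirroring
 * hash_debug_pagealloc_alloc_slots() in the patch. */
static inline void demo_alloc_slots(void)
{
	demo_hash_count = 128;	/* stand-in for memblock_end_of_DRAM() >> PAGE_SHIFT */
	demo_hash_slots = calloc(demo_hash_count, 1);
	if (!demo_hash_slots) {
		fprintf(stderr, "%s: failed to allocate %lu bytes\n",
			__func__, demo_hash_count);
		exit(1);
	}
	printf("allocated %lu slot bytes\n", demo_hash_count);
}
#else
/* Empty stub: compiles away when the feature is off, keeping the call
 * site free of #ifdefs, as the patch does for htab_initialize(). */
static inline void demo_alloc_slots(void) {}
#endif

int main(void)
{
	demo_alloc_slots();	/* single unconditional call site */
	return 0;
}

Flipping the #define off shows the same call site compiling unchanged against
the stub, which is the point of adding the stub in the !CONFIG_DEBUG_PAGEALLOC
branch of the patch.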