| author | Ritesh Harjani (IBM) <ritesh.list@gmail.com> | 2024-10-18 22:59:49 +0530 |
|---|---|---|
| committer | Michael Ellerman <mpe@ellerman.id.au> | 2024-10-23 18:53:19 +1100 |
| commit | 47dd2e63d42a7a1b0a9c374d3a236f58b97c19e6 (patch) | |
| tree | da86d29147beb5b2b7d49bf837532537c8dfb326 /arch/powerpc/mm/book3s64 | |
| parent | 685d942d00d8b0edf8431869028e23eac6cc4bab (diff) | |
book3s64/hash: Disable debug_pagealloc if it requires more memory
Limit the size of the linear map slot array allocated in the RMA region to
ppc64_rma_size / 4. If debug_pagealloc would require more memory than that,
skip the allocation entirely and disable debug_pagealloc.
Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://patch.msgid.link/e1ef66f32a1fe63bcbb89d5c11d86c65beef5ded.1729271995.git.ritesh.list@gmail.com
Diffstat (limited to 'arch/powerpc/mm/book3s64')
| -rw-r--r-- | arch/powerpc/mm/book3s64/hash_utils.c | 15 |
1 file changed, 14 insertions(+), 1 deletion(-)
```diff
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index cc2eaa97982c..cffbb6499ac4 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -331,9 +331,19 @@ static unsigned long linear_map_hash_count;
 static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
 static inline void hash_debug_pagealloc_alloc_slots(void)
 {
+	unsigned long max_hash_count = ppc64_rma_size / 4;
+
 	if (!debug_pagealloc_enabled())
 		return;
 	linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
+	if (unlikely(linear_map_hash_count > max_hash_count)) {
+		pr_info("linear map size (%llu) greater than 4 times RMA region (%llu). Disabling debug_pagealloc\n",
+			((u64)linear_map_hash_count << PAGE_SHIFT),
+			ppc64_rma_size);
+		linear_map_hash_count = 0;
+		return;
+	}
+
 	linear_map_hash_slots = memblock_alloc_try_nid(
 			linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
 			ppc64_rma_size, NUMA_NO_NODE);
@@ -344,7 +354,7 @@ static inline void hash_debug_pagealloc_alloc_slots(void)
 
 static inline void hash_debug_pagealloc_add_slot(phys_addr_t paddr, int slot)
 {
-	if (!debug_pagealloc_enabled())
+	if (!debug_pagealloc_enabled() || !linear_map_hash_count)
 		return;
 	if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
 		linear_map_hash_slots[paddr >> PAGE_SHIFT] = slot | 0x80;
@@ -356,6 +366,9 @@ static int hash_debug_pagealloc_map_pages(struct page *page, int numpages,
 	unsigned long flags, vaddr, lmi;
 	int i;
 
+	if (!debug_pagealloc_enabled() || !linear_map_hash_count)
+		return 0;
+
 	local_irq_save(flags);
 	for (i = 0; i < numpages; i++, page++) {
 		vaddr = (unsigned long)page_address(page);
```
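To make the sizing check concrete, below is a minimal standalone sketch (plain userspace C, not kernel code) of the arithmetic the patch performs: debug_pagealloc keeps one slot byte per page of the linear map (the `linear_map_hash_slots` array above), and the patch refuses the allocation once that array would exceed a quarter of the RMA region. The `debug_pagealloc_fits()` helper, the 64K `PAGE_SHIFT`, and the DRAM/RMA sizes in `main()` are hypothetical example values chosen for illustration.

```c
/*
 * Standalone sketch (not kernel code) of the sizing check in the patch:
 * one byte of slot bookkeeping per page of the linear map, which must
 * stay below a quarter of the RMA region or debug_pagealloc is disabled.
 */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT	16		/* hypothetical 64K pages */

static bool debug_pagealloc_fits(unsigned long long dram_bytes,
				 unsigned long long rma_bytes)
{
	/* One slot byte per page of the linear map, as in the patch. */
	unsigned long long hash_count = dram_bytes >> PAGE_SHIFT;
	unsigned long long max_hash_count = rma_bytes / 4;

	if (hash_count > max_hash_count) {
		printf("slots need %llu bytes > RMA/4 (%llu bytes): disable debug_pagealloc\n",
		       hash_count, max_hash_count);
		return false;
	}
	printf("slots need %llu bytes, fits in RMA/4 (%llu bytes)\n",
	       hash_count, max_hash_count);
	return true;
}

int main(void)
{
	/* 2TB of RAM, 768MB RMA: 32MB of slots fits comfortably. */
	debug_pagealloc_fits(2ULL << 40, 768ULL << 20);
	/* 64TB of RAM, same RMA: 1GB of slots does not fit. */
	debug_pagealloc_fits(64ULL << 40, 768ULL << 20);
	return 0;
}
```

The second case mirrors the situation the patch targets: on large-memory systems the slot array outgrows what the RMA can reasonably hold, so the patch now disables debug_pagealloc instead of attempting an allocation that would fail or crowd out the RMA.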