Diffstat (limited to 'arch/powerpc/kernel/module.c')
-rw-r--r--  arch/powerpc/kernel/module.c  11
1 file changed, 6 insertions, 5 deletions
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index ed04a3ba66fe..40a583e9d3c7 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -90,16 +90,17 @@ int module_finalize(const Elf_Ehdr *hdr,
 }
 
 static __always_inline void *
-__module_alloc(unsigned long size, unsigned long start, unsigned long end)
+__module_alloc(unsigned long size, unsigned long start, unsigned long end, bool nowarn)
 {
         pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
+        gfp_t gfp = GFP_KERNEL | (nowarn ? __GFP_NOWARN : 0);
 
         /*
          * Don't do huge page allocations for modules yet until more testing
          * is done. STRICT_MODULE_RWX may require extra work to support this
          * too.
          */
-        return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, prot,
+        return __vmalloc_node_range(size, 1, start, end, gfp, prot,
                                     VM_FLUSH_RESET_PERMS | VM_NO_HUGE_VMAP,
                                     NUMA_NO_NODE, __builtin_return_address(0));
 }
@@ -114,13 +115,13 @@ void *module_alloc(unsigned long size)
 
         /* First try within 32M limit from _etext to avoid branch trampolines */
         if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit)
-                ptr = __module_alloc(size, limit, MODULES_END);
+                ptr = __module_alloc(size, limit, MODULES_END, true);
 
         if (!ptr)
-                ptr = __module_alloc(size, MODULES_VADDR, MODULES_END);
+                ptr = __module_alloc(size, MODULES_VADDR, MODULES_END, false);
 
         return ptr;
 #else
-        return __module_alloc(size, VMALLOC_START, VMALLOC_END);
+        return __module_alloc(size, VMALLOC_START, VMALLOC_END, false);
 #endif
 }
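
Why the new "nowarn" flag: the first __module_alloc() call tries only the window within 32M of _etext (to avoid branch trampolines), and failing there is an expected, recoverable case because the code immediately retries over the full MODULES_VADDR..MODULES_END range. That first call therefore passes nowarn = true, which adds __GFP_NOWARN and suppresses vmalloc's allocation-failure warning; only the fallback calls, whose failure is a real problem, keep the default warning. The sketch below is a minimal, hypothetical illustration of that try-quietly-then-fall-back pattern, not code taken from this patch; alloc_preferred_then_fallback() and its range parameters are invented names.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Illustrative sketch only (not part of the patch above): the preferred,
 * narrower attempt passes __GFP_NOWARN because its failure is expected
 * and recoverable; the fallback attempt keeps the default warning so a
 * genuine allocation failure is still reported.
 */
static void *alloc_preferred_then_fallback(unsigned long size,
                                           unsigned long pref_start,
                                           unsigned long pref_end,
                                           unsigned long full_start,
                                           unsigned long full_end)
{
        void *ptr;

        /* Preferred range: stay quiet on failure, a fallback follows. */
        ptr = __vmalloc_node_range(size, 1, pref_start, pref_end,
                                   GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL,
                                   VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
                                   __builtin_return_address(0));
        if (ptr)
                return ptr;

        /* Fallback range: let a genuine failure warn as usual. */
        return __vmalloc_node_range(size, 1, full_start, full_end,
                                    GFP_KERNEL, PAGE_KERNEL,
                                    VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
                                    __builtin_return_address(0));
}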