Diffstat (limited to 'arch/s390/mm/mmap.c')
-rw-r--r--  arch/s390/mm/mmap.c | 75
1 file changed, 39 insertions(+), 36 deletions(-)
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 206756946589..2a222a7e14f4 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -15,8 +15,8 @@
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
-#include <linux/compat.h>
#include <linux/security.h>
+#include <linux/hugetlb.h>
#include <asm/elf.h>
static unsigned long stack_maxrandom_size(void)
@@ -26,7 +26,7 @@ static unsigned long stack_maxrandom_size(void)
return STACK_RND_MASK << PAGE_SHIFT;
}
-static inline int mmap_is_legacy(struct rlimit *rlim_stack)
+static inline int mmap_is_legacy(const struct rlimit *rlim_stack)
{
if (current->personality & ADDR_COMPAT_LAYOUT)
return 1;
@@ -46,11 +46,10 @@ static unsigned long mmap_base_legacy(unsigned long rnd)
}
static inline unsigned long mmap_base(unsigned long rnd,
- struct rlimit *rlim_stack)
+ const struct rlimit *rlim_stack)
{
unsigned long gap = rlim_stack->rlim_cur;
unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
- unsigned long gap_min, gap_max;
/* Values close to RLIM_INFINITY can overflow. */
if (gap + pad > gap)
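
The guard above relies on unsigned wraparound: with a stack rlimit near RLIM_INFINITY, gap + pad can wrap past ULONG_MAX and compare smaller than gap, so the pad is only added when the sum stays sane. A freestanding sketch of the same check, with hypothetical values:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long gap = ULONG_MAX - 100;  /* hypothetical: rlimit close to RLIM_INFINITY */
            unsigned long pad = 65536;            /* hypothetical stack pad */

            if (gap + pad > gap)  /* false here: the sum wraps */
                    gap += pad;

            printf("gap = %lu\n", gap);  /* gap is left untouched */
            return 0;
    }
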
@@ -60,19 +59,15 @@ static inline unsigned long mmap_base(unsigned long rnd,
* Top of mmap area (just below the process stack).
* Leave at least a ~128 MB hole.
*/
- gap_min = SZ_128M;
- gap_max = (STACK_TOP / 6) * 5;
-
- if (gap < gap_min)
- gap = gap_min;
- else if (gap > gap_max)
- gap = gap_max;
+ gap = clamp(gap, SZ_128M, (STACK_TOP / 6) * 5);
return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
static int get_align_mask(struct file *filp, unsigned long flags)
{
+ if (filp && is_file_hugepages(filp))
+ return huge_page_mask_align(filp);
if (!(current->flags & PF_RANDOMIZE))
return 0;
if (filp || (flags & MAP_SHARED))
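
The removed if/else ladder and the new clamp() call from <linux/minmax.h> bound gap to [SZ_128M, (STACK_TOP / 6) * 5] identically. A standalone sketch with a local stand-in for the kernel macro and a hypothetical STACK_TOP:

    #include <stdio.h>

    /* Local stand-in for the kernel's clamp() from <linux/minmax.h>. */
    #define clamp(val, lo, hi) \
            ((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

    int main(void)
    {
            unsigned long gap_min = 128UL << 20;          /* SZ_128M */
            unsigned long gap_max = (1UL << 47) / 6 * 5;  /* hypothetical STACK_TOP */

            printf("%lu\n", clamp(64UL << 20, gap_min, gap_max));  /* clamped up to gap_min */
            printf("%lu\n", clamp(1UL << 48, gap_min, gap_max));   /* clamped down to gap_max */
            return 0;
    }
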
@@ -82,7 +77,7 @@ static int get_align_mask(struct file *filp, unsigned long flags)
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -106,7 +101,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = get_align_mask(filp, flags);
- info.align_offset = pgoff << PAGE_SHIFT;
+ if (!(filp && is_file_hugepages(filp)))
+ info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
if (offset_in_page(addr))
return addr;
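
For hugetlb files the align_mask now comes from huge_page_mask_align() and align_offset stays 0, so the search result is rounded to a hugepage boundary rather than to the file offset's page colouring. A simplified sketch of the congruence arithmetic that a mask/offset pair drives inside vm_unmapped_area() (not the exact kernel code):

    #include <stdio.h>

    /* Advance addr to the next address congruent to offset modulo (mask + 1). */
    static unsigned long align_addr(unsigned long addr, unsigned long mask,
                                    unsigned long offset)
    {
            return addr + ((offset - addr) & mask);
    }

    int main(void)
    {
            unsigned long mask = (1UL << 20) - 1;  /* hypothetical 1 MiB huge page */

            /* offset 0: result is hugepage aligned, as hugetlb needs */
            printf("%#lx\n", align_addr(0x12345000UL, mask, 0));
            /* a nonzero offset would instead inherit the file offset's alignment */
            printf("%#lx\n", align_addr(0x12345000UL, mask, 0x7000));
            return 0;
    }
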
@@ -117,7 +113,7 @@ check_asce_limit:
unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
@@ -144,7 +140,8 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long ad
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
info.align_mask = get_align_mask(filp, flags);
- info.align_offset = pgoff << PAGE_SHIFT;
+ if (!(filp && is_file_hugepages(filp)))
+ info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
/*
@@ -171,7 +168,7 @@ check_asce_limit:
* This function, called very early during the creation of a new
* process VM image, sets up which VM layout function to use:
*/
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
{
unsigned long random_factor = 0UL;
@@ -184,29 +181,35 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
*/
if (mmap_is_legacy(rlim_stack)) {
mm->mmap_base = mmap_base_legacy(random_factor);
- clear_bit(MMF_TOPDOWN, &mm->flags);
+ mm_flags_clear(MMF_TOPDOWN, mm);
} else {
mm->mmap_base = mmap_base(random_factor, rlim_stack);
- set_bit(MMF_TOPDOWN, &mm->flags);
+ mm_flags_set(MMF_TOPDOWN, mm);
}
}
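
mm_flags_clear()/mm_flags_set() replace the open-coded clear_bit()/set_bit() on mm->flags with dedicated accessors. A minimal sketch of the shape of such accessors, assuming a single flags word (the in-kernel mm_struct layout and the atomic bitops differ):

    struct mm_sketch {
            unsigned long flags;  /* assumption: one plain flags word */
    };

    /* The kernel variants use atomic bitops; plain ops shown here. */
    static inline void mm_flags_set(int flag, struct mm_sketch *mm)
    {
            mm->flags |= 1UL << flag;
    }

    static inline void mm_flags_clear(int flag, struct mm_sketch *mm)
    {
            mm->flags &= ~(1UL << flag);
    }
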
-static const pgprot_t protection_map[16] = {
- [VM_NONE] = PAGE_NONE,
- [VM_READ] = PAGE_RO,
- [VM_WRITE] = PAGE_RO,
- [VM_WRITE | VM_READ] = PAGE_RO,
- [VM_EXEC] = PAGE_RX,
- [VM_EXEC | VM_READ] = PAGE_RX,
- [VM_EXEC | VM_WRITE] = PAGE_RX,
- [VM_EXEC | VM_WRITE | VM_READ] = PAGE_RX,
- [VM_SHARED] = PAGE_NONE,
- [VM_SHARED | VM_READ] = PAGE_RO,
- [VM_SHARED | VM_WRITE] = PAGE_RW,
- [VM_SHARED | VM_WRITE | VM_READ] = PAGE_RW,
- [VM_SHARED | VM_EXEC] = PAGE_RX,
- [VM_SHARED | VM_EXEC | VM_READ] = PAGE_RX,
- [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX,
- [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX
-};
+static pgprot_t protection_map[16] __ro_after_init;
+
+void __init setup_protection_map(void)
+{
+ pgprot_t *pm = protection_map;
+
+ pm[VM_NONE] = PAGE_NONE;
+ pm[VM_READ] = PAGE_RO;
+ pm[VM_WRITE] = PAGE_RO;
+ pm[VM_WRITE | VM_READ] = PAGE_RO;
+ pm[VM_EXEC] = PAGE_RX;
+ pm[VM_EXEC | VM_READ] = PAGE_RX;
+ pm[VM_EXEC | VM_WRITE] = PAGE_RX;
+ pm[VM_EXEC | VM_WRITE | VM_READ] = PAGE_RX;
+ pm[VM_SHARED] = PAGE_NONE;
+ pm[VM_SHARED | VM_READ] = PAGE_RO;
+ pm[VM_SHARED | VM_WRITE] = PAGE_RW;
+ pm[VM_SHARED | VM_WRITE | VM_READ] = PAGE_RW;
+ pm[VM_SHARED | VM_EXEC] = PAGE_RX;
+ pm[VM_SHARED | VM_EXEC | VM_READ] = PAGE_RX;
+ pm[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX;
+ pm[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX;
+}
+
DECLARE_VM_GET_PAGE_PROT
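
With the table now filled at boot by setup_protection_map() rather than at compile time, the array drops const and gains __ro_after_init, which write-protects it once init completes. DECLARE_VM_GET_PAGE_PROT then emits the generic lookup over it; the macro from <linux/pgtable.h> expands to roughly:

    pgprot_t vm_get_page_prot(unsigned long vm_flags)
    {
            return protection_map[vm_flags &
                                  (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
    }
    EXPORT_SYMBOL(vm_get_page_prot);
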