path: root/arch/s390/mm/mmap.c
author		Sven Schnelle <svens@linux.ibm.com>	2024-01-24 09:22:07 +0100
committer	Heiko Carstens <hca@linux.ibm.com>	2024-02-09 13:58:14 +0100
commit		86f48f922ba79ca32db1aabbcf8758148f925eb3 (patch)
tree		b3ea94ddf8aa4d405ff66d5139c57791cbc02abe /arch/s390/mm/mmap.c
parent		791833f22431aacdec6d489cc138e82c40709450 (diff)
s390/mmap: disable mmap alignment when randomize_va_space = 0
Stefan reported a test case failure in libc. The test runs with randomize_va_space set to zero, i.e. with address randomization disabled, and it runs the program via the dynamic loader. Looking at the failure showed that the heap was placed right before some pages mapped from the binary, which made memory allocation fail after a few allocations.

Normally, when address randomization is switched off and the binary is loaded via the dynamic loader, the kernel places the binary below the 128MB top gap, so the address map looks like this:

3fff7fd1000-3fff7fd2000 r--p 00000000 5e:01 1447115    /lib/ld64.so.1
3fff7fd2000-3fff7ff2000 r-xp 00001000 5e:01 1447115    /lib/ld64.so.1
3fff7ff2000-3fff7ffc000 r--p 00021000 5e:01 1447115    /lib/ld64.so.1
3fff7ffc000-3fff7ffe000 r--p 0002a000 5e:01 1447115    /lib/ld64.so.1
3fff7ffe000-3fff8000000 rw-p 0002c000 5e:01 1447115    /lib/ld64.so.1
3fff8000000-3fff8021000 rw-p 00000000 00:00 0          [heap]
3fffffda000-3ffffffb000 rw-p 00000000 00:00 0          [stack]
3ffffffc000-3ffffffe000 r--p 00000000 00:00 0          [vvar]
3ffffffe000-40000000000 r-xp 00000000 00:00 0          [vdso]

However, commit 1f6b83e5e4d3 ("s390: avoid z13 cache aliasing") introduced a mmap alignment mask of 8MB. With this commit, the memory map now looks like this:

3fff7f80000-3fff7f81000 r--p 00000000 5e:01 1447115    /lib/ld64.so.1
3fff7f81000-3fff7fa1000 r-xp 00001000 5e:01 1447115    /lib/ld64.so.1
3fff7fa1000-3fff7fab000 r--p 00021000 5e:01 1447115    /lib/ld64.so.1
3fff7fab000-3fff7fad000 r--p 0002a000 5e:01 1447115    /lib/ld64.so.1
3fff7fad000-3fff7faf000 rw-p 0002c000 5e:01 1447115    /lib/ld64.so.1
3fff7faf000-3fff7fd0000 rw-p 00000000 00:00 0          [heap]
3fff7fdc000-3fff8000000 rw-p 00000000 00:00 0
3fffffda000-3ffffffb000 rw-p 00000000 00:00 0          [stack]
3ffffffc000-3ffffffe000 r--p 00000000 00:00 0          [vvar]
3ffffffe000-40000000000 r-xp 00000000 00:00 0          [vdso]

The reason for this placement is that the ELF loader loads the binary so that it ends at mmap_base (0x3fff8000000 on s390). That would give a start address of 0x3fff7fd1000, but due to the alignment requirement mmap chooses 0x3fff7f80000 instead, which leaves a gap between the end of the mapped binary and mmap_base. When the next non-shared, non-file pages are mapped, mmap searches from top to bottom and the first free space it finds is this gap, leaving only a few pages for the heap. With address space randomization enabled this doesn't happen, because the binary is mapped to a completely different memory area.

Fix this by disabling the mmap alignment when address space randomization is disabled. This is in line with what other architectures do.

Reported-by: Stefan Liebler <stli@linux.ibm.com>
Tested-by: Stefan Liebler <stli@linux.ibm.com>
Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
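To make the placement arithmetic concrete, here is a minimal user-space sketch of the rounding step described above. It is not kernel code; it assumes align_offset is 0 and an effective alignment mask of 0x7f000 (MMAP_ALIGN_MASK << PAGE_SHIFT for 64-bit tasks, an assumption of this sketch), which reproduces the addresses shown in the maps.

/*
 * Minimal sketch of the top-down placement arithmetic described above.
 * Assumptions: align_offset is 0 and the effective alignment mask is
 * 0x7f000 (MMAP_ALIGN_MASK << PAGE_SHIFT, assumed); this is not kernel
 * code, just the rounding step that opens the gap below mmap_base.
 */
#include <stdio.h>

int main(void)
{
	unsigned long mmap_base  = 0x3fff8000000UL; /* top of the mmap area on s390 */
	unsigned long len        = 0x2f000UL;       /* size of the mapped ld64.so.1 */
	unsigned long align_mask = 0x7f000UL;       /* assumed MMAP_ALIGN_MASK << PAGE_SHIFT */
	unsigned long unaligned, aligned;

	/* highest start address that still ends at mmap_base ... */
	unaligned = mmap_base - len;
	/* ... rounded down so the bits covered by the mask are zero */
	aligned = unaligned & ~align_mask;

	printf("unaligned start: %#lx\n", unaligned);  /* 0x3fff7fd1000 */
	printf("aligned start:   %#lx\n", aligned);    /* 0x3fff7f80000 */
	printf("gap left below mmap_base: %#lx bytes\n", unaligned - aligned);
	return 0;
}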
Diffstat (limited to 'arch/s390/mm/mmap.c')
-rw-r--r--	arch/s390/mm/mmap.c	19
1 file changed, 11 insertions, 8 deletions
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index fc9a7dc26c5e..b14fc0887654 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -71,6 +71,15 @@ static inline unsigned long mmap_base(unsigned long rnd,
return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
+static int get_align_mask(struct file *filp, unsigned long flags)
+{
+ if (!(current->flags & PF_RANDOMIZE))
+ return 0;
+ if (filp || (flags & MAP_SHARED))
+ return MMAP_ALIGN_MASK << PAGE_SHIFT;
+ return 0;
+}
+
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
@@ -97,10 +106,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
- if (filp || (flags & MAP_SHARED))
- info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
- else
- info.align_mask = 0;
+ info.align_mask = get_align_mask(filp, flags);
info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
if (offset_in_page(addr))
@@ -138,10 +144,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long ad
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
- if (filp || (flags & MAP_SHARED))
- info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
- else
- info.align_mask = 0;
+ info.align_mask = get_align_mask(filp, flags);
info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
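For context beyond the diff, here is a compact user-space model of the decision the new get_align_mask() helper encodes; it is a sketch under stated assumptions, not kernel code. The ELF loader sets PF_RANDOMIZE only when randomize_va_space is non-zero and the ADDR_NO_RANDOMIZE personality is not set, so with randomize_va_space = 0 the flag stays clear, the mask becomes 0, and the binary can again be placed directly at mmap_base - len with no gap left above it. The mask value below is an assumption chosen to match the maps above.

/*
 * User-space model of the patched decision (not kernel code): the mask
 * value 0x7f is an assumption mirroring the 64-bit MMAP_ALIGN_MASK.
 * With randomization off the mask is 0, so the top-down search can use
 * mmap_base - len directly and no gap is left for later anonymous
 * mappings such as the heap.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define MMAP_ALIGN_MASK 0x7fUL  /* assumed 64-bit value */

static unsigned long model_align_mask(bool randomize, bool file_or_shared)
{
	if (!randomize)                 /* behaviour added by this patch */
		return 0;
	if (file_or_shared)
		return MMAP_ALIGN_MASK << PAGE_SHIFT;
	return 0;
}

int main(void)
{
	unsigned long mmap_base = 0x3fff8000000UL;
	unsigned long len = 0x2f000UL;
	unsigned long mask;

	mask = model_align_mask(true, true);   /* randomization on: aligned placement */
	printf("randomized:   start %#lx\n", (mmap_base - len) & ~mask);

	mask = model_align_mask(false, true);  /* randomize_va_space = 0: no gap */
	printf("unrandomized: start %#lx\n", (mmap_base - len) & ~mask);
	return 0;
}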