Diffstat (limited to 'arch/arm64/mm/mmap.c'):
 arch/arm64/mm/mmap.c | 205 ++++++++++++++++++++++--------------------------
 1 file changed, 94 insertions(+), 111 deletions(-)
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 8ed6cb1a900f..08ee177432c2 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -1,142 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on arch/arm/mm/mmap.c
*
* Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/elf.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/export.h>
-#include <linux/shm.h>
-#include <linux/sched.h>
#include <linux/io.h>
-#include <linux/personality.h>
-#include <linux/random.h>
-
-#include <asm/cputype.h>
+#include <linux/memblock.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+
+#include <asm/cpufeature.h>
+#include <asm/page.h>
+
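+/*
+ * Default protection values, indexed by the VM_READ, VM_WRITE, VM_EXEC
+ * and VM_SHARED bits of vm_flags. Entries may be updated at boot by
+ * adjust_protection_map() below.
+ */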
+static pgprot_t protection_map[16] __ro_after_init = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_READONLY,
+ [VM_WRITE | VM_READ] = PAGE_READONLY,
+ /* PAGE_EXECONLY if Enhanced PAN */
+ [VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_WRITE] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ /* PAGE_EXECONLY if Enhanced PAN */
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
+};
/*
- * Leave enough space between the mmap area and the stack to honour ulimit in
- * the face of randomisation.
+ * You really shouldn't be using read() or write() on /dev/mem. This might go
+ * away in the future.
*/
-#define MIN_GAP (SZ_128M + ((STACK_RND_MASK << PAGE_SHIFT) + 1))
-#define MAX_GAP (STACK_TOP/6*5)
-
-static int mmap_is_legacy(void)
+int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
- if (current->personality & ADDR_COMPAT_LAYOUT)
- return 1;
-
- if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
- return 1;
-
- return sysctl_legacy_va_layout;
+ /*
+ * Check whether addr is covered by a memory region without the
+ * MEMBLOCK_NOMAP attribute, and whether that region covers the
+ * entire range. In theory, this could lead to false negatives
+ * if the range is covered by distinct but adjacent memory regions
+ * that only differ in other attributes. However, few such
+ * attributes have been defined, and it is debatable whether it
+ * follows that /dev/mem read() calls should be able to traverse
+ * such boundaries.
+ */
+ return memblock_is_region_memory(addr, size) &&
+ memblock_is_map_memory(addr);
}
/*
- * Since get_random_int() returns the same value within a 1 jiffy window, we
- * will almost always get the same randomisation for the stack and mmap
- * region. This will mean the relative distance between stack and mmap will be
- * the same.
- *
- * To avoid this we can shift the randomness by 1 bit.
+ * Do not allow /dev/mem mappings beyond the supported physical range.
*/
-static unsigned long mmap_rnd(void)
-{
- unsigned long rnd = 0;
-
- if (current->flags & PF_RANDOMIZE)
- rnd = (long)get_random_int() & (STACK_RND_MASK >> 1);
-
- return rnd << (PAGE_SHIFT + 1);
-}
-
-static unsigned long mmap_base(void)
+int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
- unsigned long gap = rlimit(RLIMIT_STACK);
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
- return PAGE_ALIGN(STACK_TOP - gap - mmap_rnd());
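+ /* The range must end without carrying into address bits above PHYS_MASK. */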
+ return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
}
-/*
- * This function, called very early during the creation of a new process VM
- * image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
+static int __init adjust_protection_map(void)
{
/*
- * Fall back to the standard layout if the personality bit is set, or
- * if the expected stack growth is unlimited:
+ * With Enhanced PAN we can honour the execute-only permissions as
+ * there is no PAN override with such mappings.
*/
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base();
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ if (cpus_have_cap(ARM64_HAS_EPAN)) {
+ protection_map[VM_EXEC] = PAGE_EXECONLY;
+ protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
}
-}
-EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
-
-/*
- * You really shouldn't be using read() or write() on /dev/mem. This might go
- * away in the future.
- */
-int valid_phys_addr_range(unsigned long addr, size_t size)
-{
- if (addr < PHYS_OFFSET)
- return 0;
- if (addr + size > __pa(high_memory - 1) + 1)
- return 0;
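+ /*
+ * With LPA2, the shareability bits in the PTE are repurposed for
+ * output address bits and shareability comes from TCR_EL1 instead,
+ * so PTE_SHARED must be cleared from every protection value.
+ */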
+ if (lpa2_is_enabled())
+ for (int i = 0; i < ARRAY_SIZE(protection_map); i++)
+ pgprot_val(protection_map[i]) &= ~PTE_SHARED;
- return 1;
+ return 0;
}
+arch_initcall(adjust_protection_map);
-/*
- * Do not allow /dev/mem mappings beyond the supported physical range.
- */
-int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
- return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
-}
+ ptdesc_t prot;
-#ifdef CONFIG_STRICT_DEVMEM
+ /* Short circuit GCS to avoid bloating the table. */
+ if (system_supports_gcs() && (vm_flags & VM_SHADOW_STACK)) {
+ prot = _PAGE_GCS_RO;
+ } else {
+ prot = pgprot_val(protection_map[vm_flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
+ }
-#include <linux/ioport.h>
-
-/*
- * devmem_is_allowed() checks to see if /dev/mem access to a certain address
- * is valid. The argument is a physical page number. We mimic x86 here by
- * disallowing access to system RAM as well as device-exclusive MMIO regions.
- * This effectively disable read()/write() on /dev/mem.
- */
-int devmem_is_allowed(unsigned long pfn)
-{
- if (iomem_is_exclusive(pfn << PAGE_SHIFT))
- return 0;
- if (!page_is_ram(pfn))
- return 1;
- return 0;
-}
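+ /* BTI-enabled mappings are marked as guarded pages (GP). */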
+ if (vm_flags & VM_ARM64_BTI)
+ prot |= PTE_GP;
+ /*
+ * There are two conditions required for returning a Normal Tagged
+ * memory type: (1) the user requested it via PROT_MTE passed to
+ * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
+ * register (1) as VM_MTE in the vma->vm_flags and (2) as
+ * VM_MTE_ALLOWED. Note that the latter can only be set during the
+ * mmap() call since mprotect() does not accept MAP_* flags.
+ * Checking for VM_MTE only is sufficient since arch_validate_flags()
+ * does not permit (VM_MTE & !VM_MTE_ALLOWED).
+ */
+ if (vm_flags & VM_MTE)
+ prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
+
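+ /*
+ * With permission overlays (FEAT_S1POE), the POIndex bits in the PTE
+ * select the protection key that applies to this mapping.
+ */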
+#ifdef CONFIG_ARCH_HAS_PKEYS
+ if (system_supports_poe()) {
+ if (vm_flags & VM_PKEY_BIT0)
+ prot |= PTE_PO_IDX_0;
+ if (vm_flags & VM_PKEY_BIT1)
+ prot |= PTE_PO_IDX_1;
+ if (vm_flags & VM_PKEY_BIT2)
+ prot |= PTE_PO_IDX_2;
+ }
#endif
+
+ return __pgprot(prot);
+}
+EXPORT_SYMBOL(vm_get_page_prot);