author		Will Deacon <will@kernel.org>	2021-06-24 14:05:25 +0100
committer	Will Deacon <will@kernel.org>	2021-06-24 14:05:25 +0100
commit		fdceddb06a5ff5ad3894cf9e8124d5af38ac5793 (patch)
tree		1c0e225ad623ec5d6d02c42f400b2202f4567dc0 /arch/arm64/mm/fault.c
parent		81ad4bb1fe91d28d793d801e462a284c7f82cc40 (diff)
parent		c275c5c6d50a0518cdb0584e85905d10e7cefc6e (diff)
Merge branch 'for-next/mte' into for-next/core
KASAN optimisations for the hardware tagging (MTE) implementation.

* for-next/mte:
  kasan: disable freed user page poisoning with HW tags
  arm64: mte: handle tags zeroing at page allocation time
  kasan: use separate (un)poison implementation for integrated init
  mm: arch: remove indirection level in alloc_zeroed_user_highpage_movable()
  kasan: speed up mte_set_mem_tag_range
Diffstat (limited to 'arch/arm64/mm/fault.c')
-rw-r--r--	arch/arm64/mm/fault.c	26
1 file changed, 26 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index d0f972a45099..349c488765ca 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -919,3 +919,29 @@ void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
debug_exception_exit(regs);
}
NOKPROBE_SYMBOL(do_debug_exception);
+
+/*
+ * Used during anonymous page fault handling.
+ */
+struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;
+
+ /*
+ * If the page is mapped with PROT_MTE, initialise the tags at the
+ * point of allocation and page zeroing as this is usually faster than
+ * separate DC ZVA and STGM.
+ */
+ if (vma->vm_flags & VM_MTE)
+ flags |= __GFP_ZEROTAGS;
+
+ return alloc_page_vma(flags, vma, vaddr);
+}
+
+void tag_clear_highpage(struct page *page)
+{
+ mte_zero_clear_page_tags(page_address(page));
+ page_kasan_tag_reset(page);
+ set_bit(PG_mte_tagged, &page->flags);
+}