Diffstat (limited to 'arch/arm64/mm/fault.c')
-rw-r--r--  arch/arm64/mm/fault.c  227
1 file changed, 138 insertions(+), 89 deletions(-)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 451ba7cbd5ad..be9dab2c7d6a 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -23,6 +23,7 @@
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
+#include <linux/pkeys.h>
#include <linux/preempt.h>
#include <linux/hugetlb.h>
@@ -52,18 +53,12 @@ struct fault_info {
};
static const struct fault_info fault_info[];
-static struct fault_info debug_fault_info[];
static inline const struct fault_info *esr_to_fault_info(unsigned long esr)
{
return fault_info + (esr & ESR_ELx_FSC);
}
-static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr)
-{
- return debug_fault_info + DBG_ESR_EVT(esr);
-}
-
static void data_abort_decode(unsigned long esr)
{
unsigned long iss2 = ESR_ELx_ISS2(esr);
@@ -238,9 +233,13 @@ int __ptep_set_access_flags(struct vm_area_struct *vma,
pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
} while (pteval != old_pteval);
- /* Invalidate a stale read-only entry */
+ /*
+ * Invalidate the local stale read-only entry. Remote stale entries
+ * may still cause page faults and be invalidated via
+ * flush_tlb_fix_spurious_fault().
+ */
if (dirty)
- flush_tlb_page(vma, address);
+ local_flush_tlb_page(vma, address);
return 1;
}
@@ -374,7 +373,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
* Are we prepared to handle this kernel fault?
* We are almost certainly not prepared to handle instruction faults.
*/
- if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
+ if (!is_el1_instruction_abort(esr) && fixup_exception(regs, esr))
return;
if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs),
@@ -486,6 +485,43 @@ static void do_bad_area(unsigned long far, unsigned long esr,
}
}
+static bool fault_from_pkey(struct vm_area_struct *vma, unsigned int mm_flags)
+{
+ if (!system_supports_poe())
+ return false;
+
+ /*
+ * We do not check whether an Overlay fault has occurred because we
+ * cannot make a decision based solely on its value:
+ *
+ * - If Overlay is set, a fault did occur due to POE, but it may be
+ * spurious in those cases where we update POR_EL0 without ISB (e.g.
+ * on context-switch). We would then need to manually check POR_EL0
+ * against vma_pkey(vma), which is exactly what
+ * arch_vma_access_permitted() does.
+ *
+ * - If Overlay is not set, we may still need to report a pkey fault.
+ * This is the case if an access was made within a mapping but with no
+ * page mapped, and POR_EL0 forbids the access (according to
+ * vma_pkey()). Such access will result in a SIGSEGV regardless
+ * because core code checks arch_vma_access_permitted(), but in order
+ * to report the correct error code - SEGV_PKUERR - we must handle
+ * that case here.
+ */
+ return !arch_vma_access_permitted(vma,
+ mm_flags & FAULT_FLAG_WRITE,
+ mm_flags & FAULT_FLAG_INSTRUCTION,
+ false);
+}
+
+static bool is_gcs_fault(unsigned long esr)
+{
+ if (!esr_is_data_abort(esr))
+ return false;
+
+ return ESR_ELx_ISS2(esr) & ESR_ELx_GCS;
+}
+
static bool is_el0_instruction_abort(unsigned long esr)
{
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
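
fault_from_pkey() is what ultimately turns a POE permission denial into SEGV_PKUERR. For reference, a minimal userspace sketch that provokes such a fault through the generic pkey API (pkey_alloc()/pkey_mprotect(), wrapped by glibc since 2.27); on arm64 this path needs FEAT_S1POE hardware, and everything below is an illustration, not part of the patch:

/*
 * Hedged sketch: provoke a pkey fault and observe SEGV_PKUERR.
 * Assumes protection-key support (POE on arm64, PKU on x86) and
 * glibc wrappers for pkey_alloc()/pkey_mprotect().
 */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *si, void *uc)
{
	/* si_code distinguishes a pkey denial from an ordinary SEGV */
	printf("SIGSEGV: si_code=%d (%s), addr=%p\n", si->si_code,
	       si->si_code == SEGV_PKUERR ? "SEGV_PKUERR" : "other",
	       si->si_addr);
	_exit(0);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = segv_handler,
				.sa_flags = SA_SIGINFO };
	sigaction(SIGSEGV, &sa, NULL);

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* Allocate a key that denies all access, attach it to the page */
	int pkey = pkey_alloc(0, PKEY_DISABLE_ACCESS);
	if (pkey < 0) {
		perror("pkey_alloc (no pkey support?)");
		return 1;
	}
	pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey);

	*p = 1;	/* faults: POR_EL0 (PKRU on x86) forbids the access */
	return 0;
}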
@@ -500,17 +536,35 @@ static bool is_write_abort(unsigned long esr)
return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
}
+static bool is_invalid_gcs_access(struct vm_area_struct *vma, u64 esr)
+{
+ if (!system_supports_gcs())
+ return false;
+
+ if (unlikely(is_gcs_fault(esr))) {
+ /* GCS accesses must be performed on a GCS page */
+ if (!(vma->vm_flags & VM_SHADOW_STACK))
+ return true;
+ } else if (unlikely(vma->vm_flags & VM_SHADOW_STACK)) {
+ /* Only GCS operations can write to a GCS page */
+ return esr_is_data_abort(esr) && is_write_abort(esr);
+ }
+
+ return false;
+}
+
static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
struct pt_regs *regs)
{
const struct fault_info *inf;
struct mm_struct *mm = current->mm;
vm_fault_t fault;
- unsigned long vm_flags;
+ vm_flags_t vm_flags;
unsigned int mm_flags = FAULT_FLAG_DEFAULT;
unsigned long addr = untagged_addr(far);
struct vm_area_struct *vma;
int si_code;
+ int pkey = -1;
if (kprobe_page_fault(regs, esr))
return 0;
@@ -535,6 +589,14 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
/* It was exec fault */
vm_flags = VM_EXEC;
mm_flags |= FAULT_FLAG_INSTRUCTION;
+ } else if (is_gcs_fault(esr)) {
+ /*
+ * The GCS permission on a page implies both read and
+ * write, so always handle any GCS fault as a write fault;
+ * we need to trigger CoW even for GCS reads.
+ */
+ vm_flags = VM_WRITE;
+ mm_flags |= FAULT_FLAG_WRITE;
} else if (is_write_abort(esr)) {
/* It was write fault */
vm_flags = VM_WRITE;
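
Between them, the two GCS hunks encode a simple rule: GCS operations may only touch VM_SHADOW_STACK mappings (and are handled as writes so CoW triggers even for GCS reads), while plain stores to such mappings are rejected with SEGV_ACCERR. A hedged userspace sketch of the second half, assuming a GCS-capable arm64 kernel exposing map_shadow_stack(2); the syscall-number fallback below is an assumption for older headers:

/*
 * Hedged sketch: a normal store to a shadow-stack (GCS) page is
 * rejected, per is_invalid_gcs_access(). Assumes an arm64 kernel
 * with GCS and the map_shadow_stack() syscall; the fallback number
 * is an assumption, check your asm/unistd.h.
 */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_map_shadow_stack
#define __NR_map_shadow_stack 453
#endif
#ifndef SHADOW_STACK_SET_TOKEN
#define SHADOW_STACK_SET_TOKEN (1ULL << 0)	/* place a valid cap token */
#endif

int main(void)
{
	unsigned long *gcs = (unsigned long *)syscall(__NR_map_shadow_stack,
						      0, 4096,
						      SHADOW_STACK_SET_TOKEN);
	if (gcs == (unsigned long *)-1) {
		perror("map_shadow_stack (no GCS support?)");
		return 1;
	}

	/* Ordinary loads from a GCS page are permitted... */
	printf("cap token: %#lx\n", gcs[4096 / sizeof(*gcs) - 1]);

	/* ...but only GCS operations may write to it */
	gcs[0] = 0;	/* expect SIGSEGV with SEGV_ACCERR */
	return 0;
}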
@@ -554,7 +616,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
die_kernel_fault("execution of user memory",
addr, esr, regs);
- if (!search_exception_tables(regs->pc))
+ if (!insn_may_access_user(regs->pc, esr))
die_kernel_fault("access to user memory outside uaccess routines",
addr, esr, regs);
}
@@ -568,6 +630,13 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
if (!vma)
goto lock_mmap;
+ if (is_invalid_gcs_access(vma, esr)) {
+ vma_end_read(vma);
+ fault = 0;
+ si_code = SEGV_ACCERR;
+ goto bad_area;
+ }
+
if (!(vma->vm_flags & vm_flags)) {
vma_end_read(vma);
fault = 0;
@@ -575,6 +644,16 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
goto bad_area;
}
+
+ if (fault_from_pkey(vma, mm_flags)) {
+ pkey = vma_pkey(vma);
+ vma_end_read(vma);
+ fault = 0;
+ si_code = SEGV_PKUERR;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto bad_area;
+ }
+
fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs);
if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
vma_end_read(vma);
@@ -610,7 +689,16 @@ retry:
goto bad_area;
}
+ if (fault_from_pkey(vma, mm_flags)) {
+ pkey = vma_pkey(vma);
+ mmap_read_unlock(mm);
+ fault = 0;
+ si_code = SEGV_PKUERR;
+ goto bad_area;
+ }
+
fault = handle_mm_fault(vma, addr, mm_flags, regs);
+
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
@@ -669,8 +757,23 @@ bad_area:
arm64_force_sig_mceerr(BUS_MCEERR_AR, far, lsb, inf->name);
} else {
+ /*
+ * The pkey value that we return to userspace can be different
+ * from the pkey that caused the fault.
+ *
+ * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4);
+ * 2. T1 : set POR_EL0 to deny access to pkey=4, touches page
+ * 3. T1 : faults...
+ * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
+ * 5. T1 : enters fault handler, takes mmap_lock, etc...
+ * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
+ * faulted on a pte with its pkey=4.
+ */
/* Something tried to access memory that isn't in our memory map */
- arm64_force_sig_fault(SIGSEGV, si_code, far, inf->name);
+ if (si_code == SEGV_PKUERR)
+ arm64_force_sig_fault_pkey(far, inf->name, pkey);
+ else
+ arm64_force_sig_fault(SIGSEGV, si_code, far, inf->name);
}
return 0;
@@ -733,6 +836,7 @@ static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs)
*/
siaddr = untagged_addr(far);
}
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);
return 0;
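
Tainting on a synchronous external abort means a survived SEA stays visible after the fact. TAINT_MACHINE_CHECK is bit 4 of the taint mask (the 'M' flag in oops headers), so a quick userspace check looks like this sketch:

/*
 * Sketch: check whether the kernel has taken the machine-check
 * taint (TAINT_MACHINE_CHECK, bit 4, letter 'M'), e.g. after a
 * survived SEA.
 */
#include <stdio.h>

int main(void)
{
	unsigned long taint = 0;
	FILE *f = fopen("/proc/sys/kernel/tainted", "r");

	if (!f || fscanf(f, "%lu", &taint) != 1) {
		perror("/proc/sys/kernel/tainted");
		return 1;
	}
	fclose(f);

	printf("tainted=%#lx, machine-check taint %s\n", taint,
	       (taint & (1UL << 4)) ? "set" : "clear");
	return 0;
}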
@@ -744,9 +848,12 @@ static int do_tag_check_fault(unsigned long far, unsigned long esr,
/*
* The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN
* for tag check faults. Set them to corresponding bits in the untagged
- * address.
+ * address if ARM64_MTE_FAR isn't supported.
+ * Otherwise, bits 63:60 of FAR_EL1 are not UNKNOWN.
*/
- far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
+ if (!cpus_have_cap(ARM64_MTE_FAR))
+ far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
+
do_bad_area(far, esr, regs);
return 0;
}
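
For context, a hedged sketch of the userspace side of a synchronous tag-check fault, using the documented PROT_MTE/PR_SET_TAGGED_ADDR_CTRL interface (requires FEAT_MTE hardware); whether si_addr carries meaningful bits 63:60 now depends on the ARM64_MTE_FAR check above:

/*
 * Hedged sketch, assuming FEAT_MTE hardware: trigger a synchronous
 * tag check fault. do_tag_check_fault() decides how FAR bits 63:60
 * reach userspace via si_addr. Fallback defines are assumptions for
 * older headers.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef PROT_MTE
#define PROT_MTE 0x20
#endif
#ifndef SEGV_MTESERR
#define SEGV_MTESERR 9
#endif

static void handler(int sig, siginfo_t *si, void *uc)
{
	printf("si_code=%d (%s), si_addr=%p\n", si->si_code,
	       si->si_code == SEGV_MTESERR ? "SEGV_MTESERR" : "other",
	       si->si_addr);
	_exit(0);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = handler,
				.sa_flags = SA_SIGINFO };
	sigaction(SIGSEGV, &sa, NULL);

	/* Enable tagged addresses with synchronous tag checking */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL,
		  PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC, 0, 0, 0)) {
		perror("prctl (no MTE?)");
		return 1;
	}

	unsigned char *p = mmap(NULL, 4096,
				PROT_READ | PROT_WRITE | PROT_MTE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(PROT_MTE)");
		return 1;
	}

	/* Allocation tags are zero; use a mismatched logical tag (0x1) */
	unsigned char *bad = (unsigned char *)((unsigned long)p | (1UL << 56));
	*bad = 42;	/* tag check fault -> SIGSEGV/SEGV_MTESERR */
	return 0;
}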
@@ -846,75 +953,6 @@ void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs)
NOKPROBE_SYMBOL(do_sp_pc_abort);
/*
- * __refdata because early_brk64 is __init, but the reference to it is
- * clobbered at arch_initcall time.
- * See traps.c and debug-monitors.c:debug_traps_init().
- */
-static struct fault_info __refdata debug_fault_info[] = {
- { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware breakpoint" },
- { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware single-step" },
- { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware watchpoint" },
- { do_bad, SIGKILL, SI_KERNEL, "unknown 3" },
- { do_bad, SIGTRAP, TRAP_BRKPT, "aarch32 BKPT" },
- { do_bad, SIGKILL, SI_KERNEL, "aarch32 vector catch" },
- { early_brk64, SIGTRAP, TRAP_BRKPT, "aarch64 BRK" },
- { do_bad, SIGKILL, SI_KERNEL, "unknown 7" },
-};
-
-void __init hook_debug_fault_code(int nr,
- int (*fn)(unsigned long, unsigned long, struct pt_regs *),
- int sig, int code, const char *name)
-{
- BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));
-
- debug_fault_info[nr].fn = fn;
- debug_fault_info[nr].sig = sig;
- debug_fault_info[nr].code = code;
- debug_fault_info[nr].name = name;
-}
-
-/*
- * In debug exception context, we explicitly disable preemption despite
- * having interrupts disabled.
- * This serves two purposes: it makes it much less likely that we would
- * accidentally schedule in exception context and it will force a warning
- * if we somehow manage to schedule by accident.
- */
-static void debug_exception_enter(struct pt_regs *regs)
-{
- preempt_disable();
-
- /* This code is a bit fragile. Test it. */
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
-}
-NOKPROBE_SYMBOL(debug_exception_enter);
-
-static void debug_exception_exit(struct pt_regs *regs)
-{
- preempt_enable_no_resched();
-}
-NOKPROBE_SYMBOL(debug_exception_exit);
-
-void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
- struct pt_regs *regs)
-{
- const struct fault_info *inf = esr_to_debug_fault_info(esr);
- unsigned long pc = instruction_pointer(regs);
-
- debug_exception_enter(regs);
-
- if (user_mode(regs) && !is_ttbr0_addr(pc))
- arm64_apply_bp_hardening();
-
- if (inf->fn(addr_if_watchpoint, esr, regs)) {
- arm64_notify_die(inf->name, regs, inf->sig, inf->code, pc, esr);
- }
-
- debug_exception_exit(regs);
-}
-NOKPROBE_SYMBOL(do_debug_exception);
-
-/*
* Used during anonymous page fault handling.
*/
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
@@ -930,13 +968,24 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
if (vma->vm_flags & VM_MTE)
flags |= __GFP_ZEROTAGS;
- return vma_alloc_folio(flags, 0, vma, vaddr, false);
+ return vma_alloc_folio(flags, 0, vma, vaddr);
}
-void tag_clear_highpage(struct page *page)
+bool tag_clear_highpages(struct page *page, int numpages)
{
- /* Newly allocated page, shouldn't have been tagged yet */
- WARN_ON_ONCE(!try_page_mte_tagging(page));
- mte_zero_clear_page_tags(page_address(page));
- set_page_mte_tagged(page);
+ /*
+ * Check if MTE is supported; if not, return false so the caller
+ * falls back to clear_highpage(). get_huge_zero_folio()
+ * unconditionally passes __GFP_ZEROTAGS and post_alloc_hook()
+ * will invoke tag_clear_highpages().
+ */
+ if (!system_supports_mte())
+ return false;
+
+ /* Newly allocated pages shouldn't have been tagged yet */
+ for (int i = 0; i < numpages; i++, page++) {
+ WARN_ON_ONCE(!try_page_mte_tagging(page));
+ mte_zero_clear_page_tags(page_address(page));
+ set_page_mte_tagged(page);
+ }
+ return true;
}