Diffstat (limited to 'arch/arm64/mm/fault.c')
-rw-r--r--	arch/arm64/mm/fault.c	31	++++++++++++++++++++++---------
1 file changed, 22 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 5fe6d2e40e9b..efb7b2cbead5 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -40,6 +40,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
+#include <asm/kasan.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
@@ -132,6 +133,18 @@ static void mem_abort_decode(unsigned int esr)
data_abort_decode(esr);
}
+static inline bool is_ttbr0_addr(unsigned long addr)
+{
+ /* entry assembly clears tags for TTBR0 addrs */
+ return addr < TASK_SIZE;
+}
+
+static inline bool is_ttbr1_addr(unsigned long addr)
+{
+ /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
+ return arch_kasan_reset_tag(addr) >= VA_START;
+}
+
/*
* Dump out the page tables associated with 'addr' in the currently active mm.
*/
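
The two helpers encode the arm64 address-space split: is_ttbr0_addr() covers user addresses, which reach these handlers already untagged because the entry assembly strips tags from TTBR0 addresses, while is_ttbr1_addr() must first reset any KASAN_SW_TAGS pointer tag (bits 63:56) via arch_kasan_reset_tag(), which is what the new <asm/kasan.h> include provides. A standalone, illustrative demo of why the raw VA_START comparison misfires on a tagged kernel pointer; the DEMO_ names are made up for this sketch, which assumes a 48-bit VA split, a 64-bit unsigned long, and models the tag reset as forcing the top byte back to all-ones:

#include <stdio.h>

#define DEMO_VA_START	0xffff000000000000UL	/* 48-bit VA assumption */
#define DEMO_TAG_MASK	(0xffUL << 56)		/* KASAN_SW_TAGS tag bits */

int main(void)
{
	unsigned long kptr   = 0xffff800012345678UL;	/* untagged kernel VA */
	unsigned long tagged = (kptr & ~DEMO_TAG_MASK) | (0x2aUL << 56);

	/* the raw comparison misclassifies the tagged pointer ... */
	printf("raw:      %d\n", tagged >= DEMO_VA_START);	/* prints 0 */
	/* ... while resetting the tag first classifies it correctly */
	printf("untagged: %d\n", (tagged | DEMO_TAG_MASK) >= DEMO_VA_START);	/* prints 1 */
	return 0;
}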
@@ -141,7 +154,7 @@ void show_pte(unsigned long addr)
pgd_t *pgdp;
pgd_t pgd;
- if (addr < TASK_SIZE) {
+ if (is_ttbr0_addr(addr)) {
/* TTBR0 */
mm = current->active_mm;
if (mm == &init_mm) {
@@ -149,7 +162,7 @@ void show_pte(unsigned long addr)
addr);
return;
}
- } else if (addr >= VA_START) {
+ } else if (is_ttbr1_addr(addr)) {
/* TTBR1 */
mm = &init_mm;
} else {
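
With the helpers in place, show_pte()'s dispatch reads directly off the address map: TTBR0 addresses walk current->active_mm, TTBR1 addresses walk init_mm, and anything in between is rejected as a bad address. A hypothetical restatement of that three-way split, reusing the helpers defined above (demo_classify and its string labels are illustrative, not from the patch):

static const char *demo_classify(unsigned long addr)
{
	if (is_ttbr0_addr(addr))
		return "TTBR0: user address, walk current->active_mm";
	if (is_ttbr1_addr(addr))
		return "TTBR1: kernel address, walk init_mm";
	return "neither: bad address";
}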
@@ -254,7 +267,7 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
if (fsc_type == ESR_ELx_FSC_PERM)
return true;
- if (addr < TASK_SIZE && system_uses_ttbr0_pan())
+ if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan())
return fsc_type == ESR_ELx_FSC_FAULT &&
(regs->pstate & PSR_PAN_BIT);
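
The second clause covers software PAN emulation: with TTBR0-PAN in use, an unintended EL1 access to user memory is reported as a translation fault rather than a permission fault, so the function also accepts ESR_ELx_FSC_FAULT when PSTATE.PAN is set and the faulting address is a TTBR0 one. For context, the fsc_type tested here is derived from the low bits of ESR_ELx earlier in the function; a one-line sketch using the upstream macro name:

	/* fault status code, masked down to its type field */
	unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;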
@@ -319,7 +332,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr)
* type", so we ignore this wrinkle and just return the translation
* fault.)
*/
- if (current->thread.fault_address >= TASK_SIZE) {
+ if (!is_ttbr0_addr(current->thread.fault_address)) {
switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_DABT_LOW:
/*
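
This hunk is behaviour-neutral, since fault_address >= TASK_SIZE is exactly !is_ttbr0_addr(fault_address) once tags are out of the picture; the rewrite just makes the intent uniform. The switch above dispatches on the exception class held in the top bits of ESR_ELx; a sketch of that decode, with the EXAMPLE_ prefix marking these as illustrative stand-ins rather than the kernel's own definitions:

#define EXAMPLE_ESR_ELx_EC_SHIFT	26
#define EXAMPLE_ESR_ELx_EC_MASK		(0x3fUL << EXAMPLE_ESR_ELx_EC_SHIFT)

/* exception class: bits [31:26] of ESR_ELx */
static inline unsigned int example_esr_ec(unsigned long esr)
{
	return (esr & EXAMPLE_ESR_ELx_EC_MASK) >> EXAMPLE_ESR_ELx_EC_SHIFT;
}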
@@ -455,7 +468,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
- if (addr < TASK_SIZE && is_el1_permission_fault(addr, esr, regs)) {
+ if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die_kernel_fault("access to user memory with fs=KERNEL_DS",
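
A permission fault on a TTBR0 address while regs->orig_addr_limit was KERNEL_DS means the kernel touched user memory inside a set_fs(KERNEL_DS) window, which this code treats as a kernel bug. Illustrative only, assuming the era's get_fs()/set_fs() uaccess API (demo_kernel_ds_window is a made-up name): the pattern that makes the saved address limit meaningful at fault time looks roughly like this:

static void demo_kernel_ds_window(void)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* uaccess limit now spans kernel addresses */
	/* ... kernel-internal uaccess happens here ... */
	set_fs(old_fs);		/* restore the caller's limit */
}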
@@ -603,7 +616,7 @@ static int __kprobes do_translation_fault(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
{
- if (addr < TASK_SIZE)
+ if (is_ttbr0_addr(addr))
return do_page_fault(addr, esr, regs);
do_bad_area(addr, esr, regs);
@@ -758,7 +771,7 @@ asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
* re-enabled IRQs. If the address is a kernel address, apply
* BP hardening prior to enabling IRQs and pre-emption.
*/
- if (addr > TASK_SIZE)
+ if (!is_ttbr0_addr(addr))
arm64_apply_bp_hardening();
local_daif_restore(DAIF_PROCCTX);
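
Note this hunk tightens the boundary as well as ignoring tags: the old strict addr > TASK_SIZE skipped BP hardening at exactly addr == TASK_SIZE, whereas !is_ttbr0_addr(addr), i.e. !(addr < TASK_SIZE), applies it there. A minimal before/after sketch of just that boundary, with task_size standing in for TASK_SIZE:

static int old_applies_hardening(unsigned long addr, unsigned long task_size)
{
	return addr > task_size;	/* misses addr == task_size */
}

static int new_applies_hardening(unsigned long addr, unsigned long task_size)
{
	return !(addr < task_size);	/* covers addr == task_size */
}

The same tightening applies to the instruction_pointer() checks in the do_sp_pc_abort and do_debug_exception hunks below.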
@@ -771,7 +784,7 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
struct pt_regs *regs)
{
if (user_mode(regs)) {
- if (instruction_pointer(regs) > TASK_SIZE)
+ if (!is_ttbr0_addr(instruction_pointer(regs)))
arm64_apply_bp_hardening();
local_daif_restore(DAIF_PROCCTX);
}
@@ -825,7 +838,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
if (interrupts_enabled(regs))
trace_hardirqs_off();
- if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
+ if (user_mode(regs) && !is_ttbr0_addr(instruction_pointer(regs)))
arm64_apply_bp_hardening();
if (!inf->fn(addr, esr, regs)) {