author     Russell King (Oracle) <rmk+kernel@armlinux.org.uk>  2024-04-18 12:13:38 +0100
committer  Russell King (Oracle) <rmk+kernel@armlinux.org.uk>  2024-04-18 12:13:38 +0100
commit     1add2e44a39028a9acf54587dcd0ad7084fcc2fa (patch)
tree       9dd03a06734a3a9c65ae477a355607b08548d4f9
parent     aff00427579d4c915ee92553f712e4c632185e6e (diff)
parent     a9a058031cfb7e1fcd7a03a4f7228179a0426cb4 (diff)

Merge branches 'amba' and 'misc' into for-next
 arch/arm/Kconfig                             | 22
 arch/arm/include/asm/assembler.h             |  1
 arch/arm/include/asm/pgtable-3level-hwdef.h  | 26
 arch/arm/include/asm/proc-fns.h              | 12
 arch/arm/include/asm/ptrace.h                |  1
 arch/arm/include/asm/uaccess-asm.h           | 58
 arch/arm/include/asm/uaccess.h               | 45
 arch/arm/kernel/asm-offsets.c                |  1
 arch/arm/kernel/entry-ftrace.S               |  4
 arch/arm/kernel/suspend.c                    |  8
 arch/arm/lib/csumpartialcopyuser.S           | 20
 arch/arm/mm/fault.c                          | 29
 arch/arm/mm/mmu.c                            |  7
 13 files changed, 216 insertions(+), 18 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b14aed3a17ab..1394a8491210 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1233,9 +1233,9 @@ config HIGHPTE
consumed by page tables. Setting this option will allow
user-space 2nd level page tables to reside in high memory.
-config CPU_SW_DOMAIN_PAN
- bool "Enable use of CPU domains to implement privileged no-access"
- depends on MMU && !ARM_LPAE
+config ARM_PAN
+ bool "Enable privileged no-access"
+ depends on MMU
default y
help
Increase kernel security by ensuring that normal kernel accesses
@@ -1244,10 +1244,26 @@ config CPU_SW_DOMAIN_PAN
by ensuring that magic values (such as LIST_POISON) will always
fault when dereferenced.
+ The implementation uses CPU domains when CONFIG_ARM_LPAE is disabled,
+ and disables TTBR0 page table walks when CONFIG_ARM_LPAE is enabled.
+
+config CPU_SW_DOMAIN_PAN
+ def_bool y
+ depends on ARM_PAN && !ARM_LPAE
+ help
+ Enable use of CPU domains to implement privileged no-access.
+
CPUs with low-vector mappings use a best-efforts implementation.
Their lower 1MB needs to remain accessible for the vectors, but
the remainder of userspace will become appropriately inaccessible.
+config CPU_TTBR0_PAN
+ def_bool y
+ depends on ARM_PAN && ARM_LPAE
+ help
+ Enable privileged no-access by disabling TTBR0 page table walks when
+ running in kernel mode.
+
config HW_PERF_EVENTS
def_bool y
depends on ARM_PMU
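
Taken together, the entries above mean that when ARM_PAN is set, exactly
one of CPU_SW_DOMAIN_PAN or CPU_TTBR0_PAN becomes active, keyed off
ARM_LPAE. A minimal sketch (not part of the patch; the function name and
message strings are invented) of how kernel code can branch on the
selected flavour with the standard IS_ENABLED() helper:

    #include <linux/kconfig.h>
    #include <linux/printk.h>

    /* illustrative only: report which PAN implementation was configured */
    static void report_pan_flavour(void)
    {
            if (IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN))
                    pr_info("PAN: software CPU domains (!LPAE)\n");
            else if (IS_ENABLED(CONFIG_CPU_TTBR0_PAN))
                    pr_info("PAN: TTBR0 table walk disable (LPAE)\n");
            else
                    pr_info("PAN: not configured\n");
    }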
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index aebe2c8f6a68..d33c1e24e00b 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -21,6 +21,7 @@
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
+#include <asm/pgtable.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index 2f35b4eddaa8..323ad811732e 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -74,6 +74,7 @@
#define PHYS_MASK_SHIFT (40)
#define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1)
+#ifndef CONFIG_CPU_TTBR0_PAN
/*
* TTBR0/TTBR1 split (PAGE_OFFSET):
* 0x40000000: T0SZ = 2, T1SZ = 0 (not used)
@@ -93,5 +94,30 @@
#endif
#define TTBR1_SIZE (((PAGE_OFFSET >> 30) - 1) << 16)
+#else
+/*
+ * With CONFIG_CPU_TTBR0_PAN enabled, TTBR1 is only used while uaccess
+ * is disabled, i.e. while TTBR0 page table walks are turned off.
+ */
+#define TTBR1_OFFSET 0 /* pointing to swapper_pg_dir */
+#define TTBR1_SIZE 0 /* TTBR1 size controlled via TTBCR.T0SZ */
+#endif
+
+/*
+ * TTBCR register bits.
+ */
+#define TTBCR_EAE (1 << 31)
+#define TTBCR_IMP (1 << 30)
+#define TTBCR_SH1_MASK (3 << 28)
+#define TTBCR_ORGN1_MASK (3 << 26)
+#define TTBCR_IRGN1_MASK (3 << 24)
+#define TTBCR_EPD1 (1 << 23)
+#define TTBCR_A1 (1 << 22)
+#define TTBCR_T1SZ_MASK (7 << 16)
+#define TTBCR_SH0_MASK (3 << 12)
+#define TTBCR_ORGN0_MASK (3 << 10)
+#define TTBCR_IRGN0_MASK (3 << 8)
+#define TTBCR_EPD0 (1 << 7)
+#define TTBCR_T0SZ_MASK (7 << 0)
#endif
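
The new TTBCR_* masks make the TTBR0-PAN bit manipulation explicit. The
following standalone C sketch (not from the patch; the starting value of
zero is an arbitrary example) reproduces the arithmetic that the
uaccess_disable/uaccess_enable macros later in this merge perform, and
compiles as an ordinary userspace program:

    #include <stdint.h>
    #include <stdio.h>

    #define TTBCR_EPD0      (1 << 7)
    #define TTBCR_A1        (1 << 22)
    #define TTBCR_T0SZ_MASK (7 << 0)

    int main(void)
    {
            uint32_t ttbcr = 0;     /* arbitrary example starting value */

            /* uaccess disabled: EPD0 = 1, A1 = 1, T0SZ = 7 (32MB range) */
            uint32_t off = ttbcr | TTBCR_EPD0 | TTBCR_A1 | TTBCR_T0SZ_MASK;

            /* uaccess enabled: EPD0 = 0, A1 = 0, T0SZ = 0 */
            uint32_t on = off & ~(TTBCR_EPD0 | TTBCR_A1 | TTBCR_T0SZ_MASK);

            printf("disabled=0x%08x enabled=0x%08x\n", off, on);
            return 0;
    }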
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 280396483f5d..9b3105a2a5e0 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -178,6 +178,18 @@ extern void cpu_resume(void);
})
#endif
+static inline unsigned int cpu_get_ttbcr(void)
+{
+ unsigned int ttbcr;
+ asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
+ return ttbcr;
+}
+
+static inline void cpu_set_ttbcr(unsigned int ttbcr)
+{
+ asm("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
+}
+
#else /*!CONFIG_MMU */
#define cpu_switch_mm(pgd,mm) { }
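
cpu_get_ttbcr()/cpu_set_ttbcr() wrap the CP15 c2 TTBCR accesses so C code
can read-modify-write the register without open-coded inline assembly. A
sketch of the intended shape, assuming the TTBCR_* masks from
pgtable-3level-hwdef.h and isb() from <asm/barrier.h> are in scope
(ttbr0_walks_enable is an invented name; the real user is
uaccess_save_and_enable() in the uaccess.h hunk below):

    /* hypothetical helper: enable TTBR0 walks, returning the old TTBCR */
    static inline unsigned int ttbr0_walks_enable(void)
    {
            unsigned int old = cpu_get_ttbcr();

            cpu_set_ttbcr(old & ~(TTBCR_A1 | TTBCR_EPD0 | TTBCR_T0SZ_MASK));
            isb();  /* TTBCR update must complete before any user access */
            return old;
    }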
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 14a38cc67e0b..6eb311fb2da0 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -20,6 +20,7 @@ struct pt_regs {
struct svc_pt_regs {
struct pt_regs regs;
u32 dacr;
+ u32 ttbcr;
};
#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
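
Because struct svc_pt_regs embeds a struct pt_regs, container_of() can
recover the SVC-mode extras from a plain pt_regs pointer. The fault.c
hunk below uses exactly this; in miniature:

    struct svc_pt_regs *svc = to_svc_pt_regs(regs);
    u32 saved_ttbcr = svc->ttbcr;   /* TTBCR as saved at exception entry */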
diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h
index 65da32e1f1c1..4bccd895d954 100644
--- a/arch/arm/include/asm/uaccess-asm.h
+++ b/arch/arm/include/asm/uaccess-asm.h
@@ -39,8 +39,9 @@
#endif
.endm
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
.macro uaccess_disable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
@@ -50,11 +51,9 @@
.if \isb
instr_sync
.endif
-#endif
.endm
.macro uaccess_enable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
@@ -64,15 +63,61 @@
.if \isb
instr_sync
.endif
-#endif
.endm
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+ .macro uaccess_disable, tmp, isb=1
+ /*
+ * Disable TTBR0 page table walks (EPD0 = 1), use the reserved ASID
+ * from TTBR1 (A1 = 1) and enable TTBR1 page table walks for kernel
+ * addresses by reducing TTBR0 range to 32MB (T0SZ = 7).
+ */
+ mrc p15, 0, \tmp, c2, c0, 2 @ read TTBCR
+ orr \tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK
+ orr \tmp, \tmp, #TTBCR_A1
+ mcr p15, 0, \tmp, c2, c0, 2 @ write TTBCR
+ .if \isb
+ instr_sync
+ .endif
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+ /*
+ * Enable TTBR0 page table walks (T0SZ = 0, EPD0 = 0) and ASID from
+ * TTBR0 (A1 = 0).
+ */
+ mrc p15, 0, \tmp, c2, c0, 2 @ read TTBCR
+ bic \tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK
+ bic \tmp, \tmp, #TTBCR_A1
+ mcr p15, 0, \tmp, c2, c0, 2 @ write TTBCR
+ .if \isb
+ instr_sync
+ .endif
+ .endm
+
+#else
+
+ .macro uaccess_disable, tmp, isb=1
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+ .endm
+
+#endif
+
#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
#define DACR(x...) x
#else
#define DACR(x...)
#endif
+#ifdef CONFIG_CPU_TTBR0_PAN
+#define PAN(x...) x
+#else
+#define PAN(x...)
+#endif
+
/*
* Save the address limit on entry to a privileged exception.
*
@@ -86,6 +131,8 @@
.macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
DACR( mrc p15, 0, \tmp0, c3, c0, 0)
DACR( str \tmp0, [sp, #SVC_DACR])
+ PAN( mrc p15, 0, \tmp0, c2, c0, 2)
+ PAN( str \tmp0, [sp, #SVC_TTBCR])
.if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
/* kernel=client, user=no access */
mov \tmp2, #DACR_UACCESS_DISABLE
@@ -104,8 +151,11 @@
.macro uaccess_exit, tsk, tmp0, tmp1
DACR( ldr \tmp0, [sp, #SVC_DACR])
DACR( mcr p15, 0, \tmp0, c3, c0, 0)
+ PAN( ldr \tmp0, [sp, #SVC_TTBCR])
+ PAN( mcr p15, 0, \tmp0, c2, c0, 2)
.endm
#undef DACR
+#undef PAN
#endif /* __ASM_UACCESS_ASM_H__ */
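
The DACR()/PAN() wrappers let uaccess_entry/uaccess_exit spell out both
save/restore sequences unconditionally while the preprocessor discards
whichever does not apply, keeping the call sites free of #ifdef. The same
trick in a standalone C program (CONFIG_DEMO_FEATURE and FEAT are
invented for illustration):

    #include <stdio.h>

    #define CONFIG_DEMO_FEATURE     /* comment out to drop the FEAT lines */

    #ifdef CONFIG_DEMO_FEATURE
    #define FEAT(x...) x            /* pass the arguments through */
    #else
    #define FEAT(x...)              /* swallow the arguments entirely */
    #endif

    int main(void)
    {
            FEAT(puts("feature-specific code"));
            puts("common code");
            return 0;
    }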
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 9556d04387f7..25d21d7d6e3e 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -14,6 +14,8 @@
#include <asm/domain.h>
#include <asm/unaligned.h>
#include <asm/unified.h>
+#include <asm/pgtable.h>
+#include <asm/proc-fns.h>
#include <asm/compiler.h>
#include <asm/extable.h>
@@ -24,9 +26,10 @@
* perform such accesses (eg, via list poison values) which could then
* be exploited for privilege escalation.
*/
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
static __always_inline unsigned int uaccess_save_and_enable(void)
{
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
unsigned int old_domain = get_domain();
/* Set the current domain access to permit user accesses */
@@ -34,19 +37,49 @@ static __always_inline unsigned int uaccess_save_and_enable(void)
domain_val(DOMAIN_USER, DOMAIN_CLIENT));
return old_domain;
-#else
- return 0;
-#endif
}
static __always_inline void uaccess_restore(unsigned int flags)
{
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/* Restore the user access mask */
set_domain(flags);
-#endif
}
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+static inline unsigned int uaccess_save_and_enable(void)
+{
+ unsigned int old_ttbcr = cpu_get_ttbcr();
+
+ /*
+ * Enable TTBR0 page table walks (T0SZ = 0, EPD0 = 0) and ASID from
+ * TTBR0 (A1 = 0).
+ */
+ cpu_set_ttbcr(old_ttbcr & ~(TTBCR_A1 | TTBCR_EPD0 | TTBCR_T0SZ_MASK));
+ isb();
+
+ return old_ttbcr;
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+ cpu_set_ttbcr(flags);
+ isb();
+}
+
+#else
+
+static inline unsigned int uaccess_save_and_enable(void)
+{
+ return 0;
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+}
+
+#endif
+
/*
* These two are intentionally not defined anywhere - if the kernel
* code generates any references to them, that's a bug.
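
Note that callers are untouched by this change: a user access is still
bracketed by the same pair, and only the implementation behind it differs
per configuration. Roughly (a sketch of the shape the get_user()/put_user()
plumbing in this header already uses):

    unsigned int __ua_flags = uaccess_save_and_enable();
    /* ... perform the user-space load or store here ... */
    uaccess_restore(__ua_flags);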
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 4915662842ff..4853875740d0 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -85,6 +85,7 @@ int main(void)
DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0));
DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
DEFINE(SVC_DACR, offsetof(struct svc_pt_regs, dacr));
+ DEFINE(SVC_TTBCR, offsetof(struct svc_pt_regs, ttbcr));
DEFINE(SVC_REGS_SIZE, sizeof(struct svc_pt_regs));
BLANK();
DEFINE(SIGFRAME_RC3_OFFSET, offsetof(struct sigframe, retcode[3]));
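
SVC_TTBCR exists so the assembly in uaccess-asm.h above can address
svc_pt_regs.ttbcr by a build-time constant: asm-offsets.c is compiled to
assembly and the DEFINE() markers are scraped into generated/asm-offsets.h.
A simplified standalone miniature of the mechanism (struct demo and DEMO_B
are invented; the kernel's DEFINE() also embeds the value as text):

    #include <stddef.h>

    /* emit "->SYM value" into the generated assembly for scraping */
    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))

    struct demo { int a; int b; };

    int main(void)
    {
            DEFINE(DEMO_B, offsetof(struct demo, b));
            return 0;
    }

Compile with gcc -S and grep the output for "->DEMO_B" to see the line a
build system would scrape.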
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index 3e7bcaca5e07..bc598e3d8dd2 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -271,6 +271,10 @@ ENTRY(ftrace_stub)
ret lr
ENDPROC(ftrace_stub)
+ENTRY(ftrace_stub_graph)
+ ret lr
+ENDPROC(ftrace_stub_graph)
+
#ifdef CONFIG_DYNAMIC_FTRACE
__INIT
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index c3ec3861dd07..58a6441b58c4 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -12,6 +12,7 @@
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
+#include <asm/uaccess.h>
extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
extern void cpu_resume_mmu(void);
@@ -27,6 +28,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
return -EINVAL;
/*
+ * Needed for the MMU disabling/enabling code to be able to run from
+ * TTBR0 addresses.
+ */
+ if (IS_ENABLED(CONFIG_CPU_TTBR0_PAN))
+ uaccess_save_and_enable();
+
+ /*
* Function graph tracer state gets inconsistent when the kernel
* calls functions that never return (aka suspend finishers) hence
* disable graph tracing during their execution.
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 6928781e6bee..c289bde04743 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -13,7 +13,8 @@
.text
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
.macro save_regs
mrc p15, 0, ip, c3, c0, 0
stmfd sp!, {r1, r2, r4 - r8, ip, lr}
@@ -25,7 +26,23 @@
mcr p15, 0, ip, c3, c0, 0
ret lr
.endm
+
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+ .macro save_regs
+ mrc p15, 0, ip, c2, c0, 2 @ read TTBCR
+ stmfd sp!, {r1, r2, r4 - r8, ip, lr}
+ uaccess_enable ip
+ .endm
+
+ .macro load_regs
+ ldmfd sp!, {r1, r2, r4 - r8, ip, lr}
+ mcr p15, 0, ip, c2, c0, 2 @ restore TTBCR
+ ret lr
+ .endm
+
#else
+
.macro save_regs
stmfd sp!, {r1, r2, r4 - r8, lr}
.endm
@@ -33,6 +50,7 @@
.macro load_regs
ldmfd sp!, {r1, r2, r4 - r8, pc}
.endm
+
#endif
.macro load1b, reg1
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 439dc6a26bb9..dfa9554ef331 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -242,6 +242,27 @@ static inline bool is_permission_fault(unsigned int fsr)
return false;
}
+#ifdef CONFIG_CPU_TTBR0_PAN
+static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs)
+{
+ struct svc_pt_regs *svcregs;
+
+ /* If we are in user mode: permission granted */
+ if (user_mode(regs))
+ return true;
+
+ /* uaccess state saved above pt_regs on SVC exception entry */
+ svcregs = to_svc_pt_regs(regs);
+
+ return !(svcregs->ttbcr & TTBCR_EPD0);
+}
+#else
+static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs)
+{
+ return true;
+}
+#endif
+
static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
@@ -285,6 +306,14 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+ /*
+ * Privileged access aborts with CONFIG_CPU_TTBR0_PAN enabled are
+ * routed via the translation fault mechanism. Check whether uaccess
+ * is disabled while in kernel mode.
+ */
+ if (!ttbr0_usermode_access_allowed(regs))
+ goto no_context;
+
if (!(flags & FAULT_FLAG_USER))
goto lock_mmap;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c24e29c0b9a4..3f774856ca67 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1687,9 +1687,8 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
*/
cr = get_cr();
set_cr(cr & ~(CR_I | CR_C));
- asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
- asm volatile("mcr p15, 0, %0, c2, c0, 2"
- : : "r" (ttbcr & ~(3 << 8 | 3 << 10)));
+ ttbcr = cpu_get_ttbcr();
+ cpu_set_ttbcr(ttbcr & ~(3 << 8 | 3 << 10));
flush_cache_all();
/*
@@ -1701,7 +1700,7 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
lpae_pgtables_remap(offset, pa_pgd);
/* Re-enable the caches and cacheable TLB walks */
- asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
+ cpu_set_ttbcr(ttbcr);
set_cr(cr);
}