author     Ard Biesheuvel <ardb@kernel.org>  2021-11-24 14:08:11 +0100
committer  Ard Biesheuvel <ardb@kernel.org>  2021-12-06 12:49:17 +0100
commit     9c46929e7989efacc1dd0a1dd662a839897ea2b6 (patch)
tree       e55a231cce8f02eea3be6e7b19627c58bccdee52 /arch/arm/kernel
parent     c2755910373bb5dfb9aa68ba2924036686815c9e (diff)
ARM: implement THREAD_INFO_IN_TASK for uniprocessor systems
On UP systems, only a single task can be 'current' at the same time, which means we can use a global variable to track it. This means we can also enable THREAD_INFO_IN_TASK for those systems, as in that case, thread_info is accessed via current rather than the other way around, removing the need to store thread_info at the base of the task stack. This, in turn, permits us to enable IRQ stacks and vmap'ed stacks on UP systems as well.

To partially mitigate the performance overhead of this arrangement, use an ADD/ADD/LDR sequence with the appropriate PC-relative group relocations to load the value of current when needed. This means that accessing current will still only require a single load as before, avoiding the need for a literal to carry the address of the global variable in each function. However, accessing thread_info will now require this load as well.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Nicolas Pitre <nico@fluxnic.net>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Tested-by: Marc Zyngier <maz@kernel.org>
Tested-by: Vladimir Murzin <vladimir.murzin@arm.com> # ARMv7M
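For context, the C sketch below condenses what the 'current' lookup amounts to after this change: a single load either way, from the per-CPU TPIDRURO register when CONFIG_CURRENT_POINTER_IN_TPIDRURO is set, or from the new __current global on UP builds without it. This is an illustrative sketch, not the literal include/asm/current.h code: the helper name is made up, the SMP-kernel-on-pre-v6K-hardware case is omitted, and in the real UP fast path the load is emitted in assembly via the ADD/ADD/LDR group-relocation sequence described above.

struct task_struct;

extern struct task_struct *__current;	/* UP only: defined in process.c */

static inline struct task_struct *get_current_sketch(void)
{
#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
	struct task_struct *cur;

	/* 'current' is cached in the per-CPU TPIDRURO register (CP15 c13) */
	asm("mrc p15, 0, %0, c13, c0, 3" : "=r" (cur));
	return cur;
#else
	/* UP: only one task can be 'current' at a time, so one load of a global suffices */
	return __current;
#endif
}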
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/asm-offsets.c  |  3
-rw-r--r--  arch/arm/kernel/entry-armv.S   | 11
-rw-r--r--  arch/arm/kernel/entry-v7m.S    | 10
-rw-r--r--  arch/arm/kernel/head-common.S  |  4
-rw-r--r--  arch/arm/kernel/process.c      |  7
-rw-r--r--  arch/arm/kernel/smp.c          | 11
-rw-r--r--  arch/arm/kernel/traps.c        |  4
7 files changed, 34 insertions, 16 deletions
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 645845e4982a..2c8d76fd7c66 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -43,9 +43,6 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
-#ifndef CONFIG_THREAD_INFO_IN_TASK
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
-#endif
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain));
DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context));
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 43d917f0d9a9..b58bda51e4b8 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -807,12 +807,13 @@ ENTRY(__switch_to)
switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
!defined(CONFIG_STACKPROTECTOR_PER_TASK)
- ldr r9, [r2, #TI_TASK]
ldr r8, =__stack_chk_guard
.if (TSK_STACK_CANARY > IMM12_MASK)
- add r9, r9, #TSK_STACK_CANARY & ~IMM12_MASK
- .endif
+ add r9, r2, #TSK_STACK_CANARY & ~IMM12_MASK
ldr r9, [r9, #TSK_STACK_CANARY & IMM12_MASK]
+ .else
+ ldr r9, [r2, #TSK_STACK_CANARY & IMM12_MASK]
+ .endif
#endif
mov r7, r2 @ Preserve 'next'
#ifdef CONFIG_CPU_USE_DOMAINS
@@ -829,7 +830,7 @@ ENTRY(__switch_to)
#endif
mov r0, r5
#if !defined(CONFIG_THUMB2_KERNEL) && !defined(CONFIG_VMAP_STACK)
- set_current r7
+ set_current r7, r8
ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
#else
mov r1, r7
@@ -851,7 +852,7 @@ ENTRY(__switch_to)
@ switches us to another stack, with few other side effects. In order
@ to prevent this distinction from causing any inconsistencies, let's
@ keep the 'set_current' call as close as we can to the update of SP.
- set_current r1
+ set_current r1, r2
mov sp, ip
ret lr
#endif
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index 520dd43e7e08..4e0d318b67c6 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -97,15 +97,17 @@ ENTRY(__switch_to)
str sp, [ip], #4
str lr, [ip], #4
mov r5, r0
+ mov r6, r2 @ Preserve 'next'
add r4, r2, #TI_CPU_SAVE
ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH
bl atomic_notifier_call_chain
- mov ip, r4
mov r0, r5
- ldmia ip!, {r4 - r11} @ Load all regs saved previously
- ldr sp, [ip]
- ldr pc, [ip, #4]!
+ mov r1, r6
+ ldmia r4, {r4 - r12, lr} @ Load all regs saved previously
+ set_current r1, r2
+ mov sp, ip
+ bx lr
.fnend
ENDPROC(__switch_to)
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index da18e0a17dc2..42cae73fcc19 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -105,10 +105,8 @@ __mmap_switched:
mov r1, #0
bl __memset @ clear .bss
-#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
adr_l r0, init_task @ get swapper task_struct
- set_current r0
-#endif
+ set_current r0, r1
ldmia r4, {r0, r1, r2, r3}
str r9, [r0] @ Save processor ID
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index d47159f3791c..0617af11377f 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -36,7 +36,7 @@
#include "signal.h"
-#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
DEFINE_PER_CPU(struct task_struct *, __entry_task);
#endif
@@ -46,6 +46,11 @@ unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
+#ifndef CONFIG_CURRENT_POINTER_IN_TPIDRURO
+asmlinkage struct task_struct *__current;
+EXPORT_SYMBOL(__current);
+#endif
+
static const char *processor_modes[] __maybe_unused = {
"USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
"UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 9c55ca915ba4..951559e5bea3 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -403,6 +403,17 @@ static void smp_store_cpu_info(unsigned int cpuid)
check_cpu_icache_size(cpuid);
}
+static void set_current(struct task_struct *cur)
+{
+ if (!IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO) && !is_smp()) {
+ __current = cur;
+ return;
+ }
+
+ /* Set TPIDRURO */
+ asm("mcr p15, 0, %0, c13, c0, 3" :: "r"(cur) : "memory");
+}
+
/*
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index b28a705c49cb..3f38357efc46 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -865,7 +865,9 @@ early_initcall(allocate_overflow_stacks);
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
unsigned long tsk_stk = (unsigned long)current->stack;
+#ifdef CONFIG_IRQSTACKS
unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
+#endif
unsigned long ovf_stk = (unsigned long)this_cpu_read(overflow_stack_ptr);
console_verbose();
@@ -873,8 +875,10 @@ asmlinkage void handle_bad_stack(struct pt_regs *regs)
pr_emerg("Task stack: [0x%08lx..0x%08lx]\n",
tsk_stk, tsk_stk + THREAD_SIZE);
+#ifdef CONFIG_IRQSTACKS
pr_emerg("IRQ stack: [0x%08lx..0x%08lx]\n",
irq_stk - THREAD_SIZE, irq_stk);
+#endif
pr_emerg("Overflow stack: [0x%08lx..0x%08lx]\n",
ovf_stk - OVERFLOW_STACK_SIZE, ovf_stk);