author		Linus Torvalds <torvalds@linux-foundation.org>	2022-03-23 17:35:57 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-23 17:35:57 -0700
commit		9c0e6a89b592f4c4e4d769dbc22d399ab0685159 (patch)
tree		54865d08ede844e868b3403670a9a91ad24bba82 /arch/arm/mm
parent		e6aef3496a00a12e78a571f61d98300cf0a86e6a (diff)
parent		234a0f202a09a6144fd3c17ac6d018bdab9780bb (diff)
Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
 "Updates for IRQ stacks and virtually mapped stack support, and ftrace:

   - Support for IRQ and vmap'ed stacks

     This covers all the work related to implementing IRQ stacks and
     vmap'ed stacks for all 32-bit ARM systems that are currently
     supported by the Linux kernel, including RiscPC and Footbridge. It
     has been submitted for review in four different waves:

      - IRQ stacks support for v7 SMP systems [0]

      - vmap'ed stacks support for v7 SMP systems [1]

      - extending support for both IRQ stacks and vmap'ed stacks for
        all remaining configurations, including v6/v7 SMP multiplatform
        kernels and uniprocessor configurations including v7-M [2]

      - fixes and updates in [3]

   - ftrace fixes and cleanups

     Make all flavors of ftrace available on all builds, regardless of
     ISA choice, unwinder choice or compiler [4]:

      - use ADD not POP where possible

      - fix a couple of Thumb2 related issues

      - enable HAVE_FUNCTION_GRAPH_FP_TEST for robustness

      - enable the graph tracer with the EABI unwinder

      - avoid clobbering frame pointer registers to make Clang happy

   - Fixes for the above"

[0] https://lore.kernel.org/linux-arm-kernel/20211115084732.3704393-1-ardb@kernel.org/
[1] https://lore.kernel.org/linux-arm-kernel/20211122092816.2865873-1-ardb@kernel.org/
[2] https://lore.kernel.org/linux-arm-kernel/20211206164659.1495084-1-ardb@kernel.org/
[3] https://lore.kernel.org/linux-arm-kernel/20220124174744.1054712-1-ardb@kernel.org/
[4] https://lore.kernel.org/linux-arm-kernel/20220203082204.1176734-1-ardb@kernel.org/

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (62 commits)
  ARM: fix building NOMMU ARMv4/v5 kernels
  ARM: unwind: only permit stack switch when unwinding call_with_stack()
  ARM: Revert "unwind: dump exception stack from calling frame"
  ARM: entry: fix unwinder problems caused by IRQ stacks
  ARM: unwind: set frame.pc correctly for current-thread unwinding
  ARM: 9184/1: return_address: disable again for CONFIG_ARM_UNWIND=y
  ARM: 9183/1: unwind: avoid spurious warnings on bogus code addresses
  Revert "ARM: 9144/1: forbid ftrace with clang and thumb2_kernel"
  ARM: mach-bcm: disable ftrace in SMC invocation routines
  ARM: cacheflush: avoid clobbering the frame pointer
  ARM: kprobes: treat R7 as the frame pointer register in Thumb2 builds
  ARM: ftrace: enable the graph tracer with the EABI unwinder
  ARM: unwind: track location of LR value in stack frame
  ARM: ftrace: enable HAVE_FUNCTION_GRAPH_FP_TEST
  ARM: ftrace: avoid unnecessary literal loads
  ARM: ftrace: avoid redundant loads or clobbering IP
  ARM: ftrace: use trampolines to keep .init.text in branching range
  ARM: ftrace: use ADD not POP to counter PUSH at entry
  ARM: ftrace: ensure that ADR takes the Thumb bit into account
  ARM: make get_current() and __my_cpu_offset() __always_inline
  ...
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/Kconfig	1
-rw-r--r--	arch/arm/mm/cache-v7.S	40
-rw-r--r--	arch/arm/mm/context.c	3
-rw-r--r--	arch/arm/mm/ioremap.c	18
4 files changed, 31 insertions, 31 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 9724c16e9076..d30ee26ccc87 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -386,6 +386,7 @@ config CPU_V6
select CPU_PABRT_V6
select CPU_THUMB_CAPABLE
select CPU_TLB_V6 if MMU
+ select SMP_ON_UP if SMP
# ARMv6k
config CPU_V6K
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 830bbfb26ca5..7c9499b728c4 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -90,7 +90,7 @@ ENDPROC(v7_flush_icache_all)
*
* Flush the D-cache up to the Level of Unification Inner Shareable
*
- * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
+ * Corrupted registers: r0-r6, r9-r10
*/
ENTRY(v7_flush_dcache_louis)
@@ -117,7 +117,7 @@ ENDPROC(v7_flush_dcache_louis)
*
* Flush the whole D-cache.
*
- * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
+ * Corrupted registers: r0-r6, r9-r10
*
* - mm - mm_struct describing address space
*/
@@ -149,22 +149,22 @@ flush_levels:
movw r4, #0x3ff
ands r4, r4, r1, lsr #3 @ find maximum number on the way size
clz r5, r4 @ find bit position of way size increment
- movw r7, #0x7fff
- ands r7, r7, r1, lsr #13 @ extract max number of the index size
+ movw r6, #0x7fff
+ and r1, r6, r1, lsr #13 @ extract max number of the index size
+ mov r6, #1
+ movne r4, r4, lsl r5 @ # of ways shifted into bits [31:...]
+ movne r6, r6, lsl r5 @ 1 shifted left by same amount
loop1:
- mov r9, r7 @ create working copy of max index
+ mov r9, r1 @ create working copy of max index
loop2:
- ARM( orr r11, r10, r4, lsl r5 ) @ factor way and cache number into r11
- THUMB( lsl r6, r4, r5 )
- THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
- ARM( orr r11, r11, r9, lsl r2 ) @ factor index number into r11
- THUMB( lsl r6, r9, r2 )
- THUMB( orr r11, r11, r6 ) @ factor index number into r11
- mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
+ mov r5, r9, lsl r2 @ factor set number into r5
+ orr r5, r5, r4 @ factor way number into r5
+ orr r5, r5, r10 @ factor cache level into r5
+ mcr p15, 0, r5, c7, c14, 2 @ clean & invalidate by set/way
subs r9, r9, #1 @ decrement the index
bge loop2
- subs r4, r4, #1 @ decrement the way
- bge loop1
+ subs r4, r4, r6 @ decrement the way
+ bcs loop1
skip:
add r10, r10, #2 @ increment cache number
cmp r3, r10
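
(An aside, not part of the patch: the rewritten loop2 body above packs the set, way and cache level into a single operand for the DCCISW clean-and-invalidate-by-set/way instruction. A minimal C sketch of that encoding follows; the function name and parameters are illustrative only, mirroring the roles of r9/r2, r4 and r10 in the assembly.)

	#include <stdint.h>

	/*
	 * Illustrative sketch of the set/way operand built in r5 before
	 * "mcr p15, 0, r5, c7, c14, 2":
	 *   set_index << line_shift  corresponds to "mov r5, r9, lsl r2"
	 *   way_bits                 way number already shifted into the top
	 *                            bits, like r4 after "movne r4, r4, lsl r5"
	 *   level << 1               corresponds to the value carried in r10
	 */
	static inline uint32_t dccisw_operand(uint32_t level, uint32_t way_bits,
					      uint32_t set_index, uint32_t line_shift)
	{
		return (set_index << line_shift) | way_bits | (level << 1);
	}
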
@@ -192,14 +192,12 @@ ENDPROC(v7_flush_dcache_all)
*
*/
ENTRY(v7_flush_kern_cache_all)
- ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
- THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
+ stmfd sp!, {r4-r6, r9-r10, lr}
bl v7_flush_dcache_all
mov r0, #0
ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
- ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
- THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
+ ldmfd sp!, {r4-r6, r9-r10, lr}
ret lr
ENDPROC(v7_flush_kern_cache_all)
@@ -210,14 +208,12 @@ ENDPROC(v7_flush_kern_cache_all)
* Invalidate the I-cache to the point of unification.
*/
ENTRY(v7_flush_kern_cache_louis)
- ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
- THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
+ stmfd sp!, {r4-r6, r9-r10, lr}
bl v7_flush_dcache_louis
mov r0, #0
ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
- ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
- THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
+ ldmfd sp!, {r4-r6, r9-r10, lr}
ret lr
ENDPROC(v7_flush_kern_cache_louis)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 48091870db89..4204ffa2d104 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -240,8 +240,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
unsigned int cpu = smp_processor_id();
u64 asid;
- if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
- __check_vmalloc_seq(mm);
+ check_vmalloc_seq(mm);
/*
* We cannot update the pgd and the ASID atomicly with classic
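
(An aside, not part of this diff: check_vmalloc_seq(), used above in place of the open-coded comparison, is defined outside arch/arm/mm. A plausible sketch, assuming it simply compares the two atomic counters and falls back to the slow path on a mismatch, would be:)

	/* Sketch only; the real helper lives outside the files shown here. */
	static inline void check_vmalloc_seq(struct mm_struct *mm)
	{
		if (unlikely(atomic_read(&mm->context.vmalloc_seq) !=
			     atomic_read(&init_mm.context.vmalloc_seq)))
			__check_vmalloc_seq(mm);
	}
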
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 197f8eb3a775..aa08bcb72db9 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -117,16 +117,21 @@ EXPORT_SYMBOL(ioremap_page);
void __check_vmalloc_seq(struct mm_struct *mm)
{
- unsigned int seq;
+ int seq;
do {
- seq = init_mm.context.vmalloc_seq;
+ seq = atomic_read(&init_mm.context.vmalloc_seq);
memcpy(pgd_offset(mm, VMALLOC_START),
pgd_offset_k(VMALLOC_START),
sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
pgd_index(VMALLOC_START)));
- mm->context.vmalloc_seq = seq;
- } while (seq != init_mm.context.vmalloc_seq);
+ /*
+ * Use a store-release so that other CPUs that observe the
+ * counter's new value are guaranteed to see the results of the
+ * memcpy as well.
+ */
+ atomic_set_release(&mm->context.vmalloc_seq, seq);
+ } while (seq != atomic_read(&init_mm.context.vmalloc_seq));
}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
@@ -157,7 +162,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
* Note: this is still racy on SMP machines.
*/
pmd_clear(pmdp);
- init_mm.context.vmalloc_seq++;
+ atomic_inc_return_release(&init_mm.context.vmalloc_seq);
/*
* Free the page table, if there was one.
@@ -174,8 +179,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
* Ensure that the active_mm is up to date - we want to
* catch any use-after-iounmap cases.
*/
- if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
- __check_vmalloc_seq(current->active_mm);
+ check_vmalloc_seq(current->active_mm);
flush_tlb_kernel_range(virt, end);
}