author		Linus Torvalds <torvalds@linux-foundation.org>	2022-03-23 17:35:57 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-23 17:35:57 -0700
commit		9c0e6a89b592f4c4e4d769dbc22d399ab0685159 (patch)
tree		54865d08ede844e868b3403670a9a91ad24bba82 /arch/arm/mm/ioremap.c
parent		e6aef3496a00a12e78a571f61d98300cf0a86e6a (diff)
parent		234a0f202a09a6144fd3c17ac6d018bdab9780bb (diff)
Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
 "Updates for IRQ stacks and virtually mapped stack support, and
  ftrace:

   - Support for IRQ and vmap'ed stacks

     This covers all the work related to implementing IRQ stacks and
     vmap'ed stacks for all 32-bit ARM systems that are currently
     supported by the Linux kernel, including RiscPC and Footbridge.
     It has been submitted for review in four different waves:

      - IRQ stacks support for v7 SMP systems [0]

      - vmap'ed stacks support for v7 SMP systems [1]

      - extending support for both IRQ stacks and vmap'ed stacks for
        all remaining configurations, including v6/v7 SMP multiplatform
        kernels and uniprocessor configurations including v7-M [2]

      - fixes and updates in [3]

   - ftrace fixes and cleanups

     Make all flavors of ftrace available on all builds, regardless of
     ISA choice, unwinder choice or compiler [4]:

      - use ADD not POP where possible
      - fix a couple of Thumb2 related issues
      - enable HAVE_FUNCTION_GRAPH_FP_TEST for robustness
      - enable the graph tracer with the EABI unwinder
      - avoid clobbering frame pointer registers to make Clang happy

   - Fixes for the above"

[0] https://lore.kernel.org/linux-arm-kernel/20211115084732.3704393-1-ardb@kernel.org/
[1] https://lore.kernel.org/linux-arm-kernel/20211122092816.2865873-1-ardb@kernel.org/
[2] https://lore.kernel.org/linux-arm-kernel/20211206164659.1495084-1-ardb@kernel.org/
[3] https://lore.kernel.org/linux-arm-kernel/20220124174744.1054712-1-ardb@kernel.org/
[4] https://lore.kernel.org/linux-arm-kernel/20220203082204.1176734-1-ardb@kernel.org/

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (62 commits)
  ARM: fix building NOMMU ARMv4/v5 kernels
  ARM: unwind: only permit stack switch when unwinding call_with_stack()
  ARM: Revert "unwind: dump exception stack from calling frame"
  ARM: entry: fix unwinder problems caused by IRQ stacks
  ARM: unwind: set frame.pc correctly for current-thread unwinding
  ARM: 9184/1: return_address: disable again for CONFIG_ARM_UNWIND=y
  ARM: 9183/1: unwind: avoid spurious warnings on bogus code addresses
  Revert "ARM: 9144/1: forbid ftrace with clang and thumb2_kernel"
  ARM: mach-bcm: disable ftrace in SMC invocation routines
  ARM: cacheflush: avoid clobbering the frame pointer
  ARM: kprobes: treat R7 as the frame pointer register in Thumb2 builds
  ARM: ftrace: enable the graph tracer with the EABI unwinder
  ARM: unwind: track location of LR value in stack frame
  ARM: ftrace: enable HAVE_FUNCTION_GRAPH_FP_TEST
  ARM: ftrace: avoid unnecessary literal loads
  ARM: ftrace: avoid redundant loads or clobbering IP
  ARM: ftrace: use trampolines to keep .init.text in branching range
  ARM: ftrace: use ADD not POP to counter PUSH at entry
  ARM: ftrace: ensure that ADR takes the Thumb bit into account
  ARM: make get_current() and __my_cpu_offset() __always_inline
  ...
Diffstat (limited to 'arch/arm/mm/ioremap.c')
-rw-r--r--	arch/arm/mm/ioremap.c	18
1 file changed, 11 insertions, 7 deletions
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 197f8eb3a775..aa08bcb72db9 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -117,16 +117,21 @@ EXPORT_SYMBOL(ioremap_page);
 
 void __check_vmalloc_seq(struct mm_struct *mm)
 {
-	unsigned int seq;
+	int seq;
 
 	do {
-		seq = init_mm.context.vmalloc_seq;
+		seq = atomic_read(&init_mm.context.vmalloc_seq);
 		memcpy(pgd_offset(mm, VMALLOC_START),
 		       pgd_offset_k(VMALLOC_START),
 		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
 					pgd_index(VMALLOC_START)));
-		mm->context.vmalloc_seq = seq;
-	} while (seq != init_mm.context.vmalloc_seq);
+		/*
+		 * Use a store-release so that other CPUs that observe the
+		 * counter's new value are guaranteed to see the results of the
+		 * memcpy as well.
+		 */
+		atomic_set_release(&mm->context.vmalloc_seq, seq);
+	} while (seq != atomic_read(&init_mm.context.vmalloc_seq));
 }
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
@@ -157,7 +162,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 		 * Note: this is still racy on SMP machines.
 		 */
 		pmd_clear(pmdp);
-		init_mm.context.vmalloc_seq++;
+		atomic_inc_return_release(&init_mm.context.vmalloc_seq);
 
 		/*
 		 * Free the page table, if there was one.
@@ -174,8 +179,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 	 * Ensure that the active_mm is up to date - we want to
 	 * catch any use-after-iounmap cases.
 	 */
-	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
-		__check_vmalloc_seq(current->active_mm);
+	check_vmalloc_seq(current->active_mm);
 
 	flush_tlb_kernel_range(virt, end);
 }
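
The hunks above turn vmalloc_seq into an atomic sequence counter: the writer bumps the counter with release semantics after changing the master page tables, and the reader copies the master entries into its own mm and republishes its counter with a store-release, retrying if the master counter moved during the copy. Below is a minimal userspace C sketch of that copy-and-republish pattern, assuming C11 atomics; all identifiers here (master_table, master_seq, check_seq, ...) are hypothetical stand-ins, not kernel code, and the kernel's atomic_read()/atomic_set_release()/atomic_inc_return_release() helpers are modeled with their stdatomic.h equivalents.

	#include <stdatomic.h>
	#include <stdio.h>
	#include <string.h>

	#define NENTRIES 4

	/* Stand-in for init_mm's vmalloc PGD entries and vmalloc_seq. */
	static long master_table[NENTRIES];
	static atomic_int master_seq;

	/* Stand-in for a task's own mm: a possibly stale copy plus its counter. */
	static long local_table[NENTRIES];
	static atomic_int local_seq;

	/* Writer side, as in unmap_area_sections(): change the master table,
	 * then bump the counter with release semantics so that any CPU which
	 * observes the new count also observes the table update. */
	static void update_master(int idx, long val)
	{
		master_table[idx] = val;
		atomic_fetch_add_explicit(&master_seq, 1, memory_order_release);
	}

	/* Reader side, as in __check_vmalloc_seq(): snapshot the counter,
	 * copy the master entries, publish the copy with a store-release,
	 * and retry if the master counter moved during the copy. */
	static void check_seq(void)
	{
		int seq;

		do {
			seq = atomic_load_explicit(&master_seq, memory_order_acquire);
			memcpy(local_table, master_table, sizeof(local_table));
			/* Anyone who observes local_seq == seq is guaranteed
			 * to observe the copied contents as well. */
			atomic_store_explicit(&local_seq, seq, memory_order_release);
		} while (seq != atomic_load_explicit(&master_seq, memory_order_relaxed));
	}

	int main(void)
	{
		update_master(0, 42);

		/* Resync only when the counters differ, mirroring the
		 * comparison that the last hunk folds into a helper. */
		if (atomic_load_explicit(&local_seq, memory_order_relaxed) !=
		    atomic_load_explicit(&master_seq, memory_order_relaxed))
			check_seq();

		printf("entry 0 = %ld\n", local_table[0]);
		return 0;
	}

The check_vmalloc_seq() call that replaces the open-coded comparison in the final hunk presumably wraps exactly this counters-differ test around __check_vmalloc_seq(); the comparison itself needs no ordering, since a mismatch only triggers the resync and the resync loop re-reads the counter before publishing.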