Diffstat (limited to 'arch/arm/include/asm/cacheflush.h')
-rw-r--r--  arch/arm/include/asm/cacheflush.h  117
1 file changed, 77 insertions(+), 40 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 17d0ae8672fa..8ed8b9a24efe 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/cacheflush.h
*
* Copyright (C) 1999-2002 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H
@@ -35,7 +32,7 @@
* Start addresses are inclusive and end addresses are exclusive;
* start addresses should be rounded down, end addresses up.
*
- * See Documentation/cachetlb.txt for more information.
+ * See Documentation/core-api/cachetlb.rst for more information.
* Please note that the implementation of these, and the required
* effects are cache-type (VIVT/VIPT/PIPT) specific.
*
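The inclusive-start/exclusive-end convention above means callers round the start down and the end up. A minimal sketch, not part of this patch, using the generic L1_CACHE_BYTES constant and a placeholder do_cache_op():

#include <linux/cache.h>
#include <linux/align.h>

extern void do_cache_op(unsigned long start, unsigned long end);	/* placeholder */

static void flush_buffer(void *buf, size_t len)
{
	unsigned long start = ALIGN_DOWN((unsigned long)buf, L1_CACHE_BYTES);	/* round down */
	unsigned long end = ALIGN((unsigned long)buf + len, L1_CACHE_BYTES);	/* round up */

	do_cache_op(start, end);	/* end is exclusive */
}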
@@ -116,7 +113,7 @@ struct cpu_cache_fns {
void (*dma_unmap_area)(const void *, size_t, int);
void (*dma_flush_range)(const void *, const void *);
-};
+} __no_randomize_layout;
/*
* Select the calling method
@@ -140,8 +137,6 @@ extern struct cpu_cache_fns cpu_cache;
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
-#define dmac_map_area cpu_cache.dma_map_area
-#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_flush_range cpu_cache.dma_flush_range
#else
@@ -161,8 +156,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
-extern void dmac_map_area(const void *, size_t, int);
-extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);
#endif
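As context, not part of this patch: drivers normally reach the maintenance described above through the DMA-mapping API rather than by calling dmac_flush_range() directly. A minimal streaming-DMA sketch:

#include <linux/dma-mapping.h>

static int dma_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... point the device at 'handle' and start the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}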
@@ -212,6 +205,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
static inline void __flush_icache_all(void)
{
__flush_icache_preferred();
+ dsb(ishst);
}
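A rough sketch, not part of this patch, of the D/I-cache synchronisation a caller performs after writing instructions into memory; the dsb(ishst) added above ensures the invalidation has completed before the new code runs:

#include <linux/string.h>

static void install_code(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);			/* write the new instructions */
	__cpuc_flush_dcache_area(dst, len);	/* clean them out of the D-cache */
	__flush_icache_all();			/* drop stale I-cache contents */
	/* an isb() is still needed before branching to dst on this CPU */
}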
/*
@@ -237,14 +231,15 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
vma->vm_flags);
}
-static inline void
-vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+static inline void vivt_flush_cache_pages(struct vm_area_struct *vma,
+ unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
struct mm_struct *mm = vma->vm_mm;
if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
unsigned long addr = user_addr & PAGE_MASK;
- __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
+ __cpuc_flush_user_range(addr, addr + nr * PAGE_SIZE,
+ vma->vm_flags);
}
}
@@ -253,23 +248,24 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
vivt_flush_cache_range(vma,start,end)
-#define flush_cache_page(vma,addr,pfn) \
- vivt_flush_cache_page(vma,addr,pfn)
+#define flush_cache_pages(vma, addr, pfn, nr) \
+ vivt_flush_cache_pages(vma, addr, pfn, nr)
#else
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr,
+ unsigned long pfn, unsigned int nr);
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+#define flush_cache_page(vma, addr, pfn) flush_cache_pages(vma, addr, pfn, 1)
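The legacy single-page flush_cache_page() is now just the nr == 1 case of the batched interface. A sketch, not part of this patch, of a caller flushing a contiguous run of pages before rewriting their PTEs (prepare_remap() is a made-up name):

static void prepare_remap(struct vm_area_struct *vma, unsigned long addr,
			  unsigned long pfn, unsigned int nr)
{
	/* one call covers [addr, addr + nr * PAGE_SIZE) instead of a loop */
	flush_cache_pages(vma, addr, pfn, nr);

	/* ... now safe to modify the page tables for that range ... */
}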
/*
- * flush_cache_user_range is used when we want to ensure that the
+ * flush_icache_user_range is used when we want to ensure that the
* Harvard caches are synchronised for the user space address range.
* This is used for the ARM private sys_cacheflush system call.
*/
-#define flush_cache_user_range(start,end) \
- __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
+#define flush_icache_user_range(s,e) __cpuc_coherent_user_range(s,e)
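From user space the path above is reached via the ARM-private cacheflush() system call, which compilers expose portably as __builtin___clear_cache(). A user-space sketch with a hypothetical emit_code() JIT helper:

#include <stddef.h>

extern size_t emit_code(char *buf);	/* hypothetical: writes instructions, returns length */

static void run_generated_code(char *buf)
{
	size_t len = emit_code(buf);
	__builtin___clear_cache(buf, buf + len);	/* D-cache clean + I-cache invalidate */
	((void (*)(void))buf)();			/* safe to execute now */
}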
/*
* Perform necessary cache operations to ensure that data previously
@@ -287,7 +283,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
* flush_dcache_page is used when the kernel has written to the page
* cache page at virtual address page->virtual.
*
- * If this page isn't mapped (ie, page_mapping == NULL), or it might
+ * If this page isn't mapped (ie, folio_mapping == NULL), or it might
* have userspace mappings, then we _must_ always clean + invalidate
* the dcache entries associated with the kernel mapping.
*
@@ -296,8 +292,11 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
* See update_mmu_cache for the user space part.
*/
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page *);
+void flush_dcache_page(struct page *);
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
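The caller pattern the comment above describes, as a sketch (not part of this patch): write to the page through its kernel mapping, then call flush_dcache_page() so user-space aliases see the new data.

#include <linux/highmem.h>
#include <linux/string.h>

static void write_to_pagecache_page(struct page *page, const void *data, size_t len)
{
	void *kaddr = kmap_local_page(page);

	memcpy(kaddr, data, len);	/* modify via the kernel alias */
	kunmap_local(kaddr);
	flush_dcache_page(page);	/* clean/invalidate for any user mappings */
}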
+#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
{
if ((cache_is_vivt() || cache_is_vipt_aliasing()))
@@ -319,22 +318,8 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
__flush_anon_page(vma, page, vmaddr);
}
-#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-extern void flush_kernel_dcache_page(struct page *);
-
-#define flush_dcache_mmap_lock(mapping) \
- spin_lock_irq(&(mapping)->tree_lock)
-#define flush_dcache_mmap_unlock(mapping) \
- spin_unlock_irq(&(mapping)->tree_lock)
-
-#define flush_icache_user_range(vma,page,addr,len) \
- flush_dcache_page(page)
-
-/*
- * We don't appear to need to do anything here. In fact, if we did, we'd
- * duplicate cache flushing elsewhere performed by flush_dcache_page().
- */
-#define flush_icache_page(vma,page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
/*
* flush_cache_vmap() is used when creating mappings (eg, via vmap,
@@ -352,9 +337,11 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
* set_pte_at() called from vmap_pte_range() does not
* have a DSB after cleaning the cache line.
*/
- dsb();
+ dsb(ishst);
}
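flush_cache_vmap() is not called by drivers directly; the generic vmalloc/vmap code invokes it once the new PTEs are in place. A sketch, not part of this patch:

#include <linux/vmalloc.h>
#include <linux/string.h>

static void *map_pages_and_zero(struct page **pages, unsigned int nr)
{
	void *virt = vmap(pages, nr, VM_MAP, PAGE_KERNEL);

	if (virt)	/* core code has already run flush_cache_vmap() for the range */
		memset(virt, 0, nr * PAGE_SIZE);
	return virt;
}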
+#define flush_cache_vmap_early(start, end) do { } while (0)
+
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
if (!cache_is_vipt_nonaliasing())
@@ -436,4 +423,54 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
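sync_cache_w()/sync_cache_r() act on a single object, for exchanging a value with an observer that is not (yet) cache-coherent, such as a secondary CPU still running with its caches off. A sketch, not part of this patch, with a made-up handshake_flag:

static int handshake_flag;

static void publish(int v)
{
	handshake_flag = v;
	sync_cache_w(&handshake_flag);	/* write back so the other agent sees the store */
}

static int peek(void)
{
	sync_cache_r(&handshake_flag);	/* discard stale cached copies before reading */
	return handshake_flag;
}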
+/*
+ * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
+ * To do so we must:
+ *
+ * - Clear the SCTLR.C bit to prevent further cache allocations
+ * - Flush the desired level of cache
+ * - Clear the ACTLR "SMP" bit to disable local coherency
+ *
+ * ... and so without any intervening memory access in between those steps,
+ * not even to the stack.
+ *
+ * WARNING -- After this has been called:
+ *
+ * - No ldrex/strex (and similar) instructions must be used.
+ * - The CPU is obviously no longer coherent with the other CPUs.
+ * - This is unlikely to work as expected if Linux is running non-secure.
+ *
+ * Note:
+ *
+ * - This is known to apply to several ARMv7 processor implementations,
+ * however some exceptions may exist. Caveat emptor.
+ *
+ * - The clobber list is dictated by the call to v7_flush_dcache_*.
+ */
+#define v7_exit_coherency_flush(level) \
+ asm volatile( \
+ ".arch armv7-a \n\t" \
+ "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
+ "bic r0, r0, #"__stringify(CR_C)" \n\t" \
+ "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
+ "isb \n\t" \
+ "bl v7_flush_dcache_"__stringify(level)" \n\t" \
+ "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \
+ "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
+ "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
+ "isb \n\t" \
+ "dsb" \
+ : : : "r0","r1","r2","r3","r4","r5","r6", \
+ "r9","r10","ip","lr","memory" )
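A sketch, not part of this patch, of how a platform's CPU-offline path might use the macro; my_platform_cpu_die() is a made-up name, and 'louis' selects v7_flush_dcache_louis (the cache levels owned by this CPU) while 'all' selects v7_flush_dcache_all:

static void my_platform_cpu_die(unsigned int cpu)
{
	v7_exit_coherency_flush(louis);	/* expands to the asm sequence above */

	/*
	 * Now outside the coherency domain: park until the platform powers
	 * the CPU off, avoiding coherent memory accesses per the warning.
	 */
	while (1)
		asm volatile("wfi");
}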
+
+void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
+ void *kaddr, unsigned long len);
+
+
+#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
+void check_cpu_icache_size(int cpuid);
+#else
+static inline void check_cpu_icache_size(int cpuid) { }
+#endif
+
#endif