Diffstat (limited to 'arch/csky/abiv2/cacheflush.c')
-rw-r--r--	arch/csky/abiv2/cacheflush.c	91
1 file changed, 66 insertions(+), 25 deletions(-)
diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
index 5bb887b275e1..876028b1083f 100644
--- a/arch/csky/abiv2/cacheflush.c
+++ b/arch/csky/abiv2/cacheflush.c
@@ -5,47 +5,88 @@
 #include <linux/highmem.h>
 #include <linux/mm.h>
 #include <asm/cache.h>
+#include <asm/tlbflush.h>
 
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+		unsigned long address, pte_t *pte, unsigned int nr)
 {
-	unsigned long start;
+	unsigned long pfn = pte_pfn(*pte);
+	struct folio *folio;
+	unsigned int i;
 
-	start = (unsigned long) kmap_atomic(page);
+	flush_tlb_page(vma, address);
 
-	cache_wbinv_range(start, start + PAGE_SIZE);
+	if (!pfn_valid(pfn))
+		return;
 
-	kunmap_atomic((void *)start);
-}
+	folio = page_folio(pfn_to_page(pfn));
 
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
-			     unsigned long vaddr, int len)
-{
-	unsigned long kaddr;
+	if (test_and_set_bit(PG_dcache_clean, &folio->flags))
+		return;
+
+	icache_inv_range(address, address + nr*PAGE_SIZE);
+	for (i = 0; i < folio_nr_pages(folio); i++) {
+		unsigned long addr = (unsigned long) kmap_local_folio(folio,
+							i * PAGE_SIZE);
 
-	kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);
+		dcache_wb_range(addr, addr + PAGE_SIZE);
+		if (vma->vm_flags & VM_EXEC)
+			icache_inv_range(addr, addr + PAGE_SIZE);
+		kunmap_local((void *) addr);
+	}
+}
 
-	cache_wbinv_range(kaddr, kaddr + len);
+void flush_icache_deferred(struct mm_struct *mm)
+{
+	unsigned int cpu = smp_processor_id();
+	cpumask_t *mask = &mm->context.icache_stale_mask;
 
-	kunmap_atomic((void *)kaddr);
+	if (cpumask_test_cpu(cpu, mask)) {
+		cpumask_clear_cpu(cpu, mask);
+		/*
+		 * Ensure the remote hart's writes are visible to this hart.
+		 * This pairs with a barrier in flush_icache_mm_range.
+		 */
+		smp_mb();
+		local_icache_inv_all(NULL);
+	}
 }
 
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t *pte)
+void flush_icache_mm_range(struct mm_struct *mm,
+		unsigned long start, unsigned long end)
 {
-	unsigned long addr, pfn;
-	struct page *page;
+	unsigned int cpu;
+	cpumask_t others, *mask;
 
-	pfn = pte_pfn(*pte);
-	if (unlikely(!pfn_valid(pfn)))
-		return;
+	preempt_disable();
 
-	page = pfn_to_page(pfn);
-	if (page == ZERO_PAGE(0))
+#ifdef CONFIG_CPU_HAS_ICACHE_INS
+	if (mm == current->mm) {
+		icache_inv_range(start, end);
+		preempt_enable();
 		return;
+	}
+#endif
+
+	/* Mark every hart's icache as needing a flush for this MM. */
+	mask = &mm->context.icache_stale_mask;
+	cpumask_setall(mask);
+
+	/* Flush this hart's I$ now, and mark it as flushed. */
+	cpu = smp_processor_id();
+	cpumask_clear_cpu(cpu, mask);
+	local_icache_inv_all(NULL);
 
-	addr = (unsigned long) kmap_atomic(page);
+	/*
+	 * Flush the I$ of other harts concurrently executing, and mark them as
+	 * flushed.
+	 */
+	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
 
-	cache_wbinv_range(addr, addr + PAGE_SIZE);
+	if (mm != current->active_mm || !cpumask_empty(&others)) {
+		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
+		cpumask_clear(mask);
+	}
 
-	kunmap_atomic((void *) addr);
+	preempt_enable();
 }
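
Note: update_mmu_cache_range() is the arch hook of the new page table range
API: generic mm code installs nr contiguous ptes with set_ptes() and then
calls the hook once for the whole span, which is why the function receives a
pte pointer plus a count instead of a single address. A minimal caller
sketch, assuming the surrounding names (mm, vmf, vma, addr, ptep, pte, nr)
come from the fault path rather than from this commit:

	/* Sketch: how generic mm code drives the hook added above. */
	set_ptes(mm, addr, ptep, pte, nr);	/* install nr contiguous ptes */
	update_mmu_cache_range(vmf, vma, addr, ptep, nr); /* arch cache sync */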
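
Note: flush_icache_deferred() only pays off because the context-switch path
consumes icache_stale_mask, so a hart whose I$ was marked stale by
flush_icache_mm_range() invalidates it lazily, just before it next runs the
mm. A sketch of that consumer, modeled on csky's switch_mm() in
arch/csky/include/asm/mmu_context.h; abbreviated, not part of this diff:

	static inline void
	switch_mm(struct mm_struct *prev, struct mm_struct *next,
		  struct task_struct *tsk)
	{
		unsigned int cpu = smp_processor_id();

		if (prev != next)
			check_and_switch_context(next, cpu);

		TLBMISS_HANDLER_SETUP_PGD(next->pgd);
		write_mmu_entryhi(next->context.asid.counter);

		/* Invalidate this hart's stale I$ before running next. */
		flush_icache_deferred(next);
	}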