Diffstat (limited to 'arch/csky/abiv1/cacheflush.c')
-rw-r--r--  arch/csky/abiv1/cacheflush.c  73
1 file changed, 48 insertions, 25 deletions
diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c
index 10af8b6fe322..4bc0aad3cf8a 100644
--- a/arch/csky/abiv1/cacheflush.c
+++ b/arch/csky/abiv1/cacheflush.c
@@ -4,49 +4,72 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
+#include <linux/pagemap.h>
 #include <linux/syscalls.h>
 #include <linux/spinlock.h>
 #include <asm/page.h>
 #include <asm/cache.h>
 #include <asm/cacheflush.h>
 #include <asm/cachectl.h>
+#include <asm/tlbflush.h>
 
-void flush_dcache_page(struct page *page)
+#define PG_dcache_clean	PG_arch_1
+
+void flush_dcache_folio(struct folio *folio)
 {
-	struct address_space *mapping = page_mapping(page);
-	unsigned long addr;
+	struct address_space *mapping;
 
-	if (mapping && !mapping_mapped(mapping)) {
-		set_bit(PG_arch_1, &(page)->flags);
+	if (is_zero_pfn(folio_pfn(folio)))
 		return;
+
+	mapping = folio_flush_mapping(folio);
+
+	if (mapping && !folio_mapped(folio))
+		clear_bit(PG_dcache_clean, &folio->flags.f);
+	else {
+		dcache_wbinv_all();
+		if (mapping)
+			icache_inv_all();
+		set_bit(PG_dcache_clean, &folio->flags.f);
 	}
+}
+EXPORT_SYMBOL(flush_dcache_folio);
 
-	/*
-	 * We could delay the flush for the !page_mapping case too.  But that
-	 * case is for exec env/arg pages and those are %99 certainly going to
-	 * get faulted into the tlb (and thus flushed) anyways.
-	 */
-	addr = (unsigned long) page_address(page);
-	dcache_wb_range(addr, addr + PAGE_SIZE);
+void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
 }
+EXPORT_SYMBOL(flush_dcache_page);
 
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t *pte)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+		unsigned long addr, pte_t *ptep, unsigned int nr)
 {
-	unsigned long addr;
-	struct page *page;
-	unsigned long pfn;
+	unsigned long pfn = pte_pfn(*ptep);
+	struct folio *folio;
 
-	pfn = pte_pfn(*pte);
-	if (unlikely(!pfn_valid(pfn)))
+	flush_tlb_page(vma, addr);
+
+	if (!pfn_valid(pfn))
+		return;
+
+	if (is_zero_pfn(pfn))
 		return;
 
-	page = pfn_to_page(pfn);
-	addr = (unsigned long) page_address(page);
+	folio = page_folio(pfn_to_page(pfn));
+	if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
+		dcache_wbinv_all();
 
-	if (vma->vm_flags & VM_EXEC ||
-	    pages_do_alias(addr, address & PAGE_MASK))
-		cache_wbinv_all();
+	if (folio_flush_mapping(folio)) {
+		if (vma->vm_flags & VM_EXEC)
+			icache_inv_all();
+	}
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		unsigned long end)
+{
+	dcache_wbinv_all();
 
-	clear_bit(PG_arch_1, &(page)->flags);
+	if (vma->vm_flags & VM_EXEC)
+		icache_inv_all();
 }
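For context on how the new hook is reached: in the page table range API conversion this diff belongs to, the legacy single-PTE update_mmu_cache() becomes a thin wrapper around update_mmu_cache_range(). A minimal sketch of that wrapper shape, assuming it is defined in the arch's asm/pgtable.h (the definition site is not part of this diff):

/*
 * Sketch, not part of the patch above: the compatibility shape used by
 * the page table range API conversion.  The old one-PTE hook is a
 * one-entry call into the new range hook, so existing callers keep
 * working while batched callers such as set_ptes() can install several
 * PTEs and let the arch flush once per folio rather than once per page.
 */
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

The PG_dcache_clean deferral above pairs with this: flush_dcache_folio() only clears the bit when a page-cache folio has no user mappings yet, and update_mmu_cache_range() performs the deferred dcache_wbinv_all() exactly once, when test_and_set_bit() first finds the bit clear.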