-rw-r--r--  arch/parisc/include/asm/cacheflush.h |  31
-rw-r--r--  arch/parisc/include/asm/page.h       |   6
-rw-r--r--  arch/parisc/kernel/cache.c           | 326
3 files changed, 236 insertions(+), 127 deletions(-)
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index e8b4a03343d3..8d03b3b26229 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -59,20 +59,12 @@ void flush_dcache_page(struct page *page);
flush_kernel_icache_range_asm(s,e); \
} while (0)
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { \
- flush_cache_page(vma, vaddr, page_to_pfn(page)); \
- memcpy(dst, src, len); \
- flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
-} while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-do { \
- flush_cache_page(vma, vaddr, page_to_pfn(page)); \
- memcpy(dst, src, len); \
-} while (0)
-
-void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long user_vaddr, void *dst, void *src, int len);
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long user_vaddr, void *dst, void *src, int len);
+void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+ unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
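
These out-of-line definitions are consumed by the generic remote-access path (ptrace, /proc/<pid>/mem). A minimal sketch of the caller side, modelled on __access_remote_vm() in mm/memory.c (exact names and locking vary by kernel version; remote_copy_sketch() is a hypothetical name):

    static void remote_copy_sketch(struct vm_area_struct *vma, struct page *page,
                                   unsigned long addr, unsigned int offset,
                                   void *buf, int bytes, bool write)
    {
            void *maddr = kmap_local_page(page);

            if (write)
                    /* flush the user alias, copy, then flush the kernel
                     * alias so the target's instruction fetch sees e.g.
                     * freshly written breakpoints */
                    copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
            else
                    copy_from_user_page(vma, page, addr, buf, maddr + offset, bytes);

            kunmap_local(maddr);
    }
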
@@ -80,16 +72,7 @@ void flush_cache_range(struct vm_area_struct *vma,
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
#define ARCH_HAS_FLUSH_ANON_PAGE
-static inline void
-flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
-{
- if (PageAnon(page)) {
- flush_tlb_page(vma, vmaddr);
- preempt_disable();
- flush_dcache_page_asm(page_to_phys(page), vmaddr);
- preempt_enable();
- }
-}
+void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
#define ARCH_HAS_FLUSH_ON_KUNMAP
static inline void kunmap_flush_on_unmap(void *addr)
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 0561568f7b48..6faaaa3ebe9b 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -26,12 +26,14 @@
#define copy_page(to, from) copy_page_asm((void *)(to), (void *)(from))
struct page;
+struct vm_area_struct;
void clear_page_asm(void *page);
void copy_page_asm(void *to, void *from);
#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
- struct page *pg);
+void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr,
+ struct vm_area_struct *vma);
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
/*
* These are used to make use of C type-checking..
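
Defining __HAVE_ARCH_COPY_USER_HIGHPAGE above suppresses the generic inline in include/linux/highmem.h, which looks roughly like the sketch below (modelled on the generic fallback; parisc replaces it so the source page can be flushed through a congruent alias before the copy):

    #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
    static inline void copy_user_highpage(struct page *to, struct page *from,
                    unsigned long vaddr, struct vm_area_struct *vma)
    {
            char *vfrom = kmap_local_page(from);
            char *vto = kmap_local_page(to);

            copy_user_page(vto, vfrom, vaddr, to);
            kunmap_local(vto);
            kunmap_local(vfrom);
    }
    #endif
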
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index e7911225a4f8..0fd04073d4b6 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -27,6 +27,7 @@
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
+#include <asm/mmu_context.h>
int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
@@ -91,7 +92,7 @@ static inline void flush_data_cache(void)
}
-/* Virtual address of pfn. */
+/* Kernel virtual address of pfn. */
#define pfn_va(pfn) __va(PFN_PHYS(pfn))
void
@@ -124,11 +125,13 @@ show_cache_info(struct seq_file *m)
cache_info.ic_size/1024 );
if (cache_info.dc_loop != 1)
snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
- seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
+ seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
cache_info.dc_size/1024,
(cache_info.dc_conf.cc_wt ? "WT":"WB"),
(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
- ((cache_info.dc_loop == 1) ? "direct mapped" : buf));
+ ((cache_info.dc_loop == 1) ? "direct mapped" : buf),
+ cache_info.dc_conf.cc_alias
+ );
seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
cache_info.it_size,
cache_info.dt_size,
@@ -324,25 +327,81 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
preempt_enable();
}
-static inline void
-__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
- unsigned long physaddr)
+static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
- if (!static_branch_likely(&parisc_has_cache))
- return;
+ unsigned long flags, space, pgd, prot;
+#ifdef CONFIG_TLB_PTLOCK
+ unsigned long pgd_lock;
+#endif
+
+ vmaddr &= PAGE_MASK;
+
preempt_disable();
- purge_dcache_page_asm(physaddr, vmaddr);
+
+ /* Set context for flush */
+ local_irq_save(flags);
+ prot = mfctl(8);
+ space = mfsp(SR_USER);
+ pgd = mfctl(25);
+#ifdef CONFIG_TLB_PTLOCK
+ pgd_lock = mfctl(28);
+#endif
+ switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
+ local_irq_restore(flags);
+
+ flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
if (vma->vm_flags & VM_EXEC)
- flush_icache_page_asm(physaddr, vmaddr);
+ flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
+ flush_tlb_page(vma, vmaddr);
+
+ /* Restore previous context */
+ local_irq_save(flags);
+#ifdef CONFIG_TLB_PTLOCK
+ mtctl(pgd_lock, 28);
+#endif
+ mtctl(pgd, 25);
+ mtsp(space, SR_USER);
+ mtctl(prot, 8);
+ local_irq_restore(flags);
+
preempt_enable();
}
+static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
+{
+ pte_t *ptep = NULL;
+ pgd_t *pgd = mm->pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ if (!pgd_none(*pgd)) {
+ p4d = p4d_offset(pgd, addr);
+ if (!p4d_none(*p4d)) {
+ pud = pud_offset(p4d, addr);
+ if (!pud_none(*pud)) {
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_none(*pmd))
+ ptep = pte_offset_map(pmd, addr);
+ }
+ }
+ }
+ return ptep;
+}
+
+static inline bool pte_needs_flush(pte_t pte)
+{
+ return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
+ == (_PAGE_PRESENT | _PAGE_ACCESSED);
+}
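
pte_needs_flush() keys off three software-visible PTE bits: only a page that is present and has been referenced can have lines in the cache, and a page mapped with _PAGE_NO_CACHE never enters the cache at all. Spelled out:

    _PAGE_PRESENT | _PAGE_ACCESSED                    -> flush
    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE   -> skip (uncacheable)
    _PAGE_PRESENT                                     -> skip (never referenced)
    (pte not present)                                 -> skip
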
+
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping_file(page);
struct vm_area_struct *mpnt;
unsigned long offset;
unsigned long addr, old_addr = 0;
+ unsigned long count = 0;
pgoff_t pgoff;
if (mapping && !mapping_mapped(mapping)) {
@@ -357,33 +416,52 @@ void flush_dcache_page(struct page *page)
pgoff = page->index;
- /* We have carefully arranged in arch_get_unmapped_area() that
+ /*
+ * We have carefully arranged in arch_get_unmapped_area() that
* *any* mappings of a file are always congruently mapped (whether
* declared as MAP_PRIVATE or MAP_SHARED), so we only need
- * to flush one address here for them all to become coherent */
-
+ * to flush one address here for them all to become coherent
+ * on machines that support equivalent aliasing
+ */
flush_dcache_mmap_lock(mapping);
vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
addr = mpnt->vm_start + offset;
+ if (parisc_requires_coherency()) {
+ pte_t *ptep;
- /* The TLB is the engine of coherence on parisc: The
- * CPU is entitled to speculate any page with a TLB
- * mapping, so here we kill the mapping then flush the
- * page along a special flush only alias mapping.
- * This guarantees that the page is no-longer in the
- * cache for any process and nor may it be
- * speculatively read in (until the user or kernel
- * specifically accesses it, of course) */
-
- flush_tlb_page(mpnt, addr);
- if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
- != (addr & (SHM_COLOUR - 1))) {
- __flush_cache_page(mpnt, addr, page_to_phys(page));
- if (parisc_requires_coherency() && old_addr)
- printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
- old_addr = addr;
+ ptep = get_ptep(mpnt->vm_mm, addr);
+ if (ptep && pte_needs_flush(*ptep))
+ flush_user_cache_page(mpnt, addr);
+ } else {
+ /*
+ * The TLB is the engine of coherence on parisc:
+ * The CPU is entitled to speculate any page
+ * with a TLB mapping, so here we kill the
+ * mapping then flush the page along a special
+ * flush only alias mapping. This guarantees that
+ * the page is no longer in the cache for any
+ * process, nor may it be speculatively read
+ * in (until the user or kernel specifically
+ * accesses it, of course)
+ */
+ flush_tlb_page(mpnt, addr);
+ if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
+ != (addr & (SHM_COLOUR - 1))) {
+ __flush_cache_page(mpnt, addr, page_to_phys(page));
+ /*
+ * Software is allowed to have any number
+ * of private mappings to a page.
+ */
+ if (!(mpnt->vm_flags & VM_SHARED))
+ continue;
+ if (old_addr)
+ pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
+ old_addr, addr, mpnt->vm_file);
+ old_addr = addr;
+ }
}
+ WARN_ON(++count == 4096);
}
flush_dcache_mmap_unlock(mapping);
}
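
The old_addr test above relies on the congruence guarantee from arch_get_unmapped_area(): a second mapping needs no flush of its own if it agrees with an already-flushed one modulo SHM_COLOUR (0x00400000, i.e. 4 MiB, per asm/shmparam.h). A worked example with hypothetical user addresses:

    0x40012000 & (SHM_COLOUR - 1)  ==  0x012000
    0x80412000 & (SHM_COLOUR - 1)  ==  0x012000   /* same colour: flush skipped */
    0x80013000 & (SHM_COLOUR - 1)  ==  0x013000   /* new colour: flushed, and reported
                                                     as inequivalent if both are shared */
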
@@ -403,7 +481,7 @@ void __init parisc_setup_cache_timing(void)
{
unsigned long rangetime, alltime;
unsigned long size;
- unsigned long threshold;
+ unsigned long threshold, threshold2;
alltime = mfctl(16);
flush_data_cache();
@@ -417,11 +495,16 @@ void __init parisc_setup_cache_timing(void)
printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
alltime, size, rangetime);
- threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
- if (threshold > cache_info.dc_size)
- threshold = cache_info.dc_size;
- if (threshold)
- parisc_cache_flush_threshold = threshold;
+ threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
+ pr_info("Calculated flush threshold is %lu KiB\n",
+ threshold/1024);
+
+ /*
+ * The threshold computed above isn't very reliable. The following
+ * heuristic works reasonably well on c8000/rp3440.
+ */
+ threshold2 = cache_info.dc_size * num_online_cpus();
+ parisc_cache_flush_threshold = threshold2;
printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
parisc_cache_flush_threshold/1024);
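
The replacement heuristic simply scales the data-cache size by the number of online CPUs, since flush_data_cache() must run on every CPU. A worked example with hypothetical sizes:

    /* hypothetical: cache_info.dc_size = 768 KiB, 4 CPUs online */
    parisc_cache_flush_threshold = (768 << 10) * 4;    /* = 3 MiB */

With that value, a flush_cache_range() spanning 3 MiB or more falls back to a whole-cache flush, while smaller ranges take the per-page loop.
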
@@ -477,19 +560,47 @@ void flush_kernel_dcache_page_addr(void *addr)
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
- struct page *pg)
+static void flush_cache_page_if_present(struct vm_area_struct *vma,
+ unsigned long vmaddr, unsigned long pfn)
{
- /* Copy using kernel mapping. No coherency is needed (all in
- kunmap) for the `to' page. However, the `from' page needs to
- be flushed through a mapping equivalent to the user mapping
- before it can be accessed through the kernel mapping. */
- preempt_disable();
- flush_dcache_page_asm(__pa(vfrom), vaddr);
- copy_page_asm(vto, vfrom);
- preempt_enable();
+ pte_t *ptep = get_ptep(vma->vm_mm, vmaddr);
+
+ /*
+ * The pte check is racy and sometimes the flush will trigger
+ * a non-access TLB miss. Hopefully, the page has already been
+ * flushed.
+ */
+ if (ptep && pte_needs_flush(*ptep))
+ flush_cache_page(vma, vmaddr, pfn);
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ void *kto, *kfrom;
+
+ kfrom = kmap_local_page(from);
+ kto = kmap_local_page(to);
+ flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
+ copy_page_asm(kto, kfrom);
+ kunmap_local(kto);
+ kunmap_local(kfrom);
+}
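
Note that the kunmap_local() calls run in the reverse (LIFO) order of the kmaps, as the local-kmap API requires. copy_user_highpage() is what the copy-on-write fault path uses to duplicate a page; a minimal sketch of a caller, modelled on the COW copy in mm/memory.c (cow_copy_sketch() is a hypothetical name):

    static void cow_copy_sketch(struct page *dst, struct page *src,
                                unsigned long addr, struct vm_area_struct *vma)
    {
            /* the arch override flushes the user alias of the source
             * before copying, so the destination gets current data
             * even on VIPT-aliasing machines */
            copy_user_highpage(dst, src, addr, vma);
    }
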
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long user_vaddr, void *dst, void *src, int len)
+{
+ flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+ memcpy(dst, src, len);
+ flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long user_vaddr, void *dst, void *src, int len)
+{
+ flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+ memcpy(dst, src, len);
}
-EXPORT_SYMBOL(copy_user_page);
/* __flush_tlb_range()
*
@@ -520,92 +631,105 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,
return 0;
}
-static inline unsigned long mm_total_size(struct mm_struct *mm)
+static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- struct vm_area_struct *vma;
- unsigned long usize = 0;
-
- for (vma = mm->mmap; vma; vma = vma->vm_next)
- usize += vma->vm_end - vma->vm_start;
- return usize;
-}
-
-static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
-{
- pte_t *ptep = NULL;
+ unsigned long addr, pfn;
+ pte_t *ptep;
- if (!pgd_none(*pgd)) {
- p4d_t *p4d = p4d_offset(pgd, addr);
- if (!p4d_none(*p4d)) {
- pud_t *pud = pud_offset(p4d, addr);
- if (!pud_none(*pud)) {
- pmd_t *pmd = pmd_offset(pud, addr);
- if (!pmd_none(*pmd))
- ptep = pte_offset_map(pmd, addr);
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
+ /*
+ * The vma can contain pages that aren't present. Although
+ * the pte search is expensive, we need the pte to find the
+ * page pfn and to check whether the page should be flushed.
+ */
+ ptep = get_ptep(vma->vm_mm, addr);
+ if (ptep && pte_needs_flush(*ptep)) {
+ if (parisc_requires_coherency()) {
+ flush_user_cache_page(vma, addr);
+ } else {
+ pfn = pte_pfn(*ptep);
+ if (WARN_ON(!pfn_valid(pfn)))
+ return;
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
}
- return ptep;
}
-static void flush_cache_pages(struct vm_area_struct *vma, struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline unsigned long mm_total_size(struct mm_struct *mm)
{
- unsigned long addr, pfn;
- pte_t *ptep;
+ struct vm_area_struct *vma;
+ unsigned long usize = 0;
- for (addr = start; addr < end; addr += PAGE_SIZE) {
- ptep = get_ptep(mm->pgd, addr);
- if (ptep) {
- pfn = pte_pfn(*ptep);
- flush_cache_page(vma, addr, pfn);
- }
- }
+ for (vma = mm->mmap; vma && usize < parisc_cache_flush_threshold; vma = vma->vm_next)
+ usize += vma->vm_end - vma->vm_start;
+ return usize;
}
void flush_cache_mm(struct mm_struct *mm)
{
struct vm_area_struct *vma;
- /* Flushing the whole cache on each cpu takes forever on
- rp3440, etc. So, avoid it if the mm isn't too big. */
- if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
- mm_total_size(mm) >= parisc_cache_flush_threshold) {
- if (mm->context.space_id)
- flush_tlb_all();
+ /*
+ * Flushing the whole cache on each cpu takes forever on
+ * rp3440, etc. So, avoid it if the mm isn't too big.
+ *
+ * Note that we must flush the entire cache on machines
+ * with aliasing caches to prevent random segmentation
+ * faults.
+ */
+ if (!parisc_requires_coherency()
+ || mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
+ return;
+ flush_tlb_all();
flush_cache_all();
return;
}
+ /* Flush mm */
for (vma = mm->mmap; vma; vma = vma->vm_next)
- flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end);
+ flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}
-void flush_cache_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
- end - start >= parisc_cache_flush_threshold) {
- if (vma->vm_mm->context.space_id)
- flush_tlb_range(vma, start, end);
+ if (!parisc_requires_coherency()
+ || end - start >= parisc_cache_flush_threshold) {
+ if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
+ return;
+ flush_tlb_range(vma, start, end);
flush_cache_all();
return;
}
- flush_cache_pages(vma, vma->vm_mm, start, end);
+ flush_cache_pages(vma, start, end);
}
-void
-flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
+void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
- if (pfn_valid(pfn)) {
- if (likely(vma->vm_mm->context.space_id)) {
- flush_tlb_page(vma, vmaddr);
- __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
- } else {
- __purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
- }
+ if (WARN_ON(!pfn_valid(pfn)))
+ return;
+ if (parisc_requires_coherency())
+ flush_user_cache_page(vma, vmaddr);
+ else
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+}
+
+void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
+{
+ if (!PageAnon(page))
+ return;
+
+ if (parisc_requires_coherency()) {
+ flush_user_cache_page(vma, vmaddr);
+ return;
}
+
+ flush_tlb_page(vma, vmaddr);
+ preempt_disable();
+ flush_dcache_page_asm(page_to_phys(page), vmaddr);
+ preempt_enable();
}
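
Anonymous pages have no address_space behind them, so flush_dcache_page() cannot find their user mappings; pinning paths call flush_anon_page() directly instead. A sketch of the consumer side, modelled on __get_user_pages() in mm/gup.c (gup_flush_sketch() is a hypothetical name):

    static void gup_flush_sketch(struct vm_area_struct *vma, struct page *page,
                                 unsigned long start)
    {
            flush_anon_page(vma, page, start);  /* anonymous: flush via the user alias */
            flush_dcache_page(page);            /* file-backed: flush all file mappings */
    }
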
void flush_kernel_vmap_range(void *vaddr, int size)