author    Max Filippov <jcmvbkbc@gmail.com>  2017-07-28 17:42:59 -0700
committer Max Filippov <jcmvbkbc@gmail.com>  2017-07-28 18:35:13 -0700
commit    6d0f581d1768d3eaba15776e7dd1fdfec10cfe36 (patch)
tree      bc94af25f0162390340982ddd3c97f10bc577bdf /arch/xtensa
parent    536dcc9c34035778892a7e3cbf45166ce73f8d34 (diff)
xtensa: fix cache aliasing handling code for WT cache
Currently, building the kernel for an xtensa core with an aliasing WT cache
fails with the following messages:

  mm/memory.c:2152: undefined reference to `flush_dcache_page'
  mm/memory.c:2332: undefined reference to `local_flush_cache_page'
  mm/memory.c:1919: undefined reference to `local_flush_cache_range'
  mm/memory.c:4179: undefined reference to `copy_to_user_page'
  mm/memory.c:4183: undefined reference to `copy_from_user_page'

This happens because the implementation of these functions is only
compiled when the data cache is WB, which is wrong: even when the data
cache doesn't need flushing it still needs invalidation. The functions
__flush_[invalidate_]dcache_* are correctly defined for both WB and WT
caches (and even if they weren't, that would still be OK, just slower).

Fix this by providing the same implementation of the above functions
for both WB and WT cache.

Cc: stable@vger.kernel.org
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
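The shape of the fix is easy to see in miniature. The sketch below is an
illustration, not the actual contents of arch/xtensa/mm/cache.c (the stub
bodies are hypothetical): the old guard compiles the aliasing helpers out
entirely on write-through configurations, leaving mm/memory.c with
unresolved references, while the corrected guard keeps them and lets the
underlying __flush_invalidate_dcache_*() primitives degenerate to pure
invalidation on WT cores.

/* Illustrative sketch only -- simplified from the real code. */

/* Before: on a WT cache XCHAL_DCACHE_IS_WRITEBACK is 0, so none of
 * these functions are built, and mm/memory.c fails to link.
 */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
void flush_dcache_page(struct page *page)
{
	/* writeback + invalidate through the page alias */
}
#endif

/* After: build for any aliasing data cache.  On WT cores the
 * __flush_invalidate_dcache_*() primitives simply invalidate without
 * writing anything back, which is still correct (at worst slower).
 */
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
void flush_dcache_page(struct page *page)
{
	/* writeback (no-op on WT) + invalidate through the page alias */
}
#endif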
Diffstat (limited to 'arch/xtensa')
-rw-r--r--  arch/xtensa/mm/cache.c | 10
 1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 1a804a2f9a5b..dbb1cdef3663 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -120,10 +120,6 @@ void copy_user_highpage(struct page *dst, struct page *src,
 	preempt_enable();
 }
 
-#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
-
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
-
 /*
  * Any time the kernel writes to a user page cache page, or it is about to
  * read from a page cache page this routine is called.
@@ -208,7 +204,7 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
 		__invalidate_icache_page_alias(virt, phys);
 }
 
-#endif
+#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
 
 void
 update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
@@ -225,7 +221,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 
 	flush_tlb_page(vma, addr);
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
 	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
 		unsigned long phys = page_to_phys(page);
@@ -256,7 +252,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
  * flush_dcache_page() on the page.
  */
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		unsigned long vaddr, void *dst, const void *src,