Diffstat (limited to 'arch/mips/mm/cache.c')

 arch/mips/mm/cache.c | 182 ++++++++++++++++----------------
 1 file changed, 78 insertions(+), 104 deletions(-)
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 33b409391ddb..df1ced4fc3b5 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -14,13 +14,16 @@
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <asm/bcache.h>
#include <asm/cacheflush.h>
-#include <asm/highmem.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>
+#include <asm/pgtable.h>
/* Cache operations. */
void (*flush_cache_all)(void);
@@ -36,7 +39,6 @@ EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
-EXPORT_SYMBOL_GPL(__flush_icache_user_range);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);
@@ -47,14 +49,30 @@ void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
/* MIPS specific cache operations */
-void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);
-EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);
+/*
+ * Dummy cache handling routine
+ */
+
+void cache_noop(void) {}
+
+#ifdef CONFIG_BOARD_SCACHE
+
+static struct bcache_ops no_sc_ops = {
+ .bc_enable = (void *)cache_noop,
+ .bc_disable = (void *)cache_noop,
+ .bc_wback_inv = (void *)cache_noop,
+ .bc_inv = (void *)cache_noop
+};
+
+struct bcache_ops *bcops = &no_sc_ops;
+#endif
+
#ifdef CONFIG_DMA_NONCOHERENT
/* DMA cache operations. */
@@ -81,13 +99,15 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
return 0;
}
-void __flush_dcache_page(struct page *page)
+void __flush_dcache_pages(struct page *page, unsigned int nr)
{
- struct address_space *mapping = page_mapping_file(page);
+ struct folio *folio = page_folio(page);
+ struct address_space *mapping = folio_flush_mapping(folio);
unsigned long addr;
+ unsigned int i;
if (mapping && !mapping_mapped(mapping)) {
- SetPageDcacheDirty(page);
+ folio_set_dcache_dirty(folio);
return;
}
@@ -96,25 +116,21 @@ void __flush_dcache_page(struct page *page)
* case is for exec env/arg pages and those are %99 certainly going to
* get faulted into the tlb (and thus flushed) anyways.
*/
- if (PageHighMem(page))
- addr = (unsigned long)kmap_atomic(page);
- else
- addr = (unsigned long)page_address(page);
-
- flush_data_cache_page(addr);
-
- if (PageHighMem(page))
- __kunmap_atomic((void *)addr);
+ for (i = 0; i < nr; i++) {
+ addr = (unsigned long)kmap_local_page(nth_page(page, i));
+ flush_data_cache_page(addr);
+ kunmap_local((void *)addr);
+ }
}
-
-EXPORT_SYMBOL(__flush_dcache_page);
+EXPORT_SYMBOL(__flush_dcache_pages);
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
unsigned long addr = (unsigned long) page_address(page);
+ struct folio *folio = page_folio(page);
if (pages_do_alias(addr, vmaddr)) {
- if (page_mapcount(page) && !Page_dcache_dirty(page)) {
+ if (folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
void *kaddr;
kaddr = kmap_coherent(page, vmaddr);
@@ -129,115 +145,73 @@ EXPORT_SYMBOL(__flush_anon_page);
void __update_cache(unsigned long address, pte_t pte)
{
- struct page *page;
+ struct folio *folio;
unsigned long pfn, addr;
int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;
+ unsigned int i;
pfn = pte_pfn(pte);
if (unlikely(!pfn_valid(pfn)))
return;
- page = pfn_to_page(pfn);
- if (Page_dcache_dirty(page)) {
- if (PageHighMem(page))
- addr = (unsigned long)kmap_atomic(page);
- else
- addr = (unsigned long)page_address(page);
-
- if (exec || pages_do_alias(addr, address & PAGE_MASK))
- flush_data_cache_page(addr);
- if (PageHighMem(page))
- __kunmap_atomic((void *)addr);
+ folio = page_folio(pfn_to_page(pfn));
+ address &= PAGE_MASK;
+ address -= offset_in_folio(folio, pfn << PAGE_SHIFT);
+
+ if (folio_test_dcache_dirty(folio)) {
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ addr = (unsigned long)kmap_local_folio(folio, i * PAGE_SIZE);
- ClearPageDcacheDirty(page);
+ if (exec || pages_do_alias(addr, address))
+ flush_data_cache_page(addr);
+ kunmap_local((void *)addr);
+ address += PAGE_SIZE;
+ }
+ folio_clear_dcache_dirty(folio);
}
}
unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
+#define PM(p) __pgprot(_page_cachable_default | (p))
+
+static pgprot_t protection_map[16] __ro_after_init;
+DECLARE_VM_GET_PAGE_PROT
+
static inline void setup_protection_map(void)
{
- if (cpu_has_rixi) {
- protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
-
- protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
- protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
- protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
- protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
-
- } else {
- protection_map[0] = PAGE_NONE;
- protection_map[1] = PAGE_READONLY;
- protection_map[2] = PAGE_COPY;
- protection_map[3] = PAGE_COPY;
- protection_map[4] = PAGE_READONLY;
- protection_map[5] = PAGE_READONLY;
- protection_map[6] = PAGE_COPY;
- protection_map[7] = PAGE_COPY;
- protection_map[8] = PAGE_NONE;
- protection_map[9] = PAGE_READONLY;
- protection_map[10] = PAGE_SHARED;
- protection_map[11] = PAGE_SHARED;
- protection_map[12] = PAGE_READONLY;
- protection_map[13] = PAGE_READONLY;
- protection_map[14] = PAGE_SHARED;
- protection_map[15] = PAGE_SHARED;
- }
+ protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[4] = PM(_PAGE_PRESENT);
+ protection_map[5] = PM(_PAGE_PRESENT);
+ protection_map[6] = PM(_PAGE_PRESENT);
+ protection_map[7] = PM(_PAGE_PRESENT);
+
+ protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+ _PAGE_NO_READ);
+ protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+ protection_map[12] = PM(_PAGE_PRESENT);
+ protection_map[13] = PM(_PAGE_PRESENT);
+ protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+ protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
}
+#undef PM
+
void cpu_cache_init(void)
{
- if (cpu_has_3k_cache) {
- extern void __weak r3k_cache_init(void);
-
+ if (IS_ENABLED(CONFIG_CPU_R3000) && cpu_has_3k_cache)
r3k_cache_init();
- }
- if (cpu_has_6k_cache) {
- extern void __weak r6k_cache_init(void);
-
- r6k_cache_init();
- }
- if (cpu_has_4k_cache) {
- extern void __weak r4k_cache_init(void);
-
+ if (IS_ENABLED(CONFIG_CPU_R4K_CACHE_TLB) && cpu_has_4k_cache)
r4k_cache_init();
- }
- if (cpu_has_8k_cache) {
- extern void __weak r8k_cache_init(void);
-
- r8k_cache_init();
- }
- if (cpu_has_tx39_cache) {
- extern void __weak tx39_cache_init(void);
-
- tx39_cache_init();
- }
-
- if (cpu_has_octeon_cache) {
- extern void __weak octeon_cache_init(void);
+ if (IS_ENABLED(CONFIG_CPU_CAVIUM_OCTEON) && cpu_has_octeon_cache)
octeon_cache_init();
- }
setup_protection_map();
}
-
-int __weak __uncached_access(struct file *file, unsigned long addr)
-{
- if (file->f_flags & O_DSYNC)
- return 1;
-
- return addr >= __pa(high_memory);
-}
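
A note on the kmap_local_folio() loop in __update_cache() above: the second
argument of kmap_local_folio() is a byte offset into the folio, not a page
index, which is why a page-by-page walk passes i * PAGE_SIZE. A minimal
sketch of the pattern (the folio and the per-page work are assumed given):

	unsigned int i;

	for (i = 0; i < folio_nr_pages(folio); i++) {
		/* map one page of the folio; the byte offset selects which */
		void *kaddr = kmap_local_folio(folio, i * PAGE_SIZE);

		/* ... operate on PAGE_SIZE bytes at kaddr ... */
		kunmap_local(kaddr);
	}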
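For context on the protection_map change: DECLARE_VM_GET_PAGE_PROT (from
include/linux/pgtable.h) generates the generic vm_get_page_prot() against
the now-static, __ro_after_init protection_map, which is why the array no
longer needs an external definition. The macro expands roughly to the
following (a sketch, not part of this diff):

	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		return protection_map[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	}
	EXPORT_SYMBOL(vm_get_page_prot);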
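The __flush_dcache_page() to __flush_dcache_pages() rename pairs with the
folio-aware entry points in arch/mips/include/asm/cacheflush.h. A hedged
sketch of the caller shape (the exact guard conditions live in that header,
not in this file):

	static inline void flush_dcache_folio(struct folio *folio)
	{
		if (cpu_has_dc_aliases)
			__flush_dcache_pages(&folio->page, folio_nr_pages(folio));
		else if (!cpu_has_ic_fills_f_dc)
			folio_set_dcache_dirty(folio);
	}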