Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/Makefile            |    1
-rw-r--r--  arch/mips/mm/c-octeon.c          |   39
-rw-r--r--  arch/mips/mm/c-r3k.c             |    5
-rw-r--r--  arch/mips/mm/c-r4k.c             |  189
-rw-r--r--  arch/mips/mm/c-tx39.c            |  414
-rw-r--r--  arch/mips/mm/cache.c             |  102
-rw-r--r--  arch/mips/mm/cex-gen.S           |    2
-rw-r--r--  arch/mips/mm/context.c           |    5
-rw-r--r--  arch/mips/mm/dma-noncoherent.c   |    3
-rw-r--r--  arch/mips/mm/fault.c             |   46
-rw-r--r--  arch/mips/mm/hugetlbpage.c       |   10
-rw-r--r--  arch/mips/mm/init.c              |  108
-rw-r--r--  arch/mips/mm/ioremap.c           |    8
-rw-r--r--  arch/mips/mm/ioremap64.c         |    4
-rw-r--r--  arch/mips/mm/mmap.c              |    8
-rw-r--r--  arch/mips/mm/page-funcs.S        |    2
-rw-r--r--  arch/mips/mm/page.c              |  207
-rw-r--r--  arch/mips/mm/pgtable-32.c        |   19
-rw-r--r--  arch/mips/mm/pgtable-64.c        |   29
-rw-r--r--  arch/mips/mm/pgtable.c           |    6
-rw-r--r--  arch/mips/mm/physaddr.c          |   16
-rw-r--r--  arch/mips/mm/tlb-funcs.S         |    2
-rw-r--r--  arch/mips/mm/tlb-r3k.c           |   46
-rw-r--r--  arch/mips/mm/tlb-r4k.c           |  116
-rw-r--r--  arch/mips/mm/tlbex.c             |  308
-rw-r--r--  arch/mips/mm/uasm-mips.c         |    4
-rw-r--r--  arch/mips/mm/uasm.c              |    3
27 files changed, 529 insertions(+), 1173 deletions(-)
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index 4acc4f3d31f8..304692391519 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -36,7 +36,6 @@ obj-$(CONFIG_CPU_R3K_TLB) += tlb-r3k.o obj-$(CONFIG_CPU_R4K_CACHE_TLB) += c-r4k.o cex-gen.o tlb-r4k.o obj-$(CONFIG_CPU_R3000) += c-r3k.o obj-$(CONFIG_CPU_SB1) += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o -obj-$(CONFIG_CPU_TX39XX) += c-tx39.o obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c index 8ae181e08311..b7393b61cfa7 100644 --- a/arch/mips/mm/c-octeon.c +++ b/arch/mips/mm/c-octeon.c @@ -23,14 +23,13 @@ #include <asm/r4kcache.h> #include <asm/traps.h> #include <asm/mmu_context.h> -#include <asm/war.h> #include <asm/octeon/octeon.h> unsigned long long cache_err_dcache[NR_CPUS]; EXPORT_SYMBOL_GPL(cache_err_dcache); -/** +/* * Octeon automatically flushes the dcache on tlb changes, so * from Linux's viewpoint it acts much like a physically * tagged cache. No flushing is needed @@ -56,8 +55,8 @@ static void local_octeon_flush_icache_range(unsigned long start, } /** - * Flush caches as necessary for all cores affected by a - * vma. If no vma is supplied, all cores are flushed. + * octeon_flush_icache_all_cores - Flush caches as necessary for all cores + * affected by a vma. If no vma is supplied, all cores are flushed. * * @vma: VMA to flush or NULL to flush all icaches. */ @@ -84,15 +83,20 @@ static void octeon_flush_icache_all_cores(struct vm_area_struct *vma) else mask = *cpu_online_mask; cpumask_clear_cpu(cpu, &mask); +#ifdef CONFIG_CAVIUM_OCTEON_SOC for_each_cpu(cpu, &mask) octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH); +#else + smp_call_function_many(&mask, (smp_call_func_t)octeon_local_flush_icache, + NULL, 1); +#endif preempt_enable(); #endif } -/** +/* * Called to flush the icache on all cores */ static void octeon_flush_icache_all(void) @@ -102,8 +106,7 @@ static void octeon_flush_icache_all(void) /** - * Called to flush all memory associated with a memory - * context. + * octeon_flush_cache_mm - flush all memory associated with a memory context. 
* * @mm: Memory context to flush */ @@ -116,7 +119,7 @@ static void octeon_flush_cache_mm(struct mm_struct *mm) } -/** +/* * Flush a range of kernel addresses out of the icache * */ @@ -127,11 +130,11 @@ static void octeon_flush_icache_range(unsigned long start, unsigned long end) /** - * Flush a range out of a vma + * octeon_flush_cache_range - Flush a range out of a vma * * @vma: VMA to flush - * @start: - * @end: + * @start: beginning address for flush + * @end: ending address for flush */ static void octeon_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) @@ -142,11 +145,11 @@ static void octeon_flush_cache_range(struct vm_area_struct *vma, /** - * Flush a specific page of a vma + * octeon_flush_cache_page - Flush a specific page of a vma * * @vma: VMA to flush page for * @page: Page to flush - * @pfn: + * @pfn: Page frame number */ static void octeon_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn) @@ -160,7 +163,7 @@ static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size) BUG(); } -/** +/* * Probe Octeon's caches * */ @@ -256,7 +259,7 @@ static void octeon_cache_error_setup(void) set_handler(0x100, &except_vec2_octeon, 0x80); } -/** +/* * Setup the Octeon cache flush routines * */ @@ -333,7 +336,7 @@ static void co_cache_error_call_notifiers(unsigned long val) } /* - * Called when the the exception is recoverable + * Called when the exception is recoverable */ asmlinkage void cache_parity_error_octeon_recoverable(void) @@ -341,8 +344,8 @@ asmlinkage void cache_parity_error_octeon_recoverable(void) co_cache_error_call_notifiers(0); } -/** - * Called when the the exception is not recoverable +/* + * Called when the exception is not recoverable */ asmlinkage void cache_parity_error_octeon_non_recoverable(void) diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c index df6755ca1892..5869df848fab 100644 --- a/arch/mips/mm/c-r3k.c +++ b/arch/mips/mm/c-r3k.c @@ -261,10 +261,6 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma, r3k_flush_icache_range(kaddr, kaddr + PAGE_SIZE); } -static void local_r3k_flush_data_cache_page(void *addr) -{ -} - static void r3k_flush_data_cache_page(unsigned long addr) { } @@ -302,7 +298,6 @@ void r3k_cache_init(void) __flush_kernel_vmap_range = r3k_flush_kernel_vmap_range; - local_flush_data_cache_page = local_r3k_flush_data_cache_page; flush_data_cache_page = r3k_flush_data_cache_page; _dma_cache_wback_inv = r3k_dma_cache_wback_inv; diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 74b09e801c3a..10413b6f6662 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -33,7 +33,6 @@ #include <asm/r4kcache.h> #include <asm/sections.h> #include <asm/mmu_context.h> -#include <asm/war.h> #include <asm/cacheflush.h> /* for run_uncached() */ #include <asm/traps.h> #include <asm/mips-cps.h> @@ -111,20 +110,6 @@ static unsigned long dcache_size __read_mostly; static unsigned long vcache_size __read_mostly; static unsigned long scache_size __read_mostly; -/* - * Dummy cache handling routines for machines without boardcaches - */ -static void cache_noop(void) {} - -static struct bcache_ops no_sc_ops = { - .bc_enable = (void *)cache_noop, - .bc_disable = (void *)cache_noop, - .bc_wback_inv = (void *)cache_noop, - .bc_inv = (void *)cache_noop -}; - -struct bcache_ops *bcops = &no_sc_ops; - #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) @@ -202,24 
+187,6 @@ static void r4k_blast_dcache_user_page_setup(void) #endif -static void (* r4k_blast_dcache_page_indexed)(unsigned long addr); - -static void r4k_blast_dcache_page_indexed_setup(void) -{ - unsigned long dc_lsize = cpu_dcache_line_size(); - - if (dc_lsize == 0) - r4k_blast_dcache_page_indexed = (void *)cache_noop; - else if (dc_lsize == 16) - r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed; - else if (dc_lsize == 32) - r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed; - else if (dc_lsize == 64) - r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed; - else if (dc_lsize == 128) - r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed; -} - void (* r4k_blast_dcache)(void); EXPORT_SYMBOL(r4k_blast_dcache); @@ -281,39 +248,6 @@ static inline void tx49_blast_icache32(void) addr | ws, 32); } -static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page) -{ - unsigned long flags; - - local_irq_save(flags); - blast_icache32_page_indexed(page); - local_irq_restore(flags); -} - -static inline void tx49_blast_icache32_page_indexed(unsigned long page) -{ - unsigned long indexmask = current_cpu_data.icache.waysize - 1; - unsigned long start = INDEX_BASE + (page & indexmask); - unsigned long end = start + PAGE_SIZE; - unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit; - unsigned long ws_end = current_cpu_data.icache.ways << - current_cpu_data.icache.waybit; - unsigned long ws, addr; - - CACHE32_UNROLL32_ALIGN2; - /* I'm in even chunk. blast odd chunks */ - for (ws = 0; ws < ws_end; ws += ws_inc) - for (addr = start + 0x400; addr < end; addr += 0x400 * 2) - cache_unroll(32, kernel_cache, Index_Invalidate_I, - addr | ws, 32); - CACHE32_UNROLL32_ALIGN; - /* I'm in odd chunk. blast even chunks */ - for (ws = 0; ws < ws_end; ws += ws_inc) - for (addr = start; addr < end; addr += 0x400 * 2) - cache_unroll(32, kernel_cache, Index_Invalidate_I, - addr | ws, 32); -} - static void (* r4k_blast_icache_page)(unsigned long addr); static void r4k_blast_icache_page_setup(void) @@ -356,34 +290,6 @@ static void r4k_blast_icache_user_page_setup(void) #endif -static void (* r4k_blast_icache_page_indexed)(unsigned long addr); - -static void r4k_blast_icache_page_indexed_setup(void) -{ - unsigned long ic_lsize = cpu_icache_line_size(); - - if (ic_lsize == 0) - r4k_blast_icache_page_indexed = (void *)cache_noop; - else if (ic_lsize == 16) - r4k_blast_icache_page_indexed = blast_icache16_page_indexed; - else if (ic_lsize == 32) { - if (IS_ENABLED(CONFIG_WAR_R4600_V1_INDEX_ICACHEOP) && - cpu_is_r4600_v1_x()) - r4k_blast_icache_page_indexed = - blast_icache32_r4600_v1_page_indexed; - else if (IS_ENABLED(CONFIG_WAR_TX49XX_ICACHE_INDEX_INV)) - r4k_blast_icache_page_indexed = - tx49_blast_icache32_page_indexed; - else if (current_cpu_type() == CPU_LOONGSON2EF) - r4k_blast_icache_page_indexed = - loongson2_blast_icache32_page_indexed; - else - r4k_blast_icache_page_indexed = - blast_icache32_page_indexed; - } else if (ic_lsize == 64) - r4k_blast_icache_page_indexed = blast_icache64_page_indexed; -} - void (* r4k_blast_icache)(void); EXPORT_SYMBOL(r4k_blast_icache); @@ -429,24 +335,6 @@ static void r4k_blast_scache_page_setup(void) r4k_blast_scache_page = blast_scache128_page; } -static void (* r4k_blast_scache_page_indexed)(unsigned long addr); - -static void r4k_blast_scache_page_indexed_setup(void) -{ - unsigned long sc_lsize = cpu_scache_line_size(); - - if (scache_size == 0) - r4k_blast_scache_page_indexed = (void *)cache_noop; - else if (sc_lsize == 16) - 
r4k_blast_scache_page_indexed = blast_scache16_page_indexed; - else if (sc_lsize == 32) - r4k_blast_scache_page_indexed = blast_scache32_page_indexed; - else if (sc_lsize == 64) - r4k_blast_scache_page_indexed = blast_scache64_page_indexed; - else if (sc_lsize == 128) - r4k_blast_scache_page_indexed = blast_scache128_page_indexed; -} - static void (* r4k_blast_scache)(void); static void r4k_blast_scache_setup(void) @@ -680,13 +568,14 @@ static inline void local_r4k_flush_cache_page(void *args) if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) vaddr = NULL; else { + struct folio *folio = page_folio(page); /* * Use kmap_coherent or kmap_atomic to do flushes for * another ASID than the current one. */ map_coherent = (cpu_has_dc_aliases && - page_mapcount(page) && - !Page_dcache_dirty(page)); + folio_mapped(folio) && + !folio_test_dcache_dirty(folio)); if (map_coherent) vaddr = kmap_coherent(page, addr); else @@ -1195,50 +1084,6 @@ static void probe_pcache(void) c->options |= MIPS_CPU_PREFETCH; break; - case CPU_VR4133: - write_c0_config(config & ~VR41_CONF_P4K); - fallthrough; - case CPU_VR4131: - /* Workaround for cache instruction bug of VR4131 */ - if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U || - c->processor_id == 0x0c82U) { - config |= 0x00400000U; - if (c->processor_id == 0x0c80U) - config |= VR41_CONF_BP; - write_c0_config(config); - } else - c->options |= MIPS_CPU_CACHE_CDEX_P; - - icache_size = 1 << (10 + ((config & CONF_IC) >> 9)); - c->icache.linesz = 16 << ((config & CONF_IB) >> 5); - c->icache.ways = 2; - c->icache.waybit = __ffs(icache_size/2); - - dcache_size = 1 << (10 + ((config & CONF_DC) >> 6)); - c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); - c->dcache.ways = 2; - c->dcache.waybit = __ffs(dcache_size/2); - break; - - case CPU_VR41XX: - case CPU_VR4111: - case CPU_VR4121: - case CPU_VR4122: - case CPU_VR4181: - case CPU_VR4181A: - icache_size = 1 << (10 + ((config & CONF_IC) >> 9)); - c->icache.linesz = 16 << ((config & CONF_IB) >> 5); - c->icache.ways = 1; - c->icache.waybit = 0; /* doesn't matter */ - - dcache_size = 1 << (10 + ((config & CONF_DC) >> 6)); - c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); - c->dcache.ways = 1; - c->dcache.waybit = 0; /* does not matter */ - - c->options |= MIPS_CPU_CACHE_CDEX_P; - break; - case CPU_RM7000: rm7k_erratum31(); @@ -1410,7 +1255,6 @@ static void probe_pcache(void) case CPU_I6500: case CPU_SB1: case CPU_SB1A: - case CPU_XLR: c->dcache.flags |= MIPS_CACHE_PINDEX; break; @@ -1641,10 +1485,6 @@ static void loongson3_sc_init(void) return; } -extern int r5k_sc_init(void); -extern int rm7k_sc_init(void); -extern int mips_sc_init(void); - static void setup_scache(void) { struct cpuinfo_mips *c = &current_cpu_data; @@ -1699,7 +1539,6 @@ static void setup_scache(void) return; case CPU_CAVIUM_OCTEON3: - case CPU_XLP: /* don't need to worry about L2, fully coherent */ return; @@ -1811,7 +1650,7 @@ static void coherency_setup(void) /* * c0_status.cu=0 specifies that updates by the sc instruction use - * the coherency mode specified by the TLB; 1 means cachable + * the coherency mode specified by the TLB; 1 means cacheable * coherent update on write will be used. Not all processors have * this bit and; some wire it to zero, others like Toshiba had the * silly idea of putting something else there ... 
@@ -1868,13 +1707,10 @@ void r4k_cache_init(void) setup_scache(); r4k_blast_dcache_page_setup(); - r4k_blast_dcache_page_indexed_setup(); r4k_blast_dcache_setup(); r4k_blast_icache_page_setup(); - r4k_blast_icache_page_indexed_setup(); r4k_blast_icache_setup(); r4k_blast_scache_page_setup(); - r4k_blast_scache_page_indexed_setup(); r4k_blast_scache_setup(); r4k_blast_scache_node_setup(); #ifdef CONFIG_EVA @@ -1906,7 +1742,6 @@ void r4k_cache_init(void) __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range; flush_icache_all = r4k_flush_icache_all; - local_flush_data_cache_page = local_r4k_flush_data_cache_page; flush_data_cache_page = r4k_flush_data_cache_page; flush_icache_range = r4k_flush_icache_range; local_flush_icache_range = local_r4k_flush_icache_range; @@ -1914,15 +1749,9 @@ void r4k_cache_init(void) __local_flush_icache_user_range = local_r4k_flush_icache_user_range; #ifdef CONFIG_DMA_NONCOHERENT - if (dma_default_coherent) { - _dma_cache_wback_inv = (void *)cache_noop; - _dma_cache_wback = (void *)cache_noop; - _dma_cache_inv = (void *)cache_noop; - } else { - _dma_cache_wback_inv = r4k_dma_cache_wback_inv; - _dma_cache_wback = r4k_dma_cache_wback_inv; - _dma_cache_inv = r4k_dma_cache_inv; - } + _dma_cache_wback_inv = r4k_dma_cache_wback_inv; + _dma_cache_wback = r4k_dma_cache_wback_inv; + _dma_cache_inv = r4k_dma_cache_inv; #endif /* CONFIG_DMA_NONCOHERENT */ build_clear_page(); @@ -1955,7 +1784,6 @@ void r4k_cache_init(void) /* I$ fills from D$ just by emptying the write buffers */ flush_cache_page = (void *)b5k_instruction_hazard; flush_cache_range = (void *)b5k_instruction_hazard; - local_flush_data_cache_page = (void *)b5k_instruction_hazard; flush_data_cache_page = (void *)b5k_instruction_hazard; flush_icache_range = (void *)b5k_instruction_hazard; local_flush_icache_range = (void *)b5k_instruction_hazard; @@ -1975,7 +1803,6 @@ void r4k_cache_init(void) flush_cache_range = (void *)cache_noop; flush_icache_all = (void *)cache_noop; flush_data_cache_page = (void *)cache_noop; - local_flush_data_cache_page = (void *)cache_noop; break; } } @@ -1997,7 +1824,7 @@ static struct notifier_block r4k_cache_pm_notifier_block = { .notifier_call = r4k_cache_pm_notifier, }; -int __init r4k_cache_init_pm(void) +static int __init r4k_cache_init_pm(void) { return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block); } diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c deleted file mode 100644 index 03dfbb40ec73..000000000000 --- a/arch/mips/mm/c-tx39.c +++ /dev/null @@ -1,414 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * r2300.c: R2000 and R3000 specific mmu/cache code. - * - * Copyright (C) 1996 David S. Miller (davem@davemloft.net) - * - * with a lot of changes to make this thing work for R3000s - * Tx39XX R4k style caches added. 
HK - * Copyright (C) 1998, 1999, 2000 Harald Koerfgen - * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov - */ -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/smp.h> -#include <linux/mm.h> - -#include <asm/cacheops.h> -#include <asm/page.h> -#include <asm/mmu_context.h> -#include <asm/isadep.h> -#include <asm/io.h> -#include <asm/bootinfo.h> -#include <asm/cpu.h> - -/* For R3000 cores with R4000 style caches */ -static unsigned long icache_size, dcache_size; /* Size in bytes */ - -#include <asm/r4kcache.h> - -/* This sequence is required to ensure icache is disabled immediately */ -#define TX39_STOP_STREAMING() \ -__asm__ __volatile__( \ - ".set push\n\t" \ - ".set noreorder\n\t" \ - "b 1f\n\t" \ - "nop\n\t" \ - "1:\n\t" \ - ".set pop" \ - ) - -/* TX39H-style cache flush routines. */ -static void tx39h_flush_icache_all(void) -{ - unsigned long flags, config; - - /* disable icache (set ICE#) */ - local_irq_save(flags); - config = read_c0_conf(); - write_c0_conf(config & ~TX39_CONF_ICE); - TX39_STOP_STREAMING(); - blast_icache16(); - write_c0_conf(config); - local_irq_restore(flags); -} - -static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size) -{ - /* Catch bad driver code */ - BUG_ON(size == 0); - - iob(); - blast_inv_dcache_range(addr, addr + size); -} - - -/* TX39H2,TX39H3 */ -static inline void tx39_blast_dcache_page(unsigned long addr) -{ - if (current_cpu_type() != CPU_TX3912) - blast_dcache16_page(addr); -} - -static inline void tx39_blast_dcache_page_indexed(unsigned long addr) -{ - blast_dcache16_page_indexed(addr); -} - -static inline void tx39_blast_dcache(void) -{ - blast_dcache16(); -} - -static inline void tx39_blast_icache_page(unsigned long addr) -{ - unsigned long flags, config; - /* disable icache (set ICE#) */ - local_irq_save(flags); - config = read_c0_conf(); - write_c0_conf(config & ~TX39_CONF_ICE); - TX39_STOP_STREAMING(); - blast_icache16_page(addr); - write_c0_conf(config); - local_irq_restore(flags); -} - -static inline void tx39_blast_icache_page_indexed(unsigned long addr) -{ - unsigned long flags, config; - /* disable icache (set ICE#) */ - local_irq_save(flags); - config = read_c0_conf(); - write_c0_conf(config & ~TX39_CONF_ICE); - TX39_STOP_STREAMING(); - blast_icache16_page_indexed(addr); - write_c0_conf(config); - local_irq_restore(flags); -} - -static inline void tx39_blast_icache(void) -{ - unsigned long flags, config; - /* disable icache (set ICE#) */ - local_irq_save(flags); - config = read_c0_conf(); - write_c0_conf(config & ~TX39_CONF_ICE); - TX39_STOP_STREAMING(); - blast_icache16(); - write_c0_conf(config); - local_irq_restore(flags); -} - -static void tx39__flush_cache_vmap(void) -{ - tx39_blast_dcache(); -} - -static void tx39__flush_cache_vunmap(void) -{ - tx39_blast_dcache(); -} - -static inline void tx39_flush_cache_all(void) -{ - if (!cpu_has_dc_aliases) - return; - - tx39_blast_dcache(); -} - -static inline void tx39___flush_cache_all(void) -{ - tx39_blast_dcache(); - tx39_blast_icache(); -} - -static void tx39_flush_cache_mm(struct mm_struct *mm) -{ - if (!cpu_has_dc_aliases) - return; - - if (cpu_context(smp_processor_id(), mm) != 0) - tx39_blast_dcache(); -} - -static void tx39_flush_cache_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) -{ - if (!cpu_has_dc_aliases) - return; - if (!(cpu_context(smp_processor_id(), vma->vm_mm))) - return; - - tx39_blast_dcache(); -} - -static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long 
page, unsigned long pfn) -{ - int exec = vma->vm_flags & VM_EXEC; - struct mm_struct *mm = vma->vm_mm; - pmd_t *pmdp; - pte_t *ptep; - - /* - * If ownes no valid ASID yet, cannot possibly have gotten - * this page into the cache. - */ - if (cpu_context(smp_processor_id(), mm) == 0) - return; - - page &= PAGE_MASK; - pmdp = pmd_off(mm, page); - ptep = pte_offset_kernel(pmdp, page); - - /* - * If the page isn't marked valid, the page cannot possibly be - * in the cache. - */ - if (!(pte_val(*ptep) & _PAGE_PRESENT)) - return; - - /* - * Doing flushes for another ASID than the current one is - * too difficult since stupid R4k caches do a TLB translation - * for every cache flush operation. So we do indexed flushes - * in that case, which doesn't overly flush the cache too much. - */ - if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) { - if (cpu_has_dc_aliases || exec) - tx39_blast_dcache_page(page); - if (exec) - tx39_blast_icache_page(page); - - return; - } - - /* - * Do indexed flush, too much work to get the (possible) TLB refills - * to work correctly. - */ - if (cpu_has_dc_aliases || exec) - tx39_blast_dcache_page_indexed(page); - if (exec) - tx39_blast_icache_page_indexed(page); -} - -static void local_tx39_flush_data_cache_page(void * addr) -{ - tx39_blast_dcache_page((unsigned long)addr); -} - -static void tx39_flush_data_cache_page(unsigned long addr) -{ - tx39_blast_dcache_page(addr); -} - -static void tx39_flush_icache_range(unsigned long start, unsigned long end) -{ - if (end - start > dcache_size) - tx39_blast_dcache(); - else - protected_blast_dcache_range(start, end); - - if (end - start > icache_size) - tx39_blast_icache(); - else { - unsigned long flags, config; - /* disable icache (set ICE#) */ - local_irq_save(flags); - config = read_c0_conf(); - write_c0_conf(config & ~TX39_CONF_ICE); - TX39_STOP_STREAMING(); - protected_blast_icache_range(start, end); - write_c0_conf(config); - local_irq_restore(flags); - } -} - -static void tx39_flush_kernel_vmap_range(unsigned long vaddr, int size) -{ - BUG(); -} - -static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size) -{ - unsigned long end; - - if (((size | addr) & (PAGE_SIZE - 1)) == 0) { - end = addr + size; - do { - tx39_blast_dcache_page(addr); - addr += PAGE_SIZE; - } while(addr != end); - } else if (size > dcache_size) { - tx39_blast_dcache(); - } else { - blast_dcache_range(addr, addr + size); - } -} - -static void tx39_dma_cache_inv(unsigned long addr, unsigned long size) -{ - unsigned long end; - - if (((size | addr) & (PAGE_SIZE - 1)) == 0) { - end = addr + size; - do { - tx39_blast_dcache_page(addr); - addr += PAGE_SIZE; - } while(addr != end); - } else if (size > dcache_size) { - tx39_blast_dcache(); - } else { - blast_inv_dcache_range(addr, addr + size); - } -} - -static __init void tx39_probe_cache(void) -{ - unsigned long config; - - config = read_c0_conf(); - - icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >> - TX39_CONF_ICS_SHIFT)); - dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >> - TX39_CONF_DCS_SHIFT)); - - current_cpu_data.icache.linesz = 16; - switch (current_cpu_type()) { - case CPU_TX3912: - current_cpu_data.icache.ways = 1; - current_cpu_data.dcache.ways = 1; - current_cpu_data.dcache.linesz = 4; - break; - - case CPU_TX3927: - current_cpu_data.icache.ways = 2; - current_cpu_data.dcache.ways = 2; - current_cpu_data.dcache.linesz = 16; - break; - - case CPU_TX3922: - default: - current_cpu_data.icache.ways = 1; - current_cpu_data.dcache.ways = 1; - 
current_cpu_data.dcache.linesz = 16; - break; - } -} - -void tx39_cache_init(void) -{ - extern void build_clear_page(void); - extern void build_copy_page(void); - unsigned long config; - - config = read_c0_conf(); - config &= ~TX39_CONF_WBON; - write_c0_conf(config); - - tx39_probe_cache(); - - switch (current_cpu_type()) { - case CPU_TX3912: - /* TX39/H core (writethru direct-map cache) */ - __flush_cache_vmap = tx39__flush_cache_vmap; - __flush_cache_vunmap = tx39__flush_cache_vunmap; - flush_cache_all = tx39h_flush_icache_all; - __flush_cache_all = tx39h_flush_icache_all; - flush_cache_mm = (void *) tx39h_flush_icache_all; - flush_cache_range = (void *) tx39h_flush_icache_all; - flush_cache_page = (void *) tx39h_flush_icache_all; - flush_icache_range = (void *) tx39h_flush_icache_all; - local_flush_icache_range = (void *) tx39h_flush_icache_all; - - local_flush_data_cache_page = (void *) tx39h_flush_icache_all; - flush_data_cache_page = (void *) tx39h_flush_icache_all; - - _dma_cache_wback_inv = tx39h_dma_cache_wback_inv; - - shm_align_mask = PAGE_SIZE - 1; - - break; - - case CPU_TX3922: - case CPU_TX3927: - default: - /* TX39/H2,H3 core (writeback 2way-set-associative cache) */ - /* board-dependent init code may set WBON */ - - __flush_cache_vmap = tx39__flush_cache_vmap; - __flush_cache_vunmap = tx39__flush_cache_vunmap; - - flush_cache_all = tx39_flush_cache_all; - __flush_cache_all = tx39___flush_cache_all; - flush_cache_mm = tx39_flush_cache_mm; - flush_cache_range = tx39_flush_cache_range; - flush_cache_page = tx39_flush_cache_page; - flush_icache_range = tx39_flush_icache_range; - local_flush_icache_range = tx39_flush_icache_range; - - __flush_kernel_vmap_range = tx39_flush_kernel_vmap_range; - - local_flush_data_cache_page = local_tx39_flush_data_cache_page; - flush_data_cache_page = tx39_flush_data_cache_page; - - _dma_cache_wback_inv = tx39_dma_cache_wback_inv; - _dma_cache_wback = tx39_dma_cache_wback_inv; - _dma_cache_inv = tx39_dma_cache_inv; - - shm_align_mask = max_t(unsigned long, - (dcache_size / current_cpu_data.dcache.ways) - 1, - PAGE_SIZE - 1); - - break; - } - - __flush_icache_user_range = flush_icache_range; - __local_flush_icache_user_range = local_flush_icache_range; - - current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways; - current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways; - - current_cpu_data.icache.sets = - current_cpu_data.icache.waysize / current_cpu_data.icache.linesz; - current_cpu_data.dcache.sets = - current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz; - - if (current_cpu_data.dcache.waysize > PAGE_SIZE) - current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES; - - current_cpu_data.icache.waybit = 0; - current_cpu_data.dcache.waybit = 0; - - pr_info("Primary instruction cache %ldkB, linesize %d bytes\n", - icache_size >> 10, current_cpu_data.icache.linesz); - pr_info("Primary data cache %ldkB, linesize %d bytes\n", - dcache_size >> 10, current_cpu_data.dcache.linesz); - - build_clear_page(); - build_copy_page(); - tx39h_flush_icache_all(); -} diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 830ab91e574f..e3b4224c9a40 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -17,6 +17,7 @@ #include <linux/highmem.h> #include <linux/pagemap.h> +#include <asm/bcache.h> #include <asm/cacheflush.h> #include <asm/processor.h> #include <asm/cpu.h> @@ -48,14 +49,30 @@ void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size); EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range); 
/* MIPS specific cache operations */ -void (*local_flush_data_cache_page)(void * addr); void (*flush_data_cache_page)(unsigned long addr); void (*flush_icache_all)(void); -EXPORT_SYMBOL_GPL(local_flush_data_cache_page); EXPORT_SYMBOL(flush_data_cache_page); EXPORT_SYMBOL(flush_icache_all); +/* + * Dummy cache handling routine + */ + +void cache_noop(void) {} + +#ifdef CONFIG_BOARD_SCACHE + +static struct bcache_ops no_sc_ops = { + .bc_enable = (void *)cache_noop, + .bc_disable = (void *)cache_noop, + .bc_wback_inv = (void *)cache_noop, + .bc_inv = (void *)cache_noop +}; + +struct bcache_ops *bcops = &no_sc_ops; +#endif + #ifdef CONFIG_DMA_NONCOHERENT /* DMA cache operations. */ @@ -82,40 +99,38 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes, return 0; } -void __flush_dcache_page(struct page *page) +void __flush_dcache_folio_pages(struct folio *folio, struct page *page, + unsigned int nr) { - struct address_space *mapping = page_mapping_file(page); + struct address_space *mapping = folio_flush_mapping(folio); unsigned long addr; + unsigned int i; if (mapping && !mapping_mapped(mapping)) { - SetPageDcacheDirty(page); + folio_set_dcache_dirty(folio); return; } /* - * We could delay the flush for the !page_mapping case too. But that + * We could delay the flush for the !folio_mapping case too. But that * case is for exec env/arg pages and those are %99 certainly going to * get faulted into the tlb (and thus flushed) anyways. */ - if (PageHighMem(page)) - addr = (unsigned long)kmap_atomic(page); - else - addr = (unsigned long)page_address(page); - - flush_data_cache_page(addr); - - if (PageHighMem(page)) - kunmap_atomic((void *)addr); + for (i = 0; i < nr; i++) { + addr = (unsigned long)kmap_local_page(page + i); + flush_data_cache_page(addr); + kunmap_local((void *)addr); + } } - -EXPORT_SYMBOL(__flush_dcache_page); +EXPORT_SYMBOL(__flush_dcache_folio_pages); void __flush_anon_page(struct page *page, unsigned long vmaddr) { unsigned long addr = (unsigned long) page_address(page); + struct folio *folio = page_folio(page); if (pages_do_alias(addr, vmaddr)) { - if (page_mapcount(page) && !Page_dcache_dirty(page)) { + if (folio_mapped(folio) && !folio_test_dcache_dirty(folio)) { void *kaddr; kaddr = kmap_coherent(page, vmaddr); @@ -130,27 +145,29 @@ EXPORT_SYMBOL(__flush_anon_page); void __update_cache(unsigned long address, pte_t pte) { - struct page *page; + struct folio *folio; unsigned long pfn, addr; int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc; + unsigned int i; pfn = pte_pfn(pte); if (unlikely(!pfn_valid(pfn))) return; - page = pfn_to_page(pfn); - if (Page_dcache_dirty(page)) { - if (PageHighMem(page)) - addr = (unsigned long)kmap_atomic(page); - else - addr = (unsigned long)page_address(page); - - if (exec || pages_do_alias(addr, address & PAGE_MASK)) - flush_data_cache_page(addr); - if (PageHighMem(page)) - kunmap_atomic((void *)addr); + folio = page_folio(pfn_to_page(pfn)); + address &= PAGE_MASK; + address -= offset_in_folio(folio, pfn << PAGE_SHIFT); + + if (folio_test_dcache_dirty(folio)) { + for (i = 0; i < folio_nr_pages(folio); i++) { + addr = (unsigned long)kmap_local_folio(folio, i); - ClearPageDcacheDirty(page); + if (exec || pages_do_alias(addr, address)) + flush_data_cache_page(addr); + kunmap_local((void *)addr); + address += PAGE_SIZE; + } + folio_clear_dcache_dirty(folio); } } @@ -159,6 +176,9 @@ EXPORT_SYMBOL(_page_cachable_default); #define PM(p) __pgprot(_page_cachable_default | (p)) +static pgprot_t protection_map[16] __ro_after_init; 
+DECLARE_VM_GET_PAGE_PROT + static inline void setup_protection_map(void) { protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); @@ -185,27 +205,13 @@ static inline void setup_protection_map(void) void cpu_cache_init(void) { - if (cpu_has_3k_cache) { - extern void __weak r3k_cache_init(void); - + if (IS_ENABLED(CONFIG_CPU_R3000) && cpu_has_3k_cache) r3k_cache_init(); - } - if (cpu_has_4k_cache) { - extern void __weak r4k_cache_init(void); - + if (IS_ENABLED(CONFIG_CPU_R4K_CACHE_TLB) && cpu_has_4k_cache) r4k_cache_init(); - } - if (cpu_has_tx39_cache) { - extern void __weak tx39_cache_init(void); - - tx39_cache_init(); - } - - if (cpu_has_octeon_cache) { - extern void __weak octeon_cache_init(void); + if (IS_ENABLED(CONFIG_CPU_CAVIUM_OCTEON) && cpu_has_octeon_cache) octeon_cache_init(); - } setup_protection_map(); } diff --git a/arch/mips/mm/cex-gen.S b/arch/mips/mm/cex-gen.S index 45dff5cd4b8e..e528583d1331 100644 --- a/arch/mips/mm/cex-gen.S +++ b/arch/mips/mm/cex-gen.S @@ -25,7 +25,7 @@ * This is a very bad place to be. Our cache error * detection has triggered. If we have write-back data * in the cache, we may not be able to recover. As a - * first-order desperate measure, turn off KSEG0 cacheing. + * first-order desperate measure, turn off KSEG0 caching. */ mfc0 k0,CP0_CONFIG li k1,~CONF_CM_CMASK diff --git a/arch/mips/mm/context.c b/arch/mips/mm/context.c index b25564090939..966f40066f03 100644 --- a/arch/mips/mm/context.c +++ b/arch/mips/mm/context.c @@ -67,7 +67,7 @@ static void flush_context(void) int cpu; /* Update the list of reserved MMIDs and the MMID bitmap */ - bitmap_clear(mmid_map, 0, num_mmids); + bitmap_zero(mmid_map, num_mmids); /* Reserve an MMID for kmap/wired entries */ __set_bit(MMID_KERNEL_WIRED, mmid_map); @@ -277,8 +277,7 @@ static int mmid_init(void) WARN_ON(num_mmids <= num_possible_cpus()); atomic64_set(&mmid_version, asid_first_version(0)); - mmid_map = kcalloc(BITS_TO_LONGS(num_mmids), sizeof(*mmid_map), - GFP_KERNEL); + mmid_map = bitmap_zalloc(num_mmids, GFP_KERNEL); if (!mmid_map) panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids); diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index 3c4fc97b9f39..ab4f2a75a7d0 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c @@ -137,8 +137,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, #endif #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - const struct iommu_ops *iommu, bool coherent) +void arch_setup_dma_ops(struct device *dev, bool coherent) { dev->dma_coherent = coherent; } diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index e7abda9c013f..37fedeaca2e9 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -26,6 +26,7 @@ #include <asm/mmu_context.h> #include <asm/ptrace.h> #include <asm/highmem.h> /* For VMALLOC_END */ +#include <asm/traps.h> #include <linux/kdebug.h> int show_unhandled_signals = 1; @@ -35,7 +36,7 @@ int show_unhandled_signals = 1; * and the problem, and then passes it off to one of the appropriate * routines. 
*/ -static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write, +static void __do_page_fault(struct pt_regs *regs, unsigned long write, unsigned long address) { struct vm_area_struct * vma = NULL; @@ -82,8 +83,8 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write, if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END)) goto VMALLOC_FAULT_TARGET; -#ifdef MODULE_START - if (unlikely(address >= MODULE_START && address < MODULE_END)) +#ifdef MODULES_VADDR + if (unlikely(address >= MODULES_VADDR && address < MODULES_END)) goto VMALLOC_FAULT_TARGET; #endif @@ -99,21 +100,13 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: - mmap_read_lock(mm); - vma = find_vma(mm, address); + vma = lock_mm_and_find_vma(mm, address, regs); if (!vma) - goto bad_area; - if (vma->vm_start <= address) - goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (expand_stack(vma, address)) - goto bad_area; + goto bad_area_nosemaphore; /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. */ -good_area: si_code = SEGV_ACCERR; if (write) { @@ -162,6 +155,10 @@ good_area: return; } + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; @@ -171,18 +168,17 @@ good_area: goto do_sigbus; BUG(); } - if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_RETRY) { - flags |= FAULT_FLAG_TRIED; - /* - * No need to mmap_read_unlock(mm) as we would - * have already released it in __lock_page_or_retry - * in mm/filemap.c. - */ + if (fault & VM_FAULT_RETRY) { + flags |= FAULT_FLAG_TRIED; - goto retry; - } + /* + * No need to mmap_read_unlock(mm) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. 
+ */ + + goto retry; } mmap_read_unlock(mm); @@ -323,8 +319,9 @@ vmalloc_fault: } #endif } +NOKPROBE_SYMBOL(__do_page_fault); -asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, +asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, unsigned long address) { enum ctx_state prev_state; @@ -333,3 +330,4 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, __do_page_fault(regs, write, address); exception_exit(prev_state); } +NOKPROBE_SYMBOL(do_page_fault); diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c index 7eaff5b07873..0b9e15555b59 100644 --- a/arch/mips/mm/hugetlbpage.c +++ b/arch/mips/mm/hugetlbpage.c @@ -57,13 +57,3 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, } return (pte_t *) pmd; } - -int pmd_huge(pmd_t pmd) -{ - return (pmd_val(pmd) & _PAGE_HUGE) != 0; -} - -int pud_huge(pud_t pud) -{ - return (pud_val(pud) & _PAGE_HUGE) != 0; -} diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 19347dc6bbf8..a673d3d68254 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -31,6 +31,7 @@ #include <linux/gfp.h> #include <linux/kcore.h> #include <linux/initrd.h> +#include <linux/execmem.h> #include <asm/bootinfo.h> #include <asm/cachectl.h> @@ -38,6 +39,7 @@ #include <asm/dma.h> #include <asm/maar.h> #include <asm/mmu_context.h> +#include <asm/mmzone.h> #include <asm/sections.h> #include <asm/pgalloc.h> #include <asm/tlb.h> @@ -57,24 +59,16 @@ EXPORT_SYMBOL(zero_page_mask); /* * Not static inline because used by IP27 special magic initialization code */ -void setup_zero_pages(void) +static void __init setup_zero_pages(void) { - unsigned int order, i; - struct page *page; + unsigned int order; if (cpu_has_vce) order = 3; else order = 0; - empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); - if (!empty_zero_page) - panic("Oh boy, that early out of memory?"); - - page = virt_to_page((void *)empty_zero_page); - split_page(page, order); - for (i = 0; i < (1 << order); i++, page++) - mark_page_reserved(page); + empty_zero_page = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE << order, PAGE_SIZE); zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; } @@ -88,7 +82,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) pte_t pte; int tlbidx; - BUG_ON(Page_dcache_dirty(page)); + BUG_ON(folio_test_dcache_dirty(page_folio(page))); preempt_disable(); pagefault_disable(); @@ -169,11 +163,12 @@ void kunmap_coherent(void) void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { + struct folio *src = page_folio(from); void *vfrom, *vto; vto = kmap_atomic(to); if (cpu_has_dc_aliases && - page_mapcount(from) && !Page_dcache_dirty(from)) { + folio_mapped(src) && !folio_test_dcache_dirty(src)) { vfrom = kmap_coherent(from, vaddr); copy_page(vto, vfrom); kunmap_coherent(); @@ -194,15 +189,17 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) { + struct folio *folio = page_folio(page); + if (cpu_has_dc_aliases && - page_mapcount(page) && !Page_dcache_dirty(page)) { + folio_mapped(folio) && !folio_test_dcache_dirty(folio)) { void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); memcpy(vto, src, len); kunmap_coherent(); } else { memcpy(dst, src, len); if (cpu_has_dc_aliases) - SetPageDcacheDirty(page); + folio_set_dcache_dirty(folio); } if (vma->vm_flags & VM_EXEC) flush_cache_page(vma, vaddr, page_to_pfn(page)); @@ 
-212,15 +209,17 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) { + struct folio *folio = page_folio(page); + if (cpu_has_dc_aliases && - page_mapcount(page) && !Page_dcache_dirty(page)) { + folio_mapped(folio) && !folio_test_dcache_dirty(folio)) { void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); memcpy(dst, vfrom, len); kunmap_coherent(); } else { memcpy(dst, src, len); if (cpu_has_dc_aliases) - SetPageDcacheDirty(page); + folio_set_dcache_dirty(folio); } } EXPORT_SYMBOL_GPL(copy_from_user_page); @@ -426,44 +425,16 @@ void __init paging_init(void) static struct kcore_list kcore_kseg0; #endif -static inline void __init mem_init_free_highmem(void) -{ -#ifdef CONFIG_HIGHMEM - unsigned long tmp; - - if (cpu_has_dc_aliases) - return; - - for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { - struct page *page = pfn_to_page(tmp); - - if (!memblock_is_memory(PFN_PHYS(tmp))) - SetPageReserved(page); - else - free_highmem_page(page); - } -#endif -} - -void __init mem_init(void) +void __init arch_mm_preinit(void) { /* - * When _PFN_SHIFT is greater than PAGE_SHIFT we won't have enough PTE + * When PFN_PTE_SHIFT is greater than PAGE_SHIFT we won't have enough PTE * bits to hold a full 32b physical address on MIPS32 systems. */ - BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT)); - -#ifdef CONFIG_HIGHMEM - max_mapnr = highend_pfn ? highend_pfn : max_low_pfn; -#else - max_mapnr = max_low_pfn; -#endif - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); + BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT)); maar_init(); - memblock_free_all(); setup_zero_pages(); /* Setup zeroed pages. */ - mem_init_free_highmem(); #ifdef CONFIG_64BIT if ((unsigned long) &_text > (unsigned long) CKSEG0) @@ -473,6 +444,11 @@ void __init mem_init(void) 0x80000000 - 4, KCORE_TEXT); #endif } +#else /* CONFIG_NUMA */ +void __init arch_mm_preinit(void) +{ + setup_zero_pages(); /* This comes from node 0 */ +} #endif /* !CONFIG_NUMA */ void free_init_pages(const char *what, unsigned long begin, unsigned long end) @@ -519,17 +495,9 @@ static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) return node_distance(cpu_to_node(from), cpu_to_node(to)); } -static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, - size_t align) +static int __init pcpu_cpu_to_node(int cpu) { - return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS), - MEMBLOCK_ALLOC_ACCESSIBLE, - cpu_to_node(cpu)); -} - -static void __init pcpu_fc_free(void *ptr, size_t size) -{ - memblock_free_early(__pa(ptr), size); + return cpu_to_node(cpu); } void __init setup_per_cpu_areas(void) @@ -545,7 +513,7 @@ void __init setup_per_cpu_areas(void) rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, pcpu_cpu_distance, - pcpu_fc_alloc, pcpu_fc_free); + pcpu_cpu_to_node); if (rc < 0) panic("Failed to initialize percpu areas."); @@ -576,3 +544,25 @@ EXPORT_SYMBOL_GPL(invalid_pmd_table); #endif pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; EXPORT_SYMBOL(invalid_pte_table); + +#ifdef CONFIG_EXECMEM +#ifdef MODULES_VADDR +static struct execmem_info execmem_info __ro_after_init; + +struct execmem_info __init *execmem_arch_setup(void) +{ + execmem_info = (struct execmem_info){ + .ranges = { + [EXECMEM_DEFAULT] = { + .start = MODULES_VADDR, + .end = MODULES_END, + .pgprot = PAGE_KERNEL, + .alignment = 1, + }, + }, + }; + + return &execmem_info; +} +#endif 
+#endif /* CONFIG_EXECMEM */ diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c index b6dad2fd5575..c6c4576cd4a8 100644 --- a/arch/mips/mm/ioremap.c +++ b/arch/mips/mm/ioremap.c @@ -44,9 +44,9 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, * ioremap_prot gives the caller control over cache coherency attributes (CCA) */ void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size, - unsigned long prot_val) + pgprot_t prot) { - unsigned long flags = prot_val & _CACHE_MASK; + unsigned long flags = pgprot_val(prot) & _CACHE_MASK; unsigned long offset, pfn, last_pfn; struct vm_struct *area; phys_addr_t last_addr; @@ -72,6 +72,10 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size, flags == _CACHE_UNCACHED) return (void __iomem *) CKSEG1ADDR(phys_addr); + /* Early remaps should use the unmapped regions til' VM is available */ + if (WARN_ON_ONCE(!slab_is_available())) + return NULL; + /* * Don't allow anybody to remap RAM that may be allocated by the page * allocator, since that could lead to races & data clobbering. diff --git a/arch/mips/mm/ioremap64.c b/arch/mips/mm/ioremap64.c index 15e7820d6a5f..acc03ba20098 100644 --- a/arch/mips/mm/ioremap64.c +++ b/arch/mips/mm/ioremap64.c @@ -3,9 +3,9 @@ #include <ioremap.h> void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, - unsigned long prot_val) + pgprot_t prot) { - unsigned long flags = prot_val & _CACHE_MASK; + unsigned long flags = pgprot_val(prot) & _CACHE_MASK; u64 base = (flags == _CACHE_UNCACHED ? IO_BASE : UNCAC_BASE); void __iomem *addr; diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 00fe90c6db3e..5d2a1225785b 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -34,7 +34,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, struct vm_area_struct *vma; unsigned long addr = addr0; int do_color_align; - struct vm_unmapped_area_info info; + struct vm_unmapped_area_info info = {}; if (unlikely(len > TASK_SIZE)) return -ENOMEM; @@ -92,14 +92,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, */ } - info.flags = 0; info.low_limit = mm->mmap_base; info.high_limit = TASK_SIZE; return vm_unmapped_area(&info); } unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, - unsigned long len, unsigned long pgoff, unsigned long flags) + unsigned long len, unsigned long pgoff, unsigned long flags, + vm_flags_t vm_flags) { return arch_get_unmapped_area_common(filp, addr0, len, pgoff, flags, UP); @@ -111,7 +111,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, */ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0, unsigned long len, unsigned long pgoff, - unsigned long flags) + unsigned long flags, vm_flags_t vm_flags) { return arch_get_unmapped_area_common(filp, addr0, len, pgoff, flags, DOWN); diff --git a/arch/mips/mm/page-funcs.S b/arch/mips/mm/page-funcs.S index 43181ac0a1af..42d0516ca18a 100644 --- a/arch/mips/mm/page-funcs.S +++ b/arch/mips/mm/page-funcs.S @@ -8,8 +8,8 @@ * Copyright (C) 2012 MIPS Technologies, Inc. 
* Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org> */ +#include <linux/export.h> #include <asm/asm.h> -#include <asm/export.h> #include <asm/regdef.h> #ifdef CONFIG_SIBYTE_DMA_PAGEOPS diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c index 504bc4047c4c..1df237bd4a72 100644 --- a/arch/mips/mm/page.c +++ b/arch/mips/mm/page.c @@ -24,8 +24,8 @@ #include <asm/bootinfo.h> #include <asm/mipsregs.h> #include <asm/mmu_context.h> +#include <asm/regdef.h> #include <asm/cpu.h> -#include <asm/war.h> #ifdef CONFIG_SIBYTE_DMA_PAGEOPS #include <asm/sibyte/sb1250.h> @@ -35,19 +35,6 @@ #include <asm/uasm.h> -/* Registers used in the assembled routines. */ -#define ZERO 0 -#define AT 2 -#define A0 4 -#define A1 5 -#define A2 6 -#define T0 8 -#define T1 9 -#define T2 10 -#define T3 11 -#define T9 25 -#define RA 31 - /* Handle labels (which must be positive integers). */ enum label_id { label_clear_nopref = 1, @@ -103,18 +90,20 @@ static int cache_line_size; static inline void pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off) { - if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) { + if (cpu_has_64bit_gp_regs && + IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS) && + r4k_daddiu_bug()) { if (off > 0x7fff) { - uasm_i_lui(buf, T9, uasm_rel_hi(off)); - uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off)); + uasm_i_lui(buf, GPR_T9, uasm_rel_hi(off)); + uasm_i_addiu(buf, GPR_T9, GPR_T9, uasm_rel_lo(off)); } else - uasm_i_addiu(buf, T9, ZERO, off); - uasm_i_daddu(buf, reg1, reg2, T9); + uasm_i_addiu(buf, GPR_T9, GPR_ZERO, off); + uasm_i_daddu(buf, reg1, reg2, GPR_T9); } else { if (off > 0x7fff) { - uasm_i_lui(buf, T9, uasm_rel_hi(off)); - uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off)); - UASM_i_ADDU(buf, reg1, reg2, T9); + uasm_i_lui(buf, GPR_T9, uasm_rel_hi(off)); + uasm_i_addiu(buf, GPR_T9, GPR_T9, uasm_rel_lo(off)); + UASM_i_ADDU(buf, reg1, reg2, GPR_T9); } else UASM_i_ADDIU(buf, reg1, reg2, off); } @@ -232,9 +221,9 @@ static void set_prefetch_parameters(void) static void build_clear_store(u32 **buf, int off) { if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) { - uasm_i_sd(buf, ZERO, off, A0); + uasm_i_sd(buf, GPR_ZERO, off, GPR_A0); } else { - uasm_i_sw(buf, ZERO, off, A0); + uasm_i_sw(buf, GPR_ZERO, off, GPR_A0); } } @@ -245,10 +234,10 @@ static inline void build_clear_pref(u32 **buf, int off) if (pref_bias_clear_store) { _uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off, - A0); + GPR_A0); } else if (cache_line_size == (half_clear_loop_size << 1)) { if (cpu_has_cache_cdex_s) { - uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0); + uasm_i_cache(buf, Create_Dirty_Excl_SD, off, GPR_A0); } else if (cpu_has_cache_cdex_p) { if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) && cpu_is_r4600_v1_x()) { @@ -260,9 +249,9 @@ static inline void build_clear_pref(u32 **buf, int off) if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x()) - uasm_i_lw(buf, ZERO, ZERO, AT); + uasm_i_lw(buf, GPR_ZERO, GPR_ZERO, GPR_AT); - uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0); + uasm_i_cache(buf, Create_Dirty_Excl_D, off, GPR_A0); } } } @@ -300,12 +289,12 @@ void build_clear_page(void) off = PAGE_SIZE - pref_bias_clear_store; if (off > 0xffff || !pref_bias_clear_store) - pg_addiu(&buf, A2, A0, off); + pg_addiu(&buf, GPR_A2, GPR_A0, off); else - uasm_i_ori(&buf, A2, A0, off); + uasm_i_ori(&buf, GPR_A2, GPR_A0, off); if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x()) - uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000)); + uasm_i_lui(&buf, GPR_AT, uasm_rel_hi(0xa0000000)); 
off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size) * cache_line_size : 0; @@ -319,36 +308,36 @@ void build_clear_page(void) build_clear_store(&buf, off); off += clear_word_size; } while (off < half_clear_loop_size); - pg_addiu(&buf, A0, A0, 2 * off); + pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off); off = -off; do { build_clear_pref(&buf, off); if (off == -clear_word_size) - uasm_il_bne(&buf, &r, A0, A2, label_clear_pref); + uasm_il_bne(&buf, &r, GPR_A0, GPR_A2, label_clear_pref); build_clear_store(&buf, off); off += clear_word_size; } while (off < 0); if (pref_bias_clear_store) { - pg_addiu(&buf, A2, A0, pref_bias_clear_store); + pg_addiu(&buf, GPR_A2, GPR_A0, pref_bias_clear_store); uasm_l_clear_nopref(&l, buf); off = 0; do { build_clear_store(&buf, off); off += clear_word_size; } while (off < half_clear_loop_size); - pg_addiu(&buf, A0, A0, 2 * off); + pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off); off = -off; do { if (off == -clear_word_size) - uasm_il_bne(&buf, &r, A0, A2, + uasm_il_bne(&buf, &r, GPR_A0, GPR_A2, label_clear_nopref); build_clear_store(&buf, off); off += clear_word_size; } while (off < 0); } - uasm_i_jr(&buf, RA); + uasm_i_jr(&buf, GPR_RA); uasm_i_nop(&buf); BUG_ON(buf > &__clear_page_end); @@ -368,18 +357,18 @@ void build_clear_page(void) static void build_copy_load(u32 **buf, int reg, int off) { if (cpu_has_64bit_gp_regs) { - uasm_i_ld(buf, reg, off, A1); + uasm_i_ld(buf, reg, off, GPR_A1); } else { - uasm_i_lw(buf, reg, off, A1); + uasm_i_lw(buf, reg, off, GPR_A1); } } static void build_copy_store(u32 **buf, int reg, int off) { if (cpu_has_64bit_gp_regs) { - uasm_i_sd(buf, reg, off, A0); + uasm_i_sd(buf, reg, off, GPR_A0); } else { - uasm_i_sw(buf, reg, off, A0); + uasm_i_sw(buf, reg, off, GPR_A0); } } @@ -389,7 +378,7 @@ static inline void build_copy_load_pref(u32 **buf, int off) return; if (pref_bias_copy_load) - _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1); + _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, GPR_A1); } static inline void build_copy_store_pref(u32 **buf, int off) @@ -399,10 +388,10 @@ static inline void build_copy_store_pref(u32 **buf, int off) if (pref_bias_copy_store) { _uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off, - A0); + GPR_A0); } else if (cache_line_size == (half_copy_loop_size << 1)) { if (cpu_has_cache_cdex_s) { - uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0); + uasm_i_cache(buf, Create_Dirty_Excl_SD, off, GPR_A0); } else if (cpu_has_cache_cdex_p) { if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) && cpu_is_r4600_v1_x()) { @@ -414,9 +403,9 @@ static inline void build_copy_store_pref(u32 **buf, int off) if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x()) - uasm_i_lw(buf, ZERO, ZERO, AT); + uasm_i_lw(buf, GPR_ZERO, GPR_ZERO, GPR_AT); - uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0); + uasm_i_cache(buf, Create_Dirty_Excl_D, off, GPR_A0); } } } @@ -453,12 +442,12 @@ void build_copy_page(void) off = PAGE_SIZE - pref_bias_copy_load; if (off > 0xffff || !pref_bias_copy_load) - pg_addiu(&buf, A2, A0, off); + pg_addiu(&buf, GPR_A2, GPR_A0, off); else - uasm_i_ori(&buf, A2, A0, off); + uasm_i_ori(&buf, GPR_A2, GPR_A0, off); if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x()) - uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000)); + uasm_i_lui(&buf, GPR_AT, uasm_rel_hi(0xa0000000)); off = cache_line_size ? 
min(8, pref_bias_copy_load / cache_line_size) * cache_line_size : 0; @@ -475,126 +464,126 @@ void build_copy_page(void) uasm_l_copy_pref_both(&l, buf); do { build_copy_load_pref(&buf, off); - build_copy_load(&buf, T0, off); + build_copy_load(&buf, GPR_T0, off); build_copy_load_pref(&buf, off + copy_word_size); - build_copy_load(&buf, T1, off + copy_word_size); + build_copy_load(&buf, GPR_T1, off + copy_word_size); build_copy_load_pref(&buf, off + 2 * copy_word_size); - build_copy_load(&buf, T2, off + 2 * copy_word_size); + build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); build_copy_load_pref(&buf, off + 3 * copy_word_size); - build_copy_load(&buf, T3, off + 3 * copy_word_size); + build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size); build_copy_store_pref(&buf, off); - build_copy_store(&buf, T0, off); + build_copy_store(&buf, GPR_T0, off); build_copy_store_pref(&buf, off + copy_word_size); - build_copy_store(&buf, T1, off + copy_word_size); + build_copy_store(&buf, GPR_T1, off + copy_word_size); build_copy_store_pref(&buf, off + 2 * copy_word_size); - build_copy_store(&buf, T2, off + 2 * copy_word_size); + build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); build_copy_store_pref(&buf, off + 3 * copy_word_size); - build_copy_store(&buf, T3, off + 3 * copy_word_size); + build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); off += 4 * copy_word_size; } while (off < half_copy_loop_size); - pg_addiu(&buf, A1, A1, 2 * off); - pg_addiu(&buf, A0, A0, 2 * off); + pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off); + pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off); off = -off; do { build_copy_load_pref(&buf, off); - build_copy_load(&buf, T0, off); + build_copy_load(&buf, GPR_T0, off); build_copy_load_pref(&buf, off + copy_word_size); - build_copy_load(&buf, T1, off + copy_word_size); + build_copy_load(&buf, GPR_T1, off + copy_word_size); build_copy_load_pref(&buf, off + 2 * copy_word_size); - build_copy_load(&buf, T2, off + 2 * copy_word_size); + build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); build_copy_load_pref(&buf, off + 3 * copy_word_size); - build_copy_load(&buf, T3, off + 3 * copy_word_size); + build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size); build_copy_store_pref(&buf, off); - build_copy_store(&buf, T0, off); + build_copy_store(&buf, GPR_T0, off); build_copy_store_pref(&buf, off + copy_word_size); - build_copy_store(&buf, T1, off + copy_word_size); + build_copy_store(&buf, GPR_T1, off + copy_word_size); build_copy_store_pref(&buf, off + 2 * copy_word_size); - build_copy_store(&buf, T2, off + 2 * copy_word_size); + build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); build_copy_store_pref(&buf, off + 3 * copy_word_size); if (off == -(4 * copy_word_size)) - uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both); - build_copy_store(&buf, T3, off + 3 * copy_word_size); + uasm_il_bne(&buf, &r, GPR_A2, GPR_A0, label_copy_pref_both); + build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); off += 4 * copy_word_size; } while (off < 0); if (pref_bias_copy_load - pref_bias_copy_store) { - pg_addiu(&buf, A2, A0, + pg_addiu(&buf, GPR_A2, GPR_A0, pref_bias_copy_load - pref_bias_copy_store); uasm_l_copy_pref_store(&l, buf); off = 0; do { - build_copy_load(&buf, T0, off); - build_copy_load(&buf, T1, off + copy_word_size); - build_copy_load(&buf, T2, off + 2 * copy_word_size); - build_copy_load(&buf, T3, off + 3 * copy_word_size); + build_copy_load(&buf, GPR_T0, off); + build_copy_load(&buf, GPR_T1, off + copy_word_size); + build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); + 
build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size); build_copy_store_pref(&buf, off); - build_copy_store(&buf, T0, off); + build_copy_store(&buf, GPR_T0, off); build_copy_store_pref(&buf, off + copy_word_size); - build_copy_store(&buf, T1, off + copy_word_size); + build_copy_store(&buf, GPR_T1, off + copy_word_size); build_copy_store_pref(&buf, off + 2 * copy_word_size); - build_copy_store(&buf, T2, off + 2 * copy_word_size); + build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); build_copy_store_pref(&buf, off + 3 * copy_word_size); - build_copy_store(&buf, T3, off + 3 * copy_word_size); + build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); off += 4 * copy_word_size; } while (off < half_copy_loop_size); - pg_addiu(&buf, A1, A1, 2 * off); - pg_addiu(&buf, A0, A0, 2 * off); + pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off); + pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off); off = -off; do { - build_copy_load(&buf, T0, off); - build_copy_load(&buf, T1, off + copy_word_size); - build_copy_load(&buf, T2, off + 2 * copy_word_size); - build_copy_load(&buf, T3, off + 3 * copy_word_size); + build_copy_load(&buf, GPR_T0, off); + build_copy_load(&buf, GPR_T1, off + copy_word_size); + build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); + build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size); build_copy_store_pref(&buf, off); - build_copy_store(&buf, T0, off); + build_copy_store(&buf, GPR_T0, off); build_copy_store_pref(&buf, off + copy_word_size); - build_copy_store(&buf, T1, off + copy_word_size); + build_copy_store(&buf, GPR_T1, off + copy_word_size); build_copy_store_pref(&buf, off + 2 * copy_word_size); - build_copy_store(&buf, T2, off + 2 * copy_word_size); + build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); build_copy_store_pref(&buf, off + 3 * copy_word_size); if (off == -(4 * copy_word_size)) - uasm_il_bne(&buf, &r, A2, A0, + uasm_il_bne(&buf, &r, GPR_A2, GPR_A0, label_copy_pref_store); - build_copy_store(&buf, T3, off + 3 * copy_word_size); + build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); off += 4 * copy_word_size; } while (off < 0); } if (pref_bias_copy_store) { - pg_addiu(&buf, A2, A0, pref_bias_copy_store); + pg_addiu(&buf, GPR_A2, GPR_A0, pref_bias_copy_store); uasm_l_copy_nopref(&l, buf); off = 0; do { - build_copy_load(&buf, T0, off); - build_copy_load(&buf, T1, off + copy_word_size); - build_copy_load(&buf, T2, off + 2 * copy_word_size); - build_copy_load(&buf, T3, off + 3 * copy_word_size); - build_copy_store(&buf, T0, off); - build_copy_store(&buf, T1, off + copy_word_size); - build_copy_store(&buf, T2, off + 2 * copy_word_size); - build_copy_store(&buf, T3, off + 3 * copy_word_size); + build_copy_load(&buf, GPR_T0, off); + build_copy_load(&buf, GPR_T1, off + copy_word_size); + build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); + build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size); + build_copy_store(&buf, GPR_T0, off); + build_copy_store(&buf, GPR_T1, off + copy_word_size); + build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); + build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); off += 4 * copy_word_size; } while (off < half_copy_loop_size); - pg_addiu(&buf, A1, A1, 2 * off); - pg_addiu(&buf, A0, A0, 2 * off); + pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off); + pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off); off = -off; do { - build_copy_load(&buf, T0, off); - build_copy_load(&buf, T1, off + copy_word_size); - build_copy_load(&buf, T2, off + 2 * copy_word_size); - build_copy_load(&buf, T3, off + 3 * copy_word_size); - build_copy_store(&buf, T0, 
off); - build_copy_store(&buf, T1, off + copy_word_size); - build_copy_store(&buf, T2, off + 2 * copy_word_size); + build_copy_load(&buf, GPR_T0, off); + build_copy_load(&buf, GPR_T1, off + copy_word_size); + build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); + build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size); + build_copy_store(&buf, GPR_T0, off); + build_copy_store(&buf, GPR_T1, off + copy_word_size); + build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); if (off == -(4 * copy_word_size)) - uasm_il_bne(&buf, &r, A2, A0, + uasm_il_bne(&buf, &r, GPR_A2, GPR_A0, label_copy_nopref); - build_copy_store(&buf, T3, off + 3 * copy_word_size); + build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); off += 4 * copy_word_size; } while (off < 0); } - uasm_i_jr(&buf, RA); + uasm_i_jr(&buf, GPR_RA); uasm_i_nop(&buf); BUG_ON(buf > &__copy_page_end); diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c index 61891af25019..e2cf2166d5cb 100644 --- a/arch/mips/mm/pgtable-32.c +++ b/arch/mips/mm/pgtable-32.c @@ -13,9 +13,9 @@ #include <asm/pgalloc.h> #include <asm/tlbflush.h> -void pgd_init(unsigned long page) +void pgd_init(void *addr) { - unsigned long *p = (unsigned long *) page; + unsigned long *p = (unsigned long *)addr; int i; for (i = 0; i < USER_PTRS_PER_PGD; i+=8) { @@ -31,16 +31,6 @@ void pgd_init(unsigned long page) } #if defined(CONFIG_TRANSPARENT_HUGEPAGE) -pmd_t mk_pmd(struct page *page, pgprot_t prot) -{ - pmd_t pmd; - - pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot); - - return pmd; -} - - void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { @@ -61,9 +51,8 @@ void __init pagetable_init(void) #endif /* Initialize the entire pgd. */ - pgd_init((unsigned long)swapper_pg_dir); - pgd_init((unsigned long)swapper_pg_dir - + sizeof(pgd_t) * USER_PTRS_PER_PGD); + pgd_init(swapper_pg_dir); + pgd_init(&swapper_pg_dir[USER_PTRS_PER_PGD]); pgd_base = swapper_pg_dir; diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c index 7536f7804c44..b24f865de357 100644 --- a/arch/mips/mm/pgtable-64.c +++ b/arch/mips/mm/pgtable-64.c @@ -13,7 +13,7 @@ #include <asm/pgalloc.h> #include <asm/tlbflush.h> -void pgd_init(unsigned long page) +void pgd_init(void *addr) { unsigned long *p, *end; unsigned long entry; @@ -26,7 +26,7 @@ void pgd_init(unsigned long page) entry = (unsigned long)invalid_pte_table; #endif - p = (unsigned long *) page; + p = (unsigned long *) addr; end = p + PTRS_PER_PGD; do { @@ -43,11 +43,12 @@ void pgd_init(unsigned long page) } #ifndef __PAGETABLE_PMD_FOLDED -void pmd_init(unsigned long addr, unsigned long pagetable) +void pmd_init(void *addr) { unsigned long *p, *end; + unsigned long pagetable = (unsigned long)invalid_pte_table; - p = (unsigned long *) addr; + p = (unsigned long *)addr; end = p + PTRS_PER_PMD; do { @@ -66,9 +67,10 @@ EXPORT_SYMBOL_GPL(pmd_init); #endif #ifndef __PAGETABLE_PUD_FOLDED -void pud_init(unsigned long addr, unsigned long pagetable) +void pud_init(void *addr) { unsigned long *p, *end; + unsigned long pagetable = (unsigned long)invalid_pmd_table; p = (unsigned long *)addr; end = p + PTRS_PER_PUD; @@ -87,20 +89,13 @@ void pud_init(unsigned long addr, unsigned long pagetable) } #endif -pmd_t mk_pmd(struct page *page, pgprot_t prot) -{ - pmd_t pmd; - - pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot); - - return pmd; -} - +#ifdef CONFIG_TRANSPARENT_HUGEPAGE void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { *pmdp = pmd; } 
+#endif void __init pagetable_init(void) { @@ -108,12 +103,12 @@ void __init pagetable_init(void) pgd_t *pgd_base; /* Initialize the entire pgd. */ - pgd_init((unsigned long)swapper_pg_dir); + pgd_init(swapper_pg_dir); #ifndef __PAGETABLE_PUD_FOLDED - pud_init((unsigned long)invalid_pud_table, (unsigned long)invalid_pmd_table); + pud_init(invalid_pud_table); #endif #ifndef __PAGETABLE_PMD_FOLDED - pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table); + pmd_init(invalid_pmd_table); #endif pgd_base = swapper_pg_dir; /* diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c index 05560b042d82..10835414819f 100644 --- a/arch/mips/mm/pgtable.c +++ b/arch/mips/mm/pgtable.c @@ -10,12 +10,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm) { - pgd_t *ret, *init; + pgd_t *init, *ret; - ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + ret = __pgd_alloc(mm, PGD_TABLE_ORDER); if (ret) { init = pgd_offset(&init_mm, 0UL); - pgd_init((unsigned long)ret); + pgd_init(ret); memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); } diff --git a/arch/mips/mm/physaddr.c b/arch/mips/mm/physaddr.c index a1ced5e44951..a6b1bf82057a 100644 --- a/arch/mips/mm/physaddr.c +++ b/arch/mips/mm/physaddr.c @@ -5,6 +5,7 @@ #include <linux/mmdebug.h> #include <linux/mm.h> +#include <asm/addrspace.h> #include <asm/sections.h> #include <asm/io.h> #include <asm/page.h> @@ -12,15 +13,6 @@ static inline bool __debug_virt_addr_valid(unsigned long x) { - /* high_memory does not get immediately defined, and there - * are early callers of __pa() against PAGE_OFFSET - */ - if (!high_memory && x >= PAGE_OFFSET) - return true; - - if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory) - return true; - /* * MAX_DMA_ADDRESS is a virtual address that may not correspond to an * actual physical address. Enough code relies on @@ -30,13 +22,15 @@ static inline bool __debug_virt_addr_valid(unsigned long x) if (x == MAX_DMA_ADDRESS) return true; - return false; + return x >= PAGE_OFFSET && (KSEGX(x) < KSEG2 || + IS_ENABLED(CONFIG_EVA) || + !IS_ENABLED(CONFIG_HIGHMEM)); } phys_addr_t __virt_to_phys(volatile const void *x) { WARN(!__debug_virt_addr_valid((unsigned long)x), - "virt_to_phys used for non-linear address: %pK (%pS)\n", + "virt_to_phys used for non-linear address: %p (%pS)\n", x, x); return __virt_to_phys_nodebug(x); diff --git a/arch/mips/mm/tlb-funcs.S b/arch/mips/mm/tlb-funcs.S index 00fef578c8cd..2705d7dcb33e 100644 --- a/arch/mips/mm/tlb-funcs.S +++ b/arch/mips/mm/tlb-funcs.S @@ -11,8 +11,8 @@ * Copyright (C) 2012 MIPS Technologies, Inc. * Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org> */ +#include <linux/export.h> #include <asm/asm.h> -#include <asm/export.h> #include <asm/regdef.h> #define FASTPATH_SIZE 128 diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c index a36622ebea55..173f7b36033b 100644 --- a/arch/mips/mm/tlb-r3k.c +++ b/arch/mips/mm/tlb-r3k.c @@ -23,11 +23,11 @@ #include <asm/io.h> #include <asm/bootinfo.h> #include <asm/cpu.h> +#include <asm/setup.h> +#include <asm/tlbex.h> #undef DEBUG_TLB -extern void build_tlb_refill_handler(void); - /* CP0 hazard avoidance. */ #define BARRIER \ __asm__ __volatile__( \ @@ -36,8 +36,6 @@ extern void build_tlb_refill_handler(void); "nop\n\t" \ ".set pop\n\t") -int r3k_have_wired_reg; /* Should be in cpu_data? */ - /* TLB operations. 
*/ static void local_flush_tlb_from(int entry) { @@ -62,7 +60,7 @@ void local_flush_tlb_all(void) printk("[tlball]"); #endif local_irq_save(flags); - local_flush_tlb_from(r3k_have_wired_reg ? read_c0_wired() : 8); + local_flush_tlb_from(8); local_irq_restore(flags); } @@ -185,7 +183,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) int idx, pid; /* - * Handle debugger faulting in for debugee. + * Handle debugger faulting in for debuggee. */ if (current->active_mm != vma->vm_mm) return; @@ -224,34 +222,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, unsigned long old_ctx; static unsigned long wired = 0; - if (r3k_have_wired_reg) { /* TX39XX */ - unsigned long old_pagemask; - unsigned long w; - -#ifdef DEBUG_TLB - printk("[tlbwired<entry lo0 %8x, hi %8x\n, pagemask %8x>]\n", - entrylo0, entryhi, pagemask); -#endif - - local_irq_save(flags); - /* Save old context and create impossible VPN2 value */ - old_ctx = read_c0_entryhi() & asid_mask; - old_pagemask = read_c0_pagemask(); - w = read_c0_wired(); - write_c0_wired(w + 1); - write_c0_index(w << 8); - write_c0_pagemask(pagemask); - write_c0_entryhi(entryhi); - write_c0_entrylo0(entrylo0); - BARRIER; - tlb_write_indexed(); - - write_c0_entryhi(old_ctx); - write_c0_pagemask(old_pagemask); - local_flush_tlb_all(); - local_irq_restore(flags); - - } else if (wired < 8) { + if (wired < 8) { #ifdef DEBUG_TLB printk("[tlbwired<entry lo0 %8x, hi %8x\n>]\n", entrylo0, entryhi); @@ -272,13 +243,6 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, void tlb_init(void) { - switch (current_cpu_type()) { - case CPU_TX3922: - case CPU_TX3927: - r3k_have_wired_reg = 1; - write_c0_wired(0); /* Set to 8 on reset... */ - break; - } local_flush_tlb_from(0); build_tlb_refill_handler(); } diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 1b939abbe4ca..44a662536148 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -12,9 +12,11 @@ #include <linux/init.h> #include <linux/sched.h> #include <linux/smp.h> +#include <linux/memblock.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/export.h> +#include <linux/sort.h> #include <asm/cpu.h> #include <asm/cpu-type.h> @@ -22,9 +24,9 @@ #include <asm/hazards.h> #include <asm/mmu_context.h> #include <asm/tlb.h> +#include <asm/tlbex.h> #include <asm/tlbmisc.h> - -extern void build_tlb_refill_handler(void); +#include <asm/setup.h> /* * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has @@ -297,11 +299,11 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; - pte_t *ptep; + pte_t *ptep, *ptemap = NULL; int idx, pid; /* - * Handle debugger faulting in for debugee. + * Handle debugger faulting in for debuggee. 
*/ if (current->active_mm != vma->vm_mm) return; @@ -326,7 +328,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) idx = read_c0_index(); #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT /* this could be a huge page */ - if (pmd_huge(*pmdp)) { + if (pmd_leaf(*pmdp)) { unsigned long lo; write_c0_pagemask(PM_HUGE_MASK); ptep = (pte_t *)pmdp; @@ -344,7 +346,12 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) } else #endif { - ptep = pte_offset_map(pmdp, address); + ptemap = ptep = pte_offset_map(pmdp, address); + /* + * update_mmu_cache() is called between pte_offset_map_lock() + * and pte_unmap_unlock(), so we can assume that ptep is not + * NULL here: and what should be done below if it were NULL? + */ #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) #ifdef CONFIG_XPA @@ -373,6 +380,9 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) tlbw_use_hazard(); htw_start(); flush_micro_tlb_vm(vma); + + if (ptemap) + pte_unmap(ptemap); local_irq_restore(flags); } @@ -450,6 +460,7 @@ EXPORT_SYMBOL(has_transparent_hugepage); int temp_tlb_entry; +#ifndef CONFIG_64BIT __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, unsigned long entryhi, unsigned long pagemask) { @@ -488,6 +499,7 @@ out: local_irq_restore(flags); return ret; } +#endif static int ntlb; static int __init set_ntlb(char *str) @@ -498,6 +510,97 @@ static int __init set_ntlb(char *str) __setup("ntlb=", set_ntlb); + +/* Comparison function for EntryHi VPN fields. */ +static int r4k_vpn_cmp(const void *a, const void *b) +{ + long v = *(unsigned long *)a - *(unsigned long *)b; + int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0; + return s ? (v != 0) | v >> s : v; +} + +/* + * Initialise all TLB entries with unique values that do not clash with + * what we have been handed over and what we'll be using ourselves. + */ +static void __ref r4k_tlb_uniquify(void) +{ + int tlbsize = current_cpu_data.tlbsize; + bool use_slab = slab_is_available(); + int start = num_wired_entries(); + phys_addr_t tlb_vpn_size; + unsigned long *tlb_vpns; + unsigned long vpn_mask; + int cnt, ent, idx, i; + + vpn_mask = GENMASK(cpu_vmbits - 1, 13); + vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31; + + tlb_vpn_size = tlbsize * sizeof(*tlb_vpns); + tlb_vpns = (use_slab ? + kmalloc(tlb_vpn_size, GFP_KERNEL) : + memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns))); + if (WARN_ON(!tlb_vpns)) + return; /* Pray local_flush_tlb_all() is good enough. */ + + htw_stop(); + + for (i = start, cnt = 0; i < tlbsize; i++, cnt++) { + unsigned long vpn; + + write_c0_index(i); + mtc0_tlbr_hazard(); + tlb_read(); + tlb_read_hazard(); + vpn = read_c0_entryhi(); + vpn &= vpn_mask & PAGE_MASK; + tlb_vpns[cnt] = vpn; + + /* Prevent any large pages from overlapping regular ones. 
*/ + write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK); + mtc0_tlbw_hazard(); + tlb_write_indexed(); + tlbw_use_hazard(); + } + + sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL); + + write_c0_pagemask(PM_DEFAULT_MASK); + write_c0_entrylo0(0); + write_c0_entrylo1(0); + + idx = 0; + ent = tlbsize; + for (i = start; i < tlbsize; i++) + while (1) { + unsigned long entryhi, vpn; + + entryhi = UNIQUE_ENTRYHI(ent); + vpn = entryhi & vpn_mask & PAGE_MASK; + + if (idx >= cnt || vpn < tlb_vpns[idx]) { + write_c0_entryhi(entryhi); + write_c0_index(i); + mtc0_tlbw_hazard(); + tlb_write_indexed(); + ent++; + break; + } else if (vpn == tlb_vpns[idx]) { + ent++; + } else { + idx++; + } + } + + tlbw_use_hazard(); + htw_start(); + flush_micro_tlb(); + if (use_slab) + kfree(tlb_vpns); + else + memblock_free(tlb_vpns, tlb_vpn_size); +} + /* * Configure TLB (for init or after a CPU has been powered off). */ @@ -537,6 +640,7 @@ static void r4k_tlb_configure(void) temp_tlb_entry = current_cpu_data.tlbsize - 1; /* From this point on the ARC firmware is dead. */ + r4k_tlb_uniquify(); local_flush_tlb_all(); /* Did I tell you that ARC SUCKS? */ diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index cd4afcdf3725..69ea54bdc0c3 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -32,8 +32,9 @@ #include <asm/cacheflush.h> #include <asm/cpu-type.h> +#include <asm/mipsregs.h> #include <asm/mmu_context.h> -#include <asm/war.h> +#include <asm/regdef.h> #include <asm/uasm.h> #include <asm/setup.h> #include <asm/tlbex.h> @@ -254,7 +255,7 @@ static void output_pgtable_bits_defines(void) pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT); pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT); pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT); - pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT); + pr_define("PFN_PTE_SHIFT %d\n", PFN_PTE_SHIFT); pr_debug("\n"); } @@ -277,27 +278,6 @@ static inline void dump_handler(const char *symbol, const void *start, const voi pr_debug("\tEND(%s)\n", symbol); } -/* The only general purpose registers allowed in TLB handlers. */ -#define K0 26 -#define K1 27 - -/* Some CP0 registers */ -#define C0_INDEX 0, 0 -#define C0_ENTRYLO0 2, 0 -#define C0_TCBIND 2, 2 -#define C0_ENTRYLO1 3, 0 -#define C0_CONTEXT 4, 0 -#define C0_PAGEMASK 5, 0 -#define C0_PWBASE 5, 5 -#define C0_PWFIELD 5, 6 -#define C0_PWSIZE 5, 7 -#define C0_PWCTL 6, 6 -#define C0_BADVADDR 8, 0 -#define C0_PGD 9, 7 -#define C0_ENTRYHI 10, 0 -#define C0_EPC 14, 0 -#define C0_XCONTEXT 20, 0 - #ifdef CONFIG_64BIT # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT) #else @@ -325,13 +305,7 @@ static unsigned int kscratch_used_mask; static inline int __maybe_unused c0_kscratch(void) { - switch (current_cpu_type()) { - case CPU_XLP: - case CPU_XLR: - return 22; - default: - return 31; - } + return 31; } static int allocate_kscratch(void) @@ -363,30 +337,30 @@ static struct work_registers build_get_work_registers(u32 **p) if (scratch_reg >= 0) { /* Save in CPU local C0_KScratch? 
*/ UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg); - r.r1 = K0; - r.r2 = K1; - r.r3 = 1; + r.r1 = GPR_K0; + r.r2 = GPR_K1; + r.r3 = GPR_AT; return r; } if (num_possible_cpus() > 1) { /* Get smp_processor_id */ - UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG); - UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT); + UASM_i_CPUID_MFC0(p, GPR_K0, SMP_CPUID_REG); + UASM_i_SRL_SAFE(p, GPR_K0, GPR_K0, SMP_CPUID_REGSHIFT); - /* handler_reg_save index in K0 */ - UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save))); + /* handler_reg_save index in GPR_K0 */ + UASM_i_SLL(p, GPR_K0, GPR_K0, ilog2(sizeof(struct tlb_reg_save))); - UASM_i_LA(p, K1, (long)&handler_reg_save); - UASM_i_ADDU(p, K0, K0, K1); + UASM_i_LA(p, GPR_K1, (long)&handler_reg_save); + UASM_i_ADDU(p, GPR_K0, GPR_K0, GPR_K1); } else { - UASM_i_LA(p, K0, (long)&handler_reg_save); + UASM_i_LA(p, GPR_K0, (long)&handler_reg_save); } - /* K0 now points to save area, save $1 and $2 */ - UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0); - UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0); + /* GPR_K0 now points to save area, save $1 and $2 */ + UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), GPR_K0); + UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), GPR_K0); - r.r1 = K1; + r.r1 = GPR_K1; r.r2 = 1; r.r3 = 2; return r; @@ -399,9 +373,9 @@ static void build_restore_work_registers(u32 **p) UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); return; } - /* K0 already points to save area, restore $1 and $2 */ - UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0); - UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0); + /* GPR_K0 already points to save area, restore $1 and $2 */ + UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), GPR_K0); + UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), GPR_K0); } #ifndef CONFIG_MIPS_PGD_C0_CONTEXT @@ -420,22 +394,22 @@ static void build_r3000_tlb_refill_handler(void) memset(tlb_handler, 0, sizeof(tlb_handler)); p = tlb_handler; - uasm_i_mfc0(&p, K0, C0_BADVADDR); - uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */ - uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1); - uasm_i_srl(&p, K0, K0, 22); /* load delay */ - uasm_i_sll(&p, K0, K0, 2); - uasm_i_addu(&p, K1, K1, K0); - uasm_i_mfc0(&p, K0, C0_CONTEXT); - uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */ - uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */ - uasm_i_addu(&p, K1, K1, K0); - uasm_i_lw(&p, K0, 0, K1); + uasm_i_mfc0(&p, GPR_K0, C0_BADVADDR); + uasm_i_lui(&p, GPR_K1, uasm_rel_hi(pgdc)); /* cp0 delay */ + uasm_i_lw(&p, GPR_K1, uasm_rel_lo(pgdc), GPR_K1); + uasm_i_srl(&p, GPR_K0, GPR_K0, 22); /* load delay */ + uasm_i_sll(&p, GPR_K0, GPR_K0, 2); + uasm_i_addu(&p, GPR_K1, GPR_K1, GPR_K0); + uasm_i_mfc0(&p, GPR_K0, C0_CONTEXT); + uasm_i_lw(&p, GPR_K1, 0, GPR_K1); /* cp0 delay */ + uasm_i_andi(&p, GPR_K0, GPR_K0, 0xffc); /* load delay */ + uasm_i_addu(&p, GPR_K1, GPR_K1, GPR_K0); + uasm_i_lw(&p, GPR_K0, 0, GPR_K1); uasm_i_nop(&p); /* load delay */ - uasm_i_mtc0(&p, K0, C0_ENTRYLO0); - uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */ + uasm_i_mtc0(&p, GPR_K0, C0_ENTRYLO0); + uasm_i_mfc0(&p, GPR_K1, C0_EPC); /* cp0 delay */ uasm_i_tlbwr(&p); /* cp0 delay */ - uasm_i_jr(&p, K1); + uasm_i_jr(&p, GPR_K1); uasm_i_rfe(&p); /* branch delay */ if (p > tlb_handler + 32) @@ -553,7 +527,6 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l, case CPU_5KC: case CPU_TX49XX: case CPU_PR4450: - case CPU_XLR: uasm_i_nop(p); tlbw(p); break; @@ -594,25 +567,6 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l, tlbw(p); break; - case CPU_VR4111: - case CPU_VR4121: - case 
CPU_VR4122: - case CPU_VR4181: - case CPU_VR4181A: - uasm_i_nop(p); - uasm_i_nop(p); - tlbw(p); - uasm_i_nop(p); - uasm_i_nop(p); - break; - - case CPU_VR4131: - case CPU_VR4133: - uasm_i_nop(p); - uasm_i_nop(p); - tlbw(p); - break; - case CPU_XBURST: tlbw(p); uasm_i_nop(p); @@ -634,7 +588,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p, return; } - if (cpu_has_rixi && !!_PAGE_NO_EXEC) { + if (cpu_has_rixi && _PAGE_NO_EXEC != 0) { if (fill_includes_sw_bits) { UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL)); } else { @@ -816,7 +770,7 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, if (check_for_high_segbits) { /* - * The kernel currently implicitely assumes that the + * The kernel currently implicitly assumes that the * MIPS SEGBITS parameter for the processor is * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never * allocate virtual addresses outside the maximum @@ -826,7 +780,7 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, * everything but the lower xuseg addresses goes down * the module_alloc/vmalloc path. */ - uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); + uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); uasm_il_bnez(p, r, ptr, label_vmalloc); } else { uasm_il_bltz(p, r, tmp, label_vmalloc); @@ -1003,22 +957,6 @@ static void build_adjust_context(u32 **p, unsigned int ctx) unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); - switch (current_cpu_type()) { - case CPU_VR41XX: - case CPU_VR4111: - case CPU_VR4121: - case CPU_VR4122: - case CPU_VR4131: - case CPU_VR4181: - case CPU_VR4181A: - case CPU_VR4133: - shift += 2; - break; - - default: - break; - } - if (shift) UASM_i_SRL(p, ctx, ctx, shift); uasm_i_andi(p, ctx, ctx, mask); @@ -1135,7 +1073,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, UASM_i_SW(p, scratch, scratchpad_offset(0), 0); uasm_i_dsrl_safe(p, scratch, tmp, - PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); + PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); uasm_il_bnez(p, r, scratch, label_vmalloc); if (pgd_reg == -1) { @@ -1319,11 +1257,11 @@ static void build_r4000_tlb_refill_handler(void) memset(final_handler, 0, sizeof(final_handler)); if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) { - htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, + htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, GPR_K0, GPR_K1, scratch_reg); vmalloc_mode = refill_scratch; } else { - htlb_info.huge_pte = K0; + htlb_info.huge_pte = GPR_K0; htlb_info.restore_scratch = 0; htlb_info.need_reload_pte = true; vmalloc_mode = refill_noscratch; @@ -1333,29 +1271,29 @@ static void build_r4000_tlb_refill_handler(void) if (bcm1250_m3_war()) { unsigned int segbits = 44; - uasm_i_dmfc0(&p, K0, C0_BADVADDR); - uasm_i_dmfc0(&p, K1, C0_ENTRYHI); - uasm_i_xor(&p, K0, K0, K1); - uasm_i_dsrl_safe(&p, K1, K0, 62); - uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); - uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); - uasm_i_or(&p, K0, K0, K1); - uasm_il_bnez(&p, &r, K0, label_leave); + uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR); + uasm_i_dmfc0(&p, GPR_K1, C0_ENTRYHI); + uasm_i_xor(&p, GPR_K0, GPR_K0, GPR_K1); + uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0, 62); + uasm_i_dsrl_safe(&p, GPR_K0, GPR_K0, 12 + 1); + uasm_i_dsll_safe(&p, GPR_K0, GPR_K0, 64 + 12 + 1 - segbits); + uasm_i_or(&p, GPR_K0, GPR_K0, GPR_K1); + uasm_il_bnez(&p, &r, GPR_K0, 
label_leave); /* No need for uasm_i_nop */ } #ifdef CONFIG_64BIT - build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ + build_get_pmde64(&p, &l, &r, GPR_K0, GPR_K1); /* get pmd in GPR_K1 */ #else - build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ + build_get_pgde32(&p, GPR_K0, GPR_K1); /* get pgd in GPR_K1 */ #endif #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT - build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update); + build_is_huge_pte(&p, &r, GPR_K0, GPR_K1, label_tlb_huge_update); #endif - build_get_ptep(&p, K0, K1); - build_update_entries(&p, K0, K1); + build_get_ptep(&p, GPR_K0, GPR_K1); + build_update_entries(&p, GPR_K0, GPR_K1); build_tlb_write_entry(&p, &l, &r, tlb_random); uasm_l_leave(&l, p); uasm_i_eret(&p); /* return from trap */ @@ -1363,14 +1301,14 @@ static void build_r4000_tlb_refill_handler(void) #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT uasm_l_tlb_huge_update(&l, p); if (htlb_info.need_reload_pte) - UASM_i_LW(&p, htlb_info.huge_pte, 0, K1); - build_huge_update_entries(&p, htlb_info.huge_pte, K1); - build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, + UASM_i_LW(&p, htlb_info.huge_pte, 0, GPR_K1); + build_huge_update_entries(&p, htlb_info.huge_pte, GPR_K1); + build_huge_tlb_write_entry(&p, &l, &r, GPR_K0, tlb_random, htlb_info.restore_scratch); #endif #ifdef CONFIG_64BIT - build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode); + build_get_pgd_vmalloc64(&p, &l, &r, GPR_K0, GPR_K1, vmalloc_mode); #endif /* @@ -1383,6 +1321,7 @@ static void build_r4000_tlb_refill_handler(void) switch (boot_cpu_type()) { default: if (sizeof(long) == 4) { + fallthrough; case CPU_LOONGSON2EF: /* Loongson2 ebase is different than r4k, we have more space */ if ((p - tlb_handler) > 64) @@ -1500,12 +1439,12 @@ static void setup_pw(void) #endif pgd_i = PGDIR_SHIFT; /* 1st level PGD */ #ifndef __PAGETABLE_PMD_FOLDED - pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER; + pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_TABLE_ORDER; pmd_i = PMD_SHIFT; /* 2nd level PMD */ pmd_w = PMD_SHIFT - PAGE_SHIFT; #else - pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER; + pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_TABLE_ORDER; #endif pt_i = PAGE_SHIFT; /* 3rd level PTE */ @@ -1542,34 +1481,35 @@ static void build_loongson3_tlb_refill_handler(void) memset(tlb_handler, 0, sizeof(tlb_handler)); if (check_for_high_segbits) { - uasm_i_dmfc0(&p, K0, C0_BADVADDR); - uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); - uasm_il_beqz(&p, &r, K1, label_vmalloc); + uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR); + uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0, + PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); + uasm_il_beqz(&p, &r, GPR_K1, label_vmalloc); uasm_i_nop(&p); - uasm_il_bgez(&p, &r, K0, label_large_segbits_fault); + uasm_il_bgez(&p, &r, GPR_K0, label_large_segbits_fault); uasm_i_nop(&p); uasm_l_vmalloc(&l, p); } - uasm_i_dmfc0(&p, K1, C0_PGD); + uasm_i_dmfc0(&p, GPR_K1, C0_PGD); - uasm_i_lddir(&p, K0, K1, 3); /* global page dir */ + uasm_i_lddir(&p, GPR_K0, GPR_K1, 3); /* global page dir */ #ifndef __PAGETABLE_PMD_FOLDED - uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */ + uasm_i_lddir(&p, GPR_K1, GPR_K0, 1); /* middle page dir */ #endif - uasm_i_ldpte(&p, K1, 0); /* even */ - uasm_i_ldpte(&p, K1, 1); /* odd */ + uasm_i_ldpte(&p, GPR_K1, 0); /* even */ + uasm_i_ldpte(&p, GPR_K1, 1); /* odd */ uasm_i_tlbwr(&p); /* restore page mask */ if (PM_DEFAULT_MASK >> 16) { - uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16); - uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff); - uasm_i_mtc0(&p, K0, C0_PAGEMASK); + uasm_i_lui(&p, GPR_K0, PM_DEFAULT_MASK 
>> 16); + uasm_i_ori(&p, GPR_K0, GPR_K0, PM_DEFAULT_MASK & 0xffff); + uasm_i_mtc0(&p, GPR_K0, C0_PAGEMASK); } else if (PM_DEFAULT_MASK) { - uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK); - uasm_i_mtc0(&p, K0, C0_PAGEMASK); + uasm_i_ori(&p, GPR_K0, 0, PM_DEFAULT_MASK); + uasm_i_mtc0(&p, GPR_K0, C0_PAGEMASK); } else { uasm_i_mtc0(&p, 0, C0_PAGEMASK); } @@ -1578,8 +1518,8 @@ static void build_loongson3_tlb_refill_handler(void) if (check_for_high_segbits) { uasm_l_large_segbits_fault(&l, p); - UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0); - uasm_i_jr(&p, K1); + UASM_i_LA(&p, GPR_K1, (unsigned long)tlb_do_page_fault_0); + uasm_i_jr(&p, GPR_K1); uasm_i_nop(&p); } @@ -1757,7 +1697,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, /* * Check if PTE is present, if not then jump to LABEL. PTR points to * the page table where this PTE is located, PTE will be re-loaded - * with it's original value. + * with its original value. */ static void build_pte_present(u32 **p, struct uasm_reloc **r, @@ -1945,11 +1885,11 @@ static void build_r3000_tlb_load_handler(void) memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); - build_r3000_tlbchange_handler_head(&p, K0, K1); - build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl); + build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1); + build_pte_present(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbl); uasm_i_nop(&p); /* load delay */ - build_make_valid(&p, &r, K0, K1, -1); - build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); + build_make_valid(&p, &r, GPR_K0, GPR_K1, -1); + build_r3000_tlb_reload_write(&p, &l, &r, GPR_K0, GPR_K1); uasm_l_nopage_tlbl(&l, p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); @@ -1975,11 +1915,11 @@ static void build_r3000_tlb_store_handler(void) memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); - build_r3000_tlbchange_handler_head(&p, K0, K1); - build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs); + build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1); + build_pte_writable(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbs); uasm_i_nop(&p); /* load delay */ - build_make_write(&p, &r, K0, K1, -1); - build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); + build_make_write(&p, &r, GPR_K0, GPR_K1, -1); + build_r3000_tlb_reload_write(&p, &l, &r, GPR_K0, GPR_K1); uasm_l_nopage_tlbs(&l, p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); @@ -2005,11 +1945,11 @@ static void build_r3000_tlb_modify_handler(void) memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); - build_r3000_tlbchange_handler_head(&p, K0, K1); - build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm); + build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1); + build_pte_modifiable(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbm); uasm_i_nop(&p); /* load delay */ - build_make_write(&p, &r, K0, K1, -1); - build_r3000_pte_reload_tlbwi(&p, K0, K1); + build_make_write(&p, &r, GPR_K0, GPR_K1, -1); + build_r3000_pte_reload_tlbwi(&p, GPR_K0, GPR_K1); uasm_l_nopage_tlbm(&l, p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); @@ -2072,7 +2012,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, UASM_i_MFC0(p, wr.r1, C0_BADVADDR); UASM_i_LW(p, wr.r2, 0, wr.r2); - UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); + UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT - PTE_T_LOG2); uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2); UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1); @@ -2125,14 +2065,14 @@ static void 
build_r4000_tlb_load_handler(void) if (bcm1250_m3_war()) { unsigned int segbits = 44; - uasm_i_dmfc0(&p, K0, C0_BADVADDR); - uasm_i_dmfc0(&p, K1, C0_ENTRYHI); - uasm_i_xor(&p, K0, K0, K1); - uasm_i_dsrl_safe(&p, K1, K0, 62); - uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); - uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); - uasm_i_or(&p, K0, K0, K1); - uasm_il_bnez(&p, &r, K0, label_leave); + uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR); + uasm_i_dmfc0(&p, GPR_K1, C0_ENTRYHI); + uasm_i_xor(&p, GPR_K0, GPR_K0, GPR_K1); + uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0, 62); + uasm_i_dsrl_safe(&p, GPR_K0, GPR_K0, 12 + 1); + uasm_i_dsll_safe(&p, GPR_K0, GPR_K0, 64 + 12 + 1 - segbits); + uasm_i_or(&p, GPR_K0, GPR_K0, GPR_K1); + uasm_il_bnez(&p, &r, GPR_K0, label_leave); /* No need for uasm_i_nop */ } @@ -2165,17 +2105,8 @@ static void build_r4000_tlb_load_handler(void) uasm_i_tlbr(&p); - switch (current_cpu_type()) { - default: - if (cpu_has_mips_r2_exec_hazard) { - uasm_i_ehb(&p); - - case CPU_CAVIUM_OCTEON: - case CPU_CAVIUM_OCTEON_PLUS: - case CPU_CAVIUM_OCTEON2: - break; - } - } + if (cpu_has_mips_r2_exec_hazard) + uasm_i_ehb(&p); /* Examine entrylo 0 or 1 based on ptr. */ if (use_bbit_insns()) { @@ -2240,17 +2171,8 @@ static void build_r4000_tlb_load_handler(void) uasm_i_tlbr(&p); - switch (current_cpu_type()) { - default: - if (cpu_has_mips_r2_exec_hazard) { - uasm_i_ehb(&p); - - case CPU_CAVIUM_OCTEON: - case CPU_CAVIUM_OCTEON_PLUS: - case CPU_CAVIUM_OCTEON2: - break; - } - } + if (cpu_has_mips_r2_exec_hazard) + uasm_i_ehb(&p); /* Examine entrylo 0 or 1 based on ptr. */ if (use_bbit_insns()) { @@ -2293,9 +2215,9 @@ static void build_r4000_tlb_load_handler(void) build_restore_work_registers(&p); #ifdef CONFIG_CPU_MICROMIPS if ((unsigned long)tlb_do_page_fault_0 & 1) { - uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0)); - uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0)); - uasm_i_jr(&p, K0); + uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_0)); + uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_0)); + uasm_i_jr(&p, GPR_K0); } else #endif uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); @@ -2349,9 +2271,9 @@ static void build_r4000_tlb_store_handler(void) build_restore_work_registers(&p); #ifdef CONFIG_CPU_MICROMIPS if ((unsigned long)tlb_do_page_fault_1 & 1) { - uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1)); - uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1)); - uasm_i_jr(&p, K0); + uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_1)); + uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_1)); + uasm_i_jr(&p, GPR_K0); } else #endif uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); @@ -2406,9 +2328,9 @@ static void build_r4000_tlb_modify_handler(void) build_restore_work_registers(&p); #ifdef CONFIG_CPU_MICROMIPS if ((unsigned long)tlb_do_page_fault_1 & 1) { - uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1)); - uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1)); - uasm_i_jr(&p, K0); + uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_1)); + uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_1)); + uasm_i_jr(&p, GPR_K0); } else #endif uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); @@ -2574,7 +2496,7 @@ static void check_pabits(void) unsigned long entry; unsigned pabits, fillbits; - if (!cpu_has_rixi || !_PAGE_NO_EXEC) { + if (!cpu_has_rixi || _PAGE_NO_EXEC == 0) { /* * We'll only be making use of the fact that 
we can rotate bits * into the fill if the CPU supports RIXI, so don't bother @@ -2620,7 +2542,7 @@ void build_tlb_refill_handler(void) check_pabits(); #ifdef CONFIG_64BIT - check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); + check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); #endif if (cpu_has_3kex) { diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 7154a1d99aad..e15c6700cd08 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -90,7 +90,7 @@ static const struct insn insn_table[insn_invalid] = { RS | RT | RD}, [insn_dmtc0] = {M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, [insn_dmultu] = {M(spec_op, 0, 0, 0, 0, dmultu_op), RS | RT}, - [insn_dmulu] = {M(spec_op, 0, 0, 0, dmult_dmul_op, dmultu_op), + [insn_dmulu] = {M(spec_op, 0, 0, 0, dmultu_dmulu_op, dmultu_op), RS | RT | RD}, [insn_drotr] = {M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE}, [insn_drotr32] = {M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE}, @@ -150,6 +150,8 @@ static const struct insn insn_table[insn_invalid] = { [insn_mtlo] = {M(spec_op, 0, 0, 0, 0, mtlo_op), RS}, [insn_mulu] = {M(spec_op, 0, 0, 0, multu_mulu_op, multu_op), RS | RT | RD}, + [insn_muhu] = {M(spec_op, 0, 0, 0, multu_muhu_op, multu_op), + RS | RT | RD}, #ifndef CONFIG_CPU_MIPSR6 [insn_mul] = {M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, #else diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index 81dd226d6b6b..125140979d62 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -59,7 +59,7 @@ enum opcode { insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu, insn_ll, insn_lld, insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi, insn_mflo, insn_modu, insn_movn, insn_movz, insn_mtc0, insn_mthc0, - insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_nor, + insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_muhu, insn_nor, insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb, insn_sc, insn_scd, insn_seleqz, insn_selnez, insn_sd, insn_sh, insn_sll, insn_sllv, insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, @@ -344,6 +344,7 @@ I_u1(_mtlo) I_u3u1u2(_mul) I_u1u2(_multu) I_u3u1u2(_mulu) +I_u3u1u2(_muhu) I_u3u1u2(_nor) I_u3u1u2(_or) I_u2u1u3(_ori) |
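Note on the r4k_vpn_cmp() comparator added in tlb-r4k.c above: sort() needs an int result, but on 64-bit the difference of two unsigned long VPNs does not fit in an int, so the helper folds the value into -1/0/1 by OR-ing a "non-zero" flag with the arithmetically shifted sign bit. A minimal stand-alone sketch of that idiom, assuming nothing beyond standard C (the harness and sample values are illustrative, not part of the patch):

    #include <stdio.h>

    /* Collapse a long difference into -1/0/1 without truncating the sign
     * when long is wider than int; when s == 0 the plain value is enough. */
    static int vpn_cmp(const void *a, const void *b)
    {
            long v = *(const unsigned long *)a - *(const unsigned long *)b;
            int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1 : 0;

            return s ? (v != 0) | v >> s : v;
    }

    int main(void)
    {
            unsigned long x = 0x12345000UL, y = 0x12346000UL;

            printf("%d %d %d\n",
                   vpn_cmp(&x, &y),    /* -1: x sorts before y */
                   vpn_cmp(&y, &x),    /*  1: y sorts after x  */
                   vpn_cmp(&x, &x));   /*  0: equal keys       */
            return 0;
    }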

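Note on r4k_tlb_uniquify() in tlb-r4k.c: once the EntryHi VPNs already present have been read out and sorted, the fill loop walks the candidate values and the sorted list in lock-step, skipping any candidate that matches an existing entry so that every EntryHi written back stays unique. A simplified user-space sketch of that selection loop, with the CP0 accesses and the UNIQUE_ENTRYHI() encoding replaced by a plain counter (candidate_vpn(), present[] and the sizes below are illustrative assumptions, not the kernel's values):

    #include <stdio.h>

    #define TLBSIZE 8

    /* Stand-in for UNIQUE_ENTRYHI(): candidates grow monotonically with ent. */
    static unsigned long candidate_vpn(int ent)
    {
            return 0x80000000UL + (unsigned long)ent * 0x2000UL;
    }

    int main(void)
    {
            /* VPNs already sitting in the TLB, sorted ascending. */
            unsigned long present[] = { 0x80012000UL, 0x80016000UL };
            unsigned long chosen[TLBSIZE];
            int cnt = 2, idx = 0, ent = TLBSIZE, i;

            for (i = 0; i < TLBSIZE; i++)
                    for (;;) {
                            unsigned long vpn = candidate_vpn(ent);

                            if (idx >= cnt || vpn < present[idx]) {
                                    chosen[i] = vpn;        /* free value, take it */
                                    ent++;
                                    break;
                            } else if (vpn == present[idx]) {
                                    ent++;                  /* would clash, skip it */
                            } else {
                                    idx++;                  /* moved past this entry */
                            }
                    }

            for (i = 0; i < TLBSIZE; i++)
                    printf("entry %d -> %#lx\n", i, chosen[i]);
            return 0;
    }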