Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/Makefile             9
-rw-r--r--  arch/mips/mm/c-octeon.c          58
-rw-r--r--  arch/mips/mm/c-r3k.c             20
-rw-r--r--  arch/mips/mm/c-r4k.c            306
-rw-r--r--  arch/mips/mm/c-tx39.c           421
-rw-r--r--  arch/mips/mm/cache.c            182
-rw-r--r--  arch/mips/mm/cex-gen.S            2
-rw-r--r--  arch/mips/mm/context.c            5
-rw-r--r--  arch/mips/mm/dma-noncoherent.c   65
-rw-r--r--  arch/mips/mm/fault.c             74
-rw-r--r--  arch/mips/mm/highmem.c          119
-rw-r--r--  arch/mips/mm/hugetlbpage.c       16
-rw-r--r--  arch/mips/mm/init.c             107
-rw-r--r--  arch/mips/mm/ioremap.c          155
-rw-r--r--  arch/mips/mm/ioremap64.c         23
-rw-r--r--  arch/mips/mm/maccess.c           10
-rw-r--r--  arch/mips/mm/page-funcs.S         2
-rw-r--r--  arch/mips/mm/page.c             224
-rw-r--r--  arch/mips/mm/pgtable-32.c        13
-rw-r--r--  arch/mips/mm/pgtable-64.c        24
-rw-r--r--  arch/mips/mm/pgtable.c           10
-rw-r--r--  arch/mips/mm/physaddr.c          50
-rw-r--r--  arch/mips/mm/sc-ip22.c            1
-rw-r--r--  arch/mips/mm/sc-mips.c           14
-rw-r--r--  arch/mips/mm/sc-r5k.c             1
-rw-r--r--  arch/mips/mm/tlb-funcs.S          2
-rw-r--r--  arch/mips/mm/tlb-r3k.c           47
-rw-r--r--  arch/mips/mm/tlb-r4k.c           28
-rw-r--r--  arch/mips/mm/tlbex.c            335
-rw-r--r--  arch/mips/mm/uasm-mips.c          4
-rw-r--r--  arch/mips/mm/uasm.c               5
31 files changed, 774 insertions, 1558 deletions
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 46f483e952c8..304692391519 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -22,8 +22,12 @@ else
obj-y += uasm-mips.o
endif
+ifndef CONFIG_EVA
+obj-y += maccess.o
+endif
+
obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
-obj-$(CONFIG_64BIT) += pgtable-64.o
+obj-$(CONFIG_64BIT) += ioremap64.o pgtable-64.o
obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_DMA_NONCOHERENT) += dma-noncoherent.o
@@ -32,7 +36,6 @@ obj-$(CONFIG_CPU_R3K_TLB) += tlb-r3k.o
obj-$(CONFIG_CPU_R4K_CACHE_TLB) += c-r4k.o cex-gen.o tlb-r4k.o
obj-$(CONFIG_CPU_R3000) += c-r3k.o
obj-$(CONFIG_CPU_SB1) += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o
-obj-$(CONFIG_CPU_TX39XX) += c-tx39.o
obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
@@ -40,3 +43,5 @@ obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
obj-$(CONFIG_SCACHE_DEBUGFS) += sc-debugfs.o
+
+obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 8064821e9805..b7393b61cfa7 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -20,18 +20,16 @@
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>
-#include <asm/war.h>
#include <asm/octeon/octeon.h>
unsigned long long cache_err_dcache[NR_CPUS];
EXPORT_SYMBOL_GPL(cache_err_dcache);
-/**
+/*
* Octeon automatically flushes the dcache on tlb changes, so
* from Linux's viewpoint it acts much like a physically
* tagged cache. No flushing is needed
@@ -57,8 +55,8 @@ static void local_octeon_flush_icache_range(unsigned long start,
}
/**
- * Flush caches as necessary for all cores affected by a
- * vma. If no vma is supplied, all cores are flushed.
+ * octeon_flush_icache_all_cores - Flush caches as necessary for all cores
+ * affected by a vma. If no vma is supplied, all cores are flushed.
*
* @vma: VMA to flush or NULL to flush all icaches.
*/
@@ -85,15 +83,20 @@ static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
else
mask = *cpu_online_mask;
cpumask_clear_cpu(cpu, &mask);
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
for_each_cpu(cpu, &mask)
octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
+#else
+ smp_call_function_many(&mask, (smp_call_func_t)octeon_local_flush_icache,
+ NULL, 1);
+#endif
preempt_enable();
#endif
}
-/**
+/*
* Called to flush the icache on all cores
*/
static void octeon_flush_icache_all(void)
@@ -103,8 +106,7 @@ static void octeon_flush_icache_all(void)
/**
- * Called to flush all memory associated with a memory
- * context.
+ * octeon_flush_cache_mm - flush all memory associated with a memory context.
*
* @mm: Memory context to flush
*/
@@ -117,7 +119,7 @@ static void octeon_flush_cache_mm(struct mm_struct *mm)
}
-/**
+/*
* Flush a range of kernel addresses out of the icache
*
*/
@@ -128,11 +130,11 @@ static void octeon_flush_icache_range(unsigned long start, unsigned long end)
/**
- * Flush a range out of a vma
+ * octeon_flush_cache_range - Flush a range out of a vma
*
* @vma: VMA to flush
- * @start:
- * @end:
+ * @start: beginning address for flush
+ * @end: ending address for flush
*/
static void octeon_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
@@ -143,11 +145,11 @@ static void octeon_flush_cache_range(struct vm_area_struct *vma,
/**
- * Flush a specific page of a vma
+ * octeon_flush_cache_page - Flush a specific page of a vma
*
* @vma: VMA to flush page for
* @page: Page to flush
- * @pfn:
+ * @pfn: Page frame number
*/
static void octeon_flush_cache_page(struct vm_area_struct *vma,
unsigned long page, unsigned long pfn)
@@ -161,7 +163,7 @@ static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
BUG();
}
-/**
+/*
* Probe Octeon's caches
*
*/
@@ -237,17 +239,17 @@ static void probe_octeon(void)
c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);
if (smp_processor_id() == 0) {
- pr_notice("Primary instruction cache %ldkB, %s, %d way, "
- "%d sets, linesize %d bytes.\n",
- icache_size >> 10,
- cpu_has_vtag_icache ?
+ pr_info("Primary instruction cache %ldkB, %s, %d way, "
+ "%d sets, linesize %d bytes.\n",
+ icache_size >> 10,
+ cpu_has_vtag_icache ?
"virtually tagged" : "physically tagged",
- c->icache.ways, c->icache.sets, c->icache.linesz);
+ c->icache.ways, c->icache.sets, c->icache.linesz);
- pr_notice("Primary data cache %ldkB, %d-way, %d sets, "
- "linesize %d bytes.\n",
- dcache_size >> 10, c->dcache.ways,
- c->dcache.sets, c->dcache.linesz);
+ pr_info("Primary data cache %ldkB, %d-way, %d sets, "
+ "linesize %d bytes.\n",
+ dcache_size >> 10, c->dcache.ways,
+ c->dcache.sets, c->dcache.linesz);
}
}
@@ -257,7 +259,7 @@ static void octeon_cache_error_setup(void)
set_handler(0x100, &except_vec2_octeon, 0x80);
}
-/**
+/*
* Setup the Octeon cache flush routines
*
*/
@@ -334,7 +336,7 @@ static void co_cache_error_call_notifiers(unsigned long val)
}
/*
- * Called when the the exception is recoverable
+ * Called when the exception is recoverable
*/
asmlinkage void cache_parity_error_octeon_recoverable(void)
@@ -342,8 +344,8 @@ asmlinkage void cache_parity_error_octeon_recoverable(void)
co_cache_error_call_notifiers(0);
}
-/**
- * Called when the the exception is not recoverable
+/*
+ * Called when the exception is not recoverable
*/
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 15bb8cf59828..5869df848fab 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -16,7 +16,6 @@
#include <linux/mm.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/isadep.h>
#include <asm/io.h>
@@ -240,9 +239,6 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
unsigned long kaddr = KSEG0ADDR(pfn << PAGE_SHIFT);
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -253,11 +249,8 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
if (cpu_context(smp_processor_id(), mm) == 0)
return;
- pgdp = pgd_offset(mm, addr);
- p4dp = p4d_offset(pgdp, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset(pmdp, addr);
+ pmdp = pmd_off(mm, addr);
+ ptep = pte_offset_kernel(pmdp, addr);
/* Invalid => no such page in the cache. */
if (!(pte_val(*ptep) & _PAGE_PRESENT))
@@ -268,10 +261,6 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
r3k_flush_icache_range(kaddr, kaddr + PAGE_SIZE);
}
-static void local_r3k_flush_data_cache_page(void *addr)
-{
-}
-
static void r3k_flush_data_cache_page(unsigned long addr)
{
}
@@ -309,16 +298,15 @@ void r3k_cache_init(void)
__flush_kernel_vmap_range = r3k_flush_kernel_vmap_range;
- local_flush_data_cache_page = local_r3k_flush_data_cache_page;
flush_data_cache_page = r3k_flush_data_cache_page;
_dma_cache_wback_inv = r3k_dma_cache_wback_inv;
_dma_cache_wback = r3k_dma_cache_wback_inv;
_dma_cache_inv = r3k_dma_cache_wback_inv;
- printk("Primary instruction cache %ldkB, linesize %ld bytes.\n",
+ pr_info("Primary instruction cache %ldkB, linesize %ld bytes.\n",
icache_size >> 10, icache_lsize);
- printk("Primary data cache %ldkB, linesize %ld bytes.\n",
+ pr_info("Primary data cache %ldkB, linesize %ld bytes.\n",
dcache_size >> 10, dcache_lsize);
build_clear_page();
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 5f3d0103b95d..10413b6f6662 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitops.h>
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <asm/bcache.h>
#include <asm/bootinfo.h>
@@ -29,14 +30,11 @@
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
-#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
-#include <asm/dma-coherence.h>
#include <asm/mips-cps.h>
/*
@@ -112,28 +110,15 @@ static unsigned long dcache_size __read_mostly;
static unsigned long vcache_size __read_mostly;
static unsigned long scache_size __read_mostly;
-/*
- * Dummy cache handling routines for machines without boardcaches
- */
-static void cache_noop(void) {}
-
-static struct bcache_ops no_sc_ops = {
- .bc_enable = (void *)cache_noop,
- .bc_disable = (void *)cache_noop,
- .bc_wback_inv = (void *)cache_noop,
- .bc_inv = (void *)cache_noop
-};
-
-struct bcache_ops *bcops = &no_sc_ops;
-
#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
#define R4600_HIT_CACHEOP_WAR_IMPL \
do { \
- if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) \
+ if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && \
+ cpu_is_r4600_v2_x()) \
*(volatile unsigned long *)CKSEG1; \
- if (R4600_V1_HIT_CACHEOP_WAR) \
+ if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP)) \
__asm__ __volatile__("nop;nop;nop;nop"); \
} while (0)
@@ -202,24 +187,6 @@ static void r4k_blast_dcache_user_page_setup(void)
#endif
-static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
-
-static void r4k_blast_dcache_page_indexed_setup(void)
-{
- unsigned long dc_lsize = cpu_dcache_line_size();
-
- if (dc_lsize == 0)
- r4k_blast_dcache_page_indexed = (void *)cache_noop;
- else if (dc_lsize == 16)
- r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
- else if (dc_lsize == 32)
- r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
- else if (dc_lsize == 64)
- r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
- else if (dc_lsize == 128)
- r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed;
-}
-
void (* r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);
@@ -239,7 +206,7 @@ static void r4k_blast_dcache_setup(void)
r4k_blast_dcache = blast_dcache128;
}
-/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
+/* force code alignment (used for CONFIG_WAR_TX49XX_ICACHE_INDEX_INV) */
#define JUMP_TO_ALIGN(order) \
__asm__ __volatile__( \
"b\t1f\n\t" \
@@ -281,39 +248,6 @@ static inline void tx49_blast_icache32(void)
addr | ws, 32);
}
-static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- blast_icache32_page_indexed(page);
- local_irq_restore(flags);
-}
-
-static inline void tx49_blast_icache32_page_indexed(unsigned long page)
-{
- unsigned long indexmask = current_cpu_data.icache.waysize - 1;
- unsigned long start = INDEX_BASE + (page & indexmask);
- unsigned long end = start + PAGE_SIZE;
- unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
- unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
- unsigned long ws, addr;
-
- CACHE32_UNROLL32_ALIGN2;
- /* I'm in even chunk. blast odd chunks */
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
- cache_unroll(32, kernel_cache, Index_Invalidate_I,
- addr | ws, 32);
- CACHE32_UNROLL32_ALIGN;
- /* I'm in odd chunk. blast even chunks */
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x400 * 2)
- cache_unroll(32, kernel_cache, Index_Invalidate_I,
- addr | ws, 32);
-}
-
static void (* r4k_blast_icache_page)(unsigned long addr);
static void r4k_blast_icache_page_setup(void)
@@ -356,33 +290,6 @@ static void r4k_blast_icache_user_page_setup(void)
#endif
-static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
-
-static void r4k_blast_icache_page_indexed_setup(void)
-{
- unsigned long ic_lsize = cpu_icache_line_size();
-
- if (ic_lsize == 0)
- r4k_blast_icache_page_indexed = (void *)cache_noop;
- else if (ic_lsize == 16)
- r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
- else if (ic_lsize == 32) {
- if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
- r4k_blast_icache_page_indexed =
- blast_icache32_r4600_v1_page_indexed;
- else if (TX49XX_ICACHE_INDEX_INV_WAR)
- r4k_blast_icache_page_indexed =
- tx49_blast_icache32_page_indexed;
- else if (current_cpu_type() == CPU_LOONGSON2EF)
- r4k_blast_icache_page_indexed =
- loongson2_blast_icache32_page_indexed;
- else
- r4k_blast_icache_page_indexed =
- blast_icache32_page_indexed;
- } else if (ic_lsize == 64)
- r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
-}
-
void (* r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);
@@ -395,9 +302,10 @@ static void r4k_blast_icache_setup(void)
else if (ic_lsize == 16)
r4k_blast_icache = blast_icache16;
else if (ic_lsize == 32) {
- if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
+ if (IS_ENABLED(CONFIG_WAR_R4600_V1_INDEX_ICACHEOP) &&
+ cpu_is_r4600_v1_x())
r4k_blast_icache = blast_r4600_v1_icache32;
- else if (TX49XX_ICACHE_INDEX_INV_WAR)
+ else if (IS_ENABLED(CONFIG_WAR_TX49XX_ICACHE_INDEX_INV))
r4k_blast_icache = tx49_blast_icache32;
else if (current_cpu_type() == CPU_LOONGSON2EF)
r4k_blast_icache = loongson2_blast_icache32;
@@ -427,24 +335,6 @@ static void r4k_blast_scache_page_setup(void)
r4k_blast_scache_page = blast_scache128_page;
}
-static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
-
-static void r4k_blast_scache_page_indexed_setup(void)
-{
- unsigned long sc_lsize = cpu_scache_line_size();
-
- if (scache_size == 0)
- r4k_blast_scache_page_indexed = (void *)cache_noop;
- else if (sc_lsize == 16)
- r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
- else if (sc_lsize == 32)
- r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
- else if (sc_lsize == 64)
- r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
- else if (sc_lsize == 128)
- r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
-}
-
static void (* r4k_blast_scache)(void);
static void r4k_blast_scache_setup(void)
@@ -653,9 +543,6 @@ static inline void local_r4k_flush_cache_page(void *args)
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
int map_coherent = 0;
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
void *vaddr;
@@ -668,11 +555,8 @@ static inline void local_r4k_flush_cache_page(void *args)
return;
addr &= PAGE_MASK;
- pgdp = pgd_offset(mm, addr);
- p4dp = p4d_offset(pgdp, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset(pmdp, addr);
+ pmdp = pmd_off(mm, addr);
+ ptep = pte_offset_kernel(pmdp, addr);
/*
* If the page isn't marked valid, the page cannot possibly be
@@ -684,13 +568,14 @@ static inline void local_r4k_flush_cache_page(void *args)
if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
vaddr = NULL;
else {
+ struct folio *folio = page_folio(page);
/*
* Use kmap_coherent or kmap_atomic to do flushes for
* another ASID than the current one.
*/
map_coherent = (cpu_has_dc_aliases &&
- page_mapcount(page) &&
- !Page_dcache_dirty(page));
+ folio_mapped(folio) &&
+ !folio_test_dcache_dirty(folio));
if (map_coherent)
vaddr = kmap_coherent(page, addr);
else
@@ -901,6 +786,31 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
__sync();
}
+static void prefetch_cache_inv(unsigned long addr, unsigned long size)
+{
+ unsigned int linesz = cpu_scache_line_size();
+ unsigned long addr0 = addr, addr1;
+
+ addr0 &= ~(linesz - 1);
+ addr1 = (addr0 + size - 1) & ~(linesz - 1);
+
+ protected_writeback_scache_line(addr0);
+ if (likely(addr1 != addr0))
+ protected_writeback_scache_line(addr1);
+ else
+ return;
+
+ addr0 += linesz;
+ if (likely(addr1 != addr0))
+ protected_writeback_scache_line(addr0);
+ else
+ return;
+
+ addr1 -= linesz;
+ if (likely(addr1 > addr0))
+ protected_writeback_scache_line(addr0);
+}
+
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
@@ -908,6 +818,10 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
return;
preempt_disable();
+
+ if (current_cpu_type() == CPU_BMIPS5000)
+ prefetch_cache_inv(addr, size);
+
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size) {
if (current_cpu_type() != CPU_LOONGSON64)
@@ -1020,7 +934,7 @@ static inline void rm7k_erratum31(void)
"cache\t%1, 0x3000(%0)\n\t"
".set pop\n"
:
- : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
+ : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill_I));
}
}
@@ -1044,12 +958,12 @@ static inline int alias_74k_erratum(struct cpuinfo_mips *c)
if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
present = 1;
if (rev == PRID_REV_ENCODE_332(2, 4, 0))
- write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+ write_c0_config6(read_c0_config6() | MTI_CONF6_SYND);
break;
case PRID_IMP_1074K:
if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
present = 1;
- write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+ write_c0_config6(read_c0_config6() | MTI_CONF6_SYND);
}
break;
default:
@@ -1139,6 +1053,7 @@ static void probe_pcache(void)
case CPU_R4400PC:
case CPU_R4400SC:
case CPU_R4400MC:
+ case CPU_R4300:
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 1;
@@ -1169,50 +1084,6 @@ static void probe_pcache(void)
c->options |= MIPS_CPU_PREFETCH;
break;
- case CPU_VR4133:
- write_c0_config(config & ~VR41_CONF_P4K);
- /* fall through */
- case CPU_VR4131:
- /* Workaround for cache instruction bug of VR4131 */
- if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
- c->processor_id == 0x0c82U) {
- config |= 0x00400000U;
- if (c->processor_id == 0x0c80U)
- config |= VR41_CONF_BP;
- write_c0_config(config);
- } else
- c->options |= MIPS_CPU_CACHE_CDEX_P;
-
- icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
- c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
- c->icache.ways = 2;
- c->icache.waybit = __ffs(icache_size/2);
-
- dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
- c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
- c->dcache.ways = 2;
- c->dcache.waybit = __ffs(dcache_size/2);
- break;
-
- case CPU_VR41XX:
- case CPU_VR4111:
- case CPU_VR4121:
- case CPU_VR4122:
- case CPU_VR4181:
- case CPU_VR4181A:
- icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
- c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
- c->icache.ways = 1;
- c->icache.waybit = 0; /* doesn't matter */
-
- dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
- c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
- c->dcache.ways = 1;
- c->dcache.waybit = 0; /* does not matter */
-
- c->options |= MIPS_CPU_CACHE_CDEX_P;
- break;
-
case CPU_RM7000:
rm7k_erratum31();
@@ -1274,7 +1145,8 @@ static void probe_pcache(void)
c->dcache.linesz;
c->dcache.waybit = 0;
if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
- (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0))
+ (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
+ (c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
c->options |= MIPS_CPU_PREFETCH;
break;
@@ -1383,7 +1255,6 @@ static void probe_pcache(void)
case CPU_I6500:
case CPU_SB1:
case CPU_SB1A:
- case CPU_XLR:
c->dcache.flags |= MIPS_CACHE_PINDEX;
break;
@@ -1396,7 +1267,7 @@ static void probe_pcache(void)
case CPU_74K:
case CPU_1074K:
has_74k_erratum = alias_74k_erratum(c);
- /* Fall through. */
+ fallthrough;
case CPU_M14KC:
case CPU_M14KEC:
case CPU_24K:
@@ -1420,7 +1291,7 @@ static void probe_pcache(void)
c->dcache.flags |= MIPS_CACHE_PINDEX;
break;
}
- /* fall through */
+ fallthrough;
default:
if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
c->dcache.flags |= MIPS_CACHE_ALIASES;
@@ -1467,17 +1338,17 @@ static void probe_pcache(void)
c->icache.ways = 1;
}
- printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
- icache_size >> 10,
- c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
- way_string[c->icache.ways], c->icache.linesz);
+ pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
+ icache_size >> 10,
+ c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
+ way_string[c->icache.ways], c->icache.linesz);
- printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
- dcache_size >> 10, way_string[c->dcache.ways],
- (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
- (c->dcache.flags & MIPS_CACHE_ALIASES) ?
+ pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
+ dcache_size >> 10, way_string[c->dcache.ways],
+ (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
+ (c->dcache.flags & MIPS_CACHE_ALIASES) ?
"cache aliases" : "no aliases",
- c->dcache.linesz);
+ c->dcache.linesz);
}
static void probe_vcache(void)
@@ -1567,7 +1438,7 @@ static int probe_scache(void)
return 1;
}
-static void __init loongson2_sc_init(void)
+static void loongson2_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -1583,7 +1454,7 @@ static void __init loongson2_sc_init(void)
c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
-static void __init loongson3_sc_init(void)
+static void loongson3_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config2, lsize;
@@ -1597,11 +1468,14 @@ static void __init loongson3_sc_init(void)
c->scache.sets = 64 << ((config2 >> 8) & 15);
c->scache.ways = 1 + (config2 & 15);
- scache_size = c->scache.sets *
- c->scache.ways *
- c->scache.linesz;
- /* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
- scache_size *= 4;
+ /* Loongson-3 has 4-Scache banks, while Loongson-2K have only 2 banks */
+ if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
+ c->scache.sets *= 2;
+ else
+ c->scache.sets *= 4;
+
+ scache_size = c->scache.sets * c->scache.ways * c->scache.linesz;
+
c->scache.waybit = 0;
c->scache.waysize = scache_size / c->scache.ways;
pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
@@ -1611,10 +1485,6 @@ static void __init loongson3_sc_init(void)
return;
}
-extern int r5k_sc_init(void);
-extern int rm7k_sc_init(void);
-extern int mips_sc_init(void);
-
static void setup_scache(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -1669,21 +1539,25 @@ static void setup_scache(void)
return;
case CPU_CAVIUM_OCTEON3:
- case CPU_XLP:
/* don't need to worry about L2, fully coherent */
return;
default:
- if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
- MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
- MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) {
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
if (mips_sc_init ()) {
scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
scache_size >> 10,
way_string[c->scache.ways], c->scache.linesz);
+
+ if (current_cpu_type() == CPU_BMIPS5000)
+ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
+
#else
if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
@@ -1776,7 +1650,7 @@ static void coherency_setup(void)
/*
* c0_status.cu=0 specifies that updates by the sc instruction use
- * the coherency mode specified by the TLB; 1 means cachable
+ * the coherency mode specified by the TLB; 1 means cacheable
* coherent update on write will be used. Not all processors have
* this bit and; some wire it to zero, others like Toshiba had the
* silly idea of putting something else there ...
@@ -1833,13 +1707,10 @@ void r4k_cache_init(void)
setup_scache();
r4k_blast_dcache_page_setup();
- r4k_blast_dcache_page_indexed_setup();
r4k_blast_dcache_setup();
r4k_blast_icache_page_setup();
- r4k_blast_icache_page_indexed_setup();
r4k_blast_icache_setup();
r4k_blast_scache_page_setup();
- r4k_blast_scache_page_indexed_setup();
r4k_blast_scache_setup();
r4k_blast_scache_node_setup();
#ifdef CONFIG_EVA
@@ -1871,7 +1742,6 @@ void r4k_cache_init(void)
__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
flush_icache_all = r4k_flush_icache_all;
- local_flush_data_cache_page = local_r4k_flush_data_cache_page;
flush_data_cache_page = r4k_flush_data_cache_page;
flush_icache_range = r4k_flush_icache_range;
local_flush_icache_range = local_r4k_flush_icache_range;
@@ -1879,19 +1749,9 @@ void r4k_cache_init(void)
__local_flush_icache_user_range = local_r4k_flush_icache_user_range;
#ifdef CONFIG_DMA_NONCOHERENT
-#ifdef CONFIG_DMA_MAYBE_COHERENT
- if (coherentio == IO_COHERENCE_ENABLED ||
- (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio)) {
- _dma_cache_wback_inv = (void *)cache_noop;
- _dma_cache_wback = (void *)cache_noop;
- _dma_cache_inv = (void *)cache_noop;
- } else
-#endif /* CONFIG_DMA_MAYBE_COHERENT */
- {
- _dma_cache_wback_inv = r4k_dma_cache_wback_inv;
- _dma_cache_wback = r4k_dma_cache_wback_inv;
- _dma_cache_inv = r4k_dma_cache_inv;
- }
+ _dma_cache_wback_inv = r4k_dma_cache_wback_inv;
+ _dma_cache_wback = r4k_dma_cache_wback_inv;
+ _dma_cache_inv = r4k_dma_cache_inv;
#endif /* CONFIG_DMA_NONCOHERENT */
build_clear_page();
@@ -1924,7 +1784,6 @@ void r4k_cache_init(void)
/* I$ fills from D$ just by emptying the write buffers */
flush_cache_page = (void *)b5k_instruction_hazard;
flush_cache_range = (void *)b5k_instruction_hazard;
- local_flush_data_cache_page = (void *)b5k_instruction_hazard;
flush_data_cache_page = (void *)b5k_instruction_hazard;
flush_icache_range = (void *)b5k_instruction_hazard;
local_flush_icache_range = (void *)b5k_instruction_hazard;
@@ -1944,7 +1803,6 @@ void r4k_cache_init(void)
flush_cache_range = (void *)cache_noop;
flush_icache_all = (void *)cache_noop;
flush_data_cache_page = (void *)cache_noop;
- local_flush_data_cache_page = (void *)cache_noop;
break;
}
}
@@ -1966,7 +1824,7 @@ static struct notifier_block r4k_cache_pm_notifier_block = {
.notifier_call = r4k_cache_pm_notifier,
};
-int __init r4k_cache_init_pm(void)
+static int __init r4k_cache_init_pm(void)
{
return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
}
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
deleted file mode 100644
index 686867270627..000000000000
--- a/arch/mips/mm/c-tx39.c
+++ /dev/null
@@ -1,421 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * r2300.c: R2000 and R3000 specific mmu/cache code.
- *
- * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
- *
- * with a lot of changes to make this thing work for R3000s
- * Tx39XX R4k style caches added. HK
- * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
- * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-
-#include <asm/cacheops.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/mmu_context.h>
-#include <asm/isadep.h>
-#include <asm/io.h>
-#include <asm/bootinfo.h>
-#include <asm/cpu.h>
-
-/* For R3000 cores with R4000 style caches */
-static unsigned long icache_size, dcache_size; /* Size in bytes */
-
-#include <asm/r4kcache.h>
-
-/* This sequence is required to ensure icache is disabled immediately */
-#define TX39_STOP_STREAMING() \
-__asm__ __volatile__( \
- ".set push\n\t" \
- ".set noreorder\n\t" \
- "b 1f\n\t" \
- "nop\n\t" \
- "1:\n\t" \
- ".set pop" \
- )
-
-/* TX39H-style cache flush routines. */
-static void tx39h_flush_icache_all(void)
-{
- unsigned long flags, config;
-
- /* disable icache (set ICE#) */
- local_irq_save(flags);
- config = read_c0_conf();
- write_c0_conf(config & ~TX39_CONF_ICE);
- TX39_STOP_STREAMING();
- blast_icache16();
- write_c0_conf(config);
- local_irq_restore(flags);
-}
-
-static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size)
-{
- /* Catch bad driver code */
- BUG_ON(size == 0);
-
- iob();
- blast_inv_dcache_range(addr, addr + size);
-}
-
-
-/* TX39H2,TX39H3 */
-static inline void tx39_blast_dcache_page(unsigned long addr)
-{
- if (current_cpu_type() != CPU_TX3912)
- blast_dcache16_page(addr);
-}
-
-static inline void tx39_blast_dcache_page_indexed(unsigned long addr)
-{
- blast_dcache16_page_indexed(addr);
-}
-
-static inline void tx39_blast_dcache(void)
-{
- blast_dcache16();
-}
-
-static inline void tx39_blast_icache_page(unsigned long addr)
-{
- unsigned long flags, config;
- /* disable icache (set ICE#) */
- local_irq_save(flags);
- config = read_c0_conf();
- write_c0_conf(config & ~TX39_CONF_ICE);
- TX39_STOP_STREAMING();
- blast_icache16_page(addr);
- write_c0_conf(config);
- local_irq_restore(flags);
-}
-
-static inline void tx39_blast_icache_page_indexed(unsigned long addr)
-{
- unsigned long flags, config;
- /* disable icache (set ICE#) */
- local_irq_save(flags);
- config = read_c0_conf();
- write_c0_conf(config & ~TX39_CONF_ICE);
- TX39_STOP_STREAMING();
- blast_icache16_page_indexed(addr);
- write_c0_conf(config);
- local_irq_restore(flags);
-}
-
-static inline void tx39_blast_icache(void)
-{
- unsigned long flags, config;
- /* disable icache (set ICE#) */
- local_irq_save(flags);
- config = read_c0_conf();
- write_c0_conf(config & ~TX39_CONF_ICE);
- TX39_STOP_STREAMING();
- blast_icache16();
- write_c0_conf(config);
- local_irq_restore(flags);
-}
-
-static void tx39__flush_cache_vmap(void)
-{
- tx39_blast_dcache();
-}
-
-static void tx39__flush_cache_vunmap(void)
-{
- tx39_blast_dcache();
-}
-
-static inline void tx39_flush_cache_all(void)
-{
- if (!cpu_has_dc_aliases)
- return;
-
- tx39_blast_dcache();
-}
-
-static inline void tx39___flush_cache_all(void)
-{
- tx39_blast_dcache();
- tx39_blast_icache();
-}
-
-static void tx39_flush_cache_mm(struct mm_struct *mm)
-{
- if (!cpu_has_dc_aliases)
- return;
-
- if (cpu_context(smp_processor_id(), mm) != 0)
- tx39_blast_dcache();
-}
-
-static void tx39_flush_cache_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- if (!cpu_has_dc_aliases)
- return;
- if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
- return;
-
- tx39_blast_dcache();
-}
-
-static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
-{
- int exec = vma->vm_flags & VM_EXEC;
- struct mm_struct *mm = vma->vm_mm;
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t *ptep;
-
- /*
- * If ownes no valid ASID yet, cannot possibly have gotten
- * this page into the cache.
- */
- if (cpu_context(smp_processor_id(), mm) == 0)
- return;
-
- page &= PAGE_MASK;
- pgdp = pgd_offset(mm, page);
- p4dp = p4d_offset(pgdp, page);
- pudp = pud_offset(p4dp, page);
- pmdp = pmd_offset(pudp, page);
- ptep = pte_offset(pmdp, page);
-
- /*
- * If the page isn't marked valid, the page cannot possibly be
- * in the cache.
- */
- if (!(pte_val(*ptep) & _PAGE_PRESENT))
- return;
-
- /*
- * Doing flushes for another ASID than the current one is
- * too difficult since stupid R4k caches do a TLB translation
- * for every cache flush operation. So we do indexed flushes
- * in that case, which doesn't overly flush the cache too much.
- */
- if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
- if (cpu_has_dc_aliases || exec)
- tx39_blast_dcache_page(page);
- if (exec)
- tx39_blast_icache_page(page);
-
- return;
- }
-
- /*
- * Do indexed flush, too much work to get the (possible) TLB refills
- * to work correctly.
- */
- if (cpu_has_dc_aliases || exec)
- tx39_blast_dcache_page_indexed(page);
- if (exec)
- tx39_blast_icache_page_indexed(page);
-}
-
-static void local_tx39_flush_data_cache_page(void * addr)
-{
- tx39_blast_dcache_page((unsigned long)addr);
-}
-
-static void tx39_flush_data_cache_page(unsigned long addr)
-{
- tx39_blast_dcache_page(addr);
-}
-
-static void tx39_flush_icache_range(unsigned long start, unsigned long end)
-{
- if (end - start > dcache_size)
- tx39_blast_dcache();
- else
- protected_blast_dcache_range(start, end);
-
- if (end - start > icache_size)
- tx39_blast_icache();
- else {
- unsigned long flags, config;
- /* disable icache (set ICE#) */
- local_irq_save(flags);
- config = read_c0_conf();
- write_c0_conf(config & ~TX39_CONF_ICE);
- TX39_STOP_STREAMING();
- protected_blast_icache_range(start, end);
- write_c0_conf(config);
- local_irq_restore(flags);
- }
-}
-
-static void tx39_flush_kernel_vmap_range(unsigned long vaddr, int size)
-{
- BUG();
-}
-
-static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
-{
- unsigned long end;
-
- if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
- end = addr + size;
- do {
- tx39_blast_dcache_page(addr);
- addr += PAGE_SIZE;
- } while(addr != end);
- } else if (size > dcache_size) {
- tx39_blast_dcache();
- } else {
- blast_dcache_range(addr, addr + size);
- }
-}
-
-static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
-{
- unsigned long end;
-
- if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
- end = addr + size;
- do {
- tx39_blast_dcache_page(addr);
- addr += PAGE_SIZE;
- } while(addr != end);
- } else if (size > dcache_size) {
- tx39_blast_dcache();
- } else {
- blast_inv_dcache_range(addr, addr + size);
- }
-}
-
-static __init void tx39_probe_cache(void)
-{
- unsigned long config;
-
- config = read_c0_conf();
-
- icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >>
- TX39_CONF_ICS_SHIFT));
- dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >>
- TX39_CONF_DCS_SHIFT));
-
- current_cpu_data.icache.linesz = 16;
- switch (current_cpu_type()) {
- case CPU_TX3912:
- current_cpu_data.icache.ways = 1;
- current_cpu_data.dcache.ways = 1;
- current_cpu_data.dcache.linesz = 4;
- break;
-
- case CPU_TX3927:
- current_cpu_data.icache.ways = 2;
- current_cpu_data.dcache.ways = 2;
- current_cpu_data.dcache.linesz = 16;
- break;
-
- case CPU_TX3922:
- default:
- current_cpu_data.icache.ways = 1;
- current_cpu_data.dcache.ways = 1;
- current_cpu_data.dcache.linesz = 16;
- break;
- }
-}
-
-void tx39_cache_init(void)
-{
- extern void build_clear_page(void);
- extern void build_copy_page(void);
- unsigned long config;
-
- config = read_c0_conf();
- config &= ~TX39_CONF_WBON;
- write_c0_conf(config);
-
- tx39_probe_cache();
-
- switch (current_cpu_type()) {
- case CPU_TX3912:
- /* TX39/H core (writethru direct-map cache) */
- __flush_cache_vmap = tx39__flush_cache_vmap;
- __flush_cache_vunmap = tx39__flush_cache_vunmap;
- flush_cache_all = tx39h_flush_icache_all;
- __flush_cache_all = tx39h_flush_icache_all;
- flush_cache_mm = (void *) tx39h_flush_icache_all;
- flush_cache_range = (void *) tx39h_flush_icache_all;
- flush_cache_page = (void *) tx39h_flush_icache_all;
- flush_icache_range = (void *) tx39h_flush_icache_all;
- local_flush_icache_range = (void *) tx39h_flush_icache_all;
-
- local_flush_data_cache_page = (void *) tx39h_flush_icache_all;
- flush_data_cache_page = (void *) tx39h_flush_icache_all;
-
- _dma_cache_wback_inv = tx39h_dma_cache_wback_inv;
-
- shm_align_mask = PAGE_SIZE - 1;
-
- break;
-
- case CPU_TX3922:
- case CPU_TX3927:
- default:
- /* TX39/H2,H3 core (writeback 2way-set-associative cache) */
- /* board-dependent init code may set WBON */
-
- __flush_cache_vmap = tx39__flush_cache_vmap;
- __flush_cache_vunmap = tx39__flush_cache_vunmap;
-
- flush_cache_all = tx39_flush_cache_all;
- __flush_cache_all = tx39___flush_cache_all;
- flush_cache_mm = tx39_flush_cache_mm;
- flush_cache_range = tx39_flush_cache_range;
- flush_cache_page = tx39_flush_cache_page;
- flush_icache_range = tx39_flush_icache_range;
- local_flush_icache_range = tx39_flush_icache_range;
-
- __flush_kernel_vmap_range = tx39_flush_kernel_vmap_range;
-
- local_flush_data_cache_page = local_tx39_flush_data_cache_page;
- flush_data_cache_page = tx39_flush_data_cache_page;
-
- _dma_cache_wback_inv = tx39_dma_cache_wback_inv;
- _dma_cache_wback = tx39_dma_cache_wback_inv;
- _dma_cache_inv = tx39_dma_cache_inv;
-
- shm_align_mask = max_t(unsigned long,
- (dcache_size / current_cpu_data.dcache.ways) - 1,
- PAGE_SIZE - 1);
-
- break;
- }
-
- __flush_icache_user_range = flush_icache_range;
- __local_flush_icache_user_range = local_flush_icache_range;
-
- current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways;
- current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways;
-
- current_cpu_data.icache.sets =
- current_cpu_data.icache.waysize / current_cpu_data.icache.linesz;
- current_cpu_data.dcache.sets =
- current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz;
-
- if (current_cpu_data.dcache.waysize > PAGE_SIZE)
- current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES;
-
- current_cpu_data.icache.waybit = 0;
- current_cpu_data.dcache.waybit = 0;
-
- printk("Primary instruction cache %ldkB, linesize %d bytes\n",
- icache_size >> 10, current_cpu_data.icache.linesz);
- printk("Primary data cache %ldkB, linesize %d bytes\n",
- dcache_size >> 10, current_cpu_data.dcache.linesz);
-
- build_clear_page();
- build_copy_page();
- tx39h_flush_icache_all();
-}
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 33b409391ddb..df1ced4fc3b5 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -14,13 +14,16 @@
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <asm/bcache.h>
#include <asm/cacheflush.h>
-#include <asm/highmem.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>
+#include <asm/pgtable.h>
/* Cache operations. */
void (*flush_cache_all)(void);
@@ -36,7 +39,6 @@ EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
-EXPORT_SYMBOL_GPL(__flush_icache_user_range);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);
@@ -47,14 +49,30 @@ void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
/* MIPS specific cache operations */
-void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);
-EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);
+/*
+ * Dummy cache handling routine
+ */
+
+void cache_noop(void) {}
+
+#ifdef CONFIG_BOARD_SCACHE
+
+static struct bcache_ops no_sc_ops = {
+ .bc_enable = (void *)cache_noop,
+ .bc_disable = (void *)cache_noop,
+ .bc_wback_inv = (void *)cache_noop,
+ .bc_inv = (void *)cache_noop
+};
+
+struct bcache_ops *bcops = &no_sc_ops;
+#endif
+
#ifdef CONFIG_DMA_NONCOHERENT
/* DMA cache operations. */
@@ -81,13 +99,15 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
return 0;
}
-void __flush_dcache_page(struct page *page)
+void __flush_dcache_pages(struct page *page, unsigned int nr)
{
- struct address_space *mapping = page_mapping_file(page);
+ struct folio *folio = page_folio(page);
+ struct address_space *mapping = folio_flush_mapping(folio);
unsigned long addr;
+ unsigned int i;
if (mapping && !mapping_mapped(mapping)) {
- SetPageDcacheDirty(page);
+ folio_set_dcache_dirty(folio);
return;
}
@@ -96,25 +116,21 @@ void __flush_dcache_page(struct page *page)
* case is for exec env/arg pages and those are %99 certainly going to
* get faulted into the tlb (and thus flushed) anyways.
*/
- if (PageHighMem(page))
- addr = (unsigned long)kmap_atomic(page);
- else
- addr = (unsigned long)page_address(page);
-
- flush_data_cache_page(addr);
-
- if (PageHighMem(page))
- __kunmap_atomic((void *)addr);
+ for (i = 0; i < nr; i++) {
+ addr = (unsigned long)kmap_local_page(nth_page(page, i));
+ flush_data_cache_page(addr);
+ kunmap_local((void *)addr);
+ }
}
-
-EXPORT_SYMBOL(__flush_dcache_page);
+EXPORT_SYMBOL(__flush_dcache_pages);
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
unsigned long addr = (unsigned long) page_address(page);
+ struct folio *folio = page_folio(page);
if (pages_do_alias(addr, vmaddr)) {
- if (page_mapcount(page) && !Page_dcache_dirty(page)) {
+ if (folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
void *kaddr;
kaddr = kmap_coherent(page, vmaddr);
@@ -129,115 +145,73 @@ EXPORT_SYMBOL(__flush_anon_page);
void __update_cache(unsigned long address, pte_t pte)
{
- struct page *page;
+ struct folio *folio;
unsigned long pfn, addr;
int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;
+ unsigned int i;
pfn = pte_pfn(pte);
if (unlikely(!pfn_valid(pfn)))
return;
- page = pfn_to_page(pfn);
- if (Page_dcache_dirty(page)) {
- if (PageHighMem(page))
- addr = (unsigned long)kmap_atomic(page);
- else
- addr = (unsigned long)page_address(page);
-
- if (exec || pages_do_alias(addr, address & PAGE_MASK))
- flush_data_cache_page(addr);
- if (PageHighMem(page))
- __kunmap_atomic((void *)addr);
+ folio = page_folio(pfn_to_page(pfn));
+ address &= PAGE_MASK;
+ address -= offset_in_folio(folio, pfn << PAGE_SHIFT);
+
+ if (folio_test_dcache_dirty(folio)) {
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ addr = (unsigned long)kmap_local_folio(folio, i);
- ClearPageDcacheDirty(page);
+ if (exec || pages_do_alias(addr, address))
+ flush_data_cache_page(addr);
+ kunmap_local((void *)addr);
+ address += PAGE_SIZE;
+ }
+ folio_clear_dcache_dirty(folio);
}
}
unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
+#define PM(p) __pgprot(_page_cachable_default | (p))
+
+static pgprot_t protection_map[16] __ro_after_init;
+DECLARE_VM_GET_PAGE_PROT
+
static inline void setup_protection_map(void)
{
- if (cpu_has_rixi) {
- protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
-
- protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
- protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
- protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
- protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
-
- } else {
- protection_map[0] = PAGE_NONE;
- protection_map[1] = PAGE_READONLY;
- protection_map[2] = PAGE_COPY;
- protection_map[3] = PAGE_COPY;
- protection_map[4] = PAGE_READONLY;
- protection_map[5] = PAGE_READONLY;
- protection_map[6] = PAGE_COPY;
- protection_map[7] = PAGE_COPY;
- protection_map[8] = PAGE_NONE;
- protection_map[9] = PAGE_READONLY;
- protection_map[10] = PAGE_SHARED;
- protection_map[11] = PAGE_SHARED;
- protection_map[12] = PAGE_READONLY;
- protection_map[13] = PAGE_READONLY;
- protection_map[14] = PAGE_SHARED;
- protection_map[15] = PAGE_SHARED;
- }
+ protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[4] = PM(_PAGE_PRESENT);
+ protection_map[5] = PM(_PAGE_PRESENT);
+ protection_map[6] = PM(_PAGE_PRESENT);
+ protection_map[7] = PM(_PAGE_PRESENT);
+
+ protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+ _PAGE_NO_READ);
+ protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+ protection_map[12] = PM(_PAGE_PRESENT);
+ protection_map[13] = PM(_PAGE_PRESENT);
+ protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+ protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
}
+#undef PM
+
void cpu_cache_init(void)
{
- if (cpu_has_3k_cache) {
- extern void __weak r3k_cache_init(void);
-
+ if (IS_ENABLED(CONFIG_CPU_R3000) && cpu_has_3k_cache)
r3k_cache_init();
- }
- if (cpu_has_6k_cache) {
- extern void __weak r6k_cache_init(void);
-
- r6k_cache_init();
- }
- if (cpu_has_4k_cache) {
- extern void __weak r4k_cache_init(void);
-
+ if (IS_ENABLED(CONFIG_CPU_R4K_CACHE_TLB) && cpu_has_4k_cache)
r4k_cache_init();
- }
- if (cpu_has_8k_cache) {
- extern void __weak r8k_cache_init(void);
-
- r8k_cache_init();
- }
- if (cpu_has_tx39_cache) {
- extern void __weak tx39_cache_init(void);
-
- tx39_cache_init();
- }
-
- if (cpu_has_octeon_cache) {
- extern void __weak octeon_cache_init(void);
+ if (IS_ENABLED(CONFIG_CPU_CAVIUM_OCTEON) && cpu_has_octeon_cache)
octeon_cache_init();
- }
setup_protection_map();
}
-
-int __weak __uncached_access(struct file *file, unsigned long addr)
-{
- if (file->f_flags & O_DSYNC)
- return 1;
-
- return addr >= __pa(high_memory);
-}
diff --git a/arch/mips/mm/cex-gen.S b/arch/mips/mm/cex-gen.S
index 45dff5cd4b8e..e528583d1331 100644
--- a/arch/mips/mm/cex-gen.S
+++ b/arch/mips/mm/cex-gen.S
@@ -25,7 +25,7 @@
* This is a very bad place to be. Our cache error
* detection has triggered. If we have write-back data
* in the cache, we may not be able to recover. As a
- * first-order desperate measure, turn off KSEG0 cacheing.
+ * first-order desperate measure, turn off KSEG0 caching.
*/
mfc0 k0,CP0_CONFIG
li k1,~CONF_CM_CMASK
diff --git a/arch/mips/mm/context.c b/arch/mips/mm/context.c
index b25564090939..966f40066f03 100644
--- a/arch/mips/mm/context.c
+++ b/arch/mips/mm/context.c
@@ -67,7 +67,7 @@ static void flush_context(void)
int cpu;
/* Update the list of reserved MMIDs and the MMID bitmap */
- bitmap_clear(mmid_map, 0, num_mmids);
+ bitmap_zero(mmid_map, num_mmids);
/* Reserve an MMID for kmap/wired entries */
__set_bit(MMID_KERNEL_WIRED, mmid_map);
@@ -277,8 +277,7 @@ static int mmid_init(void)
WARN_ON(num_mmids <= num_possible_cpus());
atomic64_set(&mmid_version, asid_first_version(0));
- mmid_map = kcalloc(BITS_TO_LONGS(num_mmids), sizeof(*mmid_map),
- GFP_KERNEL);
+ mmid_map = bitmap_zalloc(num_mmids, GFP_KERNEL);
if (!mmid_map)
panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index dc42ffc83825..0f3cec663a12 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -5,13 +5,11 @@
* swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
*/
#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
#include <linux/highmem.h>
#include <asm/cache.h>
#include <asm/cpu-type.h>
-#include <asm/dma-coherence.h>
#include <asm/io.h>
/*
@@ -33,6 +31,8 @@ static inline bool cpu_needs_post_dma_flush(void)
case CPU_R10000:
case CPU_R12000:
case CPU_BMIPS5000:
+ case CPU_LOONGSON2EF:
+ case CPU_XBURST:
return true;
default:
/*
@@ -49,32 +49,39 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
dma_cache_wback_inv((unsigned long)page_address(page), size);
}
-void *uncached_kernel_address(void *addr)
+void *arch_dma_set_uncached(void *addr, size_t size)
{
return (void *)(__pa(addr) + UNCAC_BASE);
}
-void *cached_kernel_address(void *addr)
-{
- return __va(addr) - UNCAC_BASE;
-}
-
-static inline void dma_sync_virt(void *addr, size_t size,
+static inline void dma_sync_virt_for_device(void *addr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
dma_cache_wback((unsigned long)addr, size);
break;
-
case DMA_FROM_DEVICE:
dma_cache_inv((unsigned long)addr, size);
break;
-
case DMA_BIDIRECTIONAL:
dma_cache_wback_inv((unsigned long)addr, size);
break;
+ default:
+ BUG();
+ }
+}
+static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ break;
+ case DMA_FROM_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ dma_cache_inv((unsigned long)addr, size);
+ break;
default:
BUG();
}
@@ -86,7 +93,7 @@ static inline void dma_sync_virt(void *addr, size_t size,
* configured then the bulk of this loop gets optimized out.
*/
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
+ enum dma_data_direction dir, bool for_device)
{
struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
unsigned long offset = paddr & ~PAGE_MASK;
@@ -94,18 +101,20 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
do {
size_t len = left;
+ void *addr;
if (PageHighMem(page)) {
- void *addr;
-
if (offset + len > PAGE_SIZE)
len = PAGE_SIZE - offset;
+ }
+
+ addr = kmap_atomic(page);
+ if (for_device)
+ dma_sync_virt_for_device(addr + offset, len, dir);
+ else
+ dma_sync_virt_for_cpu(addr + offset, len, dir);
+ kunmap_atomic(addr);
- addr = kmap_atomic(page);
- dma_sync_virt(addr + offset, len, dir);
- kunmap_atomic(addr);
- } else
- dma_sync_virt(page_address(page) + offset, size, dir);
offset = 0;
page++;
left -= len;
@@ -115,7 +124,7 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
- dma_sync_phys(paddr, size, dir);
+ dma_sync_phys(paddr, size, dir, true);
}
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
@@ -123,21 +132,13 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
if (cpu_needs_post_dma_flush())
- dma_sync_phys(paddr, size, dir);
+ dma_sync_phys(paddr, size, dir, false);
}
#endif
-void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- dma_sync_virt(vaddr, size, direction);
-}
-
-#ifdef CONFIG_DMA_PERDEV_COHERENT
+#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent)
+ bool coherent)
{
dev->dma_coherent = coherent;
}
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 1e8d00793784..aaa9a242ebba 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -26,6 +26,7 @@
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/highmem.h> /* For VMALLOC_END */
+#include <asm/traps.h>
#include <linux/kdebug.h>
int show_unhandled_signals = 1;
@@ -35,7 +36,7 @@ int show_unhandled_signals = 1;
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
-static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
+static void __do_page_fault(struct pt_regs *regs, unsigned long write,
unsigned long address)
{
struct vm_area_struct * vma = NULL;
@@ -44,7 +45,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
const int field = sizeof(unsigned long) * 2;
int si_code;
vm_fault_t fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
@@ -96,22 +97,16 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, address);
+ vma = lock_mm_and_find_vma(mm, address, regs);
if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (expand_stack(vma, address))
- goto bad_area;
+ goto bad_area_nosemaphore;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
-good_area:
si_code = SEGV_ACCERR;
if (write) {
@@ -142,7 +137,7 @@ good_area:
goto bad_area;
}
} else {
- if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+ if (unlikely(!vma_is_accessible(vma)))
goto bad_area;
}
}
@@ -152,12 +147,18 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags, regs);
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ goto no_context;
+ return;
+ }
+
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
return;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -167,31 +168,20 @@ good_area:
goto do_sigbus;
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_MAJOR) {
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
- regs, address);
- tsk->maj_flt++;
- } else {
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
- regs, address);
- tsk->min_flt++;
- }
- if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
- /*
- * No need to up_read(&mm->mmap_sem) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
- goto retry;
- }
+ /*
+ * No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -199,7 +189,7 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
@@ -251,14 +241,14 @@ out_of_memory:
* We ran out of memory, call the OOM killer, and return the userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
@@ -329,8 +319,9 @@ vmalloc_fault:
}
#endif
}
+NOKPROBE_SYMBOL(__do_page_fault);
-asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+asmlinkage void do_page_fault(struct pt_regs *regs,
unsigned long write, unsigned long address)
{
enum ctx_state prev_state;
@@ -339,3 +330,4 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
__do_page_fault(regs, write, address);
exception_exit(prev_state);
}
+NOKPROBE_SYMBOL(do_page_fault);
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index d08e6d7d533b..57e2f08f00d0 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -8,123 +8,10 @@
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
-static pte_t *kmap_pte;
-
unsigned long highstart_pfn, highend_pfn;
-void *kmap(struct page *page)
-{
- void *addr;
-
- might_sleep();
- if (!PageHighMem(page))
- return page_address(page);
- addr = kmap_high(page);
- flush_tlb_one((unsigned long)addr);
-
- return addr;
-}
-EXPORT_SYMBOL(kmap);
-
-void kunmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-EXPORT_SYMBOL(kunmap);
-
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However when holding an atomic kmap is is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-
-void *kmap_atomic(struct page *page)
-{
- unsigned long vaddr;
- int idx, type;
-
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte - idx)));
-#endif
- set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
- local_flush_tlb_one((unsigned long)vaddr);
-
- return (void*) vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic);
-
-void __kunmap_atomic(void *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- int type __maybe_unused;
-
- if (vaddr < FIXADDR_START) { // FIXME
- pagefault_enable();
- preempt_enable();
- return;
- }
-
- type = kmap_atomic_idx();
-#ifdef CONFIG_DEBUG_HIGHMEM
- {
- int idx = type + KM_TYPE_NR * smp_processor_id();
-
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- local_flush_tlb_one(vaddr);
- }
-#endif
- kmap_atomic_idx_pop();
- pagefault_enable();
- preempt_enable();
-}
-EXPORT_SYMBOL(__kunmap_atomic);
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
+void kmap_flush_tlb(unsigned long addr)
{
- unsigned long vaddr;
- int idx, type;
-
- preempt_disable();
- pagefault_disable();
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
- flush_tlb_one(vaddr);
-
- return (void*) vaddr;
-}
-
-void __init kmap_init(void)
-{
- unsigned long kmap_vstart;
-
- /* cache the first kmap pte */
- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+ flush_tlb_one(addr);
}
+EXPORT_SYMBOL(kmap_flush_tlb);
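With the MIPS-private kmap(), kunmap() and kmap_atomic() implementations deleted, the generic highmem code supplies those entry points and calls back into the one remaining arch hook, kmap_flush_tlb(), after installing a persistent kmap. Callers are expected to go through the generic API; a minimal sketch of a user (assumed caller, not part of the patch):

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Sketch: zero a (possibly highmem) page through a short-lived mapping. */
	static void zero_one_page(struct page *page)
	{
		void *vaddr = kmap_local_page(page);	/* fixmap slot if highmem */

		memset(vaddr, 0, PAGE_SIZE);
		kunmap_local(vaddr);			/* valid on this CPU only */
	}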
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index 77ffece9c270..7eaff5b07873 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -21,8 +21,8 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
- unsigned long sz)
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
p4d_t *p4d;
@@ -58,18 +58,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
return (pte_t *) pmd;
}
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
- if (len & ~HPAGE_MASK)
- return -EINVAL;
- if (addr & ~HPAGE_MASK)
- return -EINVAL;
- return 0;
-}
-
int pmd_huge(pmd_t pmd)
{
return (pmd_val(pmd) & _PAGE_HUGE) != 0;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 50f9ed8c6c1b..39f129205b0c 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -36,11 +36,10 @@
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
-#include <asm/kmap_types.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
+#include <asm/mmzone.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
@@ -84,13 +83,13 @@ void setup_zero_pages(void)
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
enum fixed_addresses idx;
- unsigned int uninitialized_var(old_mmid);
+ unsigned int old_mmid;
unsigned long vaddr, flags, entrylo;
unsigned long old_ctx;
pte_t pte;
int tlbidx;
- BUG_ON(Page_dcache_dirty(page));
+ BUG_ON(folio_test_dcache_dirty(page_folio(page)));
preempt_disable();
pagefault_disable();
@@ -171,11 +170,12 @@ void kunmap_coherent(void)
void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
+ struct folio *src = page_folio(from);
void *vfrom, *vto;
vto = kmap_atomic(to);
if (cpu_has_dc_aliases &&
- page_mapcount(from) && !Page_dcache_dirty(from)) {
+ folio_mapped(src) && !folio_test_dcache_dirty(src)) {
vfrom = kmap_coherent(from, vaddr);
copy_page(vto, vfrom);
kunmap_coherent();
@@ -196,15 +196,17 @@ void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
+ struct folio *folio = page_folio(page);
+
if (cpu_has_dc_aliases &&
- page_mapcount(page) && !Page_dcache_dirty(page)) {
+ folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(vto, src, len);
kunmap_coherent();
} else {
memcpy(dst, src, len);
if (cpu_has_dc_aliases)
- SetPageDcacheDirty(page);
+ folio_set_dcache_dirty(folio);
}
if (vma->vm_flags & VM_EXEC)
flush_cache_page(vma, vaddr, page_to_pfn(page));
@@ -214,15 +216,17 @@ void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
+ struct folio *folio = page_folio(page);
+
if (cpu_has_dc_aliases &&
- page_mapcount(page) && !Page_dcache_dirty(page)) {
+ folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(dst, vfrom, len);
kunmap_coherent();
} else {
memcpy(dst, src, len);
if (cpu_has_dc_aliases)
- SetPageDcacheDirty(page);
+ folio_set_dcache_dirty(folio);
}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);
@@ -358,17 +362,23 @@ void maar_init(void)
write_c0_maari(i);
back_to_back_c0_hazard();
upper = read_c0_maar();
+#ifdef CONFIG_XPA
+ upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
+#endif
write_c0_maari(i + 1);
back_to_back_c0_hazard();
lower = read_c0_maar();
+#ifdef CONFIG_XPA
+ lower |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
+#endif
attr = lower & upper;
lower = (lower & MIPS_MAAR_ADDR) << 4;
upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;
pr_info(" [%d]: ", i / 2);
- if (!(attr & MIPS_MAAR_VL)) {
+ if ((attr & MIPS_MAAR_V) != MIPS_MAAR_V) {
pr_cont("disabled\n");
continue;
}
@@ -390,16 +400,13 @@ void maar_init(void)
}
}
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
pagetable_init();
-#ifdef CONFIG_HIGHMEM
- kmap_init();
-#endif
#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
@@ -415,10 +422,19 @@ void __init paging_init(void)
" %ldk highmem ignored\n",
(highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
+
+ max_mapnr = max_low_pfn;
+ } else if (highend_pfn) {
+ max_mapnr = highend_pfn;
+ } else {
+ max_mapnr = max_low_pfn;
}
+#else
+ max_mapnr = max_low_pfn;
#endif
+ high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
- free_area_init_nodes(max_zone_pfns);
+ free_area_init(max_zone_pfns);
}
#ifdef CONFIG_64BIT
@@ -447,26 +463,15 @@ static inline void __init mem_init_free_highmem(void)
void __init mem_init(void)
{
/*
- * When _PFN_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
+ * When PFN_PTE_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
* bits to hold a full 32b physical address on MIPS32 systems.
*/
- BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT));
-
-#ifdef CONFIG_HIGHMEM
-#ifdef CONFIG_DISCONTIGMEM
-#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
-#endif
- max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
-#else
- max_mapnr = max_low_pfn;
-#endif
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));
maar_init();
memblock_free_all();
setup_zero_pages(); /* Setup zeroed pages. */
mem_init_free_highmem();
- mem_init_print_info(NULL);
#ifdef CONFIG_64BIT
if ((unsigned long) &_text > (unsigned long) CKSEG0)
@@ -476,7 +481,7 @@ void __init mem_init(void)
0x80000000 - 4, KCORE_TEXT);
#endif
}
-#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+#endif /* !CONFIG_NUMA */
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
@@ -494,6 +499,11 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
void (*free_init_pages_eva)(void *begin, void *end) = NULL;
+void __weak __init prom_free_prom_memory(void)
+{
+ /* nothing to do */
+}
+
void __ref free_initmem(void)
{
prom_free_prom_memory();
@@ -508,6 +518,43 @@ void __ref free_initmem(void)
free_initmem_default(POISON_FREE_INITMEM);
}
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+ return node_distance(cpu_to_node(from), cpu_to_node(to));
+}
+
+static int __init pcpu_cpu_to_node(int cpu)
+{
+ return cpu_to_node(cpu);
+}
+
+void __init setup_per_cpu_areas(void)
+{
+ unsigned long delta;
+ unsigned int cpu;
+ int rc;
+
+ /*
+ * Always reserve area for module percpu variables. That's
+ * what the legacy allocator did.
+ */
+ rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+ PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
+ pcpu_cpu_distance,
+ pcpu_cpu_to_node);
+ if (rc < 0)
+ panic("Failed to initialize percpu areas.");
+
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu)
+ __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+#endif
+
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif
@@ -519,7 +566,7 @@ unsigned long pgd_current[NR_CPUS];
* size, and waste space. So we place it in its own section and align
* it in the linker script.
*/
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
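The new setup_per_cpu_areas() sizes the first per-cpu chunk with pcpu_embed_first_chunk() and records one offset per possible CPU in __per_cpu_offset[]. Those offsets are what the generic per-cpu accessors add to a static per-cpu variable's link-time address; a small usage sketch (the counter name is made up):

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, example_counter);	/* hypothetical */

	static void bump_local_counter(void)
	{
		/* resolves through __per_cpu_offset[] for the running CPU */
		this_cpu_inc(example_counter);
	}

	static unsigned long counter_on(int cpu)
	{
		return per_cpu(example_counter, cpu);	/* adds __per_cpu_offset[cpu] */
	}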
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 8317f337a86e..d8243d61ef32 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -14,94 +14,13 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
+#include <linux/io.h>
#include <asm/cacheflush.h>
-#include <asm/io.h>
#include <asm/tlbflush.h>
+#include <ioremap.h>
-static inline void remap_area_pte(pte_t * pte, unsigned long address,
- phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
-{
- phys_addr_t end;
- unsigned long pfn;
- pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
- | __WRITEABLE | flags);
-
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- BUG_ON(address >= end);
- pfn = phys_addr >> PAGE_SHIFT;
- do {
- if (!pte_none(*pte)) {
- printk("remap_area_pte: page already exists\n");
- BUG();
- }
- set_pte(pte, pfn_pte(pfn, pgprot));
- address += PAGE_SIZE;
- pfn++;
- pte++;
- } while (address && (address < end));
-}
-
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
- phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
-{
- phys_addr_t end;
-
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- phys_addr -= address;
- BUG_ON(address >= end);
- do {
- pte_t * pte = pte_alloc_kernel(pmd, address);
- if (!pte)
- return -ENOMEM;
- remap_area_pte(pte, address, end - address, address + phys_addr, flags);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
- return 0;
-}
-
-static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
- phys_addr_t size, unsigned long flags)
-{
- int error;
- pgd_t * dir;
- unsigned long end = address + size;
-
- phys_addr -= address;
- dir = pgd_offset(&init_mm, address);
- flush_cache_all();
- BUG_ON(address >= end);
- do {
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
-
- error = -ENOMEM;
- p4d = p4d_alloc(&init_mm, dir, address);
- if (!p4d)
- break;
- pud = pud_alloc(&init_mm, p4d, address);
- if (!pud)
- break;
- pmd = pmd_alloc(&init_mm, pud, address);
- if (!pmd)
- break;
- if (remap_area_pmd(pmd, address, end - address,
- phys_addr + address, flags))
- break;
- error = 0;
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (address && (address < end));
- flush_tlb_all();
- return error;
-}
+#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
+#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
void *arg)
@@ -118,27 +37,25 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
}
/*
- * Generic mapping function (not visible outside):
- */
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
+ * ioremap_prot - map bus memory into CPU space
+ * @phys_addr: bus address of the memory
+ * @size: size of the resource to map
*
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
+ * ioremap_prot gives the caller control over cache coherency attributes (CCA)
*/
-
-#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
-
-void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
+void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
+ unsigned long prot_val)
{
+ unsigned long flags = prot_val & _CACHE_MASK;
unsigned long offset, pfn, last_pfn;
- struct vm_struct * area;
+ struct vm_struct *area;
phys_addr_t last_addr;
- void * addr;
+ unsigned long vaddr;
+ void __iomem *cpu_addr;
+
+ cpu_addr = plat_ioremap(phys_addr, size, flags);
+ if (cpu_addr)
+ return cpu_addr;
phys_addr = fixup_bigphys_addr(phys_addr, size);
@@ -155,6 +72,10 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
flags == _CACHE_UNCACHED)
return (void __iomem *) CKSEG1ADDR(phys_addr);
+ /* Early remaps should use the unmapped regions til' VM is available */
+ if (WARN_ON_ONCE(!slab_is_available()))
+ return NULL;
+
/*
* Don't allow anybody to remap RAM that may be allocated by the page
* allocator, since that could lead to races & data clobbering.
@@ -181,30 +102,22 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
area = get_vm_area(size, VM_IOREMAP);
if (!area)
return NULL;
- addr = area->addr;
- if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
- vunmap(addr);
+ vaddr = (unsigned long)area->addr;
+
+ flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
+ if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
+ __pgprot(flags))) {
+ free_vm_area(area);
return NULL;
}
- return (void __iomem *) (offset + (char *)addr);
+ return (void __iomem *)(vaddr + offset);
}
+EXPORT_SYMBOL(ioremap_prot);
-#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
-
-void __iounmap(const volatile void __iomem *addr)
+void iounmap(const volatile void __iomem *addr)
{
- struct vm_struct *p;
-
- if (IS_KSEG1(addr))
- return;
-
- p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
- if (!p)
- printk(KERN_ERR "iounmap: bad address %p\n", addr);
-
- kfree(p);
+ if (!plat_iounmap(addr) && !IS_KSEG1(addr))
+ vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
-
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(iounmap);
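After this rewrite ioremap_prot() either hands back an unmapped KSEG1 alias for low, uncached physical ranges or builds a real mapping with get_vm_area() plus ioremap_page_range(); iounmap() in turn only needs vunmap() for addresses that did not come from KSEG1 or a platform hook. Driver-side usage is unchanged; a minimal sketch (the device base and size are made up):

	#include <linux/errno.h>
	#include <linux/io.h>

	#define EXAMPLE_REG_BASE	0x1f000000UL	/* hypothetical device */
	#define EXAMPLE_REG_SIZE	0x100

	static int example_map_regs(void)
	{
		void __iomem *regs = ioremap(EXAMPLE_REG_BASE, EXAMPLE_REG_SIZE);

		if (!regs)
			return -ENOMEM;

		writel(0x1, regs + 0x10);	/* uncached CCA chosen by ioremap_prot() */
		iounmap(regs);
		return 0;
	}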
diff --git a/arch/mips/mm/ioremap64.c b/arch/mips/mm/ioremap64.c
new file mode 100644
index 000000000000..15e7820d6a5f
--- /dev/null
+++ b/arch/mips/mm/ioremap64.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/io.h>
+#include <ioremap.h>
+
+void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+ unsigned long prot_val)
+{
+ unsigned long flags = prot_val & _CACHE_MASK;
+ u64 base = (flags == _CACHE_UNCACHED ? IO_BASE : UNCAC_BASE);
+ void __iomem *addr;
+
+ addr = plat_ioremap(offset, size, flags);
+ if (!addr)
+ addr = (void __iomem *)(unsigned long)(base + offset);
+ return addr;
+}
+EXPORT_SYMBOL(ioremap_prot);
+
+void iounmap(const volatile void __iomem *addr)
+{
+ plat_iounmap(addr);
+}
+EXPORT_SYMBOL(iounmap);
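The 64-bit variant never touches page tables: unless a platform hook claims the range, the physical address is simply offset into one of the fixed unmapped windows (IO_BASE for uncached, UNCAC_BASE otherwise), so iounmap() has nothing to tear down beyond the platform hook. On a 64-bit kernel the mapping sketch above therefore reduces to address arithmetic, roughly:

	/* Sketch: what the 64-bit ioremap() effectively returns for an
	 * uncached mapping (no vmalloc area and no TLB entries involved).
	 */
	void __iomem *regs = (void __iomem *)(unsigned long)(IO_BASE + EXAMPLE_REG_BASE);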
diff --git a/arch/mips/mm/maccess.c b/arch/mips/mm/maccess.c
new file mode 100644
index 000000000000..58173842c6be
--- /dev/null
+++ b/arch/mips/mm/maccess.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+{
+ /* highest bit set means kernel space */
+ return (unsigned long)unsafe_src >> (BITS_PER_LONG - 1);
+}
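copy_from_kernel_nofault_allowed() is the architecture filter consulted by the generic non-faulting accessors: on MIPS without EVA, any address with the top bit clear lies in user space and is rejected before the access is even attempted. A typical caller looks like this (sketch; the probed address is arbitrary):

	#include <linux/types.h>
	#include <linux/uaccess.h>

	static bool kernel_insn_readable(unsigned long addr)
	{
		u32 insn;

		/* Fails with -ERANGE if the filter above rejects the address. */
		return copy_from_kernel_nofault(&insn, (void *)addr, sizeof(insn)) == 0;
	}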
diff --git a/arch/mips/mm/page-funcs.S b/arch/mips/mm/page-funcs.S
index 43181ac0a1af..42d0516ca18a 100644
--- a/arch/mips/mm/page-funcs.S
+++ b/arch/mips/mm/page-funcs.S
@@ -8,8 +8,8 @@
* Copyright (C) 2012 MIPS Technologies, Inc.
* Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
*/
+#include <linux/export.h>
#include <asm/asm.h>
-#include <asm/export.h>
#include <asm/regdef.h>
#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index c5578897a4fa..1df237bd4a72 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -20,13 +20,12 @@
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/prefetch.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
+#include <asm/regdef.h>
#include <asm/cpu.h>
-#include <asm/war.h>
#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
#include <asm/sibyte/sb1250.h>
@@ -36,19 +35,6 @@
#include <asm/uasm.h>
-/* Registers used in the assembled routines. */
-#define ZERO 0
-#define AT 2
-#define A0 4
-#define A1 5
-#define A2 6
-#define T0 8
-#define T1 9
-#define T2 10
-#define T3 11
-#define T9 25
-#define RA 31
-
/* Handle labels (which must be positive integers). */
enum label_id {
label_clear_nopref = 1,
@@ -104,18 +90,20 @@ static int cache_line_size;
static inline void
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{
- if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
+ if (cpu_has_64bit_gp_regs &&
+ IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS) &&
+ r4k_daddiu_bug()) {
if (off > 0x7fff) {
- uasm_i_lui(buf, T9, uasm_rel_hi(off));
- uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
+ uasm_i_lui(buf, GPR_T9, uasm_rel_hi(off));
+ uasm_i_addiu(buf, GPR_T9, GPR_T9, uasm_rel_lo(off));
} else
- uasm_i_addiu(buf, T9, ZERO, off);
- uasm_i_daddu(buf, reg1, reg2, T9);
+ uasm_i_addiu(buf, GPR_T9, GPR_ZERO, off);
+ uasm_i_daddu(buf, reg1, reg2, GPR_T9);
} else {
if (off > 0x7fff) {
- uasm_i_lui(buf, T9, uasm_rel_hi(off));
- uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
- UASM_i_ADDU(buf, reg1, reg2, T9);
+ uasm_i_lui(buf, GPR_T9, uasm_rel_hi(off));
+ uasm_i_addiu(buf, GPR_T9, GPR_T9, uasm_rel_lo(off));
+ UASM_i_ADDU(buf, reg1, reg2, GPR_T9);
} else
UASM_i_ADDIU(buf, reg1, reg2, off);
}
@@ -233,9 +221,9 @@ static void set_prefetch_parameters(void)
static void build_clear_store(u32 **buf, int off)
{
if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
- uasm_i_sd(buf, ZERO, off, A0);
+ uasm_i_sd(buf, GPR_ZERO, off, GPR_A0);
} else {
- uasm_i_sw(buf, ZERO, off, A0);
+ uasm_i_sw(buf, GPR_ZERO, off, GPR_A0);
}
}
@@ -246,22 +234,24 @@ static inline void build_clear_pref(u32 **buf, int off)
if (pref_bias_clear_store) {
_uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
- A0);
+ GPR_A0);
} else if (cache_line_size == (half_clear_loop_size << 1)) {
if (cpu_has_cache_cdex_s) {
- uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
+ uasm_i_cache(buf, Create_Dirty_Excl_SD, off, GPR_A0);
} else if (cpu_has_cache_cdex_p) {
- if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
+ if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) &&
+ cpu_is_r4600_v1_x()) {
uasm_i_nop(buf);
uasm_i_nop(buf);
uasm_i_nop(buf);
uasm_i_nop(buf);
}
- if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
- uasm_i_lw(buf, ZERO, ZERO, AT);
+ if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) &&
+ cpu_is_r4600_v2_x())
+ uasm_i_lw(buf, GPR_ZERO, GPR_ZERO, GPR_AT);
- uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+ uasm_i_cache(buf, Create_Dirty_Excl_D, off, GPR_A0);
}
}
}
@@ -299,12 +289,12 @@ void build_clear_page(void)
off = PAGE_SIZE - pref_bias_clear_store;
if (off > 0xffff || !pref_bias_clear_store)
- pg_addiu(&buf, A2, A0, off);
+ pg_addiu(&buf, GPR_A2, GPR_A0, off);
else
- uasm_i_ori(&buf, A2, A0, off);
+ uasm_i_ori(&buf, GPR_A2, GPR_A0, off);
- if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
- uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
+ if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x())
+ uasm_i_lui(&buf, GPR_AT, uasm_rel_hi(0xa0000000));
off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
* cache_line_size : 0;
@@ -318,36 +308,36 @@ void build_clear_page(void)
build_clear_store(&buf, off);
off += clear_word_size;
} while (off < half_clear_loop_size);
- pg_addiu(&buf, A0, A0, 2 * off);
+ pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off);
off = -off;
do {
build_clear_pref(&buf, off);
if (off == -clear_word_size)
- uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
+ uasm_il_bne(&buf, &r, GPR_A0, GPR_A2, label_clear_pref);
build_clear_store(&buf, off);
off += clear_word_size;
} while (off < 0);
if (pref_bias_clear_store) {
- pg_addiu(&buf, A2, A0, pref_bias_clear_store);
+ pg_addiu(&buf, GPR_A2, GPR_A0, pref_bias_clear_store);
uasm_l_clear_nopref(&l, buf);
off = 0;
do {
build_clear_store(&buf, off);
off += clear_word_size;
} while (off < half_clear_loop_size);
- pg_addiu(&buf, A0, A0, 2 * off);
+ pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off);
off = -off;
do {
if (off == -clear_word_size)
- uasm_il_bne(&buf, &r, A0, A2,
+ uasm_il_bne(&buf, &r, GPR_A0, GPR_A2,
label_clear_nopref);
build_clear_store(&buf, off);
off += clear_word_size;
} while (off < 0);
}
- uasm_i_jr(&buf, RA);
+ uasm_i_jr(&buf, GPR_RA);
uasm_i_nop(&buf);
BUG_ON(buf > &__clear_page_end);
@@ -367,18 +357,18 @@ void build_clear_page(void)
static void build_copy_load(u32 **buf, int reg, int off)
{
if (cpu_has_64bit_gp_regs) {
- uasm_i_ld(buf, reg, off, A1);
+ uasm_i_ld(buf, reg, off, GPR_A1);
} else {
- uasm_i_lw(buf, reg, off, A1);
+ uasm_i_lw(buf, reg, off, GPR_A1);
}
}
static void build_copy_store(u32 **buf, int reg, int off)
{
if (cpu_has_64bit_gp_regs) {
- uasm_i_sd(buf, reg, off, A0);
+ uasm_i_sd(buf, reg, off, GPR_A0);
} else {
- uasm_i_sw(buf, reg, off, A0);
+ uasm_i_sw(buf, reg, off, GPR_A0);
}
}
@@ -388,7 +378,7 @@ static inline void build_copy_load_pref(u32 **buf, int off)
return;
if (pref_bias_copy_load)
- _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
+ _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, GPR_A1);
}
static inline void build_copy_store_pref(u32 **buf, int off)
@@ -398,22 +388,24 @@ static inline void build_copy_store_pref(u32 **buf, int off)
if (pref_bias_copy_store) {
_uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
- A0);
+ GPR_A0);
} else if (cache_line_size == (half_copy_loop_size << 1)) {
if (cpu_has_cache_cdex_s) {
- uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
+ uasm_i_cache(buf, Create_Dirty_Excl_SD, off, GPR_A0);
} else if (cpu_has_cache_cdex_p) {
- if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
+ if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) &&
+ cpu_is_r4600_v1_x()) {
uasm_i_nop(buf);
uasm_i_nop(buf);
uasm_i_nop(buf);
uasm_i_nop(buf);
}
- if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
- uasm_i_lw(buf, ZERO, ZERO, AT);
+ if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) &&
+ cpu_is_r4600_v2_x())
+ uasm_i_lw(buf, GPR_ZERO, GPR_ZERO, GPR_AT);
- uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+ uasm_i_cache(buf, Create_Dirty_Excl_D, off, GPR_A0);
}
}
}
@@ -450,12 +442,12 @@ void build_copy_page(void)
off = PAGE_SIZE - pref_bias_copy_load;
if (off > 0xffff || !pref_bias_copy_load)
- pg_addiu(&buf, A2, A0, off);
+ pg_addiu(&buf, GPR_A2, GPR_A0, off);
else
- uasm_i_ori(&buf, A2, A0, off);
+ uasm_i_ori(&buf, GPR_A2, GPR_A0, off);
- if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
- uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
+ if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x())
+ uasm_i_lui(&buf, GPR_AT, uasm_rel_hi(0xa0000000));
off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
cache_line_size : 0;
@@ -472,126 +464,126 @@ void build_copy_page(void)
uasm_l_copy_pref_both(&l, buf);
do {
build_copy_load_pref(&buf, off);
- build_copy_load(&buf, T0, off);
+ build_copy_load(&buf, GPR_T0, off);
build_copy_load_pref(&buf, off + copy_word_size);
- build_copy_load(&buf, T1, off + copy_word_size);
+ build_copy_load(&buf, GPR_T1, off + copy_word_size);
build_copy_load_pref(&buf, off + 2 * copy_word_size);
- build_copy_load(&buf, T2, off + 2 * copy_word_size);
+ build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
build_copy_load_pref(&buf, off + 3 * copy_word_size);
- build_copy_load(&buf, T3, off + 3 * copy_word_size);
+ build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
build_copy_store_pref(&buf, off);
- build_copy_store(&buf, T0, off);
+ build_copy_store(&buf, GPR_T0, off);
build_copy_store_pref(&buf, off + copy_word_size);
- build_copy_store(&buf, T1, off + copy_word_size);
+ build_copy_store(&buf, GPR_T1, off + copy_word_size);
build_copy_store_pref(&buf, off + 2 * copy_word_size);
- build_copy_store(&buf, T2, off + 2 * copy_word_size);
+ build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
build_copy_store_pref(&buf, off + 3 * copy_word_size);
- build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < half_copy_loop_size);
- pg_addiu(&buf, A1, A1, 2 * off);
- pg_addiu(&buf, A0, A0, 2 * off);
+ pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off);
+ pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off);
off = -off;
do {
build_copy_load_pref(&buf, off);
- build_copy_load(&buf, T0, off);
+ build_copy_load(&buf, GPR_T0, off);
build_copy_load_pref(&buf, off + copy_word_size);
- build_copy_load(&buf, T1, off + copy_word_size);
+ build_copy_load(&buf, GPR_T1, off + copy_word_size);
build_copy_load_pref(&buf, off + 2 * copy_word_size);
- build_copy_load(&buf, T2, off + 2 * copy_word_size);
+ build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
build_copy_load_pref(&buf, off + 3 * copy_word_size);
- build_copy_load(&buf, T3, off + 3 * copy_word_size);
+ build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
build_copy_store_pref(&buf, off);
- build_copy_store(&buf, T0, off);
+ build_copy_store(&buf, GPR_T0, off);
build_copy_store_pref(&buf, off + copy_word_size);
- build_copy_store(&buf, T1, off + copy_word_size);
+ build_copy_store(&buf, GPR_T1, off + copy_word_size);
build_copy_store_pref(&buf, off + 2 * copy_word_size);
- build_copy_store(&buf, T2, off + 2 * copy_word_size);
+ build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
build_copy_store_pref(&buf, off + 3 * copy_word_size);
if (off == -(4 * copy_word_size))
- uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
- build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ uasm_il_bne(&buf, &r, GPR_A2, GPR_A0, label_copy_pref_both);
+ build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < 0);
if (pref_bias_copy_load - pref_bias_copy_store) {
- pg_addiu(&buf, A2, A0,
+ pg_addiu(&buf, GPR_A2, GPR_A0,
pref_bias_copy_load - pref_bias_copy_store);
uasm_l_copy_pref_store(&l, buf);
off = 0;
do {
- build_copy_load(&buf, T0, off);
- build_copy_load(&buf, T1, off + copy_word_size);
- build_copy_load(&buf, T2, off + 2 * copy_word_size);
- build_copy_load(&buf, T3, off + 3 * copy_word_size);
+ build_copy_load(&buf, GPR_T0, off);
+ build_copy_load(&buf, GPR_T1, off + copy_word_size);
+ build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
+ build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
build_copy_store_pref(&buf, off);
- build_copy_store(&buf, T0, off);
+ build_copy_store(&buf, GPR_T0, off);
build_copy_store_pref(&buf, off + copy_word_size);
- build_copy_store(&buf, T1, off + copy_word_size);
+ build_copy_store(&buf, GPR_T1, off + copy_word_size);
build_copy_store_pref(&buf, off + 2 * copy_word_size);
- build_copy_store(&buf, T2, off + 2 * copy_word_size);
+ build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
build_copy_store_pref(&buf, off + 3 * copy_word_size);
- build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < half_copy_loop_size);
- pg_addiu(&buf, A1, A1, 2 * off);
- pg_addiu(&buf, A0, A0, 2 * off);
+ pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off);
+ pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off);
off = -off;
do {
- build_copy_load(&buf, T0, off);
- build_copy_load(&buf, T1, off + copy_word_size);
- build_copy_load(&buf, T2, off + 2 * copy_word_size);
- build_copy_load(&buf, T3, off + 3 * copy_word_size);
+ build_copy_load(&buf, GPR_T0, off);
+ build_copy_load(&buf, GPR_T1, off + copy_word_size);
+ build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
+ build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
build_copy_store_pref(&buf, off);
- build_copy_store(&buf, T0, off);
+ build_copy_store(&buf, GPR_T0, off);
build_copy_store_pref(&buf, off + copy_word_size);
- build_copy_store(&buf, T1, off + copy_word_size);
+ build_copy_store(&buf, GPR_T1, off + copy_word_size);
build_copy_store_pref(&buf, off + 2 * copy_word_size);
- build_copy_store(&buf, T2, off + 2 * copy_word_size);
+ build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
build_copy_store_pref(&buf, off + 3 * copy_word_size);
if (off == -(4 * copy_word_size))
- uasm_il_bne(&buf, &r, A2, A0,
+ uasm_il_bne(&buf, &r, GPR_A2, GPR_A0,
label_copy_pref_store);
- build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < 0);
}
if (pref_bias_copy_store) {
- pg_addiu(&buf, A2, A0, pref_bias_copy_store);
+ pg_addiu(&buf, GPR_A2, GPR_A0, pref_bias_copy_store);
uasm_l_copy_nopref(&l, buf);
off = 0;
do {
- build_copy_load(&buf, T0, off);
- build_copy_load(&buf, T1, off + copy_word_size);
- build_copy_load(&buf, T2, off + 2 * copy_word_size);
- build_copy_load(&buf, T3, off + 3 * copy_word_size);
- build_copy_store(&buf, T0, off);
- build_copy_store(&buf, T1, off + copy_word_size);
- build_copy_store(&buf, T2, off + 2 * copy_word_size);
- build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ build_copy_load(&buf, GPR_T0, off);
+ build_copy_load(&buf, GPR_T1, off + copy_word_size);
+ build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
+ build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
+ build_copy_store(&buf, GPR_T0, off);
+ build_copy_store(&buf, GPR_T1, off + copy_word_size);
+ build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
+ build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < half_copy_loop_size);
- pg_addiu(&buf, A1, A1, 2 * off);
- pg_addiu(&buf, A0, A0, 2 * off);
+ pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off);
+ pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off);
off = -off;
do {
- build_copy_load(&buf, T0, off);
- build_copy_load(&buf, T1, off + copy_word_size);
- build_copy_load(&buf, T2, off + 2 * copy_word_size);
- build_copy_load(&buf, T3, off + 3 * copy_word_size);
- build_copy_store(&buf, T0, off);
- build_copy_store(&buf, T1, off + copy_word_size);
- build_copy_store(&buf, T2, off + 2 * copy_word_size);
+ build_copy_load(&buf, GPR_T0, off);
+ build_copy_load(&buf, GPR_T1, off + copy_word_size);
+ build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
+ build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
+ build_copy_store(&buf, GPR_T0, off);
+ build_copy_store(&buf, GPR_T1, off + copy_word_size);
+ build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
if (off == -(4 * copy_word_size))
- uasm_il_bne(&buf, &r, A2, A0,
+ uasm_il_bne(&buf, &r, GPR_A2, GPR_A0,
label_copy_nopref);
- build_copy_store(&buf, T3, off + 3 * copy_word_size);
+ build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < 0);
}
- uasm_i_jr(&buf, RA);
+ uasm_i_jr(&buf, GPR_RA);
uasm_i_nop(&buf);
BUG_ON(buf > &__copy_page_end);
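Throughout page.c the hand-numbered register defines are replaced by the GPR_* names from <asm/regdef.h>, so the uasm calls now read like ordinary assembler. The generated routines still finish with the same epilogue; a standalone sketch of emitting it with uasm (the buffer is made up):

	#include <linux/types.h>
	#include <asm/regdef.h>
	#include <asm/uasm.h>

	static u32 stub_buf[4];			/* hypothetical scratch buffer */

	static void build_return_stub(void)
	{
		u32 *p = stub_buf;

		uasm_i_jr(&p, GPR_RA);		/* jr   ra */
		uasm_i_nop(&p);			/* fill the branch delay slot */
	}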
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index 37c7a01427d2..84dd5136d53a 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -10,13 +10,12 @@
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <asm/fixmap.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
-void pgd_init(unsigned long page)
+void pgd_init(void *addr)
{
- unsigned long *p = (unsigned long *) page;
+ unsigned long *p = (unsigned long *)addr;
int i;
for (i = 0; i < USER_PTRS_PER_PGD; i+=8) {
@@ -36,7 +35,7 @@ pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
pmd_t pmd;
- pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);
+ pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
return pmd;
}
@@ -46,7 +45,6 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
- flush_tlb_all();
}
#endif /* defined(CONFIG_TRANSPARENT_HUGEPAGE) */
@@ -63,9 +61,8 @@ void __init pagetable_init(void)
#endif
/* Initialize the entire pgd. */
- pgd_init((unsigned long)swapper_pg_dir);
- pgd_init((unsigned long)swapper_pg_dir
- + sizeof(pgd_t) * USER_PTRS_PER_PGD);
+ pgd_init(swapper_pg_dir);
+ pgd_init(&swapper_pg_dir[USER_PTRS_PER_PGD]);
pgd_base = swapper_pg_dir;
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 6fd6e96fdebb..1e544827dea9 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -10,11 +10,10 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/fixmap.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
-void pgd_init(unsigned long page)
+void pgd_init(void *addr)
{
unsigned long *p, *end;
unsigned long entry;
@@ -27,7 +26,7 @@ void pgd_init(unsigned long page)
entry = (unsigned long)invalid_pte_table;
#endif
- p = (unsigned long *) page;
+ p = (unsigned long *) addr;
end = p + PTRS_PER_PGD;
do {
@@ -44,11 +43,12 @@ void pgd_init(unsigned long page)
}
#ifndef __PAGETABLE_PMD_FOLDED
-void pmd_init(unsigned long addr, unsigned long pagetable)
+void pmd_init(void *addr)
{
unsigned long *p, *end;
+ unsigned long pagetable = (unsigned long)invalid_pte_table;
- p = (unsigned long *) addr;
+ p = (unsigned long *)addr;
end = p + PTRS_PER_PMD;
do {
@@ -67,9 +67,10 @@ EXPORT_SYMBOL_GPL(pmd_init);
#endif
#ifndef __PAGETABLE_PUD_FOLDED
-void pud_init(unsigned long addr, unsigned long pagetable)
+void pud_init(void *addr)
{
unsigned long *p, *end;
+ unsigned long pagetable = (unsigned long)invalid_pmd_table;
p = (unsigned long *)addr;
end = p + PTRS_PER_PUD;
@@ -88,11 +89,12 @@ void pud_init(unsigned long addr, unsigned long pagetable)
}
#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
pmd_t pmd;
- pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);
+ pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
return pmd;
}
@@ -101,8 +103,8 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
- flush_tlb_all();
}
+#endif
void __init pagetable_init(void)
{
@@ -110,12 +112,12 @@ void __init pagetable_init(void)
pgd_t *pgd_base;
/* Initialize the entire pgd. */
- pgd_init((unsigned long)swapper_pg_dir);
+ pgd_init(swapper_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
- pud_init((unsigned long)invalid_pud_table, (unsigned long)invalid_pmd_table);
+ pud_init(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
- pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
+ pmd_init(invalid_pmd_table);
#endif
pgd_base = swapper_pg_dir;
/*
diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c
index 05560b042d82..1506e458040d 100644
--- a/arch/mips/mm/pgtable.c
+++ b/arch/mips/mm/pgtable.c
@@ -10,12 +10,14 @@
pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *ret, *init;
+ pgd_t *init, *ret = NULL;
+ struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM,
+ PGD_TABLE_ORDER);
- ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
- if (ret) {
+ if (ptdesc) {
+ ret = ptdesc_address(ptdesc);
init = pgd_offset(&init_mm, 0UL);
- pgd_init((unsigned long)ret);
+ pgd_init(ret);
memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
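pgd_alloc() now allocates the top-level table through the ptdesc API instead of bare __get_free_pages(), which keeps page-table pages typed and accounted consistently with the rest of mm. Under the same API the matching release path would look roughly like this (sketch; MIPS keeps its real pgd_free() in asm/pgalloc.h):

	#include <linux/mm.h>

	/* Sketch of the free side paired with the ptdesc-based pgd_alloc() above. */
	static void pgd_free_sketch(struct mm_struct *mm, pgd_t *pgd)
	{
		pagetable_free(virt_to_ptdesc(pgd));
	}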
diff --git a/arch/mips/mm/physaddr.c b/arch/mips/mm/physaddr.c
new file mode 100644
index 000000000000..f9b8c85e9843
--- /dev/null
+++ b/arch/mips/mm/physaddr.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/mmdebug.h>
+#include <linux/mm.h>
+
+#include <asm/addrspace.h>
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/dma.h>
+
+static inline bool __debug_virt_addr_valid(unsigned long x)
+{
+ /*
+ * MAX_DMA_ADDRESS is a virtual address that may not correspond to an
+ * actual physical address. Enough code relies on
+ * virt_to_phys(MAX_DMA_ADDRESS) that we just need to work around it
+ * and always return true.
+ */
+ if (x == MAX_DMA_ADDRESS)
+ return true;
+
+ return x >= PAGE_OFFSET && (KSEGX(x) < KSEG2 ||
+ IS_ENABLED(CONFIG_EVA) ||
+ !IS_ENABLED(CONFIG_HIGHMEM));
+}
+
+phys_addr_t __virt_to_phys(volatile const void *x)
+{
+ WARN(!__debug_virt_addr_valid((unsigned long)x),
+ "virt_to_phys used for non-linear address: %pK (%pS)\n",
+ x, x);
+
+ return __virt_to_phys_nodebug(x);
+}
+EXPORT_SYMBOL(__virt_to_phys);
+
+phys_addr_t __phys_addr_symbol(unsigned long x)
+{
+ /* This is bounds checking against the kernel image only.
+ * __pa_symbol should only be used on kernel symbol addresses.
+ */
+ VIRTUAL_BUG_ON(x < (unsigned long)_text ||
+ x > (unsigned long)_end);
+
+ return __pa_symbol_nodebug(x);
+}
+EXPORT_SYMBOL(__phys_addr_symbol);
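These checked helpers are only built when CONFIG_DEBUG_VIRTUAL is enabled; a production kernel keeps virt_to_phys() as plain address arithmetic. The usual header-side selection looks roughly like this (sketch of the common pattern, not a quote of the MIPS headers):

	#ifdef CONFIG_DEBUG_VIRTUAL
	phys_addr_t __virt_to_phys(volatile const void *x);	/* warns on bad input */
	#else
	#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
	#endif

	#define virt_to_phys(x)		__virt_to_phys(x)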
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index ea059cd86496..d7238687d790 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -12,7 +12,6 @@
#include <asm/bcache.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/bootinfo.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/mc.h>
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index dbdbfe5d8408..06ec304ad4d1 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -12,7 +12,6 @@
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/mips-cps.h>
@@ -147,7 +146,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
return 1;
}
-static int __init mips_sc_probe_cm3(void)
+static int mips_sc_probe_cm3(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned long cfg = read_gcr_l2_config();
@@ -181,7 +180,7 @@ static int __init mips_sc_probe_cm3(void)
return 0;
}
-static inline int __init mips_sc_probe(void)
+static inline int mips_sc_probe(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config1, config2;
@@ -194,9 +193,10 @@ static inline int __init mips_sc_probe(void)
return mips_sc_probe_cm3();
/* Ignore anything but MIPSxx processors */
- if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
- MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
- MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)))
+ if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)))
return 0;
/* Does this MIPS32/MIPS64 CPU have a config2 register? */
@@ -228,6 +228,7 @@ static inline int __init mips_sc_probe(void)
* contradicted by all documentation.
*/
case MACH_INGENIC_JZ4770:
+ case MACH_INGENIC_JZ4775:
c->scache.ways = 4;
break;
@@ -236,6 +237,7 @@ static inline int __init mips_sc_probe(void)
* but that is contradicted by all documentation.
*/
case MACH_INGENIC_X1000:
+ case MACH_INGENIC_X1000E:
c->scache.sets = 256;
c->scache.ways = 4;
break;
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index c7b94c951d98..736615d68f7a 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -12,7 +12,6 @@
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
diff --git a/arch/mips/mm/tlb-funcs.S b/arch/mips/mm/tlb-funcs.S
index 00fef578c8cd..2705d7dcb33e 100644
--- a/arch/mips/mm/tlb-funcs.S
+++ b/arch/mips/mm/tlb-funcs.S
@@ -11,8 +11,8 @@
* Copyright (C) 2012 MIPS Technologies, Inc.
* Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
*/
+#include <linux/export.h>
#include <asm/asm.h>
-#include <asm/export.h>
#include <asm/regdef.h>
#define FASTPATH_SIZE 128
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 50f207591b6d..173f7b36033b 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -17,18 +17,17 @@
#include <linux/mm.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/tlbmisc.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
+#include <asm/setup.h>
+#include <asm/tlbex.h>
#undef DEBUG_TLB
-extern void build_tlb_refill_handler(void);
-
/* CP0 hazard avoidance. */
#define BARRIER \
__asm__ __volatile__( \
@@ -37,8 +36,6 @@ extern void build_tlb_refill_handler(void);
"nop\n\t" \
".set pop\n\t")
-int r3k_have_wired_reg; /* Should be in cpu_data? */
-
/* TLB operations. */
static void local_flush_tlb_from(int entry)
{
@@ -63,7 +60,7 @@ void local_flush_tlb_all(void)
printk("[tlball]");
#endif
local_irq_save(flags);
- local_flush_tlb_from(r3k_have_wired_reg ? read_c0_wired() : 8);
+ local_flush_tlb_from(8);
local_irq_restore(flags);
}
@@ -186,7 +183,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
int idx, pid;
/*
- * Handle debugger faulting in for debugee.
+ * Handle debugger faulting in for debuggee.
*/
if (current->active_mm != vma->vm_mm)
return;
@@ -225,34 +222,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long old_ctx;
static unsigned long wired = 0;
- if (r3k_have_wired_reg) { /* TX39XX */
- unsigned long old_pagemask;
- unsigned long w;
-
-#ifdef DEBUG_TLB
- printk("[tlbwired<entry lo0 %8x, hi %8x\n, pagemask %8x>]\n",
- entrylo0, entryhi, pagemask);
-#endif
-
- local_irq_save(flags);
- /* Save old context and create impossible VPN2 value */
- old_ctx = read_c0_entryhi() & asid_mask;
- old_pagemask = read_c0_pagemask();
- w = read_c0_wired();
- write_c0_wired(w + 1);
- write_c0_index(w << 8);
- write_c0_pagemask(pagemask);
- write_c0_entryhi(entryhi);
- write_c0_entrylo0(entrylo0);
- BARRIER;
- tlb_write_indexed();
-
- write_c0_entryhi(old_ctx);
- write_c0_pagemask(old_pagemask);
- local_flush_tlb_all();
- local_irq_restore(flags);
-
- } else if (wired < 8) {
+ if (wired < 8) {
#ifdef DEBUG_TLB
printk("[tlbwired<entry lo0 %8x, hi %8x\n>]\n",
entrylo0, entryhi);
@@ -273,13 +243,6 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
void tlb_init(void)
{
- switch (current_cpu_type()) {
- case CPU_TX3922:
- case CPU_TX3927:
- r3k_have_wired_reg = 1;
- write_c0_wired(0); /* Set to 8 on reset... */
- break;
- }
local_flush_tlb_from(0);
build_tlb_refill_handler();
}
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index d7a9d5f211f0..4106084e57d7 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -21,11 +21,10 @@
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/tlb.h>
+#include <asm/tlbex.h>
#include <asm/tlbmisc.h>
-
-extern void build_tlb_refill_handler(void);
+#include <asm/setup.h>
/*
* LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
@@ -120,7 +119,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
if (size <= (current_cpu_data.tlbsizeftlbsets ?
current_cpu_data.tlbsize / 8 :
current_cpu_data.tlbsize / 2)) {
- unsigned long old_entryhi, uninitialized_var(old_mmid);
+ unsigned long old_entryhi, old_mmid;
int newpid = cpu_asid(cpu, mm);
old_entryhi = read_c0_entryhi();
@@ -214,7 +213,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
int cpu = smp_processor_id();
if (cpu_context(cpu, vma->vm_mm) != 0) {
- unsigned long uninitialized_var(old_mmid);
+ unsigned long old_mmid;
unsigned long flags, old_entryhi;
int idx;
@@ -298,11 +297,11 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
- pte_t *ptep;
+ pte_t *ptep, *ptemap = NULL;
int idx, pid;
/*
- * Handle debugger faulting in for debugee.
+ * Handle debugger faulting in for debuggee.
*/
if (current->active_mm != vma->vm_mm)
return;
@@ -345,7 +344,12 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
} else
#endif
{
- ptep = pte_offset_map(pmdp, address);
+ ptemap = ptep = pte_offset_map(pmdp, address);
+ /*
+ * update_mmu_cache() is called between pte_offset_map_lock()
+ * and pte_unmap_unlock(), so we can assume that ptep is not
+ * NULL here: and what should be done below if it were NULL?
+ */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
@@ -374,6 +378,9 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
tlbw_use_hazard();
htw_start();
flush_micro_tlb_vm(vma);
+
+ if (ptemap)
+ pte_unmap(ptemap);
local_irq_restore(flags);
}
@@ -383,7 +390,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
#ifdef CONFIG_XPA
panic("Broken for XPA kernels");
#else
- unsigned int uninitialized_var(old_mmid);
+ unsigned int old_mmid;
unsigned long flags;
unsigned long wired;
unsigned long old_pagemask;
@@ -439,6 +446,7 @@ int has_transparent_hugepage(void)
}
return mask == PM_HUGE_MASK;
}
+EXPORT_SYMBOL(has_transparent_hugepage);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -450,6 +458,7 @@ int has_transparent_hugepage(void)
int temp_tlb_entry;
+#ifndef CONFIG_64BIT
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
{
@@ -488,6 +497,7 @@ out:
local_irq_restore(flags);
return ret;
}
+#endif
static int ntlb;
static int __init set_ntlb(char *str)
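The __update_tlb() change in this file keeps the pointer returned by pte_offset_map() and drops it with pte_unmap() after the TLB write, because the PTE page may itself be a transient kernel mapping. The pairing it relies on, in isolation (sketch; callers that hold the PTE lock, as the added comment notes, may assume the map cannot fail):

	#include <linux/mm.h>

	/* Sketch: read one PTE through a transient mapping of its page table. */
	static pte_t read_pte_once(pmd_t *pmdp, unsigned long address)
	{
		pte_t entry = __pte(0);
		pte_t *ptep = pte_offset_map(pmdp, address);

		if (ptep) {		/* NULL if the table went away under us */
			entry = *ptep;
			pte_unmap(ptep);
		}
		return entry;
	}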
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 344e6e9ea43b..69ea54bdc0c3 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -28,12 +28,13 @@
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/cache.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/cpu-type.h>
+#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-#include <asm/war.h>
+#include <asm/regdef.h>
#include <asm/uasm.h>
#include <asm/setup.h>
#include <asm/tlbex.h>
@@ -83,14 +84,18 @@ static inline int r4k_250MHZhwbug(void)
return 0;
}
+extern int sb1250_m3_workaround_needed(void);
+
static inline int __maybe_unused bcm1250_m3_war(void)
{
- return BCM1250_M3_WAR;
+ if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS))
+ return sb1250_m3_workaround_needed();
+ return 0;
}
static inline int __maybe_unused r10000_llsc_war(void)
{
- return R10000_LLSC_WAR;
+ return IS_ENABLED(CONFIG_WAR_R10000_LLSC);
}
static int use_bbit_insns(void)
@@ -250,7 +255,7 @@ static void output_pgtable_bits_defines(void)
pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
- pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
+ pr_define("PFN_PTE_SHIFT %d\n", PFN_PTE_SHIFT);
pr_debug("\n");
}
@@ -273,27 +278,6 @@ static inline void dump_handler(const char *symbol, const void *start, const voi
pr_debug("\tEND(%s)\n", symbol);
}
-/* The only general purpose registers allowed in TLB handlers. */
-#define K0 26
-#define K1 27
-
-/* Some CP0 registers */
-#define C0_INDEX 0, 0
-#define C0_ENTRYLO0 2, 0
-#define C0_TCBIND 2, 2
-#define C0_ENTRYLO1 3, 0
-#define C0_CONTEXT 4, 0
-#define C0_PAGEMASK 5, 0
-#define C0_PWBASE 5, 5
-#define C0_PWFIELD 5, 6
-#define C0_PWSIZE 5, 7
-#define C0_PWCTL 6, 6
-#define C0_BADVADDR 8, 0
-#define C0_PGD 9, 7
-#define C0_ENTRYHI 10, 0
-#define C0_EPC 14, 0
-#define C0_XCONTEXT 20, 0
-
#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
@@ -321,13 +305,7 @@ static unsigned int kscratch_used_mask;
static inline int __maybe_unused c0_kscratch(void)
{
- switch (current_cpu_type()) {
- case CPU_XLP:
- case CPU_XLR:
- return 22;
- default:
- return 31;
- }
+ return 31;
}
static int allocate_kscratch(void)
@@ -359,30 +337,30 @@ static struct work_registers build_get_work_registers(u32 **p)
if (scratch_reg >= 0) {
/* Save in CPU local C0_KScratch? */
UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
- r.r1 = K0;
- r.r2 = K1;
- r.r3 = 1;
+ r.r1 = GPR_K0;
+ r.r2 = GPR_K1;
+ r.r3 = GPR_AT;
return r;
}
if (num_possible_cpus() > 1) {
/* Get smp_processor_id */
- UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG);
- UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);
+ UASM_i_CPUID_MFC0(p, GPR_K0, SMP_CPUID_REG);
+ UASM_i_SRL_SAFE(p, GPR_K0, GPR_K0, SMP_CPUID_REGSHIFT);
- /* handler_reg_save index in K0 */
- UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
+ /* handler_reg_save index in GPR_K0 */
+ UASM_i_SLL(p, GPR_K0, GPR_K0, ilog2(sizeof(struct tlb_reg_save)));
- UASM_i_LA(p, K1, (long)&handler_reg_save);
- UASM_i_ADDU(p, K0, K0, K1);
+ UASM_i_LA(p, GPR_K1, (long)&handler_reg_save);
+ UASM_i_ADDU(p, GPR_K0, GPR_K0, GPR_K1);
} else {
- UASM_i_LA(p, K0, (long)&handler_reg_save);
+ UASM_i_LA(p, GPR_K0, (long)&handler_reg_save);
}
- /* K0 now points to save area, save $1 and $2 */
- UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
- UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
+ /* GPR_K0 now points to save area, save $1 and $2 */
+ UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), GPR_K0);
+ UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), GPR_K0);
- r.r1 = K1;
+ r.r1 = GPR_K1;
r.r2 = 1;
r.r3 = 2;
return r;
@@ -395,9 +373,9 @@ static void build_restore_work_registers(u32 **p)
UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
return;
}
- /* K0 already points to save area, restore $1 and $2 */
- UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
- UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
+ /* GPR_K0 already points to save area, restore $1 and $2 */
+ UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), GPR_K0);
+ UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), GPR_K0);
}
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
@@ -416,22 +394,22 @@ static void build_r3000_tlb_refill_handler(void)
memset(tlb_handler, 0, sizeof(tlb_handler));
p = tlb_handler;
- uasm_i_mfc0(&p, K0, C0_BADVADDR);
- uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
- uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
- uasm_i_srl(&p, K0, K0, 22); /* load delay */
- uasm_i_sll(&p, K0, K0, 2);
- uasm_i_addu(&p, K1, K1, K0);
- uasm_i_mfc0(&p, K0, C0_CONTEXT);
- uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
- uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
- uasm_i_addu(&p, K1, K1, K0);
- uasm_i_lw(&p, K0, 0, K1);
+ uasm_i_mfc0(&p, GPR_K0, C0_BADVADDR);
+ uasm_i_lui(&p, GPR_K1, uasm_rel_hi(pgdc)); /* cp0 delay */
+ uasm_i_lw(&p, GPR_K1, uasm_rel_lo(pgdc), GPR_K1);
+ uasm_i_srl(&p, GPR_K0, GPR_K0, 22); /* load delay */
+ uasm_i_sll(&p, GPR_K0, GPR_K0, 2);
+ uasm_i_addu(&p, GPR_K1, GPR_K1, GPR_K0);
+ uasm_i_mfc0(&p, GPR_K0, C0_CONTEXT);
+ uasm_i_lw(&p, GPR_K1, 0, GPR_K1); /* cp0 delay */
+ uasm_i_andi(&p, GPR_K0, GPR_K0, 0xffc); /* load delay */
+ uasm_i_addu(&p, GPR_K1, GPR_K1, GPR_K0);
+ uasm_i_lw(&p, GPR_K0, 0, GPR_K1);
uasm_i_nop(&p); /* load delay */
- uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
- uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
+ uasm_i_mtc0(&p, GPR_K0, C0_ENTRYLO0);
+ uasm_i_mfc0(&p, GPR_K1, C0_EPC); /* cp0 delay */
uasm_i_tlbwr(&p); /* cp0 delay */
- uasm_i_jr(&p, K1);
+ uasm_i_jr(&p, GPR_K1);
uasm_i_rfe(&p); /* branch delay */
if (p > tlb_handler + 32)
@@ -545,10 +523,10 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
tlbw(p);
break;
+ case CPU_R4300:
case CPU_5KC:
case CPU_TX49XX:
case CPU_PR4450:
- case CPU_XLR:
uasm_i_nop(p);
tlbw(p);
break;
@@ -576,7 +554,7 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_R5500:
if (m4kc_tlbp_war())
uasm_i_nop(p);
- /* fall through */
+ fallthrough;
case CPU_ALCHEMY:
tlbw(p);
break;
@@ -589,25 +567,6 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
tlbw(p);
break;
- case CPU_VR4111:
- case CPU_VR4121:
- case CPU_VR4122:
- case CPU_VR4181:
- case CPU_VR4181A:
- uasm_i_nop(p);
- uasm_i_nop(p);
- tlbw(p);
- uasm_i_nop(p);
- uasm_i_nop(p);
- break;
-
- case CPU_VR4131:
- case CPU_VR4133:
- uasm_i_nop(p);
- uasm_i_nop(p);
- tlbw(p);
- break;
-
case CPU_XBURST:
tlbw(p);
uasm_i_nop(p);
@@ -629,7 +588,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
return;
}
- if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
+ if (cpu_has_rixi && _PAGE_NO_EXEC != 0) {
if (fill_includes_sw_bits) {
UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
} else {
@@ -811,7 +770,7 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
if (check_for_high_segbits) {
/*
- * The kernel currently implicitely assumes that the
+ * The kernel currently implicitly assumes that the
* MIPS SEGBITS parameter for the processor is
* (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
* allocate virtual addresses outside the maximum
@@ -821,7 +780,7 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* everything but the lower xuseg addresses goes down
* the module_alloc/vmalloc path.
*/
- uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
uasm_il_bnez(p, r, ptr, label_vmalloc);
} else {
uasm_il_bltz(p, r, tmp, label_vmalloc);
@@ -844,8 +803,8 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
/* Clear lower 23 bits of context. */
uasm_i_dins(p, ptr, 0, 0, 23);
- /* 1 0 1 0 1 << 6 xkphys cached */
- uasm_i_ori(p, ptr, ptr, 0x540);
+ /* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */
+ uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53));
uasm_i_drotr(p, ptr, ptr, 11);
#elif defined(CONFIG_SMP)
UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
@@ -998,22 +957,6 @@ static void build_adjust_context(u32 **p, unsigned int ctx)
unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
- switch (current_cpu_type()) {
- case CPU_VR41XX:
- case CPU_VR4111:
- case CPU_VR4121:
- case CPU_VR4122:
- case CPU_VR4131:
- case CPU_VR4181:
- case CPU_VR4181A:
- case CPU_VR4133:
- shift += 2;
- break;
-
- default:
- break;
- }
-
if (shift)
UASM_i_SRL(p, ctx, ctx, shift);
uasm_i_andi(p, ctx, ctx, mask);
@@ -1130,7 +1073,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
uasm_i_dsrl_safe(p, scratch, tmp,
- PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
uasm_il_bnez(p, r, scratch, label_vmalloc);
if (pgd_reg == -1) {
@@ -1160,8 +1103,9 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
if (pgd_reg == -1) {
vmalloc_branch_delay_filled = 1;
- /* 1 0 1 0 1 << 6 xkphys cached */
- uasm_i_ori(p, ptr, ptr, 0x540);
+ /* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */
+ uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53));
+
uasm_i_drotr(p, ptr, ptr, 11);
}
@@ -1313,11 +1257,11 @@ static void build_r4000_tlb_refill_handler(void)
memset(final_handler, 0, sizeof(final_handler));
if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {
- htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
+ htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, GPR_K0, GPR_K1,
scratch_reg);
vmalloc_mode = refill_scratch;
} else {
- htlb_info.huge_pte = K0;
+ htlb_info.huge_pte = GPR_K0;
htlb_info.restore_scratch = 0;
htlb_info.need_reload_pte = true;
vmalloc_mode = refill_noscratch;
@@ -1327,29 +1271,29 @@ static void build_r4000_tlb_refill_handler(void)
if (bcm1250_m3_war()) {
unsigned int segbits = 44;
- uasm_i_dmfc0(&p, K0, C0_BADVADDR);
- uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
- uasm_i_xor(&p, K0, K0, K1);
- uasm_i_dsrl_safe(&p, K1, K0, 62);
- uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
- uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
- uasm_i_or(&p, K0, K0, K1);
- uasm_il_bnez(&p, &r, K0, label_leave);
+ uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR);
+ uasm_i_dmfc0(&p, GPR_K1, C0_ENTRYHI);
+ uasm_i_xor(&p, GPR_K0, GPR_K0, GPR_K1);
+ uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0, 62);
+ uasm_i_dsrl_safe(&p, GPR_K0, GPR_K0, 12 + 1);
+ uasm_i_dsll_safe(&p, GPR_K0, GPR_K0, 64 + 12 + 1 - segbits);
+ uasm_i_or(&p, GPR_K0, GPR_K0, GPR_K1);
+ uasm_il_bnez(&p, &r, GPR_K0, label_leave);
/* No need for uasm_i_nop */
}
#ifdef CONFIG_64BIT
- build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
+ build_get_pmde64(&p, &l, &r, GPR_K0, GPR_K1); /* get pmd in GPR_K1 */
#else
- build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
+ build_get_pgde32(&p, GPR_K0, GPR_K1); /* get pgd in GPR_K1 */
#endif
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
- build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
+ build_is_huge_pte(&p, &r, GPR_K0, GPR_K1, label_tlb_huge_update);
#endif
- build_get_ptep(&p, K0, K1);
- build_update_entries(&p, K0, K1);
+ build_get_ptep(&p, GPR_K0, GPR_K1);
+ build_update_entries(&p, GPR_K0, GPR_K1);
build_tlb_write_entry(&p, &l, &r, tlb_random);
uasm_l_leave(&l, p);
uasm_i_eret(&p); /* return from trap */
@@ -1357,14 +1301,14 @@ static void build_r4000_tlb_refill_handler(void)
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
uasm_l_tlb_huge_update(&l, p);
if (htlb_info.need_reload_pte)
- UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
- build_huge_update_entries(&p, htlb_info.huge_pte, K1);
- build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
+ UASM_i_LW(&p, htlb_info.huge_pte, 0, GPR_K1);
+ build_huge_update_entries(&p, htlb_info.huge_pte, GPR_K1);
+ build_huge_tlb_write_entry(&p, &l, &r, GPR_K0, tlb_random,
htlb_info.restore_scratch);
#endif
#ifdef CONFIG_64BIT
- build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
+ build_get_pgd_vmalloc64(&p, &l, &r, GPR_K0, GPR_K1, vmalloc_mode);
#endif
/*
@@ -1377,6 +1321,7 @@ static void build_r4000_tlb_refill_handler(void)
switch (boot_cpu_type()) {
default:
if (sizeof(long) == 4) {
+ fallthrough;
case CPU_LOONGSON2EF:
/* Loongson2 ebase is different than r4k, we have more space */
if ((p - tlb_handler) > 64)
@@ -1480,6 +1425,7 @@ static void build_r4000_tlb_refill_handler(void)
static void setup_pw(void)
{
+ unsigned int pwctl;
unsigned long pgd_i, pgd_w;
#ifndef __PAGETABLE_PMD_FOLDED
unsigned long pmd_i, pmd_w;
@@ -1493,12 +1439,12 @@ static void setup_pw(void)
#endif
pgd_i = PGDIR_SHIFT; /* 1st level PGD */
#ifndef __PAGETABLE_PMD_FOLDED
- pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER;
+ pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_TABLE_ORDER;
pmd_i = PMD_SHIFT; /* 2nd level PMD */
pmd_w = PMD_SHIFT - PAGE_SHIFT;
#else
- pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER;
+ pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_TABLE_ORDER;
#endif
pt_i = PAGE_SHIFT; /* 3rd level PTE */
@@ -1506,6 +1452,7 @@ static void setup_pw(void)
pte_i = ilog2(_PAGE_GLOBAL);
pte_w = 0;
+ pwctl = 1 << 30; /* Set PWDirExt */
#ifndef __PAGETABLE_PMD_FOLDED
write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i);
@@ -1516,8 +1463,9 @@ static void setup_pw(void)
#endif
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
- write_c0_pwctl(1 << 6 | psn);
+ pwctl |= (1 << 6 | psn);
#endif
+ write_c0_pwctl(pwctl);
write_c0_kpgd((long)swapper_pg_dir);
kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
}
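[annotation] Note what the accumulator changes: PWCtl is now programmed unconditionally with PWDirExt (bit 30) set, and the huge-page bits (1 << 6 together with the pre-existing psn value) are OR-ed in only when CONFIG_MIPS_HUGE_TLB_SUPPORT is enabled, whereas previously the register was written only in the huge-page configuration. A minimal sketch of the resulting value, treating psn as an assumed example input:

    #include <stdio.h>

    int main(void)
    {
            unsigned int psn = 5;              /* example huge-page bit position (assumption) */
            unsigned int pwctl = 1u << 30;     /* PWDirExt, as set by the patch */

            /* Folded in only when huge TLB support is configured. */
            pwctl |= (1u << 6) | psn;

            printf("pwctl=0x%08x\n", pwctl);   /* 0x40000045 with these inputs */
            return 0;
    }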
@@ -1533,34 +1481,35 @@ static void build_loongson3_tlb_refill_handler(void)
memset(tlb_handler, 0, sizeof(tlb_handler));
if (check_for_high_segbits) {
- uasm_i_dmfc0(&p, K0, C0_BADVADDR);
- uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
- uasm_il_beqz(&p, &r, K1, label_vmalloc);
+ uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR);
+ uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0,
+ PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
+ uasm_il_beqz(&p, &r, GPR_K1, label_vmalloc);
uasm_i_nop(&p);
- uasm_il_bgez(&p, &r, K0, label_large_segbits_fault);
+ uasm_il_bgez(&p, &r, GPR_K0, label_large_segbits_fault);
uasm_i_nop(&p);
uasm_l_vmalloc(&l, p);
}
- uasm_i_dmfc0(&p, K1, C0_PGD);
+ uasm_i_dmfc0(&p, GPR_K1, C0_PGD);
- uasm_i_lddir(&p, K0, K1, 3); /* global page dir */
+ uasm_i_lddir(&p, GPR_K0, GPR_K1, 3); /* global page dir */
#ifndef __PAGETABLE_PMD_FOLDED
- uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */
+ uasm_i_lddir(&p, GPR_K1, GPR_K0, 1); /* middle page dir */
#endif
- uasm_i_ldpte(&p, K1, 0); /* even */
- uasm_i_ldpte(&p, K1, 1); /* odd */
+ uasm_i_ldpte(&p, GPR_K1, 0); /* even */
+ uasm_i_ldpte(&p, GPR_K1, 1); /* odd */
uasm_i_tlbwr(&p);
/* restore page mask */
if (PM_DEFAULT_MASK >> 16) {
- uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16);
- uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff);
- uasm_i_mtc0(&p, K0, C0_PAGEMASK);
+ uasm_i_lui(&p, GPR_K0, PM_DEFAULT_MASK >> 16);
+ uasm_i_ori(&p, GPR_K0, GPR_K0, PM_DEFAULT_MASK & 0xffff);
+ uasm_i_mtc0(&p, GPR_K0, C0_PAGEMASK);
} else if (PM_DEFAULT_MASK) {
- uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK);
- uasm_i_mtc0(&p, K0, C0_PAGEMASK);
+ uasm_i_ori(&p, GPR_K0, 0, PM_DEFAULT_MASK);
+ uasm_i_mtc0(&p, GPR_K0, C0_PAGEMASK);
} else {
uasm_i_mtc0(&p, 0, C0_PAGEMASK);
}
@@ -1569,8 +1518,8 @@ static void build_loongson3_tlb_refill_handler(void)
if (check_for_high_segbits) {
uasm_l_large_segbits_fault(&l, p);
- UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0);
- uasm_i_jr(&p, K1);
+ UASM_i_LA(&p, GPR_K1, (unsigned long)tlb_do_page_fault_0);
+ uasm_i_jr(&p, GPR_K1);
uasm_i_nop(&p);
}
@@ -1748,7 +1697,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
/*
* Check if PTE is present, if not then jump to LABEL. PTR points to
* the page table where this PTE is located, PTE will be re-loaded
- * with it's original value.
+ * with its original value.
*/
static void
build_pte_present(u32 **p, struct uasm_reloc **r,
@@ -1936,11 +1885,11 @@ static void build_r3000_tlb_load_handler(void)
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
- build_r3000_tlbchange_handler_head(&p, K0, K1);
- build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
+ build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1);
+ build_pte_present(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbl);
uasm_i_nop(&p); /* load delay */
- build_make_valid(&p, &r, K0, K1, -1);
- build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
+ build_make_valid(&p, &r, GPR_K0, GPR_K1, -1);
+ build_r3000_tlb_reload_write(&p, &l, &r, GPR_K0, GPR_K1);
uasm_l_nopage_tlbl(&l, p);
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
@@ -1966,11 +1915,11 @@ static void build_r3000_tlb_store_handler(void)
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
- build_r3000_tlbchange_handler_head(&p, K0, K1);
- build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
+ build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1);
+ build_pte_writable(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbs);
uasm_i_nop(&p); /* load delay */
- build_make_write(&p, &r, K0, K1, -1);
- build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
+ build_make_write(&p, &r, GPR_K0, GPR_K1, -1);
+ build_r3000_tlb_reload_write(&p, &l, &r, GPR_K0, GPR_K1);
uasm_l_nopage_tlbs(&l, p);
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
@@ -1996,11 +1945,11 @@ static void build_r3000_tlb_modify_handler(void)
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
- build_r3000_tlbchange_handler_head(&p, K0, K1);
- build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
+ build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1);
+ build_pte_modifiable(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbm);
uasm_i_nop(&p); /* load delay */
- build_make_write(&p, &r, K0, K1, -1);
- build_r3000_pte_reload_tlbwi(&p, K0, K1);
+ build_make_write(&p, &r, GPR_K0, GPR_K1, -1);
+ build_r3000_pte_reload_tlbwi(&p, GPR_K0, GPR_K1);
uasm_l_nopage_tlbm(&l, p);
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
@@ -2063,7 +2012,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
UASM_i_LW(p, wr.r2, 0, wr.r2);
- UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
+ UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT - PTE_T_LOG2);
uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
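[annotation] The dropped PTE_ORDER term evaluated to 0 in every configuration I am aware of, so the emitted shift amount is unchanged; the sequence still turns BadVAddr into the byte offset of the PTE within its table before adding it to the table pointer. Roughly, for a 64-bit kernel with 4 KiB pages and 8-byte PTEs (PTE_T_LOG2 = 3, PTRS_PER_PTE = 512), a standalone illustration under those assumptions:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed 64-bit, 4 KiB-page values. */
            unsigned int PAGE_SHIFT = 12, PTE_T_LOG2 = 3, PTRS_PER_PTE = 512;
            unsigned long long badvaddr = 0xffffffff80123456ULL;

            /* Mirrors the SRL/ANDI pair emitted above. */
            unsigned long long off = (badvaddr >> (PAGE_SHIFT - PTE_T_LOG2)) &
                                     ((unsigned long long)(PTRS_PER_PTE - 1) << PTE_T_LOG2);

            /* PTE index 0x123 -> byte offset 0x918 into the PTE table. */
            printf("offset=0x%llx\n", off);
            return 0;
    }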
@@ -2116,14 +2065,14 @@ static void build_r4000_tlb_load_handler(void)
if (bcm1250_m3_war()) {
unsigned int segbits = 44;
- uasm_i_dmfc0(&p, K0, C0_BADVADDR);
- uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
- uasm_i_xor(&p, K0, K0, K1);
- uasm_i_dsrl_safe(&p, K1, K0, 62);
- uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
- uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
- uasm_i_or(&p, K0, K0, K1);
- uasm_il_bnez(&p, &r, K0, label_leave);
+ uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR);
+ uasm_i_dmfc0(&p, GPR_K1, C0_ENTRYHI);
+ uasm_i_xor(&p, GPR_K0, GPR_K0, GPR_K1);
+ uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0, 62);
+ uasm_i_dsrl_safe(&p, GPR_K0, GPR_K0, 12 + 1);
+ uasm_i_dsll_safe(&p, GPR_K0, GPR_K0, 64 + 12 + 1 - segbits);
+ uasm_i_or(&p, GPR_K0, GPR_K0, GPR_K1);
+ uasm_il_bnez(&p, &r, GPR_K0, label_leave);
/* No need for uasm_i_nop */
}
@@ -2156,17 +2105,8 @@ static void build_r4000_tlb_load_handler(void)
uasm_i_tlbr(&p);
- switch (current_cpu_type()) {
- default:
- if (cpu_has_mips_r2_exec_hazard) {
- uasm_i_ehb(&p);
-
- case CPU_CAVIUM_OCTEON:
- case CPU_CAVIUM_OCTEON_PLUS:
- case CPU_CAVIUM_OCTEON2:
- break;
- }
- }
+ if (cpu_has_mips_r2_exec_hazard)
+ uasm_i_ehb(&p);
/* Examine entrylo 0 or 1 based on ptr. */
if (use_bbit_insns()) {
@@ -2231,17 +2171,8 @@ static void build_r4000_tlb_load_handler(void)
uasm_i_tlbr(&p);
- switch (current_cpu_type()) {
- default:
- if (cpu_has_mips_r2_exec_hazard) {
- uasm_i_ehb(&p);
-
- case CPU_CAVIUM_OCTEON:
- case CPU_CAVIUM_OCTEON_PLUS:
- case CPU_CAVIUM_OCTEON2:
- break;
- }
- }
+ if (cpu_has_mips_r2_exec_hazard)
+ uasm_i_ehb(&p);
/* Examine entrylo 0 or 1 based on ptr. */
if (use_bbit_insns()) {
@@ -2284,9 +2215,9 @@ static void build_r4000_tlb_load_handler(void)
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_0 & 1) {
- uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
- uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
- uasm_i_jr(&p, K0);
+ uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_0));
+ uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_0));
+ uasm_i_jr(&p, GPR_K0);
} else
#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
@@ -2340,9 +2271,9 @@ static void build_r4000_tlb_store_handler(void)
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_1 & 1) {
- uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
- uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
- uasm_i_jr(&p, K0);
+ uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+ uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+ uasm_i_jr(&p, GPR_K0);
} else
#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
@@ -2397,9 +2328,9 @@ static void build_r4000_tlb_modify_handler(void)
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_1 & 1) {
- uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
- uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
- uasm_i_jr(&p, K0);
+ uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+ uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+ uasm_i_jr(&p, GPR_K0);
} else
#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
@@ -2565,7 +2496,7 @@ static void check_pabits(void)
unsigned long entry;
unsigned pabits, fillbits;
- if (!cpu_has_rixi || !_PAGE_NO_EXEC) {
+ if (!cpu_has_rixi || _PAGE_NO_EXEC == 0) {
/*
* We'll only be making use of the fact that we can rotate bits
* into the fill if the CPU supports RIXI, so don't bother
@@ -2611,7 +2542,7 @@ void build_tlb_refill_handler(void)
check_pabits();
#ifdef CONFIG_64BIT
- check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
#endif
if (cpu_has_3kex) {
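[annotation] The compared quantity is how many virtual-address bits one top-level directory can translate: with 8-byte entries, a PGD of order PGD_TABLE_ORDER provides PAGE_SHIFT + PGD_TABLE_ORDER - 3 index bits, each entry covering 2^PGDIR_SHIFT bytes, so the sum is the reachable VA width; if the CPU's vmbits exceed it, the refill handlers must fault accesses above that range (check_for_high_segbits). A small sketch with purely illustrative, assumed values:

    #include <stdio.h>

    int main(void)
    {
            /* Illustrative values only (assumed, not taken from a real config). */
            unsigned int PAGE_SHIFT = 12, PGDIR_SHIFT = 30, PGD_TABLE_ORDER = 1;
            unsigned int vmbits = 48;

            unsigned int reachable = PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3;

            printf("pgd maps %u VA bits, high-segbits check %s\n",
                   reachable, vmbits > reachable ? "needed" : "not needed");
            return 0;
    }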
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 7154a1d99aad..e15c6700cd08 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -90,7 +90,7 @@ static const struct insn insn_table[insn_invalid] = {
RS | RT | RD},
[insn_dmtc0] = {M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
[insn_dmultu] = {M(spec_op, 0, 0, 0, 0, dmultu_op), RS | RT},
- [insn_dmulu] = {M(spec_op, 0, 0, 0, dmult_dmul_op, dmultu_op),
+ [insn_dmulu] = {M(spec_op, 0, 0, 0, dmultu_dmulu_op, dmultu_op),
RS | RT | RD},
[insn_drotr] = {M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE},
[insn_drotr32] = {M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE},
@@ -150,6 +150,8 @@ static const struct insn insn_table[insn_invalid] = {
[insn_mtlo] = {M(spec_op, 0, 0, 0, 0, mtlo_op), RS},
[insn_mulu] = {M(spec_op, 0, 0, 0, multu_mulu_op, multu_op),
RS | RT | RD},
+ [insn_muhu] = {M(spec_op, 0, 0, 0, multu_muhu_op, multu_op),
+ RS | RT | RD},
#ifndef CONFIG_CPU_MIPSR6
[insn_mul] = {M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
#else
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index c56f129c9a4b..125140979d62 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -59,7 +59,7 @@ enum opcode {
insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu, insn_ll, insn_lld,
insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi,
insn_mflo, insn_modu, insn_movn, insn_movz, insn_mtc0, insn_mthc0,
- insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_nor,
+ insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_muhu, insn_nor,
insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb, insn_sc,
insn_scd, insn_seleqz, insn_selnez, insn_sd, insn_sh, insn_sll,
insn_sllv, insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra,
@@ -344,6 +344,7 @@ I_u1(_mtlo)
I_u3u1u2(_mul)
I_u1u2(_multu)
I_u3u1u2(_mulu)
+I_u3u1u2(_muhu)
I_u3u1u2(_nor)
I_u3u1u2(_or)
I_u2u1u3(_ori)
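[annotation] Together with the insn_muhu table entry added in uasm-mips.c above, this macro should generate a uasm_i_muhu() emitter. On MIPS R6, muhu is the counterpart of mulu: it writes the high 32 bits of the unsigned 32x32 product to the destination register (mulu yields the low 32 bits), replacing the old multu/mfhi pairing. A hedged userspace model of that arithmetic, not the emitter itself:

    #include <stdio.h>
    #include <stdint.h>

    /* Models what "muhu rd, rs, rt" computes on MIPS R6:
     * the upper 32 bits of the unsigned 64-bit product. */
    static uint32_t muhu(uint32_t rs, uint32_t rt)
    {
            return (uint32_t)(((uint64_t)rs * rt) >> 32);
    }

    int main(void)
    {
            printf("0x%08x\n", muhu(0x80000000u, 0x00000004u)); /* prints 0x00000002 */
            return 0;
    }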
@@ -394,7 +395,7 @@ I_u2u1u3(_lddir)
void uasm_i_pref(u32 **buf, unsigned int a, signed int b,
unsigned int c)
{
- if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR && a <= 24 && a != 5)
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && a <= 24 && a != 5)
/*
* As per erratum Core-14449, replace prefetches 0-4,
* 6-24 with 'pref 28'.