Diffstat (limited to 'arch/mips/mm/c-r4k.c')
-rw-r--r--  arch/mips/mm/c-r4k.c  463
1 file changed, 105 insertions(+), 358 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index d0b64df51eb2..10413b6f6662 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitops.h>
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <asm/bcache.h>
#include <asm/bootinfo.h>
@@ -29,14 +30,11 @@
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
-#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
-#include <asm/dma-coherence.h>
#include <asm/mips-cps.h>
/*
@@ -112,28 +110,15 @@ static unsigned long dcache_size __read_mostly;
static unsigned long vcache_size __read_mostly;
static unsigned long scache_size __read_mostly;
-/*
- * Dummy cache handling routines for machines without boardcaches
- */
-static void cache_noop(void) {}
-
-static struct bcache_ops no_sc_ops = {
- .bc_enable = (void *)cache_noop,
- .bc_disable = (void *)cache_noop,
- .bc_wback_inv = (void *)cache_noop,
- .bc_inv = (void *)cache_noop
-};
-
-struct bcache_ops *bcops = &no_sc_ops;
-
#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
#define R4600_HIT_CACHEOP_WAR_IMPL \
do { \
- if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) \
+ if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && \
+ cpu_is_r4600_v2_x()) \
*(volatile unsigned long *)CKSEG1; \
- if (R4600_V1_HIT_CACHEOP_WAR) \
+ if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP)) \
__asm__ __volatile__("nop;nop;nop;nop"); \
} while (0)
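Note: the *_WAR macros this hunk replaces came from <asm/war.h>, whose include is dropped earlier in this diff; the new form relies on IS_ENABLED(), which expands to a constant 0 or 1 so the compiler discards the disabled branch entirely. A minimal sketch of the pattern, using a made-up CONFIG_WAR_EXAMPLE option:

#include <linux/kconfig.h>

/* Sketch only: CONFIG_WAR_EXAMPLE is hypothetical. */
static inline void apply_example_war(void)
{
	if (IS_ENABLED(CONFIG_WAR_EXAMPLE))	/* constant 0 or 1 */
		__asm__ __volatile__("nop;nop;nop;nop");
}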
@@ -202,24 +187,6 @@ static void r4k_blast_dcache_user_page_setup(void)
#endif
-static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
-
-static void r4k_blast_dcache_page_indexed_setup(void)
-{
- unsigned long dc_lsize = cpu_dcache_line_size();
-
- if (dc_lsize == 0)
- r4k_blast_dcache_page_indexed = (void *)cache_noop;
- else if (dc_lsize == 16)
- r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
- else if (dc_lsize == 32)
- r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
- else if (dc_lsize == 64)
- r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
- else if (dc_lsize == 128)
- r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed;
-}
-
void (* r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);
@@ -239,7 +206,7 @@ static void r4k_blast_dcache_setup(void)
r4k_blast_dcache = blast_dcache128;
}
-/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
+/* force code alignment (used for CONFIG_WAR_TX49XX_ICACHE_INDEX_INV) */
#define JUMP_TO_ALIGN(order) \
__asm__ __volatile__( \
"b\t1f\n\t" \
@@ -271,43 +238,14 @@ static inline void tx49_blast_icache32(void)
/* I'm in even chunk. blast odd chunks */
for (ws = 0; ws < ws_end; ws += ws_inc)
for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
- CACHE32_UNROLL32_ALIGN;
- /* I'm in odd chunk. blast even chunks */
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
-}
-
-static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- blast_icache32_page_indexed(page);
- local_irq_restore(flags);
-}
-
-static inline void tx49_blast_icache32_page_indexed(unsigned long page)
-{
- unsigned long indexmask = current_cpu_data.icache.waysize - 1;
- unsigned long start = INDEX_BASE + (page & indexmask);
- unsigned long end = start + PAGE_SIZE;
- unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
- unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
- unsigned long ws, addr;
-
- CACHE32_UNROLL32_ALIGN2;
- /* I'm in even chunk. blast odd chunks */
- for (ws = 0; ws < ws_end; ws += ws_inc)
- for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
CACHE32_UNROLL32_ALIGN;
/* I'm in odd chunk. blast even chunks */
for (ws = 0; ws < ws_end; ws += ws_inc)
for (addr = start; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
}
static void (* r4k_blast_icache_page)(unsigned long addr);
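Note: cache_unroll(32, kernel_cache, Index_Invalidate_I, addr | ws, 32) stands in for the old cache32_unroll32() helper: it emits 32 back-to-back MIPS "cache" instructions, one per 32-byte line, so each call covers a 0x400-byte chunk. A plain-C paraphrase of the effect (cache_op() is the single-op primitive from <asm/r4kcache.h>; the loop form is illustrative, the real macro is fully unrolled):

static inline void index_invalidate_chunk(unsigned long base)
{
	unsigned long off;

	for (off = 0; off < 32 * 32; off += 32)	/* 32 lines x 32 bytes */
		cache_op(Index_Invalidate_I, base + off);
}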
@@ -320,7 +258,7 @@ static void r4k_blast_icache_page_setup(void)
r4k_blast_icache_page = (void *)cache_noop;
else if (ic_lsize == 16)
r4k_blast_icache_page = blast_icache16_page;
- else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
+ else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2EF)
r4k_blast_icache_page = loongson2_blast_icache32_page;
else if (ic_lsize == 32)
r4k_blast_icache_page = blast_icache32_page;
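Note: this setup routine shows the dispatch scheme used throughout c-r4k.c: probe the cache line size once at boot and bind a function pointer, so the per-call path pays no branches. A condensed sketch of the same idea (the blast_* helpers are the real ones generated in <asm/r4kcache.h>):

static void (*blast_page)(unsigned long addr);

static void blast_page_setup(unsigned long lsize)
{
	if (lsize == 16)
		blast_page = blast_icache16_page;
	else if (lsize == 32)
		blast_page = blast_icache32_page;
	else if (lsize == 64)
		blast_page = blast_icache64_page;
	else
		blast_page = (void *)cache_noop;	/* no icache */
}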
@@ -352,33 +290,6 @@ static void r4k_blast_icache_user_page_setup(void)
#endif
-static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
-
-static void r4k_blast_icache_page_indexed_setup(void)
-{
- unsigned long ic_lsize = cpu_icache_line_size();
-
- if (ic_lsize == 0)
- r4k_blast_icache_page_indexed = (void *)cache_noop;
- else if (ic_lsize == 16)
- r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
- else if (ic_lsize == 32) {
- if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
- r4k_blast_icache_page_indexed =
- blast_icache32_r4600_v1_page_indexed;
- else if (TX49XX_ICACHE_INDEX_INV_WAR)
- r4k_blast_icache_page_indexed =
- tx49_blast_icache32_page_indexed;
- else if (current_cpu_type() == CPU_LOONGSON2)
- r4k_blast_icache_page_indexed =
- loongson2_blast_icache32_page_indexed;
- else
- r4k_blast_icache_page_indexed =
- blast_icache32_page_indexed;
- } else if (ic_lsize == 64)
- r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
-}
-
void (* r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);
@@ -391,11 +302,12 @@ static void r4k_blast_icache_setup(void)
else if (ic_lsize == 16)
r4k_blast_icache = blast_icache16;
else if (ic_lsize == 32) {
- if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
+ if (IS_ENABLED(CONFIG_WAR_R4600_V1_INDEX_ICACHEOP) &&
+ cpu_is_r4600_v1_x())
r4k_blast_icache = blast_r4600_v1_icache32;
- else if (TX49XX_ICACHE_INDEX_INV_WAR)
+ else if (IS_ENABLED(CONFIG_WAR_TX49XX_ICACHE_INDEX_INV))
r4k_blast_icache = tx49_blast_icache32;
- else if (current_cpu_type() == CPU_LOONGSON2)
+ else if (current_cpu_type() == CPU_LOONGSON2EF)
r4k_blast_icache = loongson2_blast_icache32;
else
r4k_blast_icache = blast_icache32;
@@ -423,24 +335,6 @@ static void r4k_blast_scache_page_setup(void)
r4k_blast_scache_page = blast_scache128_page;
}
-static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
-
-static void r4k_blast_scache_page_indexed_setup(void)
-{
- unsigned long sc_lsize = cpu_scache_line_size();
-
- if (scache_size == 0)
- r4k_blast_scache_page_indexed = (void *)cache_noop;
- else if (sc_lsize == 16)
- r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
- else if (sc_lsize == 32)
- r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
- else if (sc_lsize == 64)
- r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
- else if (sc_lsize == 128)
- r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
-}
-
static void (* r4k_blast_scache)(void);
static void r4k_blast_scache_setup(void)
@@ -465,7 +359,7 @@ static void r4k_blast_scache_node_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
- if (current_cpu_type() != CPU_LOONGSON3)
+ if (current_cpu_type() != CPU_LOONGSON64)
r4k_blast_scache_node = (void *)cache_noop;
else if (sc_lsize == 16)
r4k_blast_scache_node = blast_scache16_node;
@@ -480,7 +374,7 @@ static void r4k_blast_scache_node_setup(void)
static inline void local_r4k___flush_cache_all(void * args)
{
switch (current_cpu_type()) {
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400SC:
@@ -497,7 +391,7 @@ static inline void local_r4k___flush_cache_all(void * args)
r4k_blast_scache();
break;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
/* Use get_ebase_cpunum() for both NUMA=y/n */
r4k_blast_scache_node(get_ebase_cpunum() >> 2);
break;
@@ -540,6 +434,9 @@ static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type)
unsigned int i;
const cpumask_t *mask = cpu_present_mask;
+ if (cpu_has_mmid)
+ return cpu_context(0, mm) != 0;
+
/* cpu_sibling_map[] undeclared when !CONFIG_SMP */
#ifdef CONFIG_SMP
/*
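Note: with MMID the address-space ID belongs to the mm itself and is shared by every CPU, so one slot answers for all of them; the per-CPU scan below survives only for the classic per-CPU-ASID case. Hedged paraphrase of the new early exit:

/* cpu_context(0, mm) is canonical when cpu_has_mmid */
static inline bool mm_ever_ran(const struct mm_struct *mm)
{
	return cpu_context(0, mm) != 0;
}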
@@ -646,8 +543,6 @@ static inline void local_r4k_flush_cache_page(void *args)
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
int map_coherent = 0;
- pgd_t *pgdp;
- pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
void *vaddr;
@@ -660,10 +555,8 @@ static inline void local_r4k_flush_cache_page(void *args)
return;
addr &= PAGE_MASK;
- pgdp = pgd_offset(mm, addr);
- pudp = pud_offset(pgdp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset(pmdp, addr);
+ pmdp = pmd_off(mm, addr);
+ ptep = pte_offset_kernel(pmdp, addr);
/*
* If the page isn't marked valid, the page cannot possibly be
@@ -675,13 +568,14 @@ static inline void local_r4k_flush_cache_page(void *args)
if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
vaddr = NULL;
else {
+ struct folio *folio = page_folio(page);
/*
* Use kmap_coherent or kmap_atomic to do flushes for
* another ASID than the current one.
*/
map_coherent = (cpu_has_dc_aliases &&
- page_mapcount(page) &&
- !Page_dcache_dirty(page));
+ folio_mapped(folio) &&
+ !folio_test_dcache_dirty(folio));
if (map_coherent)
vaddr = kmap_coherent(page, addr);
else
@@ -697,10 +591,7 @@ static inline void local_r4k_flush_cache_page(void *args)
}
if (exec) {
if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
- int cpu = smp_processor_id();
-
- if (cpu_context(cpu, mm) != 0)
- drop_mmu_context(mm, cpu);
+ drop_mmu_context(mm);
} else
vaddr ? r4k_blast_icache_page(addr) :
r4k_blast_icache_user_page(addr);
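Note: two independent conversions meet in this function. The explicit pgd/pud/pmd walk collapses into pmd_off() from <linux/pgtable.h>, which performs the pgd -> p4d -> pud -> pmd descent internally, and the aliasing test moves from page_mapcount()/Page_dcache_dirty() to their folio equivalents without changing its meaning. The lookup, as a hedged sketch:

static pte_t *pte_lookup(struct mm_struct *mm, unsigned long addr)
{
	pmd_t *pmdp = pmd_off(mm, addr);	/* walks pgd/p4d/pud/pmd */

	return pte_offset_kernel(pmdp, addr);
}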
@@ -770,7 +661,7 @@ static inline void __local_r4k_flush_icache_range(unsigned long start,
r4k_blast_icache();
else {
switch (boot_cpu_type()) {
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
protected_loongson2_blast_icache_range(start, end);
break;
@@ -863,7 +754,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size) {
- if (current_cpu_type() != CPU_LOONGSON3)
+ if (current_cpu_type() != CPU_LOONGSON64)
r4k_blast_scache();
else
r4k_blast_scache_node(pa_to_nid(addr));
@@ -895,6 +786,31 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
__sync();
}
+static void prefetch_cache_inv(unsigned long addr, unsigned long size)
+{
+ unsigned int linesz = cpu_scache_line_size();
+ unsigned long addr0 = addr, addr1;
+
+ addr0 &= ~(linesz - 1);
+ addr1 = (addr0 + size - 1) & ~(linesz - 1);
+
+ protected_writeback_scache_line(addr0);
+ if (likely(addr1 != addr0))
+ protected_writeback_scache_line(addr1);
+ else
+ return;
+
+ addr0 += linesz;
+ if (likely(addr1 != addr0))
+ protected_writeback_scache_line(addr0);
+ else
+ return;
+
+ addr1 -= linesz;
+ if (likely(addr1 > addr0))
+ protected_writeback_scache_line(addr0);
+}
+
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
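Note: prefetch_cache_inv(), added above and called for CPU_BMIPS5000 below, writes back the cache lines straddling the buffer's edges before the range is invalidated: a hardware prefetcher may have pulled those partially owned lines in dirty, and invalidating them outright could destroy the neighbouring data they also hold. The core idea as a hedged sketch (wb_line() is a stand-in macro, not a kernel API):

#define wb_line(a) protected_writeback_scache_line(a)

static void writeback_partial_edges(unsigned long addr, unsigned long size,
				    unsigned long linesz)
{
	unsigned long first = addr & ~(linesz - 1);
	unsigned long last = (addr + size - 1) & ~(linesz - 1);

	wb_line(first);		/* may also hold bytes before the buffer */
	if (last != first)
		wb_line(last);	/* may also hold bytes after the buffer */
}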
@@ -902,9 +818,13 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
return;
preempt_disable();
+
+ if (current_cpu_type() == CPU_BMIPS5000)
+ prefetch_cache_inv(addr, size);
+
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size) {
- if (current_cpu_type() != CPU_LOONGSON3)
+ if (current_cpu_type() != CPU_LOONGSON64)
r4k_blast_scache();
else
r4k_blast_scache_node(pa_to_nid(addr));
@@ -937,119 +857,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
}
#endif /* CONFIG_DMA_NONCOHERENT */
-struct flush_cache_sigtramp_args {
- struct mm_struct *mm;
- struct page *page;
- unsigned long addr;
-};
-
-/*
- * While we're protected against bad userland addresses we don't care
- * very much about what happens in that case. Usually a segmentation
- * fault will dump the process later on anyway ...
- */
-static void local_r4k_flush_cache_sigtramp(void *args)
-{
- struct flush_cache_sigtramp_args *fcs_args = args;
- unsigned long addr = fcs_args->addr;
- struct page *page = fcs_args->page;
- struct mm_struct *mm = fcs_args->mm;
- int map_coherent = 0;
- void *vaddr;
-
- unsigned long ic_lsize = cpu_icache_line_size();
- unsigned long dc_lsize = cpu_dcache_line_size();
- unsigned long sc_lsize = cpu_scache_line_size();
-
- /*
- * If owns no valid ASID yet, cannot possibly have gotten
- * this page into the cache.
- */
- if (!has_valid_asid(mm, R4K_HIT))
- return;
-
- if (mm == current->active_mm) {
- vaddr = NULL;
- } else {
- /*
- * Use kmap_coherent or kmap_atomic to do flushes for
- * another ASID than the current one.
- */
- map_coherent = (cpu_has_dc_aliases &&
- page_mapcount(page) &&
- !Page_dcache_dirty(page));
- if (map_coherent)
- vaddr = kmap_coherent(page, addr);
- else
- vaddr = kmap_atomic(page);
- addr = (unsigned long)vaddr + (addr & ~PAGE_MASK);
- }
-
- R4600_HIT_CACHEOP_WAR_IMPL;
- if (!cpu_has_ic_fills_f_dc) {
- if (dc_lsize)
- vaddr ? flush_dcache_line(addr & ~(dc_lsize - 1))
- : protected_writeback_dcache_line(
- addr & ~(dc_lsize - 1));
- if (!cpu_icache_snoops_remote_store && scache_size)
- vaddr ? flush_scache_line(addr & ~(sc_lsize - 1))
- : protected_writeback_scache_line(
- addr & ~(sc_lsize - 1));
- }
- if (ic_lsize)
- vaddr ? flush_icache_line(addr & ~(ic_lsize - 1))
- : protected_flush_icache_line(addr & ~(ic_lsize - 1));
-
- if (vaddr) {
- if (map_coherent)
- kunmap_coherent();
- else
- kunmap_atomic(vaddr);
- }
-
- if (MIPS4K_ICACHE_REFILL_WAR) {
- __asm__ __volatile__ (
- ".set push\n\t"
- ".set noat\n\t"
- ".set "MIPS_ISA_LEVEL"\n\t"
-#ifdef CONFIG_32BIT
- "la $at,1f\n\t"
-#endif
-#ifdef CONFIG_64BIT
- "dla $at,1f\n\t"
-#endif
- "cache %0,($at)\n\t"
- "nop; nop; nop\n"
- "1:\n\t"
- ".set pop"
- :
- : "i" (Hit_Invalidate_I));
- }
- if (MIPS_CACHE_SYNC_WAR)
- __asm__ __volatile__ ("sync");
-}
-
-static void r4k_flush_cache_sigtramp(unsigned long addr)
-{
- struct flush_cache_sigtramp_args args;
- int npages;
-
- down_read(&current->mm->mmap_sem);
-
- npages = get_user_pages_fast(addr, 1, 0, &args.page);
- if (npages < 1)
- goto out;
-
- args.mm = current->mm;
- args.addr = addr;
-
- r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_sigtramp, &args);
-
- put_page(args.page);
-out:
- up_read(&current->mm->mmap_sem);
-}
-
static void r4k_flush_icache_all(void)
{
if (cpu_has_vtag_icache)
@@ -1127,7 +934,7 @@ static inline void rm7k_erratum31(void)
"cache\t%1, 0x3000(%0)\n\t"
".set pop\n"
:
- : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
+ : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill_I));
}
}
@@ -1151,12 +958,12 @@ static inline int alias_74k_erratum(struct cpuinfo_mips *c)
if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
present = 1;
if (rev == PRID_REV_ENCODE_332(2, 4, 0))
- write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+ write_c0_config6(read_c0_config6() | MTI_CONF6_SYND);
break;
case PRID_IMP_1074K:
if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
present = 1;
- write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+ write_c0_config6(read_c0_config6() | MTI_CONF6_SYND);
}
break;
default:
@@ -1211,7 +1018,6 @@ static void probe_pcache(void)
c->options |= MIPS_CPU_CACHE_CDEX_P;
break;
- case CPU_R5432:
case CPU_R5500:
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
@@ -1278,50 +1084,6 @@ static void probe_pcache(void)
c->options |= MIPS_CPU_PREFETCH;
break;
- case CPU_VR4133:
- write_c0_config(config & ~VR41_CONF_P4K);
- /* fall through */
- case CPU_VR4131:
- /* Workaround for cache instruction bug of VR4131 */
- if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
- c->processor_id == 0x0c82U) {
- config |= 0x00400000U;
- if (c->processor_id == 0x0c80U)
- config |= VR41_CONF_BP;
- write_c0_config(config);
- } else
- c->options |= MIPS_CPU_CACHE_CDEX_P;
-
- icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
- c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
- c->icache.ways = 2;
- c->icache.waybit = __ffs(icache_size/2);
-
- dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
- c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
- c->dcache.ways = 2;
- c->dcache.waybit = __ffs(dcache_size/2);
- break;
-
- case CPU_VR41XX:
- case CPU_VR4111:
- case CPU_VR4121:
- case CPU_VR4122:
- case CPU_VR4181:
- case CPU_VR4181A:
- icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
- c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
- c->icache.ways = 1;
- c->icache.waybit = 0; /* doesn't matter */
-
- dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
- c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
- c->dcache.ways = 1;
- c->dcache.waybit = 0; /* does not matter */
-
- c->options |= MIPS_CPU_CACHE_CDEX_P;
- break;
-
case CPU_RM7000:
rm7k_erratum31();
@@ -1339,7 +1101,7 @@ static void probe_pcache(void)
c->options |= MIPS_CPU_PREFETCH;
break;
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
if (prid & 0x3)
@@ -1357,7 +1119,7 @@ static void probe_pcache(void)
c->dcache.waybit = 0;
break;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
config1 = read_c0_config1();
lsize = (config1 >> 19) & 7;
if (lsize)
@@ -1382,7 +1144,9 @@ static void probe_pcache(void)
c->dcache.ways *
c->dcache.linesz;
c->dcache.waybit = 0;
- if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0)
+ if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
+ (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
+ (c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
c->options |= MIPS_CPU_PREFETCH;
break;
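Note: the old test compared only the PRID revision field, which is no longer unambiguous now that Loongson-2K (PRID_IMP_LOONGSON_64R) parts share this path; the new test keys on the implementation and revision fields together. Hedged paraphrase (constants from <asm/cpu.h>):

static bool loongson_has_prefetch(unsigned int prid)
{
	if ((prid & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
		return true;	/* every Loongson-2K */
	/* Loongson-3A R2.0 and anything above it in the 64C numbering */
	return (prid & (PRID_IMP_MASK | PRID_REV_MASK)) >=
	       (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0);
}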
@@ -1491,7 +1255,6 @@ static void probe_pcache(void)
case CPU_I6500:
case CPU_SB1:
case CPU_SB1A:
- case CPU_XLR:
c->dcache.flags |= MIPS_CACHE_PINDEX;
break;
@@ -1504,7 +1267,7 @@ static void probe_pcache(void)
case CPU_74K:
case CPU_1074K:
has_74k_erratum = alias_74k_erratum(c);
- /* Fall through. */
+ fallthrough;
case CPU_M14KC:
case CPU_M14KEC:
case CPU_24K:
@@ -1528,7 +1291,7 @@ static void probe_pcache(void)
c->dcache.flags |= MIPS_CACHE_PINDEX;
break;
}
- /* fall through */
+ fallthrough;
default:
if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
c->dcache.flags |= MIPS_CACHE_ALIASES;
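Note: a dcache alias is possible whenever one way of the cache is larger than a page, because the set index then depends on virtual-address bits above the page offset. Worked example with illustrative numbers:

/* 32 KiB dcache, 4 ways -> waysize = 32 KiB / 4 = 8 KiB.  With 4 KiB
 * pages, VA bit 12 helps select the set, so one physical page mapped at
 * two virtual addresses can land in two different sets: that is what
 * MIPS_CACHE_ALIASES records. */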
@@ -1567,7 +1330,7 @@ static void probe_pcache(void)
c->dcache.flags &= ~MIPS_CACHE_ALIASES;
break;
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
/*
* LOONGSON2 has 4 way icache, but when using indexed cache op,
* one op will act on all 4 ways
@@ -1575,17 +1338,17 @@ static void probe_pcache(void)
c->icache.ways = 1;
}
- printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
- icache_size >> 10,
- c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
- way_string[c->icache.ways], c->icache.linesz);
+ pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
+ icache_size >> 10,
+ c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
+ way_string[c->icache.ways], c->icache.linesz);
- printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
- dcache_size >> 10, way_string[c->dcache.ways],
- (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
- (c->dcache.flags & MIPS_CACHE_ALIASES) ?
+ pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
+ dcache_size >> 10, way_string[c->dcache.ways],
+ (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
+ (c->dcache.flags & MIPS_CACHE_ALIASES) ?
"cache aliases" : "no aliases",
- c->dcache.linesz);
+ c->dcache.linesz);
}
static void probe_vcache(void)
@@ -1593,7 +1356,7 @@ static void probe_vcache(void)
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config2, lsize;
- if (current_cpu_type() != CPU_LOONGSON3)
+ if (current_cpu_type() != CPU_LOONGSON64)
return;
config2 = read_c0_config2();
@@ -1675,7 +1438,7 @@ static int probe_scache(void)
return 1;
}
-static void __init loongson2_sc_init(void)
+static void loongson2_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -1691,7 +1454,7 @@ static void __init loongson2_sc_init(void)
c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
-static void __init loongson3_sc_init(void)
+static void loongson3_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config2, lsize;
@@ -1705,11 +1468,14 @@ static void __init loongson3_sc_init(void)
c->scache.sets = 64 << ((config2 >> 8) & 15);
c->scache.ways = 1 + (config2 & 15);
- scache_size = c->scache.sets *
- c->scache.ways *
- c->scache.linesz;
- /* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
- scache_size *= 4;
+ /* Loongson-3 has 4-Scache banks, while Loongson-2K have only 2 banks */
+ if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
+ c->scache.sets *= 2;
+ else
+ c->scache.sets *= 4;
+
+ scache_size = c->scache.sets * c->scache.ways * c->scache.linesz;
+
c->scache.waybit = 0;
c->scache.waysize = scache_size / c->scache.ways;
pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
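Note: the rewritten sizing scales the per-bank set count by the number of scache banks before computing the total, rather than multiplying the final size by a hard-coded 4. Worked example with illustrative Config2-derived values (not read from real hardware):

/* linesz = 64, per-bank sets = 256, ways = 16
 * Loongson-3  (4 banks): sets = 1024; scache_size = 1024 * 16 * 64 = 1 MiB
 * Loongson-2K (2 banks): sets =  512; scache_size =  512 * 16 * 64 = 512 KiB
 */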
@@ -1719,10 +1485,6 @@ static void __init loongson3_sc_init(void)
return;
}
-extern int r5k_sc_init(void);
-extern int rm7k_sc_init(void);
-extern int mips_sc_init(void);
-
static void setup_scache(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -1768,30 +1530,34 @@ static void setup_scache(void)
#endif
return;
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
loongson2_sc_init();
return;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
loongson3_sc_init();
return;
case CPU_CAVIUM_OCTEON3:
- case CPU_XLP:
/* don't need to worry about L2, fully coherent */
return;
default:
- if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
- MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
- MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) {
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
if (mips_sc_init ()) {
scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
scache_size >> 10,
way_string[c->scache.ways], c->scache.linesz);
+
+ if (current_cpu_type() == CPU_BMIPS5000)
+ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
+
#else
if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
@@ -1884,7 +1650,7 @@ static void coherency_setup(void)
/*
* c0_status.cu=0 specifies that updates by the sc instruction use
- * the coherency mode specified by the TLB; 1 means cachable
+ * the coherency mode specified by the TLB; 1 means cacheable
* coherent update on write will be used. Not all processors have
* this bit and; some wire it to zero, others like Toshiba had the
* silly idea of putting something else there ...
@@ -1941,13 +1707,10 @@ void r4k_cache_init(void)
setup_scache();
r4k_blast_dcache_page_setup();
- r4k_blast_dcache_page_indexed_setup();
r4k_blast_dcache_setup();
r4k_blast_icache_page_setup();
- r4k_blast_icache_page_indexed_setup();
r4k_blast_icache_setup();
r4k_blast_scache_page_setup();
- r4k_blast_scache_page_indexed_setup();
r4k_blast_scache_setup();
r4k_blast_scache_node_setup();
#ifdef CONFIG_EVA
@@ -1978,9 +1741,7 @@ void r4k_cache_init(void)
__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
- flush_cache_sigtramp = r4k_flush_cache_sigtramp;
flush_icache_all = r4k_flush_icache_all;
- local_flush_data_cache_page = local_r4k_flush_data_cache_page;
flush_data_cache_page = r4k_flush_data_cache_page;
flush_icache_range = r4k_flush_icache_range;
local_flush_icache_range = local_r4k_flush_icache_range;
@@ -1988,19 +1749,9 @@ void r4k_cache_init(void)
__local_flush_icache_user_range = local_r4k_flush_icache_user_range;
#ifdef CONFIG_DMA_NONCOHERENT
-#ifdef CONFIG_DMA_MAYBE_COHERENT
- if (coherentio == IO_COHERENCE_ENABLED ||
- (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio)) {
- _dma_cache_wback_inv = (void *)cache_noop;
- _dma_cache_wback = (void *)cache_noop;
- _dma_cache_inv = (void *)cache_noop;
- } else
-#endif /* CONFIG_DMA_MAYBE_COHERENT */
- {
- _dma_cache_wback_inv = r4k_dma_cache_wback_inv;
- _dma_cache_wback = r4k_dma_cache_wback_inv;
- _dma_cache_inv = r4k_dma_cache_inv;
- }
+ _dma_cache_wback_inv = r4k_dma_cache_wback_inv;
+ _dma_cache_wback = r4k_dma_cache_wback_inv;
+ _dma_cache_inv = r4k_dma_cache_inv;
#endif /* CONFIG_DMA_NONCOHERENT */
build_clear_page();
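Note: the coherentio/hw_coherentio runtime switch disappears because coherence is now decided per device by the generic DMA layer (hence the new <linux/dma-map-ops.h> include for dma_default_coherent at the top of this diff); the arch hooks are installed unconditionally and are simply never reached for coherent devices. A hedged sketch of that division of labour (dev_is_dma_coherent() is the real generic helper; its use here is illustrative, not a quote of the generic code):

#include <linux/dma-map-ops.h>

static void sync_for_device_sketch(struct device *dev, unsigned long addr,
				   unsigned long size)
{
	if (dev_is_dma_coherent(dev))
		return;				/* generic layer skips MIPS hooks */
	_dma_cache_wback_inv(addr, size);	/* pointer installed just above */
}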
@@ -2033,8 +1784,6 @@ void r4k_cache_init(void)
/* I$ fills from D$ just by emptying the write buffers */
flush_cache_page = (void *)b5k_instruction_hazard;
flush_cache_range = (void *)b5k_instruction_hazard;
- flush_cache_sigtramp = (void *)b5k_instruction_hazard;
- local_flush_data_cache_page = (void *)b5k_instruction_hazard;
flush_data_cache_page = (void *)b5k_instruction_hazard;
flush_icache_range = (void *)b5k_instruction_hazard;
local_flush_icache_range = (void *)b5k_instruction_hazard;
@@ -2043,7 +1792,7 @@ void r4k_cache_init(void)
/* Optimization: an L2 flush implicitly flushes the L1 */
current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
break;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
/* Loongson-3 maintains cache coherency by hardware */
__flush_cache_all = cache_noop;
__flush_cache_vmap = cache_noop;
@@ -2052,10 +1801,8 @@ void r4k_cache_init(void)
flush_cache_mm = (void *)cache_noop;
flush_cache_page = (void *)cache_noop;
flush_cache_range = (void *)cache_noop;
- flush_cache_sigtramp = (void *)cache_noop;
flush_icache_all = (void *)cache_noop;
flush_data_cache_page = (void *)cache_noop;
- local_flush_data_cache_page = (void *)cache_noop;
break;
}
}
@@ -2077,7 +1824,7 @@ static struct notifier_block r4k_cache_pm_notifier_block = {
.notifier_call = r4k_cache_pm_notifier,
};
-int __init r4k_cache_init_pm(void)
+static int __init r4k_cache_init_pm(void)
{
return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
}