Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/c-r3k.c             4
-rw-r--r--  arch/mips/mm/c-r4k.c            51
-rw-r--r--  arch/mips/mm/c-tx39.c            4
-rw-r--r--  arch/mips/mm/dma-noncoherent.c  18
-rw-r--r--  arch/mips/mm/fault.c            12
-rw-r--r--  arch/mips/mm/hugetlbpage.c      14
-rw-r--r--  arch/mips/mm/init.c              6
-rw-r--r--  arch/mips/mm/ioremap.c           6
-rw-r--r--  arch/mips/mm/page.c              2
-rw-r--r--  arch/mips/mm/pgtable-32.c        6
-rw-r--r--  arch/mips/mm/tlb-r4k.c           8
-rw-r--r--  arch/mips/mm/tlbex.c            29
12 files changed, 96 insertions(+), 64 deletions(-)
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 0ca401ddf3b7..15bb8cf59828 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -241,6 +241,7 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -253,7 +254,8 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
return;
pgdp = pgd_offset(mm, addr);
- pudp = pud_offset(pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ pudp = pud_offset(p4dp, addr);
pmdp = pmd_offset(pudp, addr);
ptep = pte_offset(pmdp, addr);
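
The same lookup-side conversion repeats in c-r4k.c, c-tx39.c, and tlb-r4k.c below. For reference, the full walk after the change looks roughly like this (a minimal sketch; walk_to_pte is a hypothetical helper, not a function from the patch):

	/*
	 * Sketch of the five-level lookup these hunks converge on. On MIPS
	 * the p4d level is folded, so p4d_offset() merely retypes the pgd
	 * entry; the extra step exists for API uniformity across arches.
	 */
	static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgdp = pgd_offset(mm, addr);
		p4d_t *p4dp = p4d_offset(pgdp, addr);
		pud_t *pudp = pud_offset(p4dp, addr);
		pmd_t *pmdp = pmd_offset(pudp, addr);

		return pte_offset(pmdp, addr);	/* callers check *_present() first */
	}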
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 89b9c851d822..5f3d0103b95d 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -271,12 +271,14 @@ static inline void tx49_blast_icache32(void)
/* I'm in even chunk. blast odd chunks */
for (ws = 0; ws < ws_end; ws += ws_inc)
for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
CACHE32_UNROLL32_ALIGN;
/* I'm in odd chunk. blast even chunks */
for (ws = 0; ws < ws_end; ws += ws_inc)
for (addr = start; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
}
static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
@@ -302,12 +304,14 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
/* I'm in even chunk. blast odd chunks */
for (ws = 0; ws < ws_end; ws += ws_inc)
for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
CACHE32_UNROLL32_ALIGN;
/* I'm in odd chunk. blast even chunks */
for (ws = 0; ws < ws_end; ws += ws_inc)
for (addr = start; addr < end; addr += 0x400 * 2)
- cache32_unroll32(addr|ws, Index_Invalidate_I);
+ cache_unroll(32, kernel_cache, Index_Invalidate_I,
+ addr | ws, 32);
}
static void (* r4k_blast_icache_page)(unsigned long addr);
@@ -320,7 +324,7 @@ static void r4k_blast_icache_page_setup(void)
r4k_blast_icache_page = (void *)cache_noop;
else if (ic_lsize == 16)
r4k_blast_icache_page = blast_icache16_page;
- else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
+ else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2EF)
r4k_blast_icache_page = loongson2_blast_icache32_page;
else if (ic_lsize == 32)
r4k_blast_icache_page = blast_icache32_page;
@@ -369,7 +373,7 @@ static void r4k_blast_icache_page_indexed_setup(void)
else if (TX49XX_ICACHE_INDEX_INV_WAR)
r4k_blast_icache_page_indexed =
tx49_blast_icache32_page_indexed;
- else if (current_cpu_type() == CPU_LOONGSON2)
+ else if (current_cpu_type() == CPU_LOONGSON2EF)
r4k_blast_icache_page_indexed =
loongson2_blast_icache32_page_indexed;
else
@@ -395,7 +399,7 @@ static void r4k_blast_icache_setup(void)
r4k_blast_icache = blast_r4600_v1_icache32;
else if (TX49XX_ICACHE_INDEX_INV_WAR)
r4k_blast_icache = tx49_blast_icache32;
- else if (current_cpu_type() == CPU_LOONGSON2)
+ else if (current_cpu_type() == CPU_LOONGSON2EF)
r4k_blast_icache = loongson2_blast_icache32;
else
r4k_blast_icache = blast_icache32;
@@ -465,7 +469,7 @@ static void r4k_blast_scache_node_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
- if (current_cpu_type() != CPU_LOONGSON3)
+ if (current_cpu_type() != CPU_LOONGSON64)
r4k_blast_scache_node = (void *)cache_noop;
else if (sc_lsize == 16)
r4k_blast_scache_node = blast_scache16_node;
@@ -480,7 +484,7 @@ static void r4k_blast_scache_node_setup(void)
static inline void local_r4k___flush_cache_all(void * args)
{
switch (current_cpu_type()) {
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400SC:
@@ -497,7 +501,7 @@ static inline void local_r4k___flush_cache_all(void * args)
r4k_blast_scache();
break;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
/* Use get_ebase_cpunum() for both NUMA=y/n */
r4k_blast_scache_node(get_ebase_cpunum() >> 2);
break;
@@ -650,6 +654,7 @@ static inline void local_r4k_flush_cache_page(void *args)
struct mm_struct *mm = vma->vm_mm;
int map_coherent = 0;
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -664,7 +669,8 @@ static inline void local_r4k_flush_cache_page(void *args)
addr &= PAGE_MASK;
pgdp = pgd_offset(mm, addr);
- pudp = pud_offset(pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ pudp = pud_offset(p4dp, addr);
pmdp = pmd_offset(pudp, addr);
ptep = pte_offset(pmdp, addr);
@@ -770,7 +776,7 @@ static inline void __local_r4k_flush_icache_range(unsigned long start,
r4k_blast_icache();
else {
switch (boot_cpu_type()) {
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
protected_loongson2_blast_icache_range(start, end);
break;
@@ -863,7 +869,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size) {
- if (current_cpu_type() != CPU_LOONGSON3)
+ if (current_cpu_type() != CPU_LOONGSON64)
r4k_blast_scache();
else
r4k_blast_scache_node(pa_to_nid(addr));
@@ -904,7 +910,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size) {
- if (current_cpu_type() != CPU_LOONGSON3)
+ if (current_cpu_type() != CPU_LOONGSON64)
r4k_blast_scache();
else
r4k_blast_scache_node(pa_to_nid(addr));
@@ -1224,7 +1230,7 @@ static void probe_pcache(void)
c->options |= MIPS_CPU_PREFETCH;
break;
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
if (prid & 0x3)
@@ -1242,7 +1248,7 @@ static void probe_pcache(void)
c->dcache.waybit = 0;
break;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
config1 = read_c0_config1();
lsize = (config1 >> 19) & 7;
if (lsize)
@@ -1267,7 +1273,8 @@ static void probe_pcache(void)
c->dcache.ways *
c->dcache.linesz;
c->dcache.waybit = 0;
- if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0)
+ if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
+ (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0))
c->options |= MIPS_CPU_PREFETCH;
break;
@@ -1452,7 +1459,7 @@ static void probe_pcache(void)
c->dcache.flags &= ~MIPS_CACHE_ALIASES;
break;
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
/*
* LOONGSON2 has 4 way icache, but when using indexed cache op,
* one op will act on all 4 ways
@@ -1478,7 +1485,7 @@ static void probe_vcache(void)
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config2, lsize;
- if (current_cpu_type() != CPU_LOONGSON3)
+ if (current_cpu_type() != CPU_LOONGSON64)
return;
config2 = read_c0_config2();
@@ -1653,11 +1660,11 @@ static void setup_scache(void)
#endif
return;
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
loongson2_sc_init();
return;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
loongson3_sc_init();
return;
@@ -1926,7 +1933,7 @@ void r4k_cache_init(void)
/* Optimization: an L2 flush implicitly flushes the L1 */
current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
break;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
/* Loongson-3 maintains cache coherency by hardware */
__flush_cache_all = cache_noop;
__flush_cache_vmap = cache_noop;
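
The cache32_unroll32() call sites above move to a parameterized cache_unroll(times, insn, op, addr, lsize) helper, so one definition serves every cache geometry instead of one macro per line size. As a loop it would behave roughly like this (illustrative only; blast_32_lines is a hypothetical name, and the real helper expands to straight-line `cache` instructions rather than looping):

	/*
	 * Loop equivalent of
	 *	cache_unroll(32, kernel_cache, Index_Invalidate_I, addr, 32);
	 * i.e. index-invalidate 32 consecutive 32-byte I-cache lines.
	 */
	static inline void blast_32_lines(unsigned long addr)
	{
		unsigned long line;

		for (line = addr; line < addr + 32 * 32; line += 32)
			cache_op(Index_Invalidate_I, line);
	}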
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index b7c8a9d79c35..686867270627 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -170,6 +170,7 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -183,7 +184,8 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
- pudp = pud_offset(pgdp, page);
+ p4dp = p4d_offset(pgdp, page);
+ pudp = pud_offset(p4dp, page);
pmdp = pmd_offset(pudp, page);
ptep = pte_offset(pmdp, page);
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 1d4d57dd9acf..dc42ffc83825 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -27,7 +27,7 @@
* R10000 and R12000 are used in such systems, the SGI IP28 Indigo² rsp.
* SGI IP32 aka O2.
*/
-static inline bool cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(void)
{
switch (boot_cpu_type()) {
case CPU_R10000:
@@ -59,12 +59,6 @@ void *cached_kernel_address(void *addr)
return __va(addr) - UNCAC_BASE;
}
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
- dma_addr_t dma_addr)
-{
- return page_to_pfn(virt_to_page(cached_kernel_address(cpu_addr)));
-}
-
static inline void dma_sync_virt(void *addr, size_t size,
enum dma_data_direction dir)
{
@@ -118,17 +112,17 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
} while (left);
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
dma_sync_phys(paddr, size, dir);
}
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
- if (cpu_needs_post_dma_flush(dev))
+ if (cpu_needs_post_dma_flush())
dma_sync_phys(paddr, size, dir);
}
#endif
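
Both hooks lose their struct device argument because neither ever used it; whether to flush depends only on the CPU type and the physical range. A condensed sketch of how the generic DMA-direct code ends up calling the hook (sync_for_cpu_sketch is hypothetical, and swiotlb/error handling is omitted):

	static void sync_for_cpu_sketch(struct device *dev, dma_addr_t addr,
					size_t size, enum dma_data_direction dir)
	{
		phys_addr_t paddr = dma_to_phys(dev, addr);

		/* Non-coherent device: flush again so the CPU does not read
		 * stale lines speculatively refilled during the transfer. */
		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, size, dir);
	}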
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index f589aa8f47d9..1e8d00793784 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -292,8 +292,9 @@ vmalloc_fault:
* Do _not_ use "tsk" here. We might be inside
* an interrupt in the middle of a task switch..
*/
- int offset = __pgd_offset(address);
+ int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
@@ -305,8 +306,13 @@ vmalloc_fault:
goto no_context;
set_pgd(pgd, *pgd_k);
- pud = pud_offset(pgd, address);
- pud_k = pud_offset(pgd_k, address);
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d_k))
+ goto no_context;
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud_k))
goto no_context;
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index cef152234312..77ffece9c270 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -25,11 +25,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
unsigned long sz)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
- pud = pud_alloc(mm, pgd, addr);
+ p4d = p4d_alloc(mm, pgd, addr);
+ pud = pud_alloc(mm, p4d, addr);
if (pud)
pte = (pte_t *)pmd_alloc(mm, pud, addr);
@@ -40,14 +42,18 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
unsigned long sz)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd = NULL;
pgd = pgd_offset(mm, addr);
if (pgd_present(*pgd)) {
- pud = pud_offset(pgd, addr);
- if (pud_present(*pud))
- pmd = pmd_offset(pud, addr);
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_present(*p4d)) {
+ pud = pud_offset(p4d, addr);
+ if (pud_present(*pud))
+ pmd = pmd_offset(pud, addr);
+ }
}
return (pte_t *) pmd;
}
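
On MIPS the p4d level is folded via <asm-generic/pgtable-nop4d.h>, so the p4d_alloc()/p4d_offset() calls added here and in ioremap.c compile down to nothing. Roughly (paraphrased from the generic header, so treat the exact form as an assumption):

	/* With no real p4d level, the "p4d entry" is the pgd entry viewed
	 * through a different type; p4d_alloc() reduces to the same retype
	 * and can never fail.
	 */
	static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
	{
		return (p4d_t *)pgd;
	}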
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 090fa653dfa9..50f9ed8c6c1b 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -239,9 +239,9 @@ void __init fixrange_init(unsigned long start, unsigned long end,
unsigned long vaddr;
vaddr = start;
- i = __pgd_offset(vaddr);
- j = __pud_offset(vaddr);
- k = __pmd_offset(vaddr);
+ i = pgd_index(vaddr);
+ j = pud_index(vaddr);
+ k = pmd_index(vaddr);
pgd = pgd_base + i;
for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 1601d90b087b..8317f337a86e 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -78,11 +78,15 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
flush_cache_all();
BUG_ON(address >= end);
do {
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
error = -ENOMEM;
- pud = pud_alloc(&init_mm, dir, address);
+ p4d = p4d_alloc(&init_mm, dir, address);
+ if (!p4d)
+ break;
+ pud = pud_alloc(&init_mm, p4d, address);
if (!pud)
break;
pmd = pmd_alloc(&init_mm, pud, address);
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 56e4f8bffd4c..c5578897a4fa 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -187,7 +187,7 @@ static void set_prefetch_parameters(void)
}
break;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
/* Loongson-3 only support the Pref_Load/Pref_Store. */
pref_bias_clear_store = 128;
pref_bias_copy_load = 128;
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index 6416a531a4c3..37c7a01427d2 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -56,6 +56,7 @@ void __init pagetable_init(void)
pgd_t *pgd_base;
#ifdef CONFIG_HIGHMEM
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -81,8 +82,9 @@ void __init pagetable_init(void)
vaddr = PKMAP_BASE;
fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
- pgd = swapper_pg_dir + __pgd_offset(vaddr);
- pud = pud_offset(pgd, vaddr);
+ pgd = swapper_pg_dir + pgd_index(vaddr);
+ p4d = p4d_offset(pgd, vaddr);
+ pud = pud_offset(p4d, vaddr);
pmd = pmd_offset(pud, vaddr);
pte = pte_offset_kernel(pmd, vaddr);
pkmap_page_table = pte;
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index c13e46ced425..d7a9d5f211f0 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -35,10 +35,10 @@ extern void build_tlb_refill_handler(void);
static inline void flush_micro_tlb(void)
{
switch (current_cpu_type()) {
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
write_c0_diag(LOONGSON_DIAG_ITLB);
break;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
break;
default:
@@ -295,6 +295,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
unsigned long flags;
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -320,7 +321,8 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
mtc0_tlbw_hazard();
tlb_probe();
tlb_probe_hazard();
- pudp = pud_offset(pgdp, address);
+ p4dp = p4d_offset(pgdp, address);
+ pudp = pud_offset(p4dp, address);
pmdp = pmd_offset(pudp, address);
idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index e01cb33bfa1a..344e6e9ea43b 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -571,8 +571,8 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_BMIPS4350:
case CPU_BMIPS4380:
case CPU_BMIPS5000:
- case CPU_LOONGSON2:
- case CPU_LOONGSON3:
+ case CPU_LOONGSON2EF:
+ case CPU_LOONGSON64:
case CPU_R5500:
if (m4kc_tlbp_war())
uasm_i_nop(p);
@@ -653,6 +653,13 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
int restore_scratch)
{
if (restore_scratch) {
+ /*
+ * Ensure the MFC0 below observes the value written to the
+ * KScratch register by the prior MTC0.
+ */
+ if (scratch_reg >= 0)
+ uasm_i_ehb(p);
+
/* Reset default page size */
if (PM_DEFAULT_MASK >> 16) {
uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
@@ -667,12 +674,10 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
uasm_i_mtc0(p, 0, C0_PAGEMASK);
uasm_il_b(p, r, lid);
}
- if (scratch_reg >= 0) {
- uasm_i_ehb(p);
+ if (scratch_reg >= 0)
UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
- } else {
+ else
UASM_i_LW(p, 1, scratchpad_offset(0), 0);
- }
} else {
/* Reset default page size */
if (PM_DEFAULT_MASK >> 16) {
@@ -921,6 +926,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
}
if (mode != not_refill && check_for_high_segbits) {
uasm_l_large_segbits_fault(l, *p);
+
+ if (mode == refill_scratch && scratch_reg >= 0)
+ uasm_i_ehb(p);
+
/*
* We get here if we are an xsseg address, or if we are
* an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
@@ -939,12 +948,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
uasm_i_jr(p, ptr);
if (mode == refill_scratch) {
- if (scratch_reg >= 0) {
- uasm_i_ehb(p);
+ if (scratch_reg >= 0)
UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
- } else {
+ else
UASM_i_LW(p, 1, scratchpad_offset(0), 0);
- }
} else {
uasm_i_nop(p);
}
@@ -1370,7 +1377,7 @@ static void build_r4000_tlb_refill_handler(void)
switch (boot_cpu_type()) {
default:
if (sizeof(long) == 4) {
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
/* Loongson2 ebase is different than r4k, we have more space */
if ((p - tlb_handler) > 64)
panic("TLB refill handler space exceeded");