Diffstat (limited to 'arch/mips/mm/tlbex.c'):

 -rw-r--r--  arch/mips/mm/tlbex.c | 335
 1 file changed, 133 insertions(+), 202 deletions(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 344e6e9ea43b..69ea54bdc0c3 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -28,12 +28,13 @@
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/cache.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/cpu-type.h>
+#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-#include <asm/war.h>
+#include <asm/regdef.h>
#include <asm/uasm.h>
#include <asm/setup.h>
#include <asm/tlbex.h>
@@ -83,14 +84,18 @@ static inline int r4k_250MHZhwbug(void)
return 0;
}
+extern int sb1250_m3_workaround_needed(void);
+
static inline int __maybe_unused bcm1250_m3_war(void)
{
- return BCM1250_M3_WAR;
+ if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS))
+ return sb1250_m3_workaround_needed();
+ return 0;
}
static inline int __maybe_unused r10000_llsc_war(void)
{
- return R10000_LLSC_WAR;
+ return IS_ENABLED(CONFIG_WAR_R10000_LLSC);
}
static int use_bbit_insns(void)
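Note on the two hunks above: the compile-time constants from asm/war.h
(BCM1250_M3_WAR, R10000_LLSC_WAR) become Kconfig queries. A minimal sketch of
why IS_ENABLED() is preferred over #ifdef for this (generic illustration, not
part of the patch):

    /* sketch: IS_ENABLED(CONFIG_X) is 1 when CONFIG_X=y or =m, else 0, so
     * the compiler folds the branch at build time while still parsing and
     * type-checking both arms -- unlike #ifdef, which hides one of them. */
    static inline int some_war(void)
    {
            if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS))
                    return sb1250_m3_workaround_needed();  /* runtime probe */
            return 0;
    }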
@@ -250,7 +255,7 @@ static void output_pgtable_bits_defines(void)
pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
- pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
+ pr_define("PFN_PTE_SHIFT %d\n", PFN_PTE_SHIFT);
pr_debug("\n");
}
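The _PFN_SHIFT -> PFN_PTE_SHIFT rename follows the generic pgtable naming; the
exported define is consumed by assembly elsewhere. What the shift describes,
as a sketch (the surrounding flag layout is config-dependent and assumed
here):

    /* sketch: a MIPS software PTE keeps the page frame number above the
     * protection/status bits; PFN_PTE_SHIFT names that boundary. */
    static unsigned long mk_pte_val(unsigned long pfn, unsigned long prot)
    {
            return (pfn << PFN_PTE_SHIFT) | prot;
    }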
@@ -273,27 +278,6 @@ static inline void dump_handler(const char *symbol, const void *start, const voi
pr_debug("\tEND(%s)\n", symbol);
}
-/* The only general purpose registers allowed in TLB handlers. */
-#define K0 26
-#define K1 27
-
-/* Some CP0 registers */
-#define C0_INDEX 0, 0
-#define C0_ENTRYLO0 2, 0
-#define C0_TCBIND 2, 2
-#define C0_ENTRYLO1 3, 0
-#define C0_CONTEXT 4, 0
-#define C0_PAGEMASK 5, 0
-#define C0_PWBASE 5, 5
-#define C0_PWFIELD 5, 6
-#define C0_PWSIZE 5, 7
-#define C0_PWCTL 6, 6
-#define C0_BADVADDR 8, 0
-#define C0_PGD 9, 7
-#define C0_ENTRYHI 10, 0
-#define C0_EPC 14, 0
-#define C0_XCONTEXT 20, 0
-
#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
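The locally defined K0/K1 and C0_* coordinates above are dropped because
asm/regdef.h and asm/mipsregs.h (both newly included) already provide them as
GPR_* and C0_* respectively. Each C0_* macro is a "register, select" pair that
lands in the rd/sel fields of the generated MFC0/MTC0; for example (sketch of
the expansion, using a macro from the removed block):

    /* C0_ENTRYHI == 10, 0, so this emits: mfc0 tmp, $10, 0 */
    UASM_i_MFC0(&p, tmp, C0_ENTRYHI);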
@@ -321,13 +305,7 @@ static unsigned int kscratch_used_mask;
static inline int __maybe_unused c0_kscratch(void)
{
- switch (current_cpu_type()) {
- case CPU_XLP:
- case CPU_XLR:
- return 22;
- default:
- return 31;
- }
+ return 31;
}
static int allocate_kscratch(void)
@@ -359,30 +337,30 @@ static struct work_registers build_get_work_registers(u32 **p)
if (scratch_reg >= 0) {
/* Save in CPU local C0_KScratch? */
UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
- r.r1 = K0;
- r.r2 = K1;
- r.r3 = 1;
+ r.r1 = GPR_K0;
+ r.r2 = GPR_K1;
+ r.r3 = GPR_AT;
return r;
}
if (num_possible_cpus() > 1) {
/* Get smp_processor_id */
- UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG);
- UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);
+ UASM_i_CPUID_MFC0(p, GPR_K0, SMP_CPUID_REG);
+ UASM_i_SRL_SAFE(p, GPR_K0, GPR_K0, SMP_CPUID_REGSHIFT);
- /* handler_reg_save index in K0 */
- UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
+ /* handler_reg_save index in GPR_K0 */
+ UASM_i_SLL(p, GPR_K0, GPR_K0, ilog2(sizeof(struct tlb_reg_save)));
- UASM_i_LA(p, K1, (long)&handler_reg_save);
- UASM_i_ADDU(p, K0, K0, K1);
+ UASM_i_LA(p, GPR_K1, (long)&handler_reg_save);
+ UASM_i_ADDU(p, GPR_K0, GPR_K0, GPR_K1);
} else {
- UASM_i_LA(p, K0, (long)&handler_reg_save);
+ UASM_i_LA(p, GPR_K0, (long)&handler_reg_save);
}
- /* K0 now points to save area, save $1 and $2 */
- UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
- UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
+ /* GPR_K0 now points to save area, save $1 and $2 */
+ UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), GPR_K0);
+ UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), GPR_K0);
- r.r1 = K1;
+ r.r1 = GPR_K1;
r.r2 = 1;
r.r3 = 2;
return r;
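When no KScratch register is free, the handler spills $1/$2 into a per-CPU
slot of handler_reg_save; the SLL/LA/ADDU sequence above is open-coded array
indexing. In C terms (a sketch using the names from this function):

    /* what the generated code computes */
    struct tlb_reg_save *slot = &handler_reg_save[smp_processor_id()];
    /* i.e. base + (cpu << ilog2(sizeof(struct tlb_reg_save))), which is
     * why the struct's size must remain a power of two */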
@@ -395,9 +373,9 @@ static void build_restore_work_registers(u32 **p)
UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
return;
}
- /* K0 already points to save area, restore $1 and $2 */
- UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
- UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
+ /* GPR_K0 already points to save area, restore $1 and $2 */
+ UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), GPR_K0);
+ UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), GPR_K0);
}
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
@@ -416,22 +394,22 @@ static void build_r3000_tlb_refill_handler(void)
memset(tlb_handler, 0, sizeof(tlb_handler));
p = tlb_handler;
- uasm_i_mfc0(&p, K0, C0_BADVADDR);
- uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
- uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
- uasm_i_srl(&p, K0, K0, 22); /* load delay */
- uasm_i_sll(&p, K0, K0, 2);
- uasm_i_addu(&p, K1, K1, K0);
- uasm_i_mfc0(&p, K0, C0_CONTEXT);
- uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
- uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
- uasm_i_addu(&p, K1, K1, K0);
- uasm_i_lw(&p, K0, 0, K1);
+ uasm_i_mfc0(&p, GPR_K0, C0_BADVADDR);
+ uasm_i_lui(&p, GPR_K1, uasm_rel_hi(pgdc)); /* cp0 delay */
+ uasm_i_lw(&p, GPR_K1, uasm_rel_lo(pgdc), GPR_K1);
+ uasm_i_srl(&p, GPR_K0, GPR_K0, 22); /* load delay */
+ uasm_i_sll(&p, GPR_K0, GPR_K0, 2);
+ uasm_i_addu(&p, GPR_K1, GPR_K1, GPR_K0);
+ uasm_i_mfc0(&p, GPR_K0, C0_CONTEXT);
+ uasm_i_lw(&p, GPR_K1, 0, GPR_K1); /* cp0 delay */
+ uasm_i_andi(&p, GPR_K0, GPR_K0, 0xffc); /* load delay */
+ uasm_i_addu(&p, GPR_K1, GPR_K1, GPR_K0);
+ uasm_i_lw(&p, GPR_K0, 0, GPR_K1);
uasm_i_nop(&p); /* load delay */
- uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
- uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
+ uasm_i_mtc0(&p, GPR_K0, C0_ENTRYLO0);
+ uasm_i_mfc0(&p, GPR_K1, C0_EPC); /* cp0 delay */
uasm_i_tlbwr(&p); /* cp0 delay */
- uasm_i_jr(&p, K1);
+ uasm_i_jr(&p, GPR_K1);
uasm_i_rfe(&p); /* branch delay */
if (p > tlb_handler + 32)
@@ -545,10 +523,10 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
tlbw(p);
break;
+ case CPU_R4300:
case CPU_5KC:
case CPU_TX49XX:
case CPU_PR4450:
- case CPU_XLR:
uasm_i_nop(p);
tlbw(p);
break;
@@ -576,7 +554,7 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_R5500:
if (m4kc_tlbp_war())
uasm_i_nop(p);
- /* fall through */
+ fallthrough;
case CPU_ALCHEMY:
tlbw(p);
break;
@@ -589,25 +567,6 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
tlbw(p);
break;
- case CPU_VR4111:
- case CPU_VR4121:
- case CPU_VR4122:
- case CPU_VR4181:
- case CPU_VR4181A:
- uasm_i_nop(p);
- uasm_i_nop(p);
- tlbw(p);
- uasm_i_nop(p);
- uasm_i_nop(p);
- break;
-
- case CPU_VR4131:
- case CPU_VR4133:
- uasm_i_nop(p);
- uasm_i_nop(p);
- tlbw(p);
- break;
-
case CPU_XBURST:
tlbw(p);
uasm_i_nop(p);
@@ -629,7 +588,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
return;
}
- if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
+ if (cpu_has_rixi && _PAGE_NO_EXEC != 0) {
if (fill_includes_sw_bits) {
UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
} else {
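On RIXI CPUs the no-exec/no-read software bits sit just below _PAGE_GLOBAL,
and a single rotate both parks the global bit at bit 0 of EntryLo and wraps
those bits around into the top "fill" bits, where the hardware RI/XI live. A
sketch of the effect (bit positions are configuration-dependent):

    /* ROTR by ilog2(_PAGE_GLOBAL): bits below the global bit wrap to the
     * most-significant end of the register, i.e. into RI/XI. */
    entrylo = ror(pte, ilog2(_PAGE_GLOBAL));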
@@ -811,7 +770,7 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
if (check_for_high_segbits) {
/*
- * The kernel currently implicitely assumes that the
+ * The kernel currently implicitly assumes that the
* MIPS SEGBITS parameter for the processor is
* (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
* allocate virtual addresses outside the maximum
@@ -821,7 +780,7 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* everything but the lower xuseg addresses goes down
* the module_alloc/vmalloc path.
*/
- uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
uasm_il_bnez(p, r, ptr, label_vmalloc);
} else {
uasm_il_bltz(p, r, tmp, label_vmalloc);
@@ -844,8 +803,8 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
/* Clear lower 23 bits of context. */
uasm_i_dins(p, ptr, 0, 0, 23);
- /* 1 0 1 0 1 << 6 xkphys cached */
- uasm_i_ori(p, ptr, ptr, 0x540);
+ /* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */
+ uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53));
uasm_i_drotr(p, ptr, ptr, 11);
#elif defined(CONFIG_SMP)
UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
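The hardcoded 0x540 assumed one particular xkphys prefix; deriving the
immediate from CAC_BASE makes the ori/drotr pair follow the configured cache
coherency attribute. A standalone userspace sketch of the bit math (CAC_BASE
value assumed for a typical 64-bit config):

    #include <stdint.h>
    #include <stdio.h>

    /* rotate right, mirroring what drotr does for n = 11 */
    static uint64_t ror64(uint64_t x, unsigned int n)
    {
            return (x >> n) | (x << (64 - n));
    }

    int main(void)
    {
            const uint64_t cac_base = 0x9800000000000000ULL; /* assumed */
            uint64_t ptr = 0;       /* low bits already cleared by dins */

            ptr |= cac_base >> 53;  /* uasm_i_ori   */
            ptr = ror64(ptr, 11);   /* uasm_i_drotr */
            printf("%#llx\n", (unsigned long long)ptr); /* 0x9800000000000000 */
            return 0;
    }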
@@ -998,22 +957,6 @@ static void build_adjust_context(u32 **p, unsigned int ctx)
unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
- switch (current_cpu_type()) {
- case CPU_VR41XX:
- case CPU_VR4111:
- case CPU_VR4121:
- case CPU_VR4122:
- case CPU_VR4131:
- case CPU_VR4181:
- case CPU_VR4181A:
- case CPU_VR4133:
- shift += 2;
- break;
-
- default:
- break;
- }
-
if (shift)
UASM_i_SRL(p, ctx, ctx, shift);
uasm_i_andi(p, ctx, ctx, mask);
@@ -1130,7 +1073,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
uasm_i_dsrl_safe(p, scratch, tmp,
- PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
uasm_il_bnez(p, r, scratch, label_vmalloc);
if (pgd_reg == -1) {
@@ -1160,8 +1103,9 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
if (pgd_reg == -1) {
vmalloc_branch_delay_filled = 1;
- /* 1 0 1 0 1 << 6 xkphys cached */
- uasm_i_ori(p, ptr, ptr, 0x540);
+ /* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */
+ uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53));
+
uasm_i_drotr(p, ptr, ptr, 11);
}
@@ -1313,11 +1257,11 @@ static void build_r4000_tlb_refill_handler(void)
memset(final_handler, 0, sizeof(final_handler));
if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {
- htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
+ htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, GPR_K0, GPR_K1,
scratch_reg);
vmalloc_mode = refill_scratch;
} else {
- htlb_info.huge_pte = K0;
+ htlb_info.huge_pte = GPR_K0;
htlb_info.restore_scratch = 0;
htlb_info.need_reload_pte = true;
vmalloc_mode = refill_noscratch;
@@ -1327,29 +1271,29 @@ static void build_r4000_tlb_refill_handler(void)
if (bcm1250_m3_war()) {
unsigned int segbits = 44;
- uasm_i_dmfc0(&p, K0, C0_BADVADDR);
- uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
- uasm_i_xor(&p, K0, K0, K1);
- uasm_i_dsrl_safe(&p, K1, K0, 62);
- uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
- uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
- uasm_i_or(&p, K0, K0, K1);
- uasm_il_bnez(&p, &r, K0, label_leave);
+ uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR);
+ uasm_i_dmfc0(&p, GPR_K1, C0_ENTRYHI);
+ uasm_i_xor(&p, GPR_K0, GPR_K0, GPR_K1);
+ uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0, 62);
+ uasm_i_dsrl_safe(&p, GPR_K0, GPR_K0, 12 + 1);
+ uasm_i_dsll_safe(&p, GPR_K0, GPR_K0, 64 + 12 + 1 - segbits);
+ uasm_i_or(&p, GPR_K0, GPR_K0, GPR_K1);
+ uasm_il_bnez(&p, &r, GPR_K0, label_leave);
/* No need for uasm_i_nop */
}
#ifdef CONFIG_64BIT
- build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
+ build_get_pmde64(&p, &l, &r, GPR_K0, GPR_K1); /* get pmd in GPR_K1 */
#else
- build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
+ build_get_pgde32(&p, GPR_K0, GPR_K1); /* get pgd in GPR_K1 */
#endif
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
- build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
+ build_is_huge_pte(&p, &r, GPR_K0, GPR_K1, label_tlb_huge_update);
#endif
- build_get_ptep(&p, K0, K1);
- build_update_entries(&p, K0, K1);
+ build_get_ptep(&p, GPR_K0, GPR_K1);
+ build_update_entries(&p, GPR_K0, GPR_K1);
build_tlb_write_entry(&p, &l, &r, tlb_random);
uasm_l_leave(&l, p);
uasm_i_eret(&p); /* return from trap */
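For reference, the BCM1250 M3 sequence above compares C0_BadVAddr against
C0_EntryHi and bails out through label_leave when they disagree, since such a
mismatch identifies the spurious TLB exception raised by the erratum. The same
test as a C sketch (segbits = 44 and 4K pages, so VPN2 starts at bit 12 + 1):

    static int m3_spurious_fault(uint64_t badvaddr, uint64_t entryhi)
    {
            uint64_t diff = badvaddr ^ entryhi;
            uint64_t region = diff >> 62;                  /* region bits    */
            uint64_t vpn = (diff >> 13) << (64 + 13 - 44); /* VPN2 < segbits */

            return (region | vpn) != 0; /* nonzero: just ERET, no refill */
    }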
@@ -1357,14 +1301,14 @@ static void build_r4000_tlb_refill_handler(void)
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
uasm_l_tlb_huge_update(&l, p);
if (htlb_info.need_reload_pte)
- UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
- build_huge_update_entries(&p, htlb_info.huge_pte, K1);
- build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
+ UASM_i_LW(&p, htlb_info.huge_pte, 0, GPR_K1);
+ build_huge_update_entries(&p, htlb_info.huge_pte, GPR_K1);
+ build_huge_tlb_write_entry(&p, &l, &r, GPR_K0, tlb_random,
htlb_info.restore_scratch);
#endif
#ifdef CONFIG_64BIT
- build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
+ build_get_pgd_vmalloc64(&p, &l, &r, GPR_K0, GPR_K1, vmalloc_mode);
#endif
/*
@@ -1377,6 +1321,7 @@ static void build_r4000_tlb_refill_handler(void)
switch (boot_cpu_type()) {
default:
if (sizeof(long) == 4) {
+ fallthrough;
case CPU_LOONGSON2EF:
/* Loongson2 ebase is different than r4k, we have more space */
if ((p - tlb_handler) > 64)
@@ -1480,6 +1425,7 @@ static void build_r4000_tlb_refill_handler(void)
static void setup_pw(void)
{
+ unsigned int pwctl;
unsigned long pgd_i, pgd_w;
#ifndef __PAGETABLE_PMD_FOLDED
unsigned long pmd_i, pmd_w;
@@ -1493,12 +1439,12 @@ static void setup_pw(void)
#endif
pgd_i = PGDIR_SHIFT; /* 1st level PGD */
#ifndef __PAGETABLE_PMD_FOLDED
- pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER;
+ pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_TABLE_ORDER;
pmd_i = PMD_SHIFT; /* 2nd level PMD */
pmd_w = PMD_SHIFT - PAGE_SHIFT;
#else
- pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER;
+ pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_TABLE_ORDER;
#endif
pt_i = PAGE_SHIFT; /* 3rd level PTE */
@@ -1506,6 +1452,7 @@ static void setup_pw(void)
pte_i = ilog2(_PAGE_GLOBAL);
pte_w = 0;
+ pwctl = 1 << 30; /* Set PWDirExt */
#ifndef __PAGETABLE_PMD_FOLDED
write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i);
@@ -1516,8 +1463,9 @@ static void setup_pw(void)
#endif
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
- write_c0_pwctl(1 << 6 | psn);
+ pwctl |= (1 << 6 | psn);
#endif
+ write_c0_pwctl(pwctl);
write_c0_kpgd((long)swapper_pg_dir);
kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
}
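The functional fix in setup_pw(): PWCtl used to be written only under
CONFIG_MIPS_HUGE_TLB_SUPPORT, which would have dropped the new PWDirExt bit on
non-huge configurations, so the value is now composed first and written once.
For orientation, the PWField packing used above, annotated (slot positions per
the MIPS page-walker register layout):

    /* sketch: each slot holds the virtual-address shift of one level */
    write_c0_pwfield(pgd_i << 24 |  /* GDI, bits 29:24: PGD index    */
                     pmd_i << 12 |  /* MDI, bits 17:12: PMD index    */
                     pt_i  <<  6 |  /* PTI, bits 11:6:  PTE index    */
                     pte_i);        /* PTEI, bits 5:0:  global bit   */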
@@ -1533,34 +1481,35 @@ static void build_loongson3_tlb_refill_handler(void)
memset(tlb_handler, 0, sizeof(tlb_handler));
if (check_for_high_segbits) {
- uasm_i_dmfc0(&p, K0, C0_BADVADDR);
- uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
- uasm_il_beqz(&p, &r, K1, label_vmalloc);
+ uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR);
+ uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0,
+ PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
+ uasm_il_beqz(&p, &r, GPR_K1, label_vmalloc);
uasm_i_nop(&p);
- uasm_il_bgez(&p, &r, K0, label_large_segbits_fault);
+ uasm_il_bgez(&p, &r, GPR_K0, label_large_segbits_fault);
uasm_i_nop(&p);
uasm_l_vmalloc(&l, p);
}
- uasm_i_dmfc0(&p, K1, C0_PGD);
+ uasm_i_dmfc0(&p, GPR_K1, C0_PGD);
- uasm_i_lddir(&p, K0, K1, 3); /* global page dir */
+ uasm_i_lddir(&p, GPR_K0, GPR_K1, 3); /* global page dir */
#ifndef __PAGETABLE_PMD_FOLDED
- uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */
+ uasm_i_lddir(&p, GPR_K1, GPR_K0, 1); /* middle page dir */
#endif
- uasm_i_ldpte(&p, K1, 0); /* even */
- uasm_i_ldpte(&p, K1, 1); /* odd */
+ uasm_i_ldpte(&p, GPR_K1, 0); /* even */
+ uasm_i_ldpte(&p, GPR_K1, 1); /* odd */
uasm_i_tlbwr(&p);
/* restore page mask */
if (PM_DEFAULT_MASK >> 16) {
- uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16);
- uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff);
- uasm_i_mtc0(&p, K0, C0_PAGEMASK);
+ uasm_i_lui(&p, GPR_K0, PM_DEFAULT_MASK >> 16);
+ uasm_i_ori(&p, GPR_K0, GPR_K0, PM_DEFAULT_MASK & 0xffff);
+ uasm_i_mtc0(&p, GPR_K0, C0_PAGEMASK);
} else if (PM_DEFAULT_MASK) {
- uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK);
- uasm_i_mtc0(&p, K0, C0_PAGEMASK);
+ uasm_i_ori(&p, GPR_K0, 0, PM_DEFAULT_MASK);
+ uasm_i_mtc0(&p, GPR_K0, C0_PAGEMASK);
} else {
uasm_i_mtc0(&p, 0, C0_PAGEMASK);
}
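The Loongson-3 refill path uses the LDDIR/LDPTE walker instructions:
lddir(rd, rs, level) fetches one directory level using the geometry programmed
into PWField/PWSize, and the ldpte pair loads the even and odd PTEs directly
into EntryLo0/EntryLo1 (semantics as used here; see the Loongson manual for
the authoritative definition). Roughly, the emitted walk corresponds to this
sketch, where pte_even/pte_odd are hypothetical helpers:

    pgd = kpgd[pgd_index(badvaddr)];    /* lddir level 3: global dir */
    pmd = pgd[pmd_index(badvaddr)];     /* lddir level 1: middle dir */
    entrylo0 = pte_even(pmd, badvaddr); /* ldpte 0 */
    entrylo1 = pte_odd(pmd, badvaddr);  /* ldpte 1 */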
@@ -1569,8 +1518,8 @@ static void build_loongson3_tlb_refill_handler(void)
if (check_for_high_segbits) {
uasm_l_large_segbits_fault(&l, p);
- UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0);
- uasm_i_jr(&p, K1);
+ UASM_i_LA(&p, GPR_K1, (unsigned long)tlb_do_page_fault_0);
+ uasm_i_jr(&p, GPR_K1);
uasm_i_nop(&p);
}
@@ -1748,7 +1697,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
/*
* Check if PTE is present, if not then jump to LABEL. PTR points to
* the page table where this PTE is located, PTE will be re-loaded
- * with it's original value.
+ * with its original value.
*/
static void
build_pte_present(u32 **p, struct uasm_reloc **r,
@@ -1936,11 +1885,11 @@ static void build_r3000_tlb_load_handler(void)
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
- build_r3000_tlbchange_handler_head(&p, K0, K1);
- build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
+ build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1);
+ build_pte_present(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbl);
uasm_i_nop(&p); /* load delay */
- build_make_valid(&p, &r, K0, K1, -1);
- build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
+ build_make_valid(&p, &r, GPR_K0, GPR_K1, -1);
+ build_r3000_tlb_reload_write(&p, &l, &r, GPR_K0, GPR_K1);
uasm_l_nopage_tlbl(&l, p);
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
@@ -1966,11 +1915,11 @@ static void build_r3000_tlb_store_handler(void)
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
- build_r3000_tlbchange_handler_head(&p, K0, K1);
- build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
+ build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1);
+ build_pte_writable(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbs);
uasm_i_nop(&p); /* load delay */
- build_make_write(&p, &r, K0, K1, -1);
- build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
+ build_make_write(&p, &r, GPR_K0, GPR_K1, -1);
+ build_r3000_tlb_reload_write(&p, &l, &r, GPR_K0, GPR_K1);
uasm_l_nopage_tlbs(&l, p);
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
@@ -1996,11 +1945,11 @@ static void build_r3000_tlb_modify_handler(void)
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
- build_r3000_tlbchange_handler_head(&p, K0, K1);
- build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
+ build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1);
+ build_pte_modifiable(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbm);
uasm_i_nop(&p); /* load delay */
- build_make_write(&p, &r, K0, K1, -1);
- build_r3000_pte_reload_tlbwi(&p, K0, K1);
+ build_make_write(&p, &r, GPR_K0, GPR_K1, -1);
+ build_r3000_pte_reload_tlbwi(&p, GPR_K0, GPR_K1);
uasm_l_nopage_tlbm(&l, p);
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
@@ -2063,7 +2012,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
UASM_i_LW(p, wr.r2, 0, wr.r2);
- UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
+ UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT - PTE_T_LOG2);
uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
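With PTE_ORDER gone (PTE tables are always exactly one page), the index math
loses a term. The SRL deliberately under-shifts by PTE_T_LOG2 so the page
number arrives pre-scaled to a byte offset, and the ANDI masks it to the
table; as a C sketch:

    /* what the SRL/ANDI/ADDU triple computes (pointer arithmetic does
     * the sizeof(pte_t) scaling that the shortened shift preserves) */
    pte_t *ptep = pte_base + ((badvaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));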
@@ -2116,14 +2065,14 @@ static void build_r4000_tlb_load_handler(void)
if (bcm1250_m3_war()) {
unsigned int segbits = 44;
- uasm_i_dmfc0(&p, K0, C0_BADVADDR);
- uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
- uasm_i_xor(&p, K0, K0, K1);
- uasm_i_dsrl_safe(&p, K1, K0, 62);
- uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
- uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
- uasm_i_or(&p, K0, K0, K1);
- uasm_il_bnez(&p, &r, K0, label_leave);
+ uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR);
+ uasm_i_dmfc0(&p, GPR_K1, C0_ENTRYHI);
+ uasm_i_xor(&p, GPR_K0, GPR_K0, GPR_K1);
+ uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0, 62);
+ uasm_i_dsrl_safe(&p, GPR_K0, GPR_K0, 12 + 1);
+ uasm_i_dsll_safe(&p, GPR_K0, GPR_K0, 64 + 12 + 1 - segbits);
+ uasm_i_or(&p, GPR_K0, GPR_K0, GPR_K1);
+ uasm_il_bnez(&p, &r, GPR_K0, label_leave);
/* No need for uasm_i_nop */
}
@@ -2156,17 +2105,8 @@ static void build_r4000_tlb_load_handler(void)
uasm_i_tlbr(&p);
- switch (current_cpu_type()) {
- default:
- if (cpu_has_mips_r2_exec_hazard) {
- uasm_i_ehb(&p);
-
- case CPU_CAVIUM_OCTEON:
- case CPU_CAVIUM_OCTEON_PLUS:
- case CPU_CAVIUM_OCTEON2:
- break;
- }
- }
+ if (cpu_has_mips_r2_exec_hazard)
+ uasm_i_ehb(&p);
/* Examine entrylo 0 or 1 based on ptr. */
if (use_bbit_insns()) {
@@ -2231,17 +2171,8 @@ static void build_r4000_tlb_load_handler(void)
uasm_i_tlbr(&p);
- switch (current_cpu_type()) {
- default:
- if (cpu_has_mips_r2_exec_hazard) {
- uasm_i_ehb(&p);
-
- case CPU_CAVIUM_OCTEON:
- case CPU_CAVIUM_OCTEON_PLUS:
- case CPU_CAVIUM_OCTEON2:
- break;
- }
- }
+ if (cpu_has_mips_r2_exec_hazard)
+ uasm_i_ehb(&p);
/* Examine entrylo 0 or 1 based on ptr. */
if (use_bbit_insns()) {
@@ -2284,9 +2215,9 @@ static void build_r4000_tlb_load_handler(void)
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_0 & 1) {
- uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
- uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
- uasm_i_jr(&p, K0);
+ uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_0));
+ uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_0));
+ uasm_i_jr(&p, GPR_K0);
} else
#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
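On microMIPS, bit 0 of a function address is the ISA-mode bit: jr honours it,
but the 26-bit j target cannot encode it, hence the lui/addiu/jr path whenever
tlb_do_page_fault_0 is a microMIPS symbol. The hi/lo split must absorb the
sign-extension of addiu's immediate; a sketch of helpers with the usual
uasm-style semantics (assumed, for illustration):

    static int rel_lo(long val)
    {
            return ((val & 0xffff) ^ 0x8000) - 0x8000; /* sign-extended low 16 */
    }

    static long rel_hi(long val)
    {
            return (val - rel_lo(val)) >> 16; /* so (hi << 16) + lo == val */
    }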
@@ -2340,9 +2271,9 @@ static void build_r4000_tlb_store_handler(void)
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_1 & 1) {
- uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
- uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
- uasm_i_jr(&p, K0);
+ uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+ uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+ uasm_i_jr(&p, GPR_K0);
} else
#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
@@ -2397,9 +2328,9 @@ static void build_r4000_tlb_modify_handler(void)
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_1 & 1) {
- uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
- uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
- uasm_i_jr(&p, K0);
+ uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+ uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+ uasm_i_jr(&p, GPR_K0);
} else
#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
@@ -2565,7 +2496,7 @@ static void check_pabits(void)
unsigned long entry;
unsigned pabits, fillbits;
- if (!cpu_has_rixi || !_PAGE_NO_EXEC) {
+ if (!cpu_has_rixi || _PAGE_NO_EXEC == 0) {
/*
* We'll only be making use of the fact that we can rotate bits
* into the fill if the CPU supports RIXI, so don't bother
@@ -2611,7 +2542,7 @@ void build_tlb_refill_handler(void)
check_pabits();
#ifdef CONFIG_64BIT
- check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
#endif
if (cpu_has_3kex) {