Diffstat (limited to 'arch/mips/mm/tlb-r4k.c')
-rw-r--r--  arch/mips/mm/tlb-r4k.c  197
1 file changed, 158 insertions(+), 39 deletions(-)
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 0596505770db..44a662536148 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -12,20 +12,21 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
+#include <linux/sort.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/tlb.h>
+#include <asm/tlbex.h>
#include <asm/tlbmisc.h>
-
-extern void build_tlb_refill_handler(void);
+#include <asm/setup.h>
/*
* LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
@@ -35,10 +36,10 @@ extern void build_tlb_refill_handler(void);
static inline void flush_micro_tlb(void)
{
switch (current_cpu_type()) {
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
write_c0_diag(LOONGSON_DIAG_ITLB);
break;
- case CPU_LOONGSON3:
+ case CPU_LOONGSON64:
write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
break;
default:
@@ -104,23 +105,6 @@ void local_flush_tlb_all(void)
}
EXPORT_SYMBOL(local_flush_tlb_all);
-/* All entries common to a mm share an asid. To effectively flush
- these entries, we just bump the asid. */
-void local_flush_tlb_mm(struct mm_struct *mm)
-{
- int cpu;
-
- preempt_disable();
-
- cpu = smp_processor_id();
-
- if (cpu_context(cpu, mm) != 0) {
- drop_mmu_context(mm, cpu);
- }
-
- preempt_enable();
-}
-
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
@@ -137,14 +121,23 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
if (size <= (current_cpu_data.tlbsizeftlbsets ?
current_cpu_data.tlbsize / 8 :
current_cpu_data.tlbsize / 2)) {
- int oldpid = read_c0_entryhi();
+ unsigned long old_entryhi, old_mmid;
int newpid = cpu_asid(cpu, mm);
+ old_entryhi = read_c0_entryhi();
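+ /*
+ * With MMID the ASID lives in the MemoryMapID register rather than in
+ * EntryHi, so switch it once here instead of on every probe below.
+ */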
+ if (cpu_has_mmid) {
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(newpid);
+ }
+
htw_stop();
while (start < end) {
int idx;
- write_c0_entryhi(start | newpid);
+ if (cpu_has_mmid)
+ write_c0_entryhi(start);
+ else
+ write_c0_entryhi(start | newpid);
start += (PAGE_SIZE << 1);
mtc0_tlbw_hazard();
tlb_probe();
@@ -160,10 +153,12 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
tlb_write_indexed();
}
tlbw_use_hazard();
- write_c0_entryhi(oldpid);
+ write_c0_entryhi(old_entryhi);
+ if (cpu_has_mmid)
+ write_c0_memorymapid(old_mmid);
htw_start();
} else {
- drop_mmu_context(mm, cpu);
+ drop_mmu_context(mm);
}
flush_micro_tlb();
local_irq_restore(flags);
@@ -220,15 +215,21 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
int cpu = smp_processor_id();
if (cpu_context(cpu, vma->vm_mm) != 0) {
- unsigned long flags;
- int oldpid, newpid, idx;
+ unsigned long old_mmid;
+ unsigned long flags, old_entryhi;
+ int idx;
- newpid = cpu_asid(cpu, vma->vm_mm);
page &= (PAGE_MASK << 1);
local_irq_save(flags);
- oldpid = read_c0_entryhi();
+ old_entryhi = read_c0_entryhi();
htw_stop();
- write_c0_entryhi(page | newpid);
+ if (cpu_has_mmid) {
+ old_mmid = read_c0_memorymapid();
+ write_c0_entryhi(page);
+ write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
+ } else {
+ write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
+ }
mtc0_tlbw_hazard();
tlb_probe();
tlb_probe_hazard();
@@ -244,7 +245,9 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
tlbw_use_hazard();
finish:
- write_c0_entryhi(oldpid);
+ write_c0_entryhi(old_entryhi);
+ if (cpu_has_mmid)
+ write_c0_memorymapid(old_mmid);
htw_start();
flush_micro_tlb_vm(vma);
local_irq_restore(flags);
@@ -293,13 +296,14 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
unsigned long flags;
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
- pte_t *ptep;
+ pte_t *ptep, *ptemap = NULL;
int idx, pid;
/*
- * Handle debugger faulting in for debugee.
+ * Handle debugger faulting in for debuggee.
*/
if (current->active_mm != vma->vm_mm)
return;
@@ -307,19 +311,24 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
local_irq_save(flags);
htw_stop();
- pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
address &= (PAGE_MASK << 1);
- write_c0_entryhi(address | pid);
+ if (cpu_has_mmid) {
+ write_c0_entryhi(address);
+ } else {
+ pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
+ write_c0_entryhi(address | pid);
+ }
pgdp = pgd_offset(vma->vm_mm, address);
mtc0_tlbw_hazard();
tlb_probe();
tlb_probe_hazard();
- pudp = pud_offset(pgdp, address);
+ p4dp = p4d_offset(pgdp, address);
+ pudp = pud_offset(p4dp, address);
pmdp = pmd_offset(pudp, address);
idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* this could be a huge page */
- if (pmd_huge(*pmdp)) {
+ if (pmd_leaf(*pmdp)) {
unsigned long lo;
write_c0_pagemask(PM_HUGE_MASK);
ptep = (pte_t *)pmdp;
@@ -337,7 +346,12 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
} else
#endif
{
- ptep = pte_offset_map(pmdp, address);
+ ptemap = ptep = pte_offset_map(pmdp, address);
+ /*
+ * update_mmu_cache() is called between pte_offset_map_lock()
+ * and pte_unmap_unlock(), so we can assume that ptep is not
+ * NULL here: and what should be done below if it were NULL?
+ */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
@@ -366,6 +380,9 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
tlbw_use_hazard();
htw_start();
flush_micro_tlb_vm(vma);
+
+ if (ptemap)
+ pte_unmap(ptemap);
local_irq_restore(flags);
}
@@ -375,12 +392,17 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
#ifdef CONFIG_XPA
panic("Broken for XPA kernels");
#else
+ unsigned int old_mmid;
unsigned long flags;
unsigned long wired;
unsigned long old_pagemask;
unsigned long old_ctx;
local_irq_save(flags);
+ if (cpu_has_mmid) {
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(MMID_KERNEL_WIRED);
+ }
/* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi();
htw_stop();
@@ -398,6 +420,8 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
+ if (cpu_has_mmid)
+ write_c0_memorymapid(old_mmid);
tlbw_use_hazard(); /* What is the hazard here? */
htw_start();
write_c0_pagemask(old_pagemask);
@@ -424,6 +448,7 @@ int has_transparent_hugepage(void)
}
return mask == PM_HUGE_MASK;
}
+EXPORT_SYMBOL(has_transparent_hugepage);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -435,6 +460,7 @@ int has_transparent_hugepage(void)
int temp_tlb_entry;
+#ifndef CONFIG_64BIT
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
{
@@ -473,6 +499,7 @@ out:
local_irq_restore(flags);
return ret;
}
+#endif
static int ntlb;
static int __init set_ntlb(char *str)
@@ -483,6 +510,97 @@ static int __init set_ntlb(char *str)
__setup("ntlb=", set_ntlb);
+
+/* Comparison function for EntryHi VPN fields. */
+static int r4k_vpn_cmp(const void *a, const void *b)
+{
+ long v = *(unsigned long *)a - *(unsigned long *)b;
+ int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
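+ /*
+ * On 64-bit kernels collapse the 64-bit difference to -1, 0 or 1 so it
+ * survives the int return type; on 32-bit kernels long and int have the
+ * same width and v can be returned directly.
+ */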
+ return s ? (v != 0) | v >> s : v;
+}
+
+/*
+ * Initialise all TLB entries with unique values that do not clash with
+ * what we have been handed over and what we'll be using ourselves.
+ */
+static void __ref r4k_tlb_uniquify(void)
+{
+ int tlbsize = current_cpu_data.tlbsize;
+ bool use_slab = slab_is_available();
+ int start = num_wired_entries();
+ phys_addr_t tlb_vpn_size;
+ unsigned long *tlb_vpns;
+ unsigned long vpn_mask;
+ int cnt, ent, idx, i;
+
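+ /* The VPN2 field of EntryHi plus the top region-select bits. */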
+ vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+ vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
+
+ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
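+ /*
+ * This runs both at early boot, when only memblock is up, and when a
+ * CPU is powered back on with the slab allocator available, hence the
+ * two allocation schemes.
+ */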
+ tlb_vpns = (use_slab ?
+ kmalloc(tlb_vpn_size, GFP_KERNEL) :
+ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
+ if (WARN_ON(!tlb_vpns))
+ return; /* Pray local_flush_tlb_all() is good enough. */
+
+ htw_stop();
+
+ for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+ unsigned long vpn;
+
+ write_c0_index(i);
+ mtc0_tlbr_hazard();
+ tlb_read();
+ tlb_read_hazard();
+ vpn = read_c0_entryhi();
+ vpn &= vpn_mask & PAGE_MASK;
+ tlb_vpns[cnt] = vpn;
+
+ /* Prevent any large pages from overlapping regular ones. */
+ write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+ }
+
+ sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
+
+ write_c0_pagemask(PM_DEFAULT_MASK);
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+
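+ /*
+ * Walk the TLB and the sorted VPN list in parallel, giving each entry
+ * the next UNIQUE_ENTRYHI value that does not collide with a VPN
+ * already present in the TLB.
+ */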
+ idx = 0;
+ ent = tlbsize;
+ for (i = start; i < tlbsize; i++)
+ while (1) {
+ unsigned long entryhi, vpn;
+
+ entryhi = UNIQUE_ENTRYHI(ent);
+ vpn = entryhi & vpn_mask & PAGE_MASK;
+
+ if (idx >= cnt || vpn < tlb_vpns[idx]) {
+ write_c0_entryhi(entryhi);
+ write_c0_index(i);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ ent++;
+ break;
+ } else if (vpn == tlb_vpns[idx]) {
+ ent++;
+ } else {
+ idx++;
+ }
+ }
+
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
+ if (use_slab)
+ kfree(tlb_vpns);
+ else
+ memblock_free(tlb_vpns, tlb_vpn_size);
+}
+
/*
* Configure TLB (for init or after a CPU has been powered off).
*/
@@ -522,6 +640,7 @@ static void r4k_tlb_configure(void)
temp_tlb_entry = current_cpu_data.tlbsize - 1;
/* From this point on the ARC firmware is dead. */
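+ /* Make sure no leftover entry clashes with the VPNs the flush below will use. */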
+ r4k_tlb_uniquify();
local_flush_tlb_all();
/* Did I tell you that ARC SUCKS? */