Diffstat (limited to 'arch/mips/mm/tlb-r4k.c')
-rw-r--r--  arch/mips/mm/tlb-r4k.c  498
1 file changed, 370 insertions, 128 deletions
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index c643de4c473a..44a662536148 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -8,112 +8,103 @@
  * Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
  */
+#include <linux/cpu_pm.h>
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/sort.h>
 
 #include <asm/cpu.h>
+#include <asm/cpu-type.h>
 #include <asm/bootinfo.h>
+#include <asm/hazards.h>
 #include <asm/mmu_context.h>
-#include <asm/pgtable.h>
+#include <asm/tlb.h>
+#include <asm/tlbex.h>
 #include <asm/tlbmisc.h>
-
-extern void build_tlb_refill_handler(void);
+#include <asm/setup.h>
 
 /*
- * Make sure all entries differ. If they're not different
- * MIPS32 will take revenge ...
+ * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
+ * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
+ * itlb/dtlb are not totally transparent to software.
  */
-#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
-
-/* Atomicity and interruptability */
-#ifdef CONFIG_MIPS_MT_SMTC
-
-#include <asm/smtc.h>
-#include <asm/mipsmtregs.h>
-
-#define ENTER_CRITICAL(flags) \
-	{ \
-	unsigned int mvpflags; \
-	local_irq_save(flags);\
-	mvpflags = dvpe()
-#define EXIT_CRITICAL(flags) \
-	evpe(mvpflags); \
-	local_irq_restore(flags); \
+static inline void flush_micro_tlb(void)
+{
+	switch (current_cpu_type()) {
+	case CPU_LOONGSON2EF:
+		write_c0_diag(LOONGSON_DIAG_ITLB);
+		break;
+	case CPU_LOONGSON64:
+		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
+		break;
+	default:
+		break;
 	}
-#else
-
-#define ENTER_CRITICAL(flags) local_irq_save(flags)
-#define EXIT_CRITICAL(flags) local_irq_restore(flags)
-
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-#if defined(CONFIG_CPU_LOONGSON2)
-/*
- * LOONGSON2 has a 4 entry itlb which is a subset of dtlb,
- * unfortrunately, itlb is not totally transparent to software.
- */
-#define FLUSH_ITLB write_c0_diag(4);
-
-#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC) write_c0_diag(4); }
-
-#else
-
-#define FLUSH_ITLB
-#define FLUSH_ITLB_VM(vma)
+}
 
-#endif
+static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & VM_EXEC)
+		flush_micro_tlb();
+}
 
 void local_flush_tlb_all(void)
 {
 	unsigned long flags;
 	unsigned long old_ctx;
-	int entry;
+	int entry, ftlbhighset;
 
-	ENTER_CRITICAL(flags);
+	local_irq_save(flags);
 	/* Save old context and create impossible VPN2 value */
 	old_ctx = read_c0_entryhi();
+	htw_stop();
 	write_c0_entrylo0(0);
 	write_c0_entrylo1(0);
 
-	entry = read_c0_wired();
+	entry = num_wired_entries();
 
-	/* Blast 'em all away. */
-	while (entry < current_cpu_data.tlbsize) {
-		/* Make sure all entries differ. */
-		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
-		write_c0_index(entry);
-		mtc0_tlbw_hazard();
-		tlb_write_indexed();
-		entry++;
+	/*
+	 * Blast 'em all away.
	 * If there are any wired entries, fall back to iterating
+	 */
+	if (cpu_has_tlbinv && !entry) {
+		if (current_cpu_data.tlbsizevtlb) {
+			write_c0_index(0);
+			mtc0_tlbw_hazard();
+			tlbinvf();  /* invalidate VTLB */
+		}
+		ftlbhighset = current_cpu_data.tlbsizevtlb +
+			current_cpu_data.tlbsizeftlbsets;
+		for (entry = current_cpu_data.tlbsizevtlb;
+		     entry < ftlbhighset;
+		     entry++) {
+			write_c0_index(entry);
+			mtc0_tlbw_hazard();
+			tlbinvf();  /* invalidate one FTLB set */
+		}
+	} else {
+		while (entry < current_cpu_data.tlbsize) {
+			/* Make sure all entries differ. */
+			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+			write_c0_index(entry);
+			mtc0_tlbw_hazard();
+			tlb_write_indexed();
+			entry++;
+		}
 	}
 	tlbw_use_hazard();
 	write_c0_entryhi(old_ctx);
-	FLUSH_ITLB;
-	EXIT_CRITICAL(flags);
+	htw_start();
+	flush_micro_tlb();
+	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(local_flush_tlb_all);
 
-/* All entries common to a mm share an asid. To effectively flush
-   these entries, we just bump the asid. */
-void local_flush_tlb_mm(struct mm_struct *mm)
-{
-	int cpu;
-
-	preempt_disable();
-
-	cpu = smp_processor_id();
-
-	if (cpu_context(cpu, mm) != 0) {
-		drop_mmu_context(mm, cpu);
-	}
-
-	preempt_enable();
-}
-
 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end)
 {
@@ -123,18 +114,30 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	if (cpu_context(cpu, mm) != 0) {
 		unsigned long size, flags;
 
-		ENTER_CRITICAL(flags);
+		local_irq_save(flags);
 		start = round_down(start, PAGE_SIZE << 1);
 		end = round_up(end, PAGE_SIZE << 1);
 		size = (end - start) >> (PAGE_SHIFT + 1);
-		if (size <= current_cpu_data.tlbsize/2) {
-			int oldpid = read_c0_entryhi();
+		if (size <= (current_cpu_data.tlbsizeftlbsets ?
+			     current_cpu_data.tlbsize / 8 :
+			     current_cpu_data.tlbsize / 2)) {
+			unsigned long old_entryhi, old_mmid;
 			int newpid = cpu_asid(cpu, mm);
 
+			old_entryhi = read_c0_entryhi();
+			if (cpu_has_mmid) {
+				old_mmid = read_c0_memorymapid();
+				write_c0_memorymapid(newpid);
+			}
+
+			htw_stop();
 			while (start < end) {
 				int idx;
 
-				write_c0_entryhi(start | newpid);
+				if (cpu_has_mmid)
+					write_c0_entryhi(start);
+				else
+					write_c0_entryhi(start | newpid);
 				start += (PAGE_SIZE << 1);
 				mtc0_tlbw_hazard();
 				tlb_probe();
@@ -150,12 +153,15 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 				tlb_write_indexed();
 			}
 			tlbw_use_hazard();
-			write_c0_entryhi(oldpid);
+			write_c0_entryhi(old_entryhi);
+			if (cpu_has_mmid)
+				write_c0_memorymapid(old_mmid);
+			htw_start();
 		} else {
-			drop_mmu_context(mm, cpu);
+			drop_mmu_context(mm);
 		}
-		FLUSH_ITLB;
-		EXIT_CRITICAL(flags);
+		flush_micro_tlb();
+		local_irq_restore(flags);
 	}
 }
 
@@ -163,15 +169,18 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned long size, flags;
 
-	ENTER_CRITICAL(flags);
+	local_irq_save(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	size = (size + 1) >> 1;
-	if (size <= current_cpu_data.tlbsize / 2) {
+	if (size <= (current_cpu_data.tlbsizeftlbsets ?
+		     current_cpu_data.tlbsize / 8 :
+		     current_cpu_data.tlbsize / 2)) {
 		int pid = read_c0_entryhi();
 
 		start &= (PAGE_MASK << 1);
 		end += ((PAGE_SIZE << 1) - 1);
 		end &= (PAGE_MASK << 1);
+		htw_stop();
 
 		while (start < end) {
 			int idx;
@@ -193,11 +202,12 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		}
 		tlbw_use_hazard();
 		write_c0_entryhi(pid);
+		htw_start();
 	} else {
 		local_flush_tlb_all();
 	}
-	FLUSH_ITLB;
-	EXIT_CRITICAL(flags);
+	flush_micro_tlb();
+	local_irq_restore(flags);
 }
 
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
@@ -205,14 +215,21 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	int cpu = smp_processor_id();
 
 	if (cpu_context(cpu, vma->vm_mm) != 0) {
-		unsigned long flags;
-		int oldpid, newpid, idx;
+		unsigned long old_mmid;
+		unsigned long flags, old_entryhi;
+		int idx;
 
-		newpid = cpu_asid(cpu, vma->vm_mm);
 		page &= (PAGE_MASK << 1);
-		ENTER_CRITICAL(flags);
-		oldpid = read_c0_entryhi();
-		write_c0_entryhi(page | newpid);
+		local_irq_save(flags);
+		old_entryhi = read_c0_entryhi();
+		htw_stop();
+		if (cpu_has_mmid) {
+			old_mmid = read_c0_memorymapid();
+			write_c0_entryhi(page);
+			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
+		} else {
+			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
+		}
 		mtc0_tlbw_hazard();
 		tlb_probe();
 		tlb_probe_hazard();
@@ -228,9 +245,12 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 		tlbw_use_hazard();
 
 	finish:
-		write_c0_entryhi(oldpid);
-		FLUSH_ITLB_VM(vma);
-		EXIT_CRITICAL(flags);
+		write_c0_entryhi(old_entryhi);
+		if (cpu_has_mmid)
+			write_c0_memorymapid(old_mmid);
+		htw_start();
+		flush_micro_tlb_vm(vma);
+		local_irq_restore(flags);
 	}
 }
 
@@ -243,8 +263,9 @@ void local_flush_tlb_one(unsigned long page)
 	unsigned long flags;
 	int oldpid, idx;
 
-	ENTER_CRITICAL(flags);
+	local_irq_save(flags);
 	oldpid = read_c0_entryhi();
+	htw_stop();
 	page &= (PAGE_MASK << 1);
 	write_c0_entryhi(page);
 	mtc0_tlbw_hazard();
@@ -261,8 +282,9 @@ void local_flush_tlb_one(unsigned long page)
 		tlbw_use_hazard();
 	}
 	write_c0_entryhi(oldpid);
-	FLUSH_ITLB;
-	EXIT_CRITICAL(flags);
+	htw_start();
+	flush_micro_tlb();
+	local_irq_restore(flags);
 }
 
 /*
@@ -274,32 +296,39 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 {
 	unsigned long flags;
 	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp;
-	pte_t *ptep;
+	pte_t *ptep, *ptemap = NULL;
 	int idx, pid;
 
 	/*
 	 * Handle debugger faulting in for debuggee.
 	 */
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	ENTER_CRITICAL(flags);
+	local_irq_save(flags);
 
-	pid = read_c0_entryhi() & ASID_MASK;
+	htw_stop();
 	address &= (PAGE_MASK << 1);
-	write_c0_entryhi(address | pid);
+	if (cpu_has_mmid) {
+		write_c0_entryhi(address);
+	} else {
+		pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
+		write_c0_entryhi(address | pid);
+	}
 	pgdp = pgd_offset(vma->vm_mm, address);
 	mtc0_tlbw_hazard();
 	tlb_probe();
 	tlb_probe_hazard();
-	pudp = pud_offset(pgdp, address);
+	p4dp = p4d_offset(pgdp, address);
+	pudp = pud_offset(p4dp, address);
 	pmdp = pmd_offset(pudp, address);
 	idx = read_c0_index();
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 	/* this could be a huge page  */
-	if (pmd_huge(*pmdp)) {
+	if (pmd_leaf(*pmdp)) {
 		unsigned long lo;
 		write_c0_pagemask(PM_HUGE_MASK);
 		ptep = (pte_t *)pmdp;
@@ -317,12 +346,27 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	} else
 #endif
 	{
-		ptep = pte_offset_map(pmdp, address);
+		ptemap = ptep = pte_offset_map(pmdp, address);
+		/*
+		 * update_mmu_cache() is called between pte_offset_map_lock()
+		 * and pte_unmap_unlock(), so we can assume that ptep is not
+		 * NULL here: and what should be done below if it were NULL?
+		 */
 
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#ifdef CONFIG_XPA
+		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
+		if (cpu_has_xpa)
+			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+		ptep++;
+
+		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
+		if (cpu_has_xpa)
+			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+#else
 		write_c0_entrylo0(ptep->pte_high);
 		ptep++;
 		write_c0_entrylo1(ptep->pte_high);
+#endif
 #else
 		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
 		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
@@ -334,23 +378,36 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 		tlb_write_indexed();
 	}
 	tlbw_use_hazard();
-	FLUSH_ITLB_VM(vma);
-	EXIT_CRITICAL(flags);
+	htw_start();
+	flush_micro_tlb_vm(vma);
+
+	if (ptemap)
+		pte_unmap(ptemap);
+	local_irq_restore(flags);
}
 
 void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	unsigned long entryhi, unsigned long pagemask)
 {
+#ifdef CONFIG_XPA
+	panic("Broken for XPA kernels");
+#else
+	unsigned int old_mmid;
 	unsigned long flags;
 	unsigned long wired;
 	unsigned long old_pagemask;
 	unsigned long old_ctx;
 
-	ENTER_CRITICAL(flags);
+	local_irq_save(flags);
+	if (cpu_has_mmid) {
+		old_mmid = read_c0_memorymapid();
+		write_c0_memorymapid(MMID_KERNEL_WIRED);
+	}
 	/* Save old context and create impossible VPN2 value */
 	old_ctx = read_c0_entryhi();
+	htw_stop();
 	old_pagemask = read_c0_pagemask();
-	wired = read_c0_wired();
+	wired = num_wired_entries();
 	write_c0_wired(wired + 1);
 	write_c0_index(wired);
 	tlbw_use_hazard();	/* What is the hazard here? */
@@ -363,33 +420,88 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	tlbw_use_hazard();
 
 	write_c0_entryhi(old_ctx);
+	if (cpu_has_mmid)
+		write_c0_memorymapid(old_mmid);
 	tlbw_use_hazard();	/* What is the hazard here? */
+	htw_start();
 	write_c0_pagemask(old_pagemask);
 	local_flush_tlb_all();
-	EXIT_CRITICAL(flags);
+	local_irq_restore(flags);
+#endif
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
-int __init has_transparent_hugepage(void)
+int has_transparent_hugepage(void)
 {
-	unsigned int mask;
-	unsigned long flags;
+	static unsigned int mask = -1;
 
-	ENTER_CRITICAL(flags);
-	write_c0_pagemask(PM_HUGE_MASK);
-	back_to_back_c0_hazard();
-	mask = read_c0_pagemask();
-	write_c0_pagemask(PM_DEFAULT_MASK);
-
-	EXIT_CRITICAL(flags);
+	if (mask == -1) {	/* first call comes during __init */
+		unsigned long flags;
+
+		local_irq_save(flags);
+		write_c0_pagemask(PM_HUGE_MASK);
+		back_to_back_c0_hazard();
+		mask = read_c0_pagemask();
+		write_c0_pagemask(PM_DEFAULT_MASK);
+		local_irq_restore(flags);
+	}
 
 	return mask == PM_HUGE_MASK;
 }
+EXPORT_SYMBOL(has_transparent_hugepage);
 
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-static int __cpuinitdata ntlb;
+/*
+ * Used for loading TLB entries before trap_init() has started, when we
+ * don't actually want to add a wired entry which remains throughout the
+ * lifetime of the system
+ */
+
+int temp_tlb_entry;
+
+#ifndef CONFIG_64BIT
+__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
+			       unsigned long entryhi, unsigned long pagemask)
+{
+	int ret = 0;
+	unsigned long flags;
+	unsigned long wired;
+	unsigned long old_pagemask;
+	unsigned long old_ctx;
+
+	local_irq_save(flags);
+	/* Save old context and create impossible VPN2 value */
+	htw_stop();
+	old_ctx = read_c0_entryhi();
+	old_pagemask = read_c0_pagemask();
+	wired = num_wired_entries();
+	if (--temp_tlb_entry < wired) {
+		printk(KERN_WARNING
+		       "No TLB space left for add_temporary_entry\n");
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	write_c0_index(temp_tlb_entry);
+	write_c0_pagemask(pagemask);
+	write_c0_entryhi(entryhi);
+	write_c0_entrylo0(entrylo0);
+	write_c0_entrylo1(entrylo1);
+	mtc0_tlbw_hazard();
+	tlb_write_indexed();
+	tlbw_use_hazard();
+
+	write_c0_entryhi(old_ctx);
+	write_c0_pagemask(old_pagemask);
+	htw_start();
+out:
+	local_irq_restore(flags);
+	return ret;
+}
+#endif
+
+static int ntlb;
 static int __init set_ntlb(char *str)
 {
 	get_option(&str, &ntlb);
@@ -398,7 +510,101 @@ static int __init set_ntlb(char *str)
 
 __setup("ntlb=", set_ntlb);
 
-void __cpuinit tlb_init(void)
+
+/* Comparison function for EntryHi VPN fields. */
+static int r4k_vpn_cmp(const void *a, const void *b)
+{
+	long v = *(unsigned long *)a - *(unsigned long *)b;
+	int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1 : 0;
+	return s ? (v != 0) | v >> s : v;
+}
+
+/*
+ * Initialise all TLB entries with unique values that do not clash with
+ * what we have been handed over and what we'll be using ourselves.
+ */
+static void __ref r4k_tlb_uniquify(void)
+{
+	int tlbsize = current_cpu_data.tlbsize;
+	bool use_slab = slab_is_available();
+	int start = num_wired_entries();
+	phys_addr_t tlb_vpn_size;
+	unsigned long *tlb_vpns;
+	unsigned long vpn_mask;
+	int cnt, ent, idx, i;
+
+	vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+	vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
+
+	tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
+	tlb_vpns = (use_slab ?
+		    kmalloc(tlb_vpn_size, GFP_KERNEL) :
+		    memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
+	if (WARN_ON(!tlb_vpns))
+		return;	/* Pray local_flush_tlb_all() is good enough. */
+
+	htw_stop();
+
+	for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+		unsigned long vpn;
+
+		write_c0_index(i);
+		mtc0_tlbr_hazard();
+		tlb_read();
+		tlb_read_hazard();
+		vpn = read_c0_entryhi();
+		vpn &= vpn_mask & PAGE_MASK;
+		tlb_vpns[cnt] = vpn;
+
+		/* Prevent any large pages from overlapping regular ones. */
+		write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
+		mtc0_tlbw_hazard();
+		tlb_write_indexed();
+		tlbw_use_hazard();
+	}
+
+	sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
+
+	write_c0_pagemask(PM_DEFAULT_MASK);
+	write_c0_entrylo0(0);
+	write_c0_entrylo1(0);
+
+	idx = 0;
+	ent = tlbsize;
+	for (i = start; i < tlbsize; i++)
+		while (1) {
+			unsigned long entryhi, vpn;
+
+			entryhi = UNIQUE_ENTRYHI(ent);
+			vpn = entryhi & vpn_mask & PAGE_MASK;
+
+			if (idx >= cnt || vpn < tlb_vpns[idx]) {
+				write_c0_entryhi(entryhi);
+				write_c0_index(i);
+				mtc0_tlbw_hazard();
+				tlb_write_indexed();
+				ent++;
+				break;
+			} else if (vpn == tlb_vpns[idx]) {
+				ent++;
+			} else {
+				idx++;
+			}
+		}
+
+	tlbw_use_hazard();
+	htw_start();
+	flush_micro_tlb();
+	if (use_slab)
+		kfree(tlb_vpns);
+	else
+		memblock_free(tlb_vpns, tlb_vpn_size);
+}
+
+/*
+ * Configure TLB (for init or after a CPU has been powered off).
+ */
+static void r4k_tlb_configure(void)
 {
 	/*
 	 * You should never change this register:
@@ -408,28 +614,41 @@ void __cpuinit tlb_init(void)
 	 *     be set to fixed-size pages.
 	 */
 	write_c0_pagemask(PM_DEFAULT_MASK);
+	back_to_back_c0_hazard();
+	if (read_c0_pagemask() != PM_DEFAULT_MASK)
+		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
+
 	write_c0_wired(0);
 	if (current_cpu_type() == CPU_R10000 ||
 	    current_cpu_type() == CPU_R12000 ||
-	    current_cpu_type() == CPU_R14000)
+	    current_cpu_type() == CPU_R14000 ||
+	    current_cpu_type() == CPU_R16000)
 		write_c0_framemask(0);
 
 	if (cpu_has_rixi) {
 		/*
-		 * Enable the no read, no exec bits, and enable large virtual
+		 * Enable the no read, no exec bits, and enable large physical
 		 * address.
 		 */
-		u32 pg = PG_RIE | PG_XIE;
 #ifdef CONFIG_64BIT
-		pg |= PG_ELPA;
+		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
+#else
+		set_c0_pagegrain(PG_RIE | PG_XIE);
 #endif
-		write_c0_pagegrain(pg);
 	}
 
+	temp_tlb_entry = current_cpu_data.tlbsize - 1;
+
 	/* From this point on the ARC firmware is dead.	 */
+	r4k_tlb_uniquify();
 	local_flush_tlb_all();
 
 	/* Did I tell you that ARC SUCKS?  */
+}
+
+void tlb_init(void)
+{
+	r4k_tlb_configure();
 
 	if (ntlb) {
 		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
@@ -443,3 +662,26 @@ void __cpuinit tlb_init(void)
 
 	build_tlb_refill_handler();
 }
+
+static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
+			       void *v)
+{
+	switch (cmd) {
+	case CPU_PM_ENTER_FAILED:
+	case CPU_PM_EXIT:
+		r4k_tlb_configure();
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block r4k_tlb_pm_notifier_block = {
+	.notifier_call = r4k_tlb_pm_notifier,
+};
+
+static int __init r4k_tlb_init_pm(void)
+{
+	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
+}
+arch_initcall(r4k_tlb_init_pm);
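
Editor's note (not part of the patch): the branchless comparator r4k_vpn_cmp() added above is easy to misread. The standalone user-space sketch below (the name vpn_cmp and the main() driver are illustrative, not from the kernel) shows why the comparator cannot simply truncate the subtraction to int on 64-bit builds, and what "(v != 0) | v >> s" actually computes.

	/*
	 * sort() wants an int: negative, zero, or positive. On 64-bit,
	 * casting the difference of two unsigned longs to int can drop
	 * the sign or collapse unequal values to 0. The comparator folds
	 * the full-width difference instead: (v != 0) contributes 1 when
	 * the values differ, and v >> 63 (an arithmetic shift) smears the
	 * sign bit, so the result is negative exactly when v is.
	 */
	#include <stdio.h>

	static int vpn_cmp(const void *a, const void *b)
	{
		long v = *(unsigned long *)a - *(unsigned long *)b;
		int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1 : 0;

		return s ? (v != 0) | v >> s : v;
	}

	int main(void)
	{
		unsigned long a = 0, b = 0x8000000000000000UL;

		/* Naive (int)(a - b) truncates to 0, wrongly reporting equality. */
		printf("naive: %d, vpn_cmp: %d\n", (int)(a - b), vpn_cmp(&a, &b));
		return 0;
	}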
