author    Paul Burton <paul.burton@mips.com>  2019-02-02 01:43:28 +0000
committer Paul Burton <paul.burton@mips.com>  2019-02-04 10:56:41 -0800
commit    c8790d657b0a8d42801fb4536f6f106b4b6306e8 (patch)
tree      3452c36f3e620b6f151b35f6d23f217aaa1154ba /arch/mips/include/asm/mmu_context.h
parent    535113896e802e9f8f92c05a887d1761c34ae903 (diff)
MIPS: MemoryMapID (MMID) Support
Introduce support for using MemoryMapIDs (MMIDs) as an alternative to Address Space IDs (ASIDs). The major difference between the two is that MMIDs are global - ie. an MMID uniquely identifies an address space across all coherent CPUs. In contrast ASIDs are non-global per-CPU IDs, wherein each address space is allocated a separate ASID for each CPU upon which it is used. This global namespace allows a new GINVT instruction to be used to globally invalidate TLB entries associated with a particular MMID across all coherent CPUs in the system, removing the need for IPIs to invalidate entries with separate ASIDs on each CPU.

The allocation scheme used here is largely borrowed from arm64 (see arch/arm64/mm/context.c). In essence we maintain a bitmap to track available MMIDs, and MMIDs in active use at the time of a rollover to a new MMID version are preserved in the new version. The allocation scheme requires efficient 64 bit atomics in order to perform reasonably, so this support depends upon CONFIG_GENERIC_ATOMIC64=n (ie. currently it will only be included in MIPS64 kernels).

The first, and currently only, available CPU with support for MMIDs is the MIPS I6500. This CPU supports 16 bit MMIDs, and so for now we cap our MMIDs to 16 bits wide in order to prevent the bitmap from growing to absurd sizes if any future CPU implements the 32 bit MMIDs that the architecture manuals recommend.

When MMIDs are in use we also make use of the GINVT instruction, which is available due to the global nature of MMIDs. By executing a sequence of GINVT & SYNC 0x14 instructions we can avoid the overhead of an IPI to each remote CPU in many cases.

One complication is that GINVT will invalidate wired entries (in all cases apart from type 0, which targets the entire TLB). In order to avoid GINVT invalidating any wired TLB entries we set up, we make sure to create those entries using a reserved MMID (0) that we never associate with any address space.

Also of note is that KVM will require further work in order to support MMIDs & GINVT, since KVM is involved in allocating IDs for guests & in configuring the MMU. That work is not part of this patch, so for now when MMIDs are in use KVM is disabled.

Signed-off-by: Paul Burton <paul.burton@mips.com>
Cc: linux-mips@vger.kernel.org
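For readers unfamiliar with the arm64-style allocator the commit message refers to, the following is a minimal userspace sketch of the version + bitmap scheme, for illustration only. Every name in it (new_context, id_map, VERSION_INC and so on) is invented for this sketch rather than taken from the kernel, and the real rollover path additionally carries over MMIDs still live on other CPUs, which this sketch only notes in a comment.

/*
 * Illustrative sketch of a version + bitmap ID allocator in the style
 * of arch/arm64/mm/context.c. Not kernel code; all names are made up.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ID_BITS     16                       /* MMIDs capped at 16 bits wide */
#define NUM_IDS     (1u << ID_BITS)
#define ID_MASK     ((uint64_t)NUM_IDS - 1)
#define VERSION_INC ((uint64_t)NUM_IDS)      /* version lives above the ID bits */

static uint64_t current_version = VERSION_INC;
static uint64_t id_map[NUM_IDS / 64];        /* IDs handed out this version */

/* Return a valid (version | id) context, reallocating after a rollover. */
static uint64_t new_context(uint64_t old_ctx)
{
	/* A context allocated under the current version is still valid. */
	if (old_ctx && (old_ctx & ~ID_MASK) == current_version)
		return old_ctx;

	/* Scan for a free ID; ID 0 stays reserved for wired entries. */
	for (uint32_t id = 1; id < NUM_IDS; id++) {
		if (!(id_map[id / 64] & (1ull << (id % 64)))) {
			id_map[id / 64] |= 1ull << (id % 64);
			return current_version | id;
		}
	}

	/*
	 * Out of IDs: move to a new version and restart the bitmap. The
	 * real allocator would first preserve IDs still in active use on
	 * other CPUs into the new version; that step is omitted here.
	 */
	current_version += VERSION_INC;
	memset(id_map, 0, sizeof(id_map));
	id_map[0] |= 1ull << 1;
	return current_version | 1;
}

int main(void)
{
	uint64_t ctx = new_context(0);
	printf("ctx %#llx: version %#llx, id %llu\n",
	       (unsigned long long)ctx,
	       (unsigned long long)(ctx & ~ID_MASK),
	       (unsigned long long)(ctx & ID_MASK));
	return 0;
}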
Diffstat (limited to 'arch/mips/include/asm/mmu_context.h')
-rw-r--r--  arch/mips/include/asm/mmu_context.h  54
1 file changed, 50 insertions(+), 4 deletions(-)
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index a0f29df8ced8..cddead91acd4 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -17,8 +17,10 @@
#include <linux/smp.h>
#include <linux/slab.h>
+#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/dsemul.h>
+#include <asm/ginvt.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
@@ -73,6 +75,19 @@ extern unsigned long pgd_current[];
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
/*
+ * The ginvt instruction will invalidate wired entries when its type field
+ * targets anything other than the entire TLB. That means that if we were to
+ * allow the kernel to create wired entries with the MMID of current->active_mm
+ * then those wired entries could be invalidated when we later use ginvt to
+ * invalidate TLB entries with that MMID.
+ *
+ * In order to prevent ginvt from trashing wired entries, we reserve one MMID
+ * for use by the kernel when creating wired entries. This MMID will never be
+ * assigned to a struct mm, and we'll never target it with a ginvt instruction.
+ */
+#define MMID_KERNEL_WIRED 0
+
+/*
* All unused by hardware upper bits will be considered
* as a software asid extension.
*/
@@ -90,13 +105,19 @@ static inline u64 asid_first_version(unsigned int cpu)
static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
{
+ if (cpu_has_mmid)
+ return atomic64_read(&mm->context.mmid);
+
return mm->context.asid[cpu];
}
static inline void set_cpu_context(unsigned int cpu,
struct mm_struct *mm, u64 ctx)
{
- mm->context.asid[cpu] = ctx;
+ if (cpu_has_mmid)
+ atomic64_set(&mm->context.mmid, ctx);
+ else
+ mm->context.asid[cpu] = ctx;
}
#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
@@ -120,8 +141,12 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
int i;
- for_each_possible_cpu(i)
- set_cpu_context(i, mm, 0);
+ if (cpu_has_mmid) {
+ set_cpu_context(0, mm, 0);
+ } else {
+ for_each_possible_cpu(i)
+ set_cpu_context(i, mm, 0);
+ }
mm->context.bd_emupage_allocmap = NULL;
spin_lock_init(&mm->context.bd_emupage_lock);
@@ -168,12 +193,33 @@ drop_mmu_context(struct mm_struct *mm)
{
unsigned long flags;
unsigned int cpu;
+ u32 old_mmid;
+ u64 ctx;
local_irq_save(flags);
cpu = smp_processor_id();
- if (!cpu_context(cpu, mm)) {
+ ctx = cpu_context(cpu, mm);
+
+ if (!ctx) {
/* no-op */
+ } else if (cpu_has_mmid) {
+ /*
+ * Globally invalidating TLB entries associated with the MMID
+ * is pretty cheap using the GINVT instruction, so we'll do
+ * that rather than incur the overhead of allocating a new
+ * MMID. The latter would be especially difficult since MMIDs
+ * are global & other CPUs may be actively using ctx.
+ */
+ htw_stop();
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(ctx & cpu_asid_mask(&cpu_data[cpu]));
+ mtc0_tlbw_hazard();
+ ginvt_mmid();
+ sync_ginv();
+ write_c0_memorymapid(old_mmid);
+ instruction_hazard();
+ htw_start();
} else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
/*
* mm is currently active, so we can't really drop it.