Diffstat (limited to 'arch/arm64/mm/context.c')
-rw-r--r--  arch/arm64/mm/context.c | 221
1 file changed, 188 insertions(+), 33 deletions(-)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index b5e329fde2dd..188197590fc9 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -6,6 +6,7 @@
* Copyright (C) 2012 ARM Ltd.
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -26,35 +27,33 @@ static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
+static unsigned long max_pinned_asids;
+static unsigned long nr_pinned_asids;
+static unsigned long *pinned_asid_map;
+
#define ASID_MASK (~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION (1UL << asid_bits)
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1)
-#define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1)
-#define idx2asid(idx) (((idx) << 1) & ~ASID_MASK)
-#else
-#define NUM_USER_ASIDS (ASID_FIRST_VERSION)
-#define asid2idx(asid) ((asid) & ~ASID_MASK)
-#define idx2asid(idx) asid2idx(idx)
-#endif
+#define NUM_USER_ASIDS ASID_FIRST_VERSION
+#define ctxid2asid(asid) ((asid) & ~ASID_MASK)
+#define asid2ctxid(asid, genid) ((asid) | (genid))
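For illustration, a minimal userspace model of the two helpers above,
assuming asid_bits == 16 (the kernel reads the real width from
ID_AA64MMFR0_EL1 at boot):

    #include <stdint.h>
    #include <stdio.h>

    #define ASID_BITS  16
    #define ASID_MASK  (~((1ULL << ASID_BITS) - 1))       /* generation field */
    #define ctxid2asid(x)    ((x) & ~ASID_MASK)           /* low 16 bits */
    #define asid2ctxid(a, g) ((a) | (g))                  /* recombine */

    int main(void)
    {
        uint64_t gen   = 3ULL << ASID_BITS;               /* generation 3 */
        uint64_t ctxid = asid2ctxid(0x2a, gen);           /* 0x3002a */

        printf("ctxid=%#llx asid=%#llx\n",
               (unsigned long long)ctxid,
               (unsigned long long)ctxid2asid(ctxid));    /* asid=0x2a */
        return 0;
    }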
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
u32 asid;
int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
- ID_AA64MMFR0_ASID_SHIFT);
+ ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
switch (fld) {
default:
pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
smp_processor_id(), fld);
- /* Fallthrough */
- case 0:
+ fallthrough;
+ case ID_AA64MMFR0_EL1_ASIDBITS_8:
asid = 8;
break;
- case 2:
+ case ID_AA64MMFR0_EL1_ASIDBITS_16:
asid = 16;
}
@@ -77,13 +76,38 @@ void verify_cpu_asid_bits(void)
}
}
+static void set_kpti_asid_bits(unsigned long *map)
+{
+ unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
+ /*
+ * With KPTI, kernel/user ASIDs are allocated in pairs and the
+ * bottom bit distinguishes the two: if it is set, the ASID maps
+ * only userspace. Mark all odd-numbered ASIDs as in use, so that
+ * only the even (kernel) half of each pair is ever handed out by
+ * the allocator.
+ */
+ memset(map, 0xaa, len);
+}
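As a sketch of why the 0xaa pattern works: each byte of the map becomes
0b10101010, so every odd-numbered bit is pre-set. Odd ASIDs (bottom bit
set, i.e. the userspace half of each KPTI pair) therefore look
permanently allocated, and only the even half can be claimed:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char map[2];
        unsigned int i;

        memset(map, 0xaa, sizeof(map));   /* 0b10101010 per byte */

        for (i = 0; i < 16; i++)          /* bits 1, 3, 5, ... are set */
            printf("asid %2u: %s\n", i,
                   (map[i / 8] >> (i % 8)) & 1 ? "reserved" : "free");
        return 0;
    }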
+
+static void set_reserved_asid_bits(void)
+{
+ if (pinned_asid_map)
+ bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS);
+ else if (arm64_kernel_unmapped_at_el0())
+ set_kpti_asid_bits(asid_map);
+ else
+ bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+}
+
+#define asid_gen_match(asid) \
+ (!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits))
+
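The generation check relies on XOR cancelling equal bits: shifting the
XOR result right by asid_bits discards the ASID field, so the macro is
non-zero exactly when the generation fields differ. A userspace sketch,
again assuming 16-bit ASIDs:

    #include <stdint.h>
    #include <stdio.h>

    #define ASID_BITS 16

    /* 1 ("match") iff ctxid carries the current generation. */
    static int asid_gen_match(uint64_t ctxid, uint64_t generation)
    {
        return !((ctxid ^ generation) >> ASID_BITS);
    }

    int main(void)
    {
        uint64_t gen = 3ULL << ASID_BITS;

        printf("%d\n", asid_gen_match(gen | 0x2a, gen));                 /* 1 */
        printf("%d\n", asid_gen_match((2ULL << ASID_BITS) | 0x2a, gen)); /* 0 */
        return 0;
    }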
static void flush_context(void)
{
int i;
u64 asid;
/* Update the list of reserved ASIDs and the ASID bitmap. */
- bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+ set_reserved_asid_bits();
for_each_possible_cpu(i) {
asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
@@ -96,7 +120,7 @@ static void flush_context(void)
*/
if (asid == 0)
asid = per_cpu(reserved_asids, i);
- __set_bit(asid2idx(asid), asid_map);
+ __set_bit(ctxid2asid(asid), asid_map);
per_cpu(reserved_asids, i) = asid;
}
@@ -138,7 +162,7 @@ static u64 new_context(struct mm_struct *mm)
u64 generation = atomic64_read(&asid_generation);
if (asid != 0) {
- u64 newasid = generation | (asid & ~ASID_MASK);
+ u64 newasid = asid2ctxid(ctxid2asid(asid), generation);
/*
* If our current ASID was active during a rollover, we
@@ -148,10 +172,18 @@ static u64 new_context(struct mm_struct *mm)
return newasid;
/*
+ * If it is pinned, we can keep using it. Note that the reserved
+ * check above takes priority: even if the ASID is also pinned, we
+ * still need to update the generation in reserved_asids.
+ */
+ if (refcount_read(&mm->context.pinned))
+ return newasid;
+
+ /*
* We had a valid ASID in a previous life, so try to re-use
* it if possible.
*/
- if (!__test_and_set_bit(asid2idx(asid), asid_map))
+ if (!__test_and_set_bit(ctxid2asid(asid), asid_map))
return newasid;
}
@@ -177,12 +209,13 @@ static u64 new_context(struct mm_struct *mm)
set_asid:
__set_bit(asid, asid_map);
cur_idx = asid;
- return idx2asid(asid) | generation;
+ return asid2ctxid(asid, generation);
}
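As a worked example of the reuse path: if an mm last ran with ctxid
0x1002a (generation 1, ASID 0x2a, assuming 16-bit ASIDs) and the
allocator has since rolled over to generation 3, new_context() first
tries __test_and_set_bit(0x2a, asid_map); if that bit is still clear,
the task keeps its old number as 0x3002a, and only otherwise does it
fall through to searching for a fresh ASID.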
-void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
+void check_and_switch_context(struct mm_struct *mm)
{
unsigned long flags;
+ unsigned int cpu;
u64 asid, old_active_asid;
if (system_supports_cnp())
@@ -204,25 +237,25 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
* relaxed xchg in flush_context will treat us as reserved
* because atomic RmWs are totally ordered for a given location.
*/
- old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
- if (old_active_asid &&
- !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
- atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
+ old_active_asid = atomic64_read(this_cpu_ptr(&active_asids));
+ if (old_active_asid && asid_gen_match(asid) &&
+ atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_asids),
old_active_asid, asid))
goto switch_mm_fastpath;
raw_spin_lock_irqsave(&cpu_asid_lock, flags);
/* Check that our ASID belongs to the current generation. */
asid = atomic64_read(&mm->context.id);
- if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
+ if (!asid_gen_match(asid)) {
asid = new_context(mm);
atomic64_set(&mm->context.id, asid);
}
+ cpu = smp_processor_id();
if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
local_flush_tlb_all();
- atomic64_set(&per_cpu(active_asids, cpu), asid);
+ atomic64_set(this_cpu_ptr(&active_asids), asid);
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
switch_mm_fastpath:
@@ -237,31 +270,153 @@ switch_mm_fastpath:
cpu_switch_mm(mm->pgd, mm);
}
+unsigned long arm64_mm_context_get(struct mm_struct *mm)
+{
+ unsigned long flags;
+ u64 asid;
+
+ if (!pinned_asid_map)
+ return 0;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+
+ asid = atomic64_read(&mm->context.id);
+
+ if (refcount_inc_not_zero(&mm->context.pinned))
+ goto out_unlock;
+
+ if (nr_pinned_asids >= max_pinned_asids) {
+ asid = 0;
+ goto out_unlock;
+ }
+
+ if (!asid_gen_match(asid)) {
+ /*
+ * We went through one or more rollover since that ASID was
+ * used. Ensure that it is still valid, or generate a new one.
+ */
+ asid = new_context(mm);
+ atomic64_set(&mm->context.id, asid);
+ }
+
+ nr_pinned_asids++;
+ __set_bit(ctxid2asid(asid), pinned_asid_map);
+ refcount_set(&mm->context.pinned, 1);
+
+out_unlock:
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+
+ asid = ctxid2asid(asid);
+
+ /* Set the equivalent of USER_ASID_BIT */
+ if (asid && arm64_kernel_unmapped_at_el0())
+ asid |= 1;
+
+ return asid;
+}
+EXPORT_SYMBOL_GPL(arm64_mm_context_get);
+
+void arm64_mm_context_put(struct mm_struct *mm)
+{
+ unsigned long flags;
+ u64 asid = atomic64_read(&mm->context.id);
+
+ if (!pinned_asid_map)
+ return;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+
+ if (refcount_dec_and_test(&mm->context.pinned)) {
+ __clear_bit(ctxid2asid(asid), pinned_asid_map);
+ nr_pinned_asids--;
+ }
+
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+EXPORT_SYMBOL_GPL(arm64_mm_context_put);
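A hedged usage sketch of the pinning API: a driver that shares a
process's page tables with a device (in-tree, the SMMUv3 SVA code is
the intended user) pins the ASID so it survives rollover, programs it
into the device, and drops the pin on unbind. All my_dev_* names below
are hypothetical:

    static int my_dev_bind_mm(struct my_dev *dev, struct mm_struct *mm)
    {
        unsigned long asid = arm64_mm_context_get(mm);

        if (!asid)   /* pinning unsupported, or max_pinned_asids reached */
            return -ENOSPC;

        my_dev_program_asid(dev, asid);   /* hypothetical helper */
        return 0;
    }

    static void my_dev_unbind_mm(struct my_dev *dev, struct mm_struct *mm)
    {
        my_dev_clear_asid(dev);           /* hypothetical helper */
        arm64_mm_context_put(mm);
    }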
+
/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
+ if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456))
+ return;
+
asm(ALTERNATIVE("nop; nop; nop",
"ic iallu; dsb nsh; isb",
- ARM64_WORKAROUND_CAVIUM_27456,
- CONFIG_CAVIUM_ERRATUM_27456));
+ ARM64_WORKAROUND_CAVIUM_27456));
}
-static int asids_init(void)
+void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
{
- asid_bits = get_cpu_asid_bits();
+ unsigned long ttbr1 = read_sysreg(ttbr1_el1);
+ unsigned long asid = ASID(mm);
+ unsigned long ttbr0 = phys_to_ttbr(pgd_phys);
+
+ /* Skip CNP for the reserved ASID */
+ if (system_supports_cnp() && asid)
+ ttbr0 |= TTBR_CNP_BIT;
+
+ /* SW PAN needs a copy of the ASID in TTBR0 for entry */
+ if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
+ ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);
+
+ /* Set ASID in TTBR1 since TCR.A1 is set */
+ ttbr1 &= ~TTBR_ASID_MASK;
+ ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);
+
+ cpu_set_reserved_ttbr0_nosync();
+ write_sysreg(ttbr1, ttbr1_el1);
+ write_sysreg(ttbr0, ttbr0_el1);
+ isb();
+ post_ttbr_update_workaround();
+}
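For reference, the ASID field of TTBRn_EL1 occupies bits 63:48, so with
TTBR_ASID_MASK == GENMASK_ULL(63, 48) the FIELD_PREP() above reduces to
asid << 48. A userspace model of the TTBR1 update:

    #include <stdint.h>
    #include <stdio.h>

    #define TTBR_ASID_SHIFT 48
    #define TTBR_ASID_MASK  (0xffffULL << TTBR_ASID_SHIFT)

    int main(void)
    {
        uint64_t ttbr1 = 0x40000000ULL;   /* pretend table address */
        uint64_t asid  = 0x2a;

        ttbr1 &= ~TTBR_ASID_MASK;         /* clear the old ASID */
        ttbr1 |= (asid << TTBR_ASID_SHIFT) & TTBR_ASID_MASK;

        printf("ttbr1=%#llx\n", (unsigned long long)ttbr1); /* 0x2a000040000000 */
        return 0;
    }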
+
+static int asids_update_limit(void)
+{
+ unsigned long num_available_asids = NUM_USER_ASIDS;
+
+ if (arm64_kernel_unmapped_at_el0()) {
+ num_available_asids /= 2;
+ if (pinned_asid_map)
+ set_kpti_asid_bits(pinned_asid_map);
+ }
/*
* Expect allocation after rollover to fail if we don't have at least
* one more ASID than CPUs. ASID #0 is reserved for init_mm.
*/
- WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
+ WARN_ON(num_available_asids - 1 <= num_possible_cpus());
+ pr_info("ASID allocator initialised with %lu entries\n",
+ num_available_asids);
+
+ /*
+ * There must always be an ASID available after rollover. Ensure that,
+ * even if all CPUs have a reserved ASID and the maximum number of ASIDs
+ * are pinned, there still is at least one empty slot in the ASID map.
+ */
+ max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
+ return 0;
+}
+arch_initcall(asids_update_limit);
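A worked example of the limit: assuming 16-bit ASIDs, KPTI enabled and
8 possible CPUs, num_available_asids is 65536 / 2 = 32768 and
max_pinned_asids is 32768 - 8 - 2 = 32758. The "- 2" leaves room for
ASID 0 (reserved for init_mm) plus the one guaranteed-empty slot after
rollover.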
+
+static int asids_init(void)
+{
+ asid_bits = get_cpu_asid_bits();
atomic64_set(&asid_generation, ASID_FIRST_VERSION);
- asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
- GFP_KERNEL);
+ asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
if (!asid_map)
panic("Failed to allocate bitmap for %lu ASIDs\n",
NUM_USER_ASIDS);
- pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
+ pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
+ nr_pinned_asids = 0;
+
+ /*
+ * We cannot call set_reserved_asid_bits() here because CPU
+ * caps are not finalized yet, so it is safer to assume KPTI
+ * and reserve kernel ASIDs from the beginning.
+ */
+ if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
+ set_kpti_asid_bits(asid_map);
return 0;
}
early_initcall(asids_init);