author	Vladimir Murzin <vladimir.murzin@arm.com>	2020-01-07 10:28:03 +0000
committer	Will Deacon <will@kernel.org>	2020-01-16 12:45:05 +0000
commitf88f42f853a80d9b087f0c2035d6fbab504ea54c (patch)
tree8bafca2b2aea690b9f05ca3cd32b9b027c2f5228 /arch/arm64/mm
parentedf90818271b86dcfcc56e0380a8f7fac6eed0cc (diff)
arm64: context: Free up kernel ASIDs if KPTI is not in use
We can extend the user ASID space if it turns out that the system does not require KPTI. We start with the kernel ASIDs reserved, because the CPU caps are not finalized yet, and free them up lazily on the next rollover once we confirm that KPTI is not in use.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
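To see why a single memset() can reserve half of the ASID space: 0xaa is binary 10101010, so the fill sets every odd-numbered bit of the allocation bitmap, and a find-next-zero allocator can only ever land on even ASIDs (the kernel-facing member of each pair), while each odd sibling (bottom bit set, hence userspace-only) is implicitly claimed alongside it. Below is a minimal user-space sketch of that idea, not kernel code: it assumes a toy 8-bit ASID space and a naive linear scan in place of the kernel's find_next_zero_bit().

#include <stdio.h>
#include <string.h>
#include <limits.h>

#define ASID_BITS	8			/* toy value; arm64 has 8- or 16-bit ASIDs */
#define NUM_ASIDS	(1UL << ASID_BITS)
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

static unsigned long asid_map[NUM_ASIDS / BITS_PER_LONG];

/* Naive stand-in for the kernel's find_next_zero_bit(). */
static long find_next_zero(unsigned long start)
{
	for (unsigned long i = start; i < NUM_ASIDS; i++)
		if (!(asid_map[i / BITS_PER_LONG] >> (i % BITS_PER_LONG) & 1))
			return i;
	return -1;
}

int main(void)
{
	/* Pessimistic start: pre-claim one ASID of every pair, as
	 * set_kpti_asid_bits() does. */
	memset(asid_map, 0xaa, sizeof(asid_map));
	printf("first free ASID: %ld\n", find_next_zero(1));	/* prints 2: evens only */

	/* Rollover confirmed KPTI is off: the whole space opens up. */
	memset(asid_map, 0, sizeof(asid_map));
	printf("first free ASID: %ld\n", find_next_zero(1));	/* prints 1 */
	return 0;
}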
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--	arch/arm64/mm/context.c	38
1 file changed, 30 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index b5e329fde2dd..8ef73e89d514 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -29,15 +29,9 @@ static cpumask_t tlb_flush_pending;
#define ASID_MASK (~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION (1UL << asid_bits)
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1)
-#define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1)
-#define idx2asid(idx) (((idx) << 1) & ~ASID_MASK)
-#else
-#define NUM_USER_ASIDS (ASID_FIRST_VERSION)
+#define NUM_USER_ASIDS ASID_FIRST_VERSION
#define asid2idx(asid) ((asid) & ~ASID_MASK)
#define idx2asid(idx) asid2idx(idx)
-#endif
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
@@ -77,13 +71,33 @@ void verify_cpu_asid_bits(void)
}
}
+static void set_kpti_asid_bits(void)
+{
+ unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
+ /*
+ * In case of KPTI kernel/user ASIDs are allocated in
+ * pairs, the bottom bit distinguishes the two: if it
+ * is set, then the ASID will map only userspace. Thus
+ * mark even as reserved for kernel.
+ */
+ memset(asid_map, 0xaa, len);
+}
+
+static void set_reserved_asid_bits(void)
+{
+ if (arm64_kernel_unmapped_at_el0())
+ set_kpti_asid_bits();
+ else
+ bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+}
+
static void flush_context(void)
{
int i;
u64 asid;
/* Update the list of reserved ASIDs and the ASID bitmap. */
- bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+ set_reserved_asid_bits();
for_each_possible_cpu(i) {
asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
@@ -261,6 +275,14 @@ static int asids_init(void)
panic("Failed to allocate bitmap for %lu ASIDs\n",
NUM_USER_ASIDS);
+ /*
+ * We cannot call set_reserved_asid_bits() here because CPU
+ * caps are not finalized yet, so it is safer to assume KPTI
+ * and reserve kernel ASIDs from the beginning.
+ */
+ if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
+ set_kpti_asid_bits();
+
pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
return 0;
}
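A point worth spelling out about the first hunk: before this patch the halving was a compile-time property, since under CONFIG_UNMAP_KERNEL_AT_EL0 the bitmap had one bit per kernel/user pair and idx2asid() shifted left to skip the user siblings. After it, the bitmap covers the full ASID space and the pairing lives in the map's contents (the 0xaa fill), which is what makes it possible to drop at a rollover. A schematic comparison, with illustrative macro names and the ASID_MASK masking of the real macros omitted:

#include <stdio.h>

#define ASID_BITS		16		/* assume 16-bit ASIDs */
#define NUM_ASIDS		(1UL << ASID_BITS)

/* Old scheme: capacity halved at build time under the KPTI config. */
#define OLD_NUM_USER_ASIDS	(NUM_ASIDS >> 1)
#define old_idx2asid(idx)	((idx) << 1)

/* New scheme: full-size bitmap; pairing enforced by its contents. */
#define NEW_NUM_USER_ASIDS	NUM_ASIDS
#define new_idx2asid(idx)	(idx)

int main(void)
{
	printf("old usable contexts: %lu\n", OLD_NUM_USER_ASIDS);		/* 32768 */
	printf("new usable contexts (KPTI off): %lu\n", NEW_NUM_USER_ASIDS);	/* 65536 */
	printf("bitmap idx 3 -> asid: old %lu, new %lu\n",
	       old_idx2asid(3UL), new_idx2asid(3UL));				/* 6 vs 3 */
	return 0;
}

With KPTI enabled the run-time scheme still yields 32768 usable contexts, since the 0xaa fill pre-claims the other half of the map; the difference is that the decision is now revisited at every rollover instead of being fixed at build time.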