Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/Makefile                                      |   10
-rw-r--r--  arch/arm64/mm/cache.S                                       |  205
-rw-r--r--  arch/arm64/mm/context.c                                     |  221
-rw-r--r--  arch/arm64/mm/copypage.c                                    |   30
-rw-r--r--  arch/arm64/mm/dma-mapping.c                                 |   36
-rw-r--r--  arch/arm64/mm/extable.c                                     |   72
-rw-r--r--  arch/arm64/mm/fault.c                                       |  528
-rw-r--r--  arch/arm64/mm/fixmap.c                                      |  203
-rw-r--r--  arch/arm64/mm/flush.c                                       |   46
-rw-r--r--  arch/arm64/mm/hugetlbpage.c                                 |  305
-rw-r--r--  arch/arm64/mm/init.c                                        |  512
-rw-r--r--  arch/arm64/mm/ioremap.c                                     |   99
-rw-r--r--  arch/arm64/mm/kasan_init.c                                  |  116
-rw-r--r--  arch/arm64/mm/mmap.c                                        |   87
-rw-r--r--  arch/arm64/mm/mmu.c                                         | 1107
-rw-r--r--  arch/arm64/mm/mteswap.c                                     |   85
-rw-r--r--  arch/arm64/mm/numa.c                                        |  470
-rw-r--r--  arch/arm64/mm/pageattr.c                                    |   44
-rw-r--r--  arch/arm64/mm/physaddr.c                                    |    2
-rw-r--r--  arch/arm64/mm/proc.S                                        |  457
-rw-r--r--  arch/arm64/mm/ptdump.c (renamed from arch/arm64/mm/dump.c)  |  177
-rw-r--r--  arch/arm64/mm/ptdump_debugfs.c                              |    8
-rw-r--r--  arch/arm64/mm/trans_pgd-asm.S                               |   65
-rw-r--r--  arch/arm64/mm/trans_pgd.c                                   |  292
24 files changed, 2959 insertions(+), 2218 deletions(-)
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 849c1df3d214..dbd1bc95967d 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -2,12 +2,14 @@
obj-y := dma-mapping.o extable.o fault.o init.o \
cache.o copypage.o flush.o \
ioremap.o mmap.o pgd.o mmu.o \
- context.o proc.o pageattr.o
+ context.o proc.o pageattr.o fixmap.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_ARM64_PTDUMP_CORE) += dump.o
-obj-$(CONFIG_ARM64_PTDUMP_DEBUGFS) += ptdump_debugfs.o
-obj-$(CONFIG_NUMA) += numa.o
+obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
+obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o
+obj-$(CONFIG_TRANS_TABLE) += trans_pgd.o
+obj-$(CONFIG_TRANS_TABLE) += trans_pgd-asm.o
obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
+obj-$(CONFIG_ARM64_MTE) += mteswap.o
KASAN_SANITIZE_physaddr.o += n
obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index db767b072601..503567c864fd 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -15,7 +15,7 @@
#include <asm/asm-uaccess.h>
/*
- * flush_icache_range(start,end)
+ * caches_clean_inval_pou_macro(start,end) [fixup]
*
* Ensure that the I and D caches are coherent within specified region.
* This is typically used when code has been written to a memory region,
@@ -23,12 +23,27 @@
*
* - start - virtual start address of region
* - end - virtual end address of region
+ * - fixup - optional label to branch to on user fault
*/
-ENTRY(__flush_icache_range)
- /* FALLTHROUGH */
+.macro caches_clean_inval_pou_macro, fixup
+alternative_if ARM64_HAS_CACHE_IDC
+ dsb ishst
+ b .Ldc_skip_\@
+alternative_else_nop_endif
+ mov x2, x0
+ mov x3, x1
+ dcache_by_line_op cvau, ish, x2, x3, x4, x5, \fixup
+.Ldc_skip_\@:
+alternative_if ARM64_HAS_CACHE_DIC
+ isb
+ b .Lic_skip_\@
+alternative_else_nop_endif
+ invalidate_icache_by_line x0, x1, x2, x3, \fixup
+.Lic_skip_\@:
+.endm
/*
- * __flush_cache_user_range(start,end)
+ * caches_clean_inval_pou(start,end)
*
* Ensure that the I and D caches are coherent within specified region.
* This is typically used when code has been written to a memory region,
@@ -37,117 +52,97 @@ ENTRY(__flush_icache_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(__flush_cache_user_range)
+SYM_FUNC_START(caches_clean_inval_pou)
+ caches_clean_inval_pou_macro
+ ret
+SYM_FUNC_END(caches_clean_inval_pou)
+SYM_FUNC_ALIAS(__pi_caches_clean_inval_pou, caches_clean_inval_pou)
+
+/*
+ * caches_clean_inval_user_pou(start,end)
+ *
+ * Ensure that the I and D caches are coherent within specified region.
+ * This is typically used when code has been written to a memory region,
+ * and will be executed.
+ *
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+SYM_FUNC_START(caches_clean_inval_user_pou)
uaccess_ttbr0_enable x2, x3, x4
-alternative_if ARM64_HAS_CACHE_IDC
- dsb ishst
- b 7f
-alternative_else_nop_endif
- dcache_line_size x2, x3
- sub x3, x2, #1
- bic x4, x0, x3
-1:
-user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
- add x4, x4, x2
- cmp x4, x1
- b.lo 1b
- dsb ish
-7:
-alternative_if ARM64_HAS_CACHE_DIC
- isb
- b 8f
-alternative_else_nop_endif
- invalidate_icache_by_line x0, x1, x2, x3, 9f
-8: mov x0, #0
+ caches_clean_inval_pou_macro 2f
+ mov x0, xzr
1:
uaccess_ttbr0_disable x1, x2
ret
-9:
+2:
mov x0, #-EFAULT
b 1b
-ENDPROC(__flush_icache_range)
-ENDPROC(__flush_cache_user_range)
+SYM_FUNC_END(caches_clean_inval_user_pou)
/*
- * invalidate_icache_range(start,end)
+ * icache_inval_pou(start,end)
*
* Ensure that the I cache is invalid within specified region.
*
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(invalidate_icache_range)
+SYM_FUNC_START(icache_inval_pou)
alternative_if ARM64_HAS_CACHE_DIC
- mov x0, xzr
isb
ret
alternative_else_nop_endif
- uaccess_ttbr0_enable x2, x3, x4
-
- invalidate_icache_by_line x0, x1, x2, x3, 2f
- mov x0, xzr
-1:
- uaccess_ttbr0_disable x1, x2
+ invalidate_icache_by_line x0, x1, x2, x3
ret
-2:
- mov x0, #-EFAULT
- b 1b
-ENDPROC(invalidate_icache_range)
+SYM_FUNC_END(icache_inval_pou)
/*
- * __flush_dcache_area(kaddr, size)
+ * dcache_clean_inval_poc(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned and invalidated to the PoC.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
-ENTRY(__flush_dcache_area)
+SYM_FUNC_START(__pi_dcache_clean_inval_poc)
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
-ENDPIPROC(__flush_dcache_area)
+SYM_FUNC_END(__pi_dcache_clean_inval_poc)
+SYM_FUNC_ALIAS(dcache_clean_inval_poc, __pi_dcache_clean_inval_poc)
/*
- * __clean_dcache_area_pou(kaddr, size)
+ * dcache_clean_pou(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned to the PoU.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
-ENTRY(__clean_dcache_area_pou)
+SYM_FUNC_START(dcache_clean_pou)
alternative_if ARM64_HAS_CACHE_IDC
dsb ishst
ret
alternative_else_nop_endif
dcache_by_line_op cvau, ish, x0, x1, x2, x3
ret
-ENDPROC(__clean_dcache_area_pou)
+SYM_FUNC_END(dcache_clean_pou)
/*
- * __inval_dcache_area(kaddr, size)
+ * dcache_inval_poc(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are invalidated. Any partial lines at the ends of the interval are
* also cleaned to PoC to prevent data loss.
*
- * - kaddr - kernel address
- * - size - size in question
- */
-ENTRY(__inval_dcache_area)
- /* FALLTHROUGH */
-
-/*
- * __dma_inv_area(start, size)
- * - start - virtual start address of region
- * - size - size in question
+ * - start - kernel start address of region
+ * - end - kernel end address of region
*/
-__dma_inv_area:
- add x1, x1, x0
+SYM_FUNC_START(__pi_dcache_inval_poc)
dcache_line_size x2, x3
sub x3, x2, #1
tst x1, x3 // end cache line aligned?
@@ -165,82 +160,38 @@ __dma_inv_area:
b.lo 2b
dsb sy
ret
-ENDPIPROC(__inval_dcache_area)
-ENDPROC(__dma_inv_area)
+SYM_FUNC_END(__pi_dcache_inval_poc)
+SYM_FUNC_ALIAS(dcache_inval_poc, __pi_dcache_inval_poc)
/*
- * __clean_dcache_area_poc(kaddr, size)
+ * dcache_clean_poc(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned to the PoC.
*
- * - kaddr - kernel address
- * - size - size in question
- */
-ENTRY(__clean_dcache_area_poc)
- /* FALLTHROUGH */
-
-/*
- * __dma_clean_area(start, size)
* - start - virtual start address of region
- * - size - size in question
+ * - end - virtual end address of region
*/
-__dma_clean_area:
+SYM_FUNC_START(__pi_dcache_clean_poc)
dcache_by_line_op cvac, sy, x0, x1, x2, x3
ret
-ENDPIPROC(__clean_dcache_area_poc)
-ENDPROC(__dma_clean_area)
+SYM_FUNC_END(__pi_dcache_clean_poc)
+SYM_FUNC_ALIAS(dcache_clean_poc, __pi_dcache_clean_poc)
/*
- * __clean_dcache_area_pop(kaddr, size)
+ * dcache_clean_pop(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned to the PoP.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
-ENTRY(__clean_dcache_area_pop)
+SYM_FUNC_START(__pi_dcache_clean_pop)
alternative_if_not ARM64_HAS_DCPOP
- b __clean_dcache_area_poc
+ b dcache_clean_poc
alternative_else_nop_endif
dcache_by_line_op cvap, sy, x0, x1, x2, x3
ret
-ENDPIPROC(__clean_dcache_area_pop)
-
-/*
- * __dma_flush_area(start, size)
- *
- * clean & invalidate D / U line
- *
- * - start - virtual start address of region
- * - size - size in question
- */
-ENTRY(__dma_flush_area)
- dcache_by_line_op civac, sy, x0, x1, x2, x3
- ret
-ENDPIPROC(__dma_flush_area)
-
-/*
- * __dma_map_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-ENTRY(__dma_map_area)
- cmp w2, #DMA_FROM_DEVICE
- b.eq __dma_inv_area
- b __dma_clean_area
-ENDPIPROC(__dma_map_area)
-
-/*
- * __dma_unmap_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-ENTRY(__dma_unmap_area)
- cmp w2, #DMA_TO_DEVICE
- b.ne __dma_inv_area
- ret
-ENDPIPROC(__dma_unmap_area)
+SYM_FUNC_END(__pi_dcache_clean_pop)
+SYM_FUNC_ALIAS(dcache_clean_pop, __pi_dcache_clean_pop)
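
The conversion above also changes every cache maintenance routine from a
(kaddr, size) to a (start, end) calling convention. A minimal caller-side
sketch of what that means, using a hypothetical buffer-flush helper that is
not part of this patch:

  #include <stddef.h>

  /* Declaration of the renamed routine from cache.S above. */
  extern void dcache_clean_inval_poc(unsigned long start, unsigned long end);

  /* Hypothetical call site: previously __flush_dcache_area(buf, len);
   * the replacement takes an exclusive [start, end) virtual range. */
  static void flush_buffer(void *buf, size_t len)
  {
  	unsigned long start = (unsigned long)buf;

  	dcache_clean_inval_poc(start, start + len);
  }
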
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index b5e329fde2dd..188197590fc9 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -6,6 +6,7 @@
* Copyright (C) 2012 ARM Ltd.
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -26,35 +27,33 @@ static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
+static unsigned long max_pinned_asids;
+static unsigned long nr_pinned_asids;
+static unsigned long *pinned_asid_map;
+
#define ASID_MASK (~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION (1UL << asid_bits)
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1)
-#define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1)
-#define idx2asid(idx) (((idx) << 1) & ~ASID_MASK)
-#else
-#define NUM_USER_ASIDS (ASID_FIRST_VERSION)
-#define asid2idx(asid) ((asid) & ~ASID_MASK)
-#define idx2asid(idx) asid2idx(idx)
-#endif
+#define NUM_USER_ASIDS ASID_FIRST_VERSION
+#define ctxid2asid(asid) ((asid) & ~ASID_MASK)
+#define asid2ctxid(asid, genid) ((asid) | (genid))
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
u32 asid;
int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
- ID_AA64MMFR0_ASID_SHIFT);
+ ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
switch (fld) {
default:
pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
smp_processor_id(), fld);
- /* Fallthrough */
- case 0:
+ fallthrough;
+ case ID_AA64MMFR0_EL1_ASIDBITS_8:
asid = 8;
break;
- case 2:
+ case ID_AA64MMFR0_EL1_ASIDBITS_16:
asid = 16;
}
@@ -77,13 +76,38 @@ void verify_cpu_asid_bits(void)
}
}
+static void set_kpti_asid_bits(unsigned long *map)
+{
+ unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
+ /*
+ * With KPTI, kernel/user ASIDs are allocated in pairs and the
+ * bottom bit distinguishes the two: if it is set, the ASID maps
+ * only userspace. Thus mark the even ASIDs as reserved for the
+ * kernel.
+ */
+ memset(map, 0xaa, len);
+}
+
+static void set_reserved_asid_bits(void)
+{
+ if (pinned_asid_map)
+ bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS);
+ else if (arm64_kernel_unmapped_at_el0())
+ set_kpti_asid_bits(asid_map);
+ else
+ bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+}
+
+#define asid_gen_match(asid) \
+ (!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits))
+
static void flush_context(void)
{
int i;
u64 asid;
/* Update the list of reserved ASIDs and the ASID bitmap. */
- bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+ set_reserved_asid_bits();
for_each_possible_cpu(i) {
asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
@@ -96,7 +120,7 @@ static void flush_context(void)
*/
if (asid == 0)
asid = per_cpu(reserved_asids, i);
- __set_bit(asid2idx(asid), asid_map);
+ __set_bit(ctxid2asid(asid), asid_map);
per_cpu(reserved_asids, i) = asid;
}
@@ -138,7 +162,7 @@ static u64 new_context(struct mm_struct *mm)
u64 generation = atomic64_read(&asid_generation);
if (asid != 0) {
- u64 newasid = generation | (asid & ~ASID_MASK);
+ u64 newasid = asid2ctxid(ctxid2asid(asid), generation);
/*
* If our current ASID was active during a rollover, we
@@ -148,10 +172,18 @@ static u64 new_context(struct mm_struct *mm)
return newasid;
/*
+ * If it is pinned, we can keep using it. Note that reserved
+ * takes priority, because even if it is also pinned, we need to
+ * update the generation into the reserved_asids.
+ */
+ if (refcount_read(&mm->context.pinned))
+ return newasid;
+
+ /*
* We had a valid ASID in a previous life, so try to re-use
* it if possible.
*/
- if (!__test_and_set_bit(asid2idx(asid), asid_map))
+ if (!__test_and_set_bit(ctxid2asid(asid), asid_map))
return newasid;
}
@@ -177,12 +209,13 @@ static u64 new_context(struct mm_struct *mm)
set_asid:
__set_bit(asid, asid_map);
cur_idx = asid;
- return idx2asid(asid) | generation;
+ return asid2ctxid(asid, generation);
}
-void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
+void check_and_switch_context(struct mm_struct *mm)
{
unsigned long flags;
+ unsigned int cpu;
u64 asid, old_active_asid;
if (system_supports_cnp())
@@ -204,25 +237,25 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
* relaxed xchg in flush_context will treat us as reserved
* because atomic RmWs are totally ordered for a given location.
*/
- old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
- if (old_active_asid &&
- !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
- atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
+ old_active_asid = atomic64_read(this_cpu_ptr(&active_asids));
+ if (old_active_asid && asid_gen_match(asid) &&
+ atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_asids),
old_active_asid, asid))
goto switch_mm_fastpath;
raw_spin_lock_irqsave(&cpu_asid_lock, flags);
/* Check that our ASID belongs to the current generation. */
asid = atomic64_read(&mm->context.id);
- if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
+ if (!asid_gen_match(asid)) {
asid = new_context(mm);
atomic64_set(&mm->context.id, asid);
}
+ cpu = smp_processor_id();
if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
local_flush_tlb_all();
- atomic64_set(&per_cpu(active_asids, cpu), asid);
+ atomic64_set(this_cpu_ptr(&active_asids), asid);
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
switch_mm_fastpath:
@@ -237,31 +270,153 @@ switch_mm_fastpath:
cpu_switch_mm(mm->pgd, mm);
}
+unsigned long arm64_mm_context_get(struct mm_struct *mm)
+{
+ unsigned long flags;
+ u64 asid;
+
+ if (!pinned_asid_map)
+ return 0;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+
+ asid = atomic64_read(&mm->context.id);
+
+ if (refcount_inc_not_zero(&mm->context.pinned))
+ goto out_unlock;
+
+ if (nr_pinned_asids >= max_pinned_asids) {
+ asid = 0;
+ goto out_unlock;
+ }
+
+ if (!asid_gen_match(asid)) {
+ /*
+ * We went through one or more rollover since that ASID was
+ * used. Ensure that it is still valid, or generate a new one.
+ */
+ asid = new_context(mm);
+ atomic64_set(&mm->context.id, asid);
+ }
+
+ nr_pinned_asids++;
+ __set_bit(ctxid2asid(asid), pinned_asid_map);
+ refcount_set(&mm->context.pinned, 1);
+
+out_unlock:
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+
+ asid = ctxid2asid(asid);
+
+ /* Set the equivalent of USER_ASID_BIT */
+ if (asid && arm64_kernel_unmapped_at_el0())
+ asid |= 1;
+
+ return asid;
+}
+EXPORT_SYMBOL_GPL(arm64_mm_context_get);
+
+void arm64_mm_context_put(struct mm_struct *mm)
+{
+ unsigned long flags;
+ u64 asid = atomic64_read(&mm->context.id);
+
+ if (!pinned_asid_map)
+ return;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+
+ if (refcount_dec_and_test(&mm->context.pinned)) {
+ __clear_bit(ctxid2asid(asid), pinned_asid_map);
+ nr_pinned_asids--;
+ }
+
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+EXPORT_SYMBOL_GPL(arm64_mm_context_put);
+
/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
+ if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456))
+ return;
+
asm(ALTERNATIVE("nop; nop; nop",
"ic iallu; dsb nsh; isb",
- ARM64_WORKAROUND_CAVIUM_27456,
- CONFIG_CAVIUM_ERRATUM_27456));
+ ARM64_WORKAROUND_CAVIUM_27456));
}
-static int asids_init(void)
+void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
{
- asid_bits = get_cpu_asid_bits();
+ unsigned long ttbr1 = read_sysreg(ttbr1_el1);
+ unsigned long asid = ASID(mm);
+ unsigned long ttbr0 = phys_to_ttbr(pgd_phys);
+
+ /* Skip CNP for the reserved ASID */
+ if (system_supports_cnp() && asid)
+ ttbr0 |= TTBR_CNP_BIT;
+
+ /* SW PAN needs a copy of the ASID in TTBR0 for entry */
+ if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
+ ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);
+
+ /* Set ASID in TTBR1 since TCR.A1 is set */
+ ttbr1 &= ~TTBR_ASID_MASK;
+ ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);
+
+ cpu_set_reserved_ttbr0_nosync();
+ write_sysreg(ttbr1, ttbr1_el1);
+ write_sysreg(ttbr0, ttbr0_el1);
+ isb();
+ post_ttbr_update_workaround();
+}
+
+static int asids_update_limit(void)
+{
+ unsigned long num_available_asids = NUM_USER_ASIDS;
+
+ if (arm64_kernel_unmapped_at_el0()) {
+ num_available_asids /= 2;
+ if (pinned_asid_map)
+ set_kpti_asid_bits(pinned_asid_map);
+ }
/*
* Expect allocation after rollover to fail if we don't have at least
* one more ASID than CPUs. ASID #0 is reserved for init_mm.
*/
- WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
+ WARN_ON(num_available_asids - 1 <= num_possible_cpus());
+ pr_info("ASID allocator initialised with %lu entries\n",
+ num_available_asids);
+
+ /*
+ * There must always be an ASID available after rollover. Ensure that,
+ * even if all CPUs have a reserved ASID and the maximum number of ASIDs
+ * are pinned, there still is at least one empty slot in the ASID map.
+ */
+ max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
+ return 0;
+}
+arch_initcall(asids_update_limit);
+
+static int asids_init(void)
+{
+ asid_bits = get_cpu_asid_bits();
atomic64_set(&asid_generation, ASID_FIRST_VERSION);
- asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
- GFP_KERNEL);
+ asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
if (!asid_map)
panic("Failed to allocate bitmap for %lu ASIDs\n",
NUM_USER_ASIDS);
- pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
+ pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
+ nr_pinned_asids = 0;
+
+ /*
+ * We cannot call set_reserved_asid_bits() here because CPU
+ * caps are not finalized yet, so it is safer to assume KPTI
+ * and reserve kernel ASIDs from the beginning.
+ */
+ if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
+ set_kpti_asid_bits(asid_map);
return 0;
}
early_initcall(asids_init);
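
The asid_gen_match() test added above is the core of the rollover scheme: a
context ID is (generation | asid), and it is stale once the bits above
asid_bits no longer equal the current global generation. A standalone sketch
of the check, assuming 16-bit ASIDs:

  #include <assert.h>
  #include <stdbool.h>
  #include <stdint.h>

  static const unsigned int asid_bits = 16;	/* assumed for this sketch */

  /* Mirrors asid_gen_match(): XOR cancels matching generation bits. */
  static bool gen_match(uint64_t ctxid, uint64_t generation)
  {
  	return !((ctxid ^ generation) >> asid_bits);
  }

  int main(void)
  {
  	uint64_t gen1 = 1ULL << asid_bits;	/* ASID_FIRST_VERSION */
  	uint64_t gen2 = 2ULL << asid_bits;	/* after one rollover */

  	assert(gen_match(gen1 | 0x42, gen1));
  	assert(!gen_match(gen1 | 0x42, gen2));	/* must be reallocated */
  	return 0;
  }
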
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 2ee7b73433a5..a7bb20055ce0 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -6,21 +6,37 @@
* Copyright (C) 2012 ARM Ltd.
*/
+#include <linux/bitops.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
+#include <asm/mte.h>
-void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void copy_highpage(struct page *to, struct page *from)
{
- struct page *page = virt_to_page(kto);
+ void *kto = page_address(to);
+ void *kfrom = page_address(from);
+
copy_page(kto, kfrom);
- flush_dcache_page(page);
+
+ if (kasan_hw_tags_enabled())
+ page_kasan_tag_reset(to);
+
+ if (system_supports_mte() && page_mte_tagged(from)) {
+ /* It's a new page, shouldn't have been tagged yet */
+ WARN_ON_ONCE(!try_page_mte_tagging(to));
+ mte_copy_page_tags(kto, kfrom);
+ set_page_mte_tagged(to);
+ }
}
-EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
+EXPORT_SYMBOL(copy_highpage);
-void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
{
- clear_page(kaddr);
+ copy_highpage(to, from);
+ flush_dcache_page(to);
}
-EXPORT_SYMBOL_GPL(__cpu_clear_user_page);
+EXPORT_SYMBOL_GPL(copy_user_highpage);
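
The MTE branch added to copy_highpage() moves surprisingly little
out-of-band data: one 4-bit allocation tag per 16-byte granule. A quick
check of that arithmetic, assuming 4 KiB pages:

  #include <assert.h>

  #define MTE_GRANULE_SIZE	16	/* bytes covered by one tag */
  #define MTE_TAG_BITS		4
  #define PAGE_SIZE		4096	/* assumed 4 KiB pages */

  int main(void)
  {
  	unsigned int granules = PAGE_SIZE / MTE_GRANULE_SIZE;	/* 256 */
  	unsigned int tag_bytes = granules * MTE_TAG_BITS / 8;	/* 128 */

  	assert(tag_bytes == 128);
  	return 0;
  }
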
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 6c45350e33aa..61886e43e3a1 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -6,28 +6,37 @@
#include <linux/gfp.h>
#include <linux/cache.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/dma-iommu.h>
+#include <linux/dma-map-ops.h>
+#include <linux/iommu.h>
#include <xen/xen.h>
-#include <xen/swiotlb-xen.h>
#include <asm/cacheflush.h>
+#include <asm/xen/xen-ops.h>
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
- __dma_map_area(phys_to_virt(paddr), size, dir);
+ unsigned long start = (unsigned long)phys_to_virt(paddr);
+
+ dcache_clean_poc(start, start + size);
}
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
- __dma_unmap_area(phys_to_virt(paddr), size, dir);
+ unsigned long start = (unsigned long)phys_to_virt(paddr);
+
+ if (dir == DMA_TO_DEVICE)
+ return;
+
+ dcache_inval_poc(start, start + size);
}
void arch_dma_prep_coherent(struct page *page, size_t size)
{
- __dma_flush_area(page_address(page), size);
+ unsigned long start = (unsigned long)page_address(page);
+
+ dcache_clean_poc(start, start + size);
}
#ifdef CONFIG_IOMMU_DMA
@@ -38,7 +47,7 @@ void arch_teardown_dma_ops(struct device *dev)
#endif
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent)
+ bool coherent)
{
int cls = cache_line_size_of_cpu();
@@ -49,11 +58,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
ARCH_DMA_MINALIGN, cls);
dev->dma_coherent = coherent;
- if (iommu)
- iommu_setup_dma_ops(dev, dma_base, size);
+ if (device_iommu_mapped(dev))
+ iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
-#ifdef CONFIG_XEN
- if (xen_initial_domain())
- dev->dma_ops = &xen_swiotlb_dma_ops;
-#endif
+ xen_setup_dma_ops(dev);
}
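
The dma-mapping.c conversion open-codes the policy that __dma_map_area() and
__dma_unmap_area() used to dispatch on: clean to the PoC before any transfer
so the device sees CPU writes, invalidate afterwards unless the device never
wrote the buffer. A condensed sketch of that policy (the externs stand in
for the cache.S routines above):

  enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

  extern void dcache_clean_poc(unsigned long start, unsigned long end);
  extern void dcache_inval_poc(unsigned long start, unsigned long end);

  /* Before the transfer: push dirty CPU lines to memory, all directions. */
  static void sync_for_device(unsigned long start, unsigned long end)
  {
  	dcache_clean_poc(start, end);
  }

  /* After the transfer: drop stale lines so the CPU re-reads from memory,
   * except for DMA_TO_DEVICE where the device never wrote the buffer. */
  static void sync_for_cpu(unsigned long start, unsigned long end,
  			 enum dma_data_direction dir)
  {
  	if (dir != DMA_TO_DEVICE)
  		dcache_inval_poc(start, end);
  }
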
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index 81e694af5f8c..228d681a8715 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -3,16 +3,76 @@
* Based on arch/arm/mm/extable.c
*/
+#include <linux/bitfield.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
-int fixup_exception(struct pt_regs *regs)
+#include <asm/asm-extable.h>
+#include <asm/ptrace.h>
+
+static inline unsigned long
+get_ex_fixup(const struct exception_table_entry *ex)
+{
+ return ((unsigned long)&ex->fixup + ex->fixup);
+}
+
+static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
+ struct pt_regs *regs)
+{
+ int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
+ int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);
+
+ pt_regs_write_reg(regs, reg_err, -EFAULT);
+ pt_regs_write_reg(regs, reg_zero, 0);
+
+ regs->pc = get_ex_fixup(ex);
+ return true;
+}
+
+static bool
+ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
+ struct pt_regs *regs)
{
- const struct exception_table_entry *fixup;
+ int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->data);
+ int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
+ unsigned long data, addr, offset;
+
+ addr = pt_regs_read_reg(regs, reg_addr);
+
+ offset = addr & 0x7UL;
+ addr &= ~0x7UL;
+
+ data = *(unsigned long*)addr;
+
+#ifndef __AARCH64EB__
+ data >>= 8 * offset;
+#else
+ data <<= 8 * offset;
+#endif
+
+ pt_regs_write_reg(regs, reg_data, data);
+
+ regs->pc = get_ex_fixup(ex);
+ return true;
+}
+
+bool fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *ex;
+
+ ex = search_exception_tables(instruction_pointer(regs));
+ if (!ex)
+ return false;
- fixup = search_exception_tables(instruction_pointer(regs));
- if (fixup)
- regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
+ switch (ex->type) {
+ case EX_TYPE_BPF:
+ return ex_handler_bpf(ex, regs);
+ case EX_TYPE_UACCESS_ERR_ZERO:
+ case EX_TYPE_KACCESS_ERR_ZERO:
+ return ex_handler_uaccess_err_zero(ex, regs);
+ case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
+ return ex_handler_load_unaligned_zeropad(ex, regs);
+ }
- return fixup != NULL;
+ BUG();
}
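
ex_handler_load_unaligned_zeropad() above reconstructs what the faulting
load_unaligned_zeropad() would have returned: redo the load from the
aligned-down address, then shift away the bytes below the original pointer
so everything past the faulting boundary reads as zero. A worked
little-endian example in plain user-space C (both words are really mapped
here; the point is the shift arithmetic):

  #include <assert.h>
  #include <stdint.h>
  #include <string.h>

  /* Little-endian variant of the fixup: re-load aligned, discard the
   * 'offset' low bytes, zero-fill from the top. */
  static uint64_t zeropad_fixup(const void *p)
  {
  	uintptr_t addr = (uintptr_t)p;
  	unsigned int offset = addr & 0x7;
  	uint64_t data;

  	memcpy(&data, (const void *)(addr & ~(uintptr_t)0x7), 8);
  	return data >> (8 * offset);
  }

  int main(void)
  {
  	/* Pretend bytes past index 8 would fault. */
  	uint64_t buf[2] = { 0x8877665544332211ULL, 0 };

  	/* An 8-byte read at offset 6 keeps the two valid bytes (0x77,
  	 * 0x88) in the low positions and zeroes above them. */
  	assert(zeropad_fixup((const char *)buf + 6) == 0x8877ULL);
  	return 0;
  }
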
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 077b02a2d4d3..55f6455a8284 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -10,10 +10,12 @@
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/extable.h>
+#include <linux/kfence.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
+#include <linux/kasan.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
@@ -28,20 +30,21 @@
#include <asm/bug.h>
#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
+#include <asm/efi.h>
#include <asm/exception.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kprobes.h>
+#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
struct fault_info {
- int (*fn)(unsigned long addr, unsigned int esr,
+ int (*fn)(unsigned long far, unsigned long esr,
struct pt_regs *regs);
int sig;
int code;
@@ -51,18 +54,20 @@ struct fault_info {
static const struct fault_info fault_info[];
static struct fault_info debug_fault_info[];
-static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
+static inline const struct fault_info *esr_to_fault_info(unsigned long esr)
{
return fault_info + (esr & ESR_ELx_FSC);
}
-static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr)
+static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr)
{
return debug_fault_info + DBG_ESR_EVT(esr);
}
-static void data_abort_decode(unsigned int esr)
+static void data_abort_decode(unsigned long esr)
{
+ unsigned long iss2 = ESR_ELx_ISS2(esr);
+
pr_alert("Data abort info:\n");
if (esr & ESR_ELx_ISV) {
@@ -75,19 +80,28 @@ static void data_abort_decode(unsigned int esr)
(esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
(esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
} else {
- pr_alert(" ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK);
+ pr_alert(" ISV = 0, ISS = 0x%08lx, ISS2 = 0x%08lx\n",
+ esr & ESR_ELx_ISS_MASK, iss2);
}
- pr_alert(" CM = %lu, WnR = %lu\n",
+ pr_alert(" CM = %lu, WnR = %lu, TnD = %lu, TagAccess = %lu\n",
(esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT,
- (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT);
+ (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT,
+ (iss2 & ESR_ELx_TnD) >> ESR_ELx_TnD_SHIFT,
+ (iss2 & ESR_ELx_TagAccess) >> ESR_ELx_TagAccess_SHIFT);
+
+ pr_alert(" GCS = %ld, Overlay = %lu, DirtyBit = %lu, Xs = %llu\n",
+ (iss2 & ESR_ELx_GCS) >> ESR_ELx_GCS_SHIFT,
+ (iss2 & ESR_ELx_Overlay) >> ESR_ELx_Overlay_SHIFT,
+ (iss2 & ESR_ELx_DirtyBit) >> ESR_ELx_DirtyBit_SHIFT,
+ (iss2 & ESR_ELx_Xs_MASK) >> ESR_ELx_Xs_SHIFT);
}
-static void mem_abort_decode(unsigned int esr)
+static void mem_abort_decode(unsigned long esr)
{
pr_alert("Mem abort info:\n");
- pr_alert(" ESR = 0x%08x\n", esr);
+ pr_alert(" ESR = 0x%016lx\n", esr);
pr_alert(" EC = 0x%02lx: %s, IL = %u bits\n",
ESR_ELx_EC(esr), esr_get_class_string(esr),
(esr & ESR_ELx_IL) ? 32 : 16);
@@ -97,6 +111,8 @@ static void mem_abort_decode(unsigned int esr)
pr_alert(" EA = %lu, S1PTW = %lu\n",
(esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
(esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);
+ pr_alert(" FSC = 0x%02lx: %s\n", (esr & ESR_ELx_FSC),
+ esr_to_fault_info(esr)->name);
if (esr_is_data_abort(esr))
data_abort_decode(esr);
@@ -145,6 +161,7 @@ static void show_pte(unsigned long addr)
pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
do {
+ p4d_t *p4dp, p4d;
pud_t *pudp, pud;
pmd_t *pmdp, pmd;
pte_t *ptep, pte;
@@ -152,7 +169,13 @@ static void show_pte(unsigned long addr)
if (pgd_none(pgd) || pgd_bad(pgd))
break;
- pudp = pud_offset(pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ p4d = READ_ONCE(*p4dp);
+ pr_cont(", p4d=%016llx", p4d_val(p4d));
+ if (p4d_none(p4d) || p4d_bad(p4d))
+ break;
+
+ pudp = pud_offset(p4dp, addr);
pud = READ_ONCE(*pudp);
pr_cont(", pud=%016llx", pud_val(pud));
if (pud_none(pud) || pud_bad(pud))
@@ -165,6 +188,9 @@ static void show_pte(unsigned long addr)
break;
ptep = pte_offset_map(pmdp, addr);
+ if (!ptep)
+ break;
+
pte = READ_ONCE(*ptep);
pr_cont(", pte=%016llx", pte_val(pte));
pte_unmap(ptep);
@@ -212,22 +238,28 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
} while (pteval != old_pteval);
- flush_tlb_fix_spurious_fault(vma, address);
+ /* Invalidate a stale read-only entry */
+ if (dirty)
+ flush_tlb_page(vma, address);
return 1;
}
-static bool is_el1_instruction_abort(unsigned int esr)
+static bool is_el1_instruction_abort(unsigned long esr)
{
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}
-static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
+static bool is_el1_data_abort(unsigned long esr)
+{
+ return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR;
+}
+
+static inline bool is_el1_permission_fault(unsigned long addr, unsigned long esr,
struct pt_regs *regs)
{
- unsigned int ec = ESR_ELx_EC(esr);
- unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
+ unsigned long fsc_type = esr & ESR_ELx_FSC_TYPE;
- if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
+ if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr))
return false;
if (fsc_type == ESR_ELx_FSC_PERM)
@@ -241,20 +273,20 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
}
static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
- unsigned int esr,
+ unsigned long esr,
struct pt_regs *regs)
{
unsigned long flags;
u64 par, dfsc;
- if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR ||
+ if (!is_el1_data_abort(esr) ||
(esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
return false;
local_irq_save(flags);
asm volatile("at s1e1r, %0" :: "r" (addr));
isb();
- par = read_sysreg(par_el1);
+ par = read_sysreg_par();
local_irq_restore(flags);
/*
@@ -273,22 +305,75 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
}
static void die_kernel_fault(const char *msg, unsigned long addr,
- unsigned int esr, struct pt_regs *regs)
+ unsigned long esr, struct pt_regs *regs)
{
bust_spinlocks(1);
pr_alert("Unable to handle kernel %s at virtual address %016lx\n", msg,
addr);
+ kasan_non_canonical_hook(addr);
+
mem_abort_decode(esr);
show_pte(addr);
die("Oops", regs, esr);
bust_spinlocks(0);
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
+}
+
+#ifdef CONFIG_KASAN_HW_TAGS
+static void report_tag_fault(unsigned long addr, unsigned long esr,
+ struct pt_regs *regs)
+{
+ /*
+ * SAS bits aren't set for all faults reported in EL1, so we can't
+ * find out access size.
+ */
+ bool is_write = !!(esr & ESR_ELx_WNR);
+ kasan_report((void *)addr, 0, is_write, regs->pc);
+}
+#else
+/* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. */
+static inline void report_tag_fault(unsigned long addr, unsigned long esr,
+ struct pt_regs *regs) { }
+#endif
+
+static void do_tag_recovery(unsigned long addr, unsigned long esr,
+ struct pt_regs *regs)
+{
+
+ report_tag_fault(addr, esr, regs);
+
+ /*
+ * Disable MTE Tag Checking on the local CPU for the current EL.
+ * It will be done lazily on the other CPUs when they will hit a
+ * tag fault.
+ */
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
+ SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF, NONE));
+ isb();
+}
+
+static bool is_el1_mte_sync_tag_check_fault(unsigned long esr)
+{
+ unsigned long fsc = esr & ESR_ELx_FSC;
+
+ if (!is_el1_data_abort(esr))
+ return false;
+
+ if (fsc == ESR_ELx_FSC_MTE)
+ return true;
+
+ return false;
+}
+
+static bool is_translation_fault(unsigned long esr)
+{
+ return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
}
-static void __do_kernel_fault(unsigned long addr, unsigned int esr,
+static void __do_kernel_fault(unsigned long addr, unsigned long esr,
struct pt_regs *regs)
{
const char *msg;
@@ -304,6 +389,12 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
"Ignoring spurious kernel translation fault at virtual address %016lx\n", addr))
return;
+ if (is_el1_mte_sync_tag_check_fault(esr)) {
+ do_tag_recovery(addr, esr, regs);
+
+ return;
+ }
+
if (is_el1_permission_fault(addr, esr, regs)) {
if (esr & ESR_ELx_WNR)
msg = "write to read-only memory";
@@ -314,13 +405,20 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
} else if (addr < PAGE_SIZE) {
msg = "NULL pointer dereference";
} else {
+ if (is_translation_fault(esr) &&
+ kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
+ return;
+
msg = "paging request";
}
+ if (efi_runtime_fixup_exception(regs, msg))
+ return;
+
die_kernel_fault(msg, addr, esr, regs);
}
-static void set_thread_esr(unsigned long address, unsigned int esr)
+static void set_thread_esr(unsigned long address, unsigned long esr)
{
current->thread.fault_address = address;
@@ -368,7 +466,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr)
* exception level). Fail safe by not providing an ESR
* context record at all.
*/
- WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr);
+ WARN(1, "ESR 0x%lx is not DABT or IABT from EL0\n", esr);
esr = 0;
break;
}
@@ -377,8 +475,11 @@ static void set_thread_esr(unsigned long address, unsigned int esr)
current->thread.fault_code = esr;
}
-static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+static void do_bad_area(unsigned long far, unsigned long esr,
+ struct pt_regs *regs)
{
+ unsigned long addr = untagged_addr(far);
+
/*
* If we are in kernel mode at this point, we have no context to
* handle this fault with.
@@ -387,45 +488,32 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
const struct fault_info *inf = esr_to_fault_info(esr);
set_thread_esr(addr, esr);
- arm64_force_sig_fault(inf->sig, inf->code, (void __user *)addr,
- inf->name);
+ arm64_force_sig_fault(inf->sig, inf->code, far, inf->name);
} else {
__do_kernel_fault(addr, esr, regs);
}
}
-#define VM_FAULT_BADMAP 0x010000
-#define VM_FAULT_BADACCESS 0x020000
+#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000)
+#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000)
-static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
- unsigned int mm_flags, unsigned long vm_flags)
+static vm_fault_t __do_page_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long addr,
+ unsigned int mm_flags, unsigned long vm_flags,
+ struct pt_regs *regs)
{
- struct vm_area_struct *vma = find_vma(mm, addr);
-
- if (unlikely(!vma))
- return VM_FAULT_BADMAP;
-
/*
* Ok, we have a good vm_area for this memory access, so we can handle
* it.
- */
- if (unlikely(vma->vm_start > addr)) {
- if (!(vma->vm_flags & VM_GROWSDOWN))
- return VM_FAULT_BADMAP;
- if (expand_stack(vma, addr))
- return VM_FAULT_BADMAP;
- }
-
- /*
* Check that the permissions on the VMA allow for the fault which
* occurred.
*/
if (!(vma->vm_flags & vm_flags))
return VM_FAULT_BADACCESS;
- return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);
+ return handle_mm_fault(vma, addr, mm_flags, regs);
}
-static bool is_el0_instruction_abort(unsigned int esr)
+static bool is_el0_instruction_abort(unsigned long esr)
{
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
}
@@ -434,19 +522,21 @@ static bool is_el0_instruction_abort(unsigned int esr)
* Note: not valid for EL1 DC IVAC, but we never use that such that it
* should fault. EL0 cannot issue DC IVAC (undef).
*/
-static bool is_write_abort(unsigned int esr)
+static bool is_write_abort(unsigned long esr)
{
return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
}
-static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
+static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
struct pt_regs *regs)
{
const struct fault_info *inf;
struct mm_struct *mm = current->mm;
- vm_fault_t fault, major = 0;
- unsigned long vm_flags = VM_READ | VM_WRITE;
- unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ vm_fault_t fault;
+ unsigned long vm_flags;
+ unsigned int mm_flags = FAULT_FLAG_DEFAULT;
+ unsigned long addr = untagged_addr(far);
+ struct vm_area_struct *vma;
if (kprobe_page_fault(regs, esr))
return 0;
@@ -461,20 +551,31 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
if (user_mode(regs))
mm_flags |= FAULT_FLAG_USER;
+ /*
+ * vm_flags tells us what bits we must have in vma->vm_flags
+ * for the fault to be benign: __do_page_fault() checks
+ * vma->vm_flags & vm_flags and returns an error if the
+ * intersection is empty.
+ */
if (is_el0_instruction_abort(esr)) {
+ /* It was exec fault */
vm_flags = VM_EXEC;
mm_flags |= FAULT_FLAG_INSTRUCTION;
} else if (is_write_abort(esr)) {
+ /* It was write fault */
vm_flags = VM_WRITE;
mm_flags |= FAULT_FLAG_WRITE;
+ } else {
+ /* It was read fault */
+ vm_flags = VM_READ;
+ /* Write implies read */
+ vm_flags |= VM_WRITE;
+ /* If EPAN is absent then exec implies read */
+ if (!alternative_has_cap_unlikely(ARM64_HAS_EPAN))
+ vm_flags |= VM_EXEC;
}
if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
- /* regs->orig_addr_limit may be 0 if we entered from EL0 */
- if (regs->orig_addr_limit == KERNEL_DS)
- die_kernel_fault("access to user memory with fs=KERNEL_DS",
- addr, esr, regs);
-
if (is_el1_instruction_abort(esr))
die_kernel_fault("execution of user memory",
addr, esr, regs);
@@ -486,81 +587,70 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
- /*
- * As per x86, we may deadlock here. However, since the kernel only
- * validly references user space from well defined areas of the code,
- * we can bug out early if this is from code which shouldn't.
- */
- if (!down_read_trylock(&mm->mmap_sem)) {
- if (!user_mode(regs) && !search_exception_tables(regs->pc))
+ if (!(mm_flags & FAULT_FLAG_USER))
+ goto lock_mmap;
+
+ vma = lock_vma_under_rcu(mm, addr);
+ if (!vma)
+ goto lock_mmap;
+
+ if (!(vma->vm_flags & vm_flags)) {
+ vma_end_read(vma);
+ goto lock_mmap;
+ }
+ fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs);
+ if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+ vma_end_read(vma);
+
+ if (!(fault & VM_FAULT_RETRY)) {
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto done;
+ }
+ count_vm_vma_lock_event(VMA_LOCK_RETRY);
+ if (fault & VM_FAULT_MAJOR)
+ mm_flags |= FAULT_FLAG_TRIED;
+
+ /* Quick path to respond to signals */
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
goto no_context;
+ return 0;
+ }
+lock_mmap:
+
retry:
- down_read(&mm->mmap_sem);
- } else {
- /*
- * The above down_read_trylock() might have succeeded in which
- * case, we'll have missed the might_sleep() from down_read().
- */
- might_sleep();
-#ifdef CONFIG_DEBUG_VM
- if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
- up_read(&mm->mmap_sem);
+ vma = lock_mm_and_find_vma(mm, addr, regs);
+ if (unlikely(!vma)) {
+ fault = VM_FAULT_BADMAP;
+ goto done;
+ }
+
+ fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);
+
+ /* Quick path to respond to signals */
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
goto no_context;
- }
-#endif
+ return 0;
}
- fault = __do_page_fault(mm, addr, mm_flags, vm_flags);
- major |= fault & VM_FAULT_MAJOR;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return 0;
if (fault & VM_FAULT_RETRY) {
- /*
- * If we need to retry but a fatal signal is pending,
- * handle the signal first. We do not need to release
- * the mmap_sem because it would already be released
- * in __lock_page_or_retry in mm/filemap.c.
- */
- if (fatal_signal_pending(current)) {
- if (!user_mode(regs))
- goto no_context;
- return 0;
- }
-
- /*
- * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
- * starvation.
- */
- if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
- mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
- mm_flags |= FAULT_FLAG_TRIED;
- goto retry;
- }
+ mm_flags |= FAULT_FLAG_TRIED;
+ goto retry;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
+done:
/*
* Handle the "normal" (no error) case first.
*/
if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
- VM_FAULT_BADACCESS)))) {
- /*
- * Major/minor page fault accounting is only done
- * once. If we go through a retry, it is extremely
- * likely that the page will be found in page cache at
- * that point.
- */
- if (major) {
- current->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
- addr);
- } else {
- current->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
- addr);
- }
-
+ VM_FAULT_BADACCESS))))
return 0;
- }
/*
* If we are in kernel mode at this point, we have no context to
@@ -586,8 +676,7 @@ retry:
* We had some memory, but were unable to successfully fix up
* this page fault.
*/
- arm64_force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr,
- inf->name);
+ arm64_force_sig_fault(SIGBUS, BUS_ADRERR, far, inf->name);
} else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) {
unsigned int lsb;
@@ -595,8 +684,7 @@ retry:
if (fault & VM_FAULT_HWPOISON_LARGE)
lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
- arm64_force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr, lsb,
- inf->name);
+ arm64_force_sig_mceerr(BUS_MCEERR_AR, far, lsb, inf->name);
} else {
/*
* Something tried to access memory that isn't in our memory
@@ -604,8 +692,7 @@ retry:
*/
arm64_force_sig_fault(SIGSEGV,
fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR,
- (void __user *)addr,
- inf->name);
+ far, inf->name);
}
return 0;
@@ -615,51 +702,77 @@ no_context:
return 0;
}
-static int __kprobes do_translation_fault(unsigned long addr,
- unsigned int esr,
+static int __kprobes do_translation_fault(unsigned long far,
+ unsigned long esr,
struct pt_regs *regs)
{
+ unsigned long addr = untagged_addr(far);
+
if (is_ttbr0_addr(addr))
- return do_page_fault(addr, esr, regs);
+ return do_page_fault(far, esr, regs);
- do_bad_area(addr, esr, regs);
+ do_bad_area(far, esr, regs);
return 0;
}
-static int do_alignment_fault(unsigned long addr, unsigned int esr,
+static int do_alignment_fault(unsigned long far, unsigned long esr,
struct pt_regs *regs)
{
- do_bad_area(addr, esr, regs);
+ if (IS_ENABLED(CONFIG_COMPAT_ALIGNMENT_FIXUPS) &&
+ compat_user_mode(regs))
+ return do_compat_alignment_fixup(far, regs);
+ do_bad_area(far, esr, regs);
return 0;
}
-static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
return 1; /* "fault" */
}
-static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
const struct fault_info *inf;
- void __user *siaddr;
+ unsigned long siaddr;
inf = esr_to_fault_info(esr);
- /*
- * Return value ignored as we rely on signal merging.
- * Future patches will make this more robust.
- */
- apei_claim_sea(regs);
+ if (user_mode(regs) && apei_claim_sea(regs) == 0) {
+ /*
+ * APEI claimed this as a firmware-first notification.
+ * Some processing deferred to task_work before ret_to_user().
+ */
+ return 0;
+ }
- if (esr & ESR_ELx_FnV)
- siaddr = NULL;
- else
- siaddr = (void __user *)addr;
+ if (esr & ESR_ELx_FnV) {
+ siaddr = 0;
+ } else {
+ /*
+ * The architecture specifies that the tag bits of FAR_EL1 are
+ * UNKNOWN for synchronous external aborts. Mask them out now
+ * so that userspace doesn't see them.
+ */
+ siaddr = untagged_addr(far);
+ }
arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);
return 0;
}
+static int do_tag_check_fault(unsigned long far, unsigned long esr,
+ struct pt_regs *regs)
+{
+ /*
+ * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN
+ * for tag check faults. Set them to corresponding bits in the untagged
+ * address.
+ */
+ far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
+ do_bad_area(far, esr, regs);
+ return 0;
+}
+
static const struct fault_info fault_info[] = {
{ do_bad, SIGKILL, SI_KERNEL, "ttbr address size fault" },
{ do_bad, SIGKILL, SI_KERNEL, "level 1 address size fault" },
@@ -678,7 +791,7 @@ static const struct fault_info fault_info[] = {
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
{ do_sea, SIGBUS, BUS_OBJERR, "synchronous external abort" },
- { do_bad, SIGKILL, SI_KERNEL, "unknown 17" },
+ { do_tag_check_fault, SIGSEGV, SEGV_MTESERR, "synchronous tag check fault" },
{ do_bad, SIGKILL, SI_KERNEL, "unknown 18" },
{ do_bad, SIGKILL, SI_KERNEL, "unknown 19" },
{ do_sea, SIGKILL, SI_KERNEL, "level 0 (translation table walk)" },
@@ -727,41 +840,33 @@ static const struct fault_info fault_info[] = {
{ do_bad, SIGKILL, SI_KERNEL, "unknown 63" },
};
-void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
const struct fault_info *inf = esr_to_fault_info(esr);
+ unsigned long addr = untagged_addr(far);
- if (!inf->fn(addr, esr, regs))
+ if (!inf->fn(far, esr, regs))
return;
- if (!user_mode(regs)) {
- pr_alert("Unhandled fault at 0x%016lx\n", addr);
- mem_abort_decode(esr);
- show_pte(addr);
- }
+ if (!user_mode(regs))
+ die_kernel_fault(inf->name, addr, esr, regs);
- arm64_notify_die(inf->name, regs,
- inf->sig, inf->code, (void __user *)addr, esr);
+ /*
+ * At this point we have an unrecognized fault type whose tag bits may
+ * have been defined as UNKNOWN. Therefore we only expose the untagged
+ * address to the signal handler.
+ */
+ arm64_notify_die(inf->name, regs, inf->sig, inf->code, addr, esr);
}
NOKPROBE_SYMBOL(do_mem_abort);
-void do_el0_irq_bp_hardening(void)
-{
- /* PC has already been checked in entry.S */
- arm64_apply_bp_hardening();
-}
-NOKPROBE_SYMBOL(do_el0_irq_bp_hardening);
-
-void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs)
{
- arm64_notify_die("SP/PC alignment exception", regs,
- SIGBUS, BUS_ADRALN, (void __user *)addr, esr);
+ arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN,
+ addr, esr);
}
NOKPROBE_SYMBOL(do_sp_pc_abort);
-int __init early_brk64(unsigned long addr, unsigned int esr,
- struct pt_regs *regs);
-
/*
* __refdata because early_brk64 is __init, but the reference to it is
* clobbered at arch_initcall time.
@@ -779,7 +884,7 @@ static struct fault_info __refdata debug_fault_info[] = {
};
void __init hook_debug_fault_code(int nr,
- int (*fn)(unsigned long, unsigned int, struct pt_regs *),
+ int (*fn)(unsigned long, unsigned long, struct pt_regs *),
int sig, int code, const char *name)
{
BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));
@@ -799,25 +904,6 @@ void __init hook_debug_fault_code(int nr,
*/
static void debug_exception_enter(struct pt_regs *regs)
{
- /*
- * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
- * already disabled to preserve the last enabled/disabled addresses.
- */
- if (interrupts_enabled(regs))
- trace_hardirqs_off();
-
- if (user_mode(regs)) {
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
- } else {
- /*
- * We might have interrupted pretty much anything. In
- * fact, if we're a debug exception, we can even interrupt
- * NMI processing. We don't want this code makes in_nmi()
- * to return true, but we need to notify RCU.
- */
- rcu_nmi_enter();
- }
-
preempt_disable();
/* This code is a bit fragile. Test it. */
@@ -828,63 +914,51 @@ NOKPROBE_SYMBOL(debug_exception_enter);
static void debug_exception_exit(struct pt_regs *regs)
{
preempt_enable_no_resched();
-
- if (!user_mode(regs))
- rcu_nmi_exit();
-
- if (interrupts_enabled(regs))
- trace_hardirqs_on();
}
NOKPROBE_SYMBOL(debug_exception_exit);
-#ifdef CONFIG_ARM64_ERRATUM_1463225
-DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
-
-static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
-{
- if (user_mode(regs))
- return 0;
-
- if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
- return 0;
-
- /*
- * We've taken a dummy step exception from the kernel to ensure
- * that interrupts are re-enabled on the syscall path. Return back
- * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
- * masked so that we can safely restore the mdscr and get on with
- * handling the syscall.
- */
- regs->pstate |= PSR_D_BIT;
- return 1;
-}
-#else
-static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
-{
- return 0;
-}
-#endif /* CONFIG_ARM64_ERRATUM_1463225 */
-NOKPROBE_SYMBOL(cortex_a76_erratum_1463225_debug_handler);
-
-void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
+void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
struct pt_regs *regs)
{
const struct fault_info *inf = esr_to_debug_fault_info(esr);
unsigned long pc = instruction_pointer(regs);
- if (cortex_a76_erratum_1463225_debug_handler(regs))
- return;
-
debug_exception_enter(regs);
if (user_mode(regs) && !is_ttbr0_addr(pc))
arm64_apply_bp_hardening();
if (inf->fn(addr_if_watchpoint, esr, regs)) {
- arm64_notify_die(inf->name, regs,
- inf->sig, inf->code, (void __user *)pc, esr);
+ arm64_notify_die(inf->name, regs, inf->sig, inf->code, pc, esr);
}
debug_exception_exit(regs);
}
NOKPROBE_SYMBOL(do_debug_exception);
+
+/*
+ * Used during anonymous page fault handling.
+ */
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;
+
+ /*
+ * If the page is mapped with PROT_MTE, initialise the tags at the
+ * point of allocation and page zeroing as this is usually faster than
+ * separate DC ZVA and STGM.
+ */
+ if (vma->vm_flags & VM_MTE)
+ flags |= __GFP_ZEROTAGS;
+
+ return vma_alloc_folio(flags, 0, vma, vaddr, false);
+}
+
+void tag_clear_highpage(struct page *page)
+{
+ /* Newly allocated page, shouldn't have been tagged yet */
+ WARN_ON_ONCE(!try_page_mte_tagging(page));
+ mte_zero_clear_page_tags(page_address(page));
+ set_page_mte_tagged(page);
+}
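
Worth calling out in the do_page_fault() rework above: vm_flags now states
exactly which VMA permissions make the fault benign. Instruction aborts need
VM_EXEC, write aborts need VM_WRITE, and read faults accept VM_READ or
VM_WRITE (write implies read), plus VM_EXEC when EPAN is absent, since
execute-only then also implies readable. A compact sketch of that selection,
with the ESR predicates reduced to booleans:

  #include <stdbool.h>

  #define VM_READ	0x1UL
  #define VM_WRITE	0x2UL
  #define VM_EXEC	0x4UL

  /* Sketch of the vm_flags selection; the parameters correspond to
   * is_el0_instruction_abort(), is_write_abort() and the ARM64_HAS_EPAN
   * capability check in the patch. */
  static unsigned long fault_vm_flags(bool exec, bool write, bool has_epan)
  {
  	unsigned long flags;

  	if (exec)
  		return VM_EXEC;
  	if (write)
  		return VM_WRITE;
  	flags = VM_READ | VM_WRITE;	/* write implies read */
  	if (!has_epan)
  		flags |= VM_EXEC;	/* exec implies read without EPAN */
  	return flags;
  }
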
diff --git a/arch/arm64/mm/fixmap.c b/arch/arm64/mm/fixmap.c
new file mode 100644
index 000000000000..c0a3301203bd
--- /dev/null
+++ b/arch/arm64/mm/fixmap.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Fixmap manipulation code
+ */
+
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+#include <linux/memory.h>
+#include <linux/mm.h>
+#include <linux/sizes.h>
+
+#include <asm/fixmap.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define NR_BM_PTE_TABLES \
+ SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PMD_SHIFT)
+#define NR_BM_PMD_TABLES \
+ SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PUD_SHIFT)
+
+static_assert(NR_BM_PMD_TABLES == 1);
+
+#define __BM_TABLE_IDX(addr, shift) \
+ (((addr) >> (shift)) - (FIXADDR_TOT_START >> (shift)))
+
+#define BM_PTE_TABLE_IDX(addr) __BM_TABLE_IDX(addr, PMD_SHIFT)
+
+static pte_t bm_pte[NR_BM_PTE_TABLES][PTRS_PER_PTE] __page_aligned_bss;
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
+
+static inline pte_t *fixmap_pte(unsigned long addr)
+{
+ return &bm_pte[BM_PTE_TABLE_IDX(addr)][pte_index(addr)];
+}
+
+static void __init early_fixmap_init_pte(pmd_t *pmdp, unsigned long addr)
+{
+ pmd_t pmd = READ_ONCE(*pmdp);
+ pte_t *ptep;
+
+ if (pmd_none(pmd)) {
+ ptep = bm_pte[BM_PTE_TABLE_IDX(addr)];
+ __pmd_populate(pmdp, __pa_symbol(ptep), PMD_TYPE_TABLE);
+ }
+}
+
+static void __init early_fixmap_init_pmd(pud_t *pudp, unsigned long addr,
+ unsigned long end)
+{
+ unsigned long next;
+ pud_t pud = READ_ONCE(*pudp);
+ pmd_t *pmdp;
+
+ if (pud_none(pud))
+ __pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);
+
+ pmdp = pmd_offset_kimg(pudp, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+ early_fixmap_init_pte(pmdp, addr);
+ } while (pmdp++, addr = next, addr != end);
+}
+
+
+static void __init early_fixmap_init_pud(p4d_t *p4dp, unsigned long addr,
+ unsigned long end)
+{
+ p4d_t p4d = READ_ONCE(*p4dp);
+ pud_t *pudp;
+
+ if (CONFIG_PGTABLE_LEVELS > 3 && !p4d_none(p4d) &&
+ p4d_page_paddr(p4d) != __pa_symbol(bm_pud)) {
+ /*
+ * We only end up here if the kernel mapping and the fixmap
+ * share the top level pgd entry, which should only happen on
+ * 16k/4 levels configurations.
+ */
+ BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+ }
+
+ if (p4d_none(p4d))
+ __p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);
+
+ pudp = pud_offset_kimg(p4dp, addr);
+ early_fixmap_init_pmd(pudp, addr, end);
+}
+
+/*
+ * The p*d_populate functions call virt_to_phys implicitly so they can't be used
+ * directly on kernel symbols (bm_p*d). This function is called too early to use
+ * lm_alias so __p*d_populate functions must be used to populate with the
+ * physical address from __pa_symbol.
+ */
+void __init early_fixmap_init(void)
+{
+ unsigned long addr = FIXADDR_TOT_START;
+ unsigned long end = FIXADDR_TOP;
+
+ pgd_t *pgdp = pgd_offset_k(addr);
+ p4d_t *p4dp = p4d_offset(pgdp, addr);
+
+ early_fixmap_init_pud(p4dp, addr, end);
+}
+
+/*
+ * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
+ * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
+ */
+void __set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags)
+{
+ unsigned long addr = __fix_to_virt(idx);
+ pte_t *ptep;
+
+ BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
+
+ ptep = fixmap_pte(addr);
+
+ if (pgprot_val(flags)) {
+ set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
+ } else {
+ pte_clear(&init_mm, addr, ptep);
+ flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
+ }
+}
+
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
+{
+ const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
+ phys_addr_t dt_phys_base;
+ int offset;
+ void *dt_virt;
+
+ /*
+ * Check whether the physical FDT address is set and meets the minimum
+ * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
+ * at least 8 bytes so that we can always access the magic and size
+ * fields of the FDT header after mapping the first chunk, double check
+ * here if that is indeed the case.
+ */
+ BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
+ if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
+ return NULL;
+
+ dt_phys_base = round_down(dt_phys, PAGE_SIZE);
+ offset = dt_phys % PAGE_SIZE;
+ dt_virt = (void *)dt_virt_base + offset;
+
+ /* map the first chunk so we can read the size from the header */
+ create_mapping_noalloc(dt_phys_base, dt_virt_base, PAGE_SIZE, prot);
+
+ if (fdt_magic(dt_virt) != FDT_MAGIC)
+ return NULL;
+
+ *size = fdt_totalsize(dt_virt);
+ if (*size > MAX_FDT_SIZE)
+ return NULL;
+
+ if (offset + *size > PAGE_SIZE) {
+ create_mapping_noalloc(dt_phys_base, dt_virt_base,
+ offset + *size, prot);
+ }
+
+ return dt_virt;
+}
+
+/*
+ * Copy the fixmap region into a new pgdir.
+ */
+void __init fixmap_copy(pgd_t *pgdir)
+{
+ if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdir, FIXADDR_TOT_START)))) {
+ /*
+ * The fixmap falls in a separate pgd to the kernel, and doesn't
+ * live in the carveout for the swapper_pg_dir. We can simply
+ * re-use the existing dir for the fixmap.
+ */
+ set_pgd(pgd_offset_pgd(pgdir, FIXADDR_TOT_START),
+ READ_ONCE(*pgd_offset_k(FIXADDR_TOT_START)));
+ } else if (CONFIG_PGTABLE_LEVELS > 3) {
+ pgd_t *bm_pgdp;
+ p4d_t *bm_p4dp;
+ pud_t *bm_pudp;
+ /*
+ * The fixmap shares its top level pgd entry with the kernel
+ * mapping. This can really only occur when we are running
+ * with 16k/4 levels, so we can simply reuse the pud level
+ * entry instead.
+ */
+ BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+ bm_pgdp = pgd_offset_pgd(pgdir, FIXADDR_TOT_START);
+ bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_TOT_START);
+ bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_TOT_START);
+ pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
+ pud_clear_fixmap();
+ } else {
+ BUG();
+ }
+}
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index ac485163a4a7..013eead9b695 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -8,34 +8,32 @@
#include <linux/export.h>
#include <linux/mm.h>
+#include <linux/libnvdimm.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/tlbflush.h>
-void sync_icache_aliases(void *kaddr, unsigned long len)
+void sync_icache_aliases(unsigned long start, unsigned long end)
{
- unsigned long addr = (unsigned long)kaddr;
-
if (icache_is_aliasing()) {
- __clean_dcache_area_pou(kaddr, len);
- __flush_icache_all();
+ dcache_clean_pou(start, end);
+ icache_inval_all_pou();
} else {
/*
* Don't issue kick_all_cpus_sync() after I-cache invalidation
* for user mappings.
*/
- __flush_icache_range(addr, addr + len);
+ caches_clean_inval_pou(start, end);
}
}
-static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
- unsigned long uaddr, void *kaddr,
- unsigned long len)
+static void flush_ptrace_access(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
{
if (vma->vm_flags & VM_EXEC)
- sync_icache_aliases(kaddr, len);
+ sync_icache_aliases(start, end);
}
/*
@@ -48,15 +46,19 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long len)
{
memcpy(dst, src, len);
- flush_ptrace_access(vma, page, uaddr, dst, len);
+ flush_ptrace_access(vma, (unsigned long)dst, (unsigned long)dst + len);
}
void __sync_icache_dcache(pte_t pte)
{
- struct page *page = pte_page(pte);
+ struct folio *folio = page_folio(pte_page(pte));
- if (!test_and_set_bit(PG_dcache_clean, &page->flags))
- sync_icache_aliases(page_address(page), page_size(page));
+ if (!test_bit(PG_dcache_clean, &folio->flags)) {
+ sync_icache_aliases((unsigned long)folio_address(folio),
+ (unsigned long)folio_address(folio) +
+ folio_size(folio));
+ set_bit(PG_dcache_clean, &folio->flags);
+ }
}
EXPORT_SYMBOL_GPL(__sync_icache_dcache);
@@ -65,30 +67,36 @@ EXPORT_SYMBOL_GPL(__sync_icache_dcache);
* it as dirty for later flushing when mapped in user space (if executable,
* see __sync_icache_dcache).
*/
+void flush_dcache_folio(struct folio *folio)
+{
+ if (test_bit(PG_dcache_clean, &folio->flags))
+ clear_bit(PG_dcache_clean, &folio->flags);
+}
+EXPORT_SYMBOL(flush_dcache_folio);
+
void flush_dcache_page(struct page *page)
{
- if (test_bit(PG_dcache_clean, &page->flags))
- clear_bit(PG_dcache_clean, &page->flags);
+ flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);
/*
* Additional functions defined in assembly.
*/
-EXPORT_SYMBOL(__flush_icache_range);
+EXPORT_SYMBOL(caches_clean_inval_pou);
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size)
{
/* Ensure order against any prior non-cacheable writes */
dmb(osh);
- __clean_dcache_area_pop(addr, size);
+ dcache_clean_pop((unsigned long)addr, (unsigned long)addr + size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
void arch_invalidate_pmem(void *addr, size_t size)
{
- __inval_dcache_area(addr, size);
+ dcache_inval_poc((unsigned long)addr, (unsigned long)addr + size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index bbeb6a5a6ba6..8116ac599f80 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -17,26 +17,73 @@
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
-#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
-bool arch_hugetlb_migration_supported(struct hstate *h)
+/*
+ * HugeTLB Support Matrix
+ *
+ * ---------------------------------------------------
+ * | Page Size | CONT PTE | PMD | CONT PMD | PUD |
+ * ---------------------------------------------------
+ * | 4K | 64K | 2M | 32M | 1G |
+ * | 16K | 2M | 32M | 1G | |
+ * | 64K | 2M | 512M | 16G | |
+ * ---------------------------------------------------
+ */
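
The matrix above follows directly from the table geometry: each level covers PAGE_SHIFT - 3 more address bits, and the CONT_* columns multiply a level's size by its contiguous-entry count. A standalone sketch that reproduces the numbers (the CONT_PTES/CONT_PMDS counts used are the usual arm64 values; not kernel code, and the PUD column is only meaningful on 4K/4-level configurations):

    #include <stdio.h>

    static void row(const char *granule, unsigned int page_shift,
                    unsigned int cont_ptes, unsigned int cont_pmds)
    {
        unsigned int tbl = page_shift - 3;  /* index bits per table level */
        unsigned long long pmd = 1ULL << (page_shift + tbl);
        unsigned long long pud = pmd << tbl;
        unsigned long long cont_pte = (1ULL << page_shift) * cont_ptes;
        unsigned long long cont_pmd = pmd * cont_pmds;

        printf("%s: CONT_PTE=%#llx PMD=%#llx CONT_PMD=%#llx PUD=%#llx\n",
               granule, cont_pte, pmd, cont_pmd, pud);
    }

    int main(void)
    {
        row(" 4K", 12, 16, 16);     /* 64K, 2M, 32M, 1G */
        row("16K", 14, 128, 32);    /* 2M, 32M, 1G (PUD unused) */
        row("64K", 16, 32, 32);     /* 2M, 512M, 16G (PUD unused) */
        return 0;
    }
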
+
+/*
+ * Reserve CMA areas for the largest supported gigantic
+ * huge page when requested. Any other smaller gigantic
+ * huge pages could still be served from those areas.
+ */
+#ifdef CONFIG_CMA
+void __init arm64_hugetlb_cma_reserve(void)
{
- size_t pagesize = huge_page_size(h);
+ int order;
+
+ if (pud_sect_supported())
+ order = PUD_SHIFT - PAGE_SHIFT;
+ else
+ order = CONT_PMD_SHIFT - PAGE_SHIFT;
+
+ /*
+ * HugeTLB CMA reservation is required for gigantic
+	 * huge pages which cannot be allocated via the
+ * page allocator. Just warn if there is any change
+ * breaking this assumption.
+ */
+ WARN_ON(order <= MAX_PAGE_ORDER);
+ hugetlb_cma_reserve(order);
+}
+#endif /* CONFIG_CMA */
- switch (pagesize) {
-#ifdef CONFIG_ARM64_4K_PAGES
+static bool __hugetlb_valid_size(unsigned long size)
+{
+ switch (size) {
+#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
+ return pud_sect_supported();
#endif
- case PMD_SIZE:
case CONT_PMD_SIZE:
+ case PMD_SIZE:
case CONT_PTE_SIZE:
return true;
}
- pr_warn("%s: unrecognized huge page size 0x%lx\n",
- __func__, pagesize);
+
return false;
}
+
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+bool arch_hugetlb_migration_supported(struct hstate *h)
+{
+ size_t pagesize = huge_page_size(h);
+
+ if (!__hugetlb_valid_size(pagesize)) {
+ pr_warn("%s: unrecognized huge page size 0x%lx\n",
+ __func__, pagesize);
+ return false;
+ }
+ return true;
+}
#endif
int pmd_huge(pmd_t pmd)
@@ -53,25 +100,17 @@ int pud_huge(pud_t pud)
#endif
}
-/*
- * Select all bits except the pfn
- */
-static inline pgprot_t pte_pgprot(pte_t pte)
-{
- unsigned long pfn = pte_pfn(pte);
-
- return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
-}
-
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, size_t *pgsize)
{
pgd_t *pgdp = pgd_offset(mm, addr);
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
*pgsize = PAGE_SIZE;
- pudp = pud_offset(pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ pudp = pud_offset(p4dp, addr);
pmdp = pmd_offset(pudp, addr);
if ((pte_t *)pmdp == ptep) {
*pgsize = PMD_SIZE;
@@ -87,8 +126,11 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
*pgsize = size;
switch (size) {
-#ifdef CONFIG_ARM64_4K_PAGES
+#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
+ if (pud_sect_supported())
+ contig_ptes = 1;
+ break;
#endif
case PMD_SIZE:
contig_ptes = 1;
@@ -106,6 +148,28 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
return contig_ptes;
}
+pte_t huge_ptep_get(pte_t *ptep)
+{
+ int ncontig, i;
+ size_t pgsize;
+ pte_t orig_pte = ptep_get(ptep);
+
+ if (!pte_present(orig_pte) || !pte_cont(orig_pte))
+ return orig_pte;
+
+ ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
+ for (i = 0; i < ncontig; i++, ptep++) {
+ pte_t pte = ptep_get(ptep);
+
+ if (pte_dirty(pte))
+ orig_pte = pte_mkdirty(orig_pte);
+
+ if (pte_young(pte))
+ orig_pte = pte_mkyoung(orig_pte);
+ }
+ return orig_pte;
+}
+
/*
* Changing some bits of contiguous entries requires us to follow a
* Break-Before-Make approach, breaking the whole contiguous set
@@ -114,15 +178,14 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
*
* This helper performs the break step.
*/
-static pte_t get_clear_flush(struct mm_struct *mm,
+static pte_t get_clear_contig(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
unsigned long pgsize,
unsigned long ncontig)
{
- pte_t orig_pte = huge_ptep_get(ptep);
- bool valid = pte_valid(orig_pte);
- unsigned long i, saddr = addr;
+ pte_t orig_pte = ptep_get(ptep);
+ unsigned long i;
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
pte_t pte = ptep_get_and_clear(mm, addr, ptep);
@@ -138,11 +201,19 @@ static pte_t get_clear_flush(struct mm_struct *mm,
if (pte_young(pte))
orig_pte = pte_mkyoung(orig_pte);
}
+ return orig_pte;
+}
- if (valid) {
- struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
- flush_tlb_range(&vma, saddr, addr);
- }
+static pte_t get_clear_contig_flush(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep,
+ unsigned long pgsize,
+ unsigned long ncontig)
+{
+ pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
+ struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
+
+ flush_tlb_range(&vma, addr, addr + (pgsize * ncontig));
return orig_pte;
}
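
The split between get_clear_contig() (the "break" step) and get_clear_contig_flush() mirrors the break-before-make ordering described in the comment above: every entry of the contiguous set is torn down and the TLB flushed before any modified entry is written back. A schematic userspace model of that ordering, using stand-in types rather than kernel APIs:

    #include <stdint.h>
    #include <stdio.h>

    #define NCONTIG 16                  /* e.g. CONT_PTES on a 4K granule */

    static uint64_t ptes[NCONTIG];      /* stand-in page-table entries */

    static void tlb_flush_range(void)
    {
        puts("tlbi: contiguous range invalidated");
    }

    static void bbm_update(const uint64_t *newvals)
    {
        int i;

        /* Break: take down the whole contiguous set first ... */
        for (i = 0; i < NCONTIG; i++)
            ptes[i] = 0;
        tlb_flush_range();

        /* ... Make: only then install the modified entries. */
        for (i = 0; i < NCONTIG; i++)
            ptes[i] = newvals[i];
    }

    int main(void)
    {
        uint64_t newvals[NCONTIG];
        int i;

        for (i = 0; i < NCONTIG; i++)
            newvals[i] = 0x1000 + i;    /* arbitrary new values */
        bbm_update(newvals);
        printf("ptes[0] = %#llx\n", (unsigned long long)ptes[0]);
        return 0;
    }
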
@@ -165,13 +236,13 @@ static void clear_flush(struct mm_struct *mm,
unsigned long i, saddr = addr;
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
- pte_clear(mm, addr, ptep);
+ ptep_clear(mm, addr, ptep);
flush_tlb_range(&vma, saddr, addr);
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+ pte_t *ptep, pte_t pte, unsigned long sz)
{
size_t pgsize;
int i;
@@ -179,18 +250,19 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
unsigned long pfn, dpfn;
pgprot_t hugeprot;
- /*
- * Code needs to be expanded to handle huge swap and migration
- * entries. Needed for HUGETLB and MEMORY_FAILURE.
- */
- WARN_ON(!pte_present(pte));
+ ncontig = num_contig_ptes(sz, &pgsize);
+
+ if (!pte_present(pte)) {
+ for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
+ set_pte_at(mm, addr, ptep, pte);
+ return;
+ }
if (!pte_cont(pte)) {
set_pte_at(mm, addr, ptep, pte);
return;
}
- ncontig = find_num_contig(mm, addr, ptep, &pgsize);
pfn = pte_pfn(pte);
dpfn = pgsize >> PAGE_SHIFT;
hugeprot = pte_pgprot(pte);
@@ -201,28 +273,18 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
-void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned long sz)
-{
- int i, ncontig;
- size_t pgsize;
-
- ncontig = num_contig_ptes(sz, &pgsize);
-
- for (i = 0; i < ncontig; i++, ptep++)
- set_pte(ptep, pte);
-}
-
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz)
{
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep = NULL;
pgdp = pgd_offset(mm, addr);
- pudp = pud_alloc(mm, pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ pudp = pud_alloc(mm, p4dp, addr);
if (!pudp)
return NULL;
@@ -230,20 +292,14 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
ptep = (pte_t *)pudp;
} else if (sz == (CONT_PTE_SIZE)) {
pmdp = pmd_alloc(mm, pudp, addr);
+ if (!pmdp)
+ return NULL;
WARN_ON(addr & (sz - 1));
- /*
- * Note that if this code were ever ported to the
- * 32-bit arm platform then it will cause trouble in
- * the case where CONFIG_HIGHPTE is set, since there
- * will be no pte_unmap() to correspond with this
- * pte_alloc_map().
- */
- ptep = pte_alloc_map(mm, pmdp, addr);
+ ptep = pte_alloc_huge(mm, pmdp, addr);
} else if (sz == PMD_SIZE) {
- if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
- pud_none(READ_ONCE(*pudp)))
- ptep = huge_pmd_share(mm, addr, pudp);
+ if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
+ ptep = huge_pmd_share(mm, vma, addr, pudp);
else
ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
} else if (sz == (CONT_PMD_SIZE)) {
@@ -259,6 +315,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp, pud;
pmd_t *pmdp, pmd;
@@ -266,7 +323,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
if (!pgd_present(READ_ONCE(*pgdp)))
return NULL;
- pudp = pud_offset(pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ if (!p4d_present(READ_ONCE(*p4dp)))
+ return NULL;
+
+ pudp = pud_offset(p4dp, addr);
pud = READ_ONCE(*pudp);
if (sz != PUD_SIZE && pud_none(pud))
return NULL;
@@ -287,16 +348,38 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return (pte_t *)pmdp;
if (sz == CONT_PTE_SIZE)
- return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK));
+ return pte_offset_huge(pmdp, (addr & CONT_PTE_MASK));
return NULL;
}
-pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
+unsigned long hugetlb_mask_last_page(struct hstate *h)
+{
+ unsigned long hp_size = huge_page_size(h);
+
+ switch (hp_size) {
+#ifndef __PAGETABLE_PMD_FOLDED
+ case PUD_SIZE:
+ return PGDIR_SIZE - PUD_SIZE;
+#endif
+ case CONT_PMD_SIZE:
+ return PUD_SIZE - CONT_PMD_SIZE;
+ case PMD_SIZE:
+ return PUD_SIZE - PMD_SIZE;
+ case CONT_PTE_SIZE:
+ return PMD_SIZE - CONT_PTE_SIZE;
+ default:
+ break;
+ }
+
+ return 0UL;
+}
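
Each case above returns the span of the next-higher table level minus one huge-page step at the current level, i.e. the mask of address bits that stay within the same upper-level entry. Worked out for a 4K granule with 4 levels (standalone sketch; the shift values are assumed for that configuration):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long PUD_SIZE      = 1ULL << 30;  /* 1G   */
        unsigned long long CONT_PMD_SIZE = 1ULL << 25;  /* 32M  */
        unsigned long long PMD_SIZE      = 1ULL << 21;  /* 2M   */
        unsigned long long CONT_PTE_SIZE = 1ULL << 16;  /* 64K  */
        unsigned long long PGDIR_SIZE    = 1ULL << 39;  /* 512G */

        printf("PUD:      %#llx\n", PGDIR_SIZE - PUD_SIZE);
        printf("CONT_PMD: %#llx\n", PUD_SIZE - CONT_PMD_SIZE);
        printf("PMD:      %#llx\n", PUD_SIZE - PMD_SIZE);
        printf("CONT_PTE: %#llx\n", PMD_SIZE - CONT_PTE_SIZE);
        return 0;
    }
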
+
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
- size_t pagesize = huge_page_size(hstate_vma(vma));
+	size_t pagesize = 1UL << shift;
+
+	entry = pte_mkhuge(entry);
if (pagesize == CONT_PTE_SIZE) {
entry = pte_mkcont(entry);
} else if (pagesize == CONT_PMD_SIZE) {
@@ -325,14 +408,14 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
{
int ncontig;
size_t pgsize;
- pte_t orig_pte = huge_ptep_get(ptep);
+ pte_t orig_pte = ptep_get(ptep);
if (!pte_cont(orig_pte))
return ptep_get_and_clear(mm, addr, ptep);
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
- return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
+ return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}
/*
@@ -348,11 +431,11 @@ static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
int i;
- if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
+ if (pte_write(pte) != pte_write(ptep_get(ptep)))
return 1;
for (i = 0; i < ncontig; i++) {
- pte_t orig_pte = huge_ptep_get(ptep + i);
+ pte_t orig_pte = ptep_get(ptep + i);
if (pte_dirty(pte) != pte_dirty(orig_pte))
return 1;
@@ -371,19 +454,20 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
int ncontig, i;
size_t pgsize = 0;
unsigned long pfn = pte_pfn(pte), dpfn;
+ struct mm_struct *mm = vma->vm_mm;
pgprot_t hugeprot;
pte_t orig_pte;
if (!pte_cont(pte))
return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
- ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
+ ncontig = find_num_contig(mm, addr, ptep, &pgsize);
dpfn = pgsize >> PAGE_SHIFT;
if (!__cont_access_flags_changed(ptep, pte, ncontig))
return 0;
- orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
+ orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
/* Make sure we don't lose the dirty or young state */
if (pte_dirty(orig_pte))
@@ -394,7 +478,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
hugeprot = pte_pgprot(pte);
for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
- set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
+ set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
return 1;
}
@@ -416,7 +500,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
dpfn = pgsize >> PAGE_SHIFT;
- pte = get_clear_flush(mm, addr, ptep, pgsize, ncontig);
+ pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
pte = pte_wrprotect(pte);
hugeprot = pte_pgprot(pte);
@@ -426,59 +510,56 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
-void huge_ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
{
+ struct mm_struct *mm = vma->vm_mm;
size_t pgsize;
int ncontig;
- if (!pte_cont(READ_ONCE(*ptep))) {
- ptep_clear_flush(vma, addr, ptep);
- return;
- }
+ if (!pte_cont(READ_ONCE(*ptep)))
+ return ptep_clear_flush(vma, addr, ptep);
- ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
- clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
-}
-
-static void __init add_huge_page_size(unsigned long size)
-{
- if (size_to_hstate(size))
- return;
-
- hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
+ ncontig = find_num_contig(mm, addr, ptep, &pgsize);
+ return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
}
static int __init hugetlbpage_init(void)
{
-#ifdef CONFIG_ARM64_4K_PAGES
- add_huge_page_size(PUD_SIZE);
-#endif
- add_huge_page_size(CONT_PMD_SIZE);
- add_huge_page_size(PMD_SIZE);
- add_huge_page_size(CONT_PTE_SIZE);
+ if (pud_sect_supported())
+ hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+
+ hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
+ hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
+ hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);
return 0;
}
arch_initcall(hugetlbpage_init);
-static __init int setup_hugepagesz(char *opt)
+bool __init arch_hugetlb_valid_size(unsigned long size)
{
- unsigned long ps = memparse(opt, &opt);
+ return __hugetlb_valid_size(size);
+}
- switch (ps) {
-#ifdef CONFIG_ARM64_4K_PAGES
- case PUD_SIZE:
-#endif
- case CONT_PMD_SIZE:
- case PMD_SIZE:
- case CONT_PTE_SIZE:
- add_huge_page_size(ps);
- return 1;
+pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+ if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
+ /*
+ * Break-before-make (BBM) is required for all user space mappings
+ * when the permission changes from executable to non-executable
+ * in cases where cpu is affected with errata #2645198.
+		 * in cases where the CPU is affected by erratum #2645198.
+ if (pte_user_exec(READ_ONCE(*ptep)))
+ return huge_ptep_clear_flush(vma, addr, ptep);
}
+ return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
- hugetlb_bad_size();
- pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
- return 0;
+void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+ pte_t old_pte, pte_t pte)
+{
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
-__setup("hugepagesz=", setup_hugepagesz);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index b65dffdfb201..74c1db8ce271 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -16,24 +16,28 @@
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
+#include <linux/math.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
+#include <linux/hugetlb.h>
+#include <linux/acpi_iort.h>
+#include <linux/kmemleak.h>
#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
+#include <asm/kvm_host.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
@@ -41,8 +45,7 @@
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
-
-#define ARM64_ZONE_DMA_BITS 30
+#include <asm/xen/swiotlb-xen.h>
/*
* We need to be able to catch inadvertent references to memstart_addr
@@ -53,232 +56,120 @@
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);
-s64 physvirt_offset __ro_after_init;
-EXPORT_SYMBOL(physvirt_offset);
-
-struct page *vmemmap __ro_after_init;
-EXPORT_SYMBOL(vmemmap);
-
/*
- * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
- * memory as some devices, namely the Raspberry Pi 4, have peripherals with
- * this limited view of the memory. ZONE_DMA32 will cover the rest of the 32
- * bit addressable memory area.
+ * If the corresponding config options are enabled, we create both ZONE_DMA
+ * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
+ * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
+ * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
+ * otherwise it is empty.
*/
-phys_addr_t arm64_dma_phys_limit __ro_after_init;
-static phys_addr_t arm64_dma32_phys_limit __ro_after_init;
+phys_addr_t __ro_after_init arm64_dma_phys_limit;
-#ifdef CONFIG_KEXEC_CORE
/*
- * reserve_crashkernel() - reserves memory for crash kernel
- *
- * This function reserves memory area given in "crashkernel=" kernel command
- * line parameter. The memory reserved is used by dump capture kernel when
- * primary kernel is crashing.
+ * To make optimal use of block mappings when laying out the linear
+ * mapping, round down the base of physical memory to a size that can
+ * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
+ * (64k granule), or a multiple that can be mapped using contiguous bits
+ * in the page tables: 32 * PMD_SIZE (16k granule)
*/
-static void __init reserve_crashkernel(void)
-{
- unsigned long long crash_base, crash_size;
- int ret;
-
- ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
- &crash_size, &crash_base);
- /* no crashkernel= or invalid value specified */
- if (ret || !crash_size)
- return;
-
- crash_size = PAGE_ALIGN(crash_size);
-
- if (crash_base == 0) {
- /* Current arm64 boot protocol requires 2MB alignment */
- crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
- crash_size, SZ_2M);
- if (crash_base == 0) {
- pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
- crash_size);
- return;
- }
- } else {
- /* User specifies base address explicitly. */
- if (!memblock_is_region_memory(crash_base, crash_size)) {
- pr_warn("cannot reserve crashkernel: region is not memory\n");
- return;
- }
-
- if (memblock_is_region_reserved(crash_base, crash_size)) {
- pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
- return;
- }
-
- if (!IS_ALIGNED(crash_base, SZ_2M)) {
- pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
- return;
- }
- }
- memblock_reserve(crash_base, crash_size);
-
- pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
- crash_base, crash_base + crash_size, crash_size >> 20);
-
- crashk_res.start = crash_base;
- crashk_res.end = crash_base + crash_size - 1;
-}
+#if defined(CONFIG_ARM64_4K_PAGES)
+#define ARM64_MEMSTART_SHIFT PUD_SHIFT
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define ARM64_MEMSTART_SHIFT CONT_PMD_SHIFT
#else
-static void __init reserve_crashkernel(void)
-{
-}
-#endif /* CONFIG_KEXEC_CORE */
-
-#ifdef CONFIG_CRASH_DUMP
-static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
- const char *uname, int depth, void *data)
-{
- const __be32 *reg;
- int len;
-
- if (depth != 1 || strcmp(uname, "chosen") != 0)
- return 0;
-
- reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
- if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
- return 1;
-
- elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
- elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);
-
- return 1;
-}
+#define ARM64_MEMSTART_SHIFT PMD_SHIFT
+#endif
/*
- * reserve_elfcorehdr() - reserves memory for elf core header
- *
- * This function reserves the memory occupied by an elf core header
- * described in the device tree. This region contains all the
- * information about primary kernel's core image and is used by a dump
- * capture kernel to access the system memory on primary kernel.
+ * sparsemem vmemmap imposes an additional requirement on the alignment of
+ * memstart_addr, due to the fact that the base of the vmemmap region
+ * corresponds directly to the base of physical memory, and so needs to
+ * appear sufficiently aligned in the virtual address space.
*/
-static void __init reserve_elfcorehdr(void)
+#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
+#define ARM64_MEMSTART_ALIGN (1UL << SECTION_SIZE_BITS)
+#else
+#define ARM64_MEMSTART_ALIGN (1UL << ARM64_MEMSTART_SHIFT)
+#endif
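
Taken together, the two #if blocks above select the larger of the block-mapping shift and the sparsemem section shift. A standalone sketch with typical values (the SECTION_SIZE_BITS values below are assumed examples; the real value depends on the configuration):

    #include <stdio.h>

    static unsigned long long memstart_align(unsigned int memstart_shift,
                                             unsigned int section_bits)
    {
        /* vmemmap wants memstart_addr at least section-aligned */
        unsigned int shift = memstart_shift < section_bits ?
                             section_bits : memstart_shift;
        return 1ULL << shift;
    }

    int main(void)
    {
        printf("4K:  %#llx\n", memstart_align(30, 27)); /* PUD_SHIFT -> 1G */
        printf("16K: %#llx\n", memstart_align(30, 27)); /* CONT_PMD_SHIFT -> 1G */
        printf("64K: %#llx\n", memstart_align(29, 29)); /* PMD_SHIFT -> 512M */
        return 0;
    }
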
+
+static void __init arch_reserve_crashkernel(void)
{
- of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);
+ unsigned long long low_size = 0;
+ unsigned long long crash_base, crash_size;
+ char *cmdline = boot_command_line;
+ bool high = false;
+ int ret;
- if (!elfcorehdr_size)
+ if (!IS_ENABLED(CONFIG_KEXEC_CORE))
return;
- if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
- pr_warn("elfcorehdr is overlapped\n");
+ ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
+ &crash_size, &crash_base,
+ &low_size, &high);
+ if (ret)
return;
- }
-
- memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
- pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
- elfcorehdr_size >> 10, elfcorehdr_addr);
-}
-#else
-static void __init reserve_elfcorehdr(void)
-{
+ reserve_crashkernel_generic(cmdline, crash_size, crash_base,
+ low_size, high);
}
-#endif /* CONFIG_CRASH_DUMP */
/*
- * Return the maximum physical address for a zone with a given address size
- * limit. It currently assumes that for memory starting above 4G, 32-bit
- * devices will use a DMA offset.
+ * Return the maximum physical address for a zone accessible by the given bits
+ * limit. If DRAM starts above 32-bit, expand the zone to the maximum
+ * available memory, otherwise cap it at 32-bit.
*/
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
- phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, zone_bits);
- return min(offset + (1ULL << zone_bits), memblock_end_of_DRAM());
-}
-
-#ifdef CONFIG_NUMA
+ phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
+ phys_addr_t phys_start = memblock_start_of_DRAM();
-static void __init zone_sizes_init(unsigned long min, unsigned long max)
-{
- unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
+ if (phys_start > U32_MAX)
+ zone_mask = PHYS_ADDR_MAX;
+ else if (phys_start > zone_mask)
+ zone_mask = U32_MAX;
-#ifdef CONFIG_ZONE_DMA
- max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
-#endif
-#ifdef CONFIG_ZONE_DMA32
- max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
-#endif
- max_zone_pfns[ZONE_NORMAL] = max;
-
- free_area_init_nodes(max_zone_pfns);
+ return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}
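
The new max_zone_phys() is easiest to follow with concrete numbers. A userspace restatement evaluated for two hypothetical layouts, a Raspberry Pi 4 style 30-bit DMA limit and DRAM that starts entirely above 4 GiB (not kernel code; PHYS_ADDR_MAX is modelled as UINT64_MAX):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t max_zone_phys(unsigned int zone_bits, uint64_t dram_start,
                                  uint64_t dram_end)
    {
        uint64_t zone_mask = (1ULL << zone_bits) - 1;   /* DMA_BIT_MASK() */

        if (dram_start > UINT32_MAX)
            zone_mask = UINT64_MAX;     /* expand to all available memory */
        else if (dram_start > zone_mask)
            zone_mask = UINT32_MAX;     /* cap at 32-bit */

        uint64_t limit = zone_mask < dram_end - 1 ? zone_mask : dram_end - 1;
        return limit + 1;
    }

    int main(void)
    {
        /* DRAM at 0, 30-bit devices: limit is 1G (0x40000000) */
        printf("%#llx\n", (unsigned long long)
               max_zone_phys(30, 0x0, 0x100000000ULL));
        /* DRAM above 4G: the zone expands to the end of memory */
        printf("%#llx\n", (unsigned long long)
               max_zone_phys(30, 0x8000000000ULL, 0x8080000000ULL));
        return 0;
    }
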
-#else
-
-static void __init zone_sizes_init(unsigned long min, unsigned long max)
+static void __init zone_sizes_init(void)
{
- struct memblock_region *reg;
- unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
- unsigned long __maybe_unused max_dma, max_dma32;
-
- memset(zone_size, 0, sizeof(zone_size));
+ unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
+ unsigned int __maybe_unused acpi_zone_dma_bits;
+ unsigned int __maybe_unused dt_zone_dma_bits;
+ phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);
- max_dma = max_dma32 = min;
#ifdef CONFIG_ZONE_DMA
- max_dma = max_dma32 = PFN_DOWN(arm64_dma_phys_limit);
- zone_size[ZONE_DMA] = max_dma - min;
+ acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
+ dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
+ zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
+ arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
+ max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
- max_dma32 = PFN_DOWN(arm64_dma32_phys_limit);
- zone_size[ZONE_DMA32] = max_dma32 - max_dma;
+ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
+ if (!arm64_dma_phys_limit)
+ arm64_dma_phys_limit = dma32_phys_limit;
#endif
- zone_size[ZONE_NORMAL] = max - max_dma32;
-
- memcpy(zhole_size, zone_size, sizeof(zhole_size));
+ if (!arm64_dma_phys_limit)
+ arm64_dma_phys_limit = PHYS_MASK + 1;
+ max_zone_pfns[ZONE_NORMAL] = max_pfn;
- for_each_memblock(memory, reg) {
- unsigned long start = memblock_region_memory_base_pfn(reg);
- unsigned long end = memblock_region_memory_end_pfn(reg);
-
-#ifdef CONFIG_ZONE_DMA
- if (start >= min && start < max_dma) {
- unsigned long dma_end = min(end, max_dma);
- zhole_size[ZONE_DMA] -= dma_end - start;
- start = dma_end;
- }
-#endif
-#ifdef CONFIG_ZONE_DMA32
- if (start >= max_dma && start < max_dma32) {
- unsigned long dma32_end = min(end, max_dma32);
- zhole_size[ZONE_DMA32] -= dma32_end - start;
- start = dma32_end;
- }
-#endif
- if (start >= max_dma32 && start < max) {
- unsigned long normal_end = min(end, max);
- zhole_size[ZONE_NORMAL] -= normal_end - start;
- }
- }
-
- free_area_init_node(0, zone_size, min, zhole_size);
+ free_area_init(max_zone_pfns);
}
-#endif /* CONFIG_NUMA */
-
-int pfn_valid(unsigned long pfn)
+int pfn_is_map_memory(unsigned long pfn)
{
- phys_addr_t addr = pfn << PAGE_SHIFT;
+ phys_addr_t addr = PFN_PHYS(pfn);
- if ((addr >> PAGE_SHIFT) != pfn)
+ /* avoid false positives for bogus PFNs, see comment in pfn_valid() */
+ if (PHYS_PFN(addr) != pfn)
return 0;
-#ifdef CONFIG_SPARSEMEM
- if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
- return 0;
-
- if (!valid_section(__nr_to_section(pfn_to_section_nr(pfn))))
- return 0;
-#endif
return memblock_is_map_memory(addr);
}
-EXPORT_SYMBOL(pfn_valid);
+EXPORT_SYMBOL(pfn_is_map_memory);
-static phys_addr_t memory_limit = PHYS_ADDR_MAX;
+static phys_addr_t memory_limit __ro_after_init = PHYS_ADDR_MAX;
/*
* Limit the memory size that was specified via FDT.
@@ -295,44 +186,23 @@ static int __init early_mem(char *p)
}
early_param("mem", early_mem);
-static int __init early_init_dt_scan_usablemem(unsigned long node,
- const char *uname, int depth, void *data)
-{
- struct memblock_region *usablemem = data;
- const __be32 *reg;
- int len;
-
- if (depth != 1 || strcmp(uname, "chosen") != 0)
- return 0;
-
- reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
- if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
- return 1;
-
- usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
- usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);
-
- return 1;
-}
-
-static void __init fdt_enforce_memory_region(void)
-{
- struct memblock_region reg = {
- .size = 0,
- };
-
- of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);
-
- if (reg.size)
- memblock_cap_memory_range(reg.base, reg.size);
-}
-
void __init arm64_memblock_init(void)
{
- const s64 linear_region_size = BIT(vabits_actual - 1);
+ s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
- /* Handle linux,usable-memory-range property */
- fdt_enforce_memory_region();
+ /*
+ * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
+ * be limited in their ability to support a linear map that exceeds 51
+ * bits of VA space, depending on the placement of the ID map. Given
+ * that the placement of the ID map may be randomized, let's simply
+ * limit the kernel's linear map to 51 bits as well if we detect this
+ * configuration.
+ */
+ if (IS_ENABLED(CONFIG_KVM) && vabits_actual == 52 &&
+ is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
+ pr_info("Capping linear region to 51 bits for KVM in nVHE mode on LVA capable hardware.\n");
+ linear_region_size = min_t(u64, linear_region_size, BIT(51));
+ }
/* Remove memory above our supported physical address size */
memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);
@@ -343,19 +213,8 @@ void __init arm64_memblock_init(void)
memstart_addr = round_down(memblock_start_of_DRAM(),
ARM64_MEMSTART_ALIGN);
- physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;
-
- vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
-
- /*
- * If we are running with a 52-bit kernel VA config on a system that
- * does not support it, we have to offset our vmemmap and physvirt_offset
- * s.t. we avoid the 52-bit portion of the direct linear map
- */
- if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) {
- vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT;
- physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48);
- }
+ if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size)
+ pr_warn("Memory doesn't fit in the linear mapping, VA_BITS too small\n");
/*
* Remove the memory that we will not be able to cover with the
@@ -372,6 +231,16 @@ void __init arm64_memblock_init(void)
}
/*
+ * If we are running with a 52-bit kernel VA config on a system that
+ * does not support it, we have to place the available physical
+ * memory in the 48-bit addressable part of the linear region, i.e.,
+ * we have to move it upward. Since memstart_addr represents the
+ * physical address of PAGE_OFFSET, we have to *subtract* from it.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
+ memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);
+
+ /*
* Apply the memory limit if it was set. Since the kernel may be loaded
* high up in memory, add back the kernel region that must be accessible
* via the linear mapping.
@@ -404,23 +273,26 @@ void __init arm64_memblock_init(void)
"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
phys_initrd_size = 0;
} else {
- memblock_remove(base, size); /* clear MEMBLOCK_ flags */
memblock_add(base, size);
+ memblock_clear_nomap(base, size);
memblock_reserve(base, size);
}
}
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
extern u16 memstart_offset_seed;
- u64 range = linear_region_size -
- (memblock_end_of_DRAM() - memblock_start_of_DRAM());
+ u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+ int parange = cpuid_feature_extract_unsigned_field(
+ mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+ s64 range = linear_region_size -
+ BIT(id_aa64mmfr0_parange_to_phys_shift(parange));
/*
* If the size of the linear region exceeds, by a sufficient
- * margin, the size of the region that the available physical
- * memory spans, randomize the linear region as well.
+ * margin, the size of the region that the physical memory can
+ * span, randomize the linear region as well.
*/
- if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
+ if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
range /= ARM64_MEMSTART_ALIGN;
memstart_addr -= ARM64_MEMSTART_ALIGN *
((range * memstart_offset_seed) >> 16);
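
The randomization step above treats the 16-bit seed as a fraction of the slack between the linear region and the maximum physical range, quantized to ARM64_MEMSTART_ALIGN steps. A standalone sketch of the arithmetic with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t align = 1ULL << 30;    /* assumed ARM64_MEMSTART_ALIGN (4K) */
        int64_t range = 1LL << 44;      /* assumed linear-region slack */
        uint16_t seed = 0xc0de;         /* memstart_offset_seed */

        if (seed > 0 && range >= (int64_t)align) {
            range /= (int64_t)align;    /* number of candidate slots */
            uint64_t offset = align * (((uint64_t)range * seed) >> 16);
            printf("memstart_addr -= %#llx\n", (unsigned long long)offset);
        }
        return 0;
    }
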
@@ -431,7 +303,7 @@ void __init arm64_memblock_init(void)
* Register the kernel text, kernel data, initrd, and initial
* pagetables with memblock.
*/
- memblock_reserve(__pa_symbol(_text), _end - _text);
+ memblock_reserve(__pa_symbol(_stext), _end - _stext);
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
/* the generic initrd code expects virtual addresses */
initrd_start = __phys_to_virt(phys_initrd_start);
@@ -440,23 +312,7 @@ void __init arm64_memblock_init(void)
early_init_fdt_scan_reserved_mem();
- if (IS_ENABLED(CONFIG_ZONE_DMA)) {
- zone_dma_bits = ARM64_ZONE_DMA_BITS;
- arm64_dma_phys_limit = max_zone_phys(ARM64_ZONE_DMA_BITS);
- }
-
- if (IS_ENABLED(CONFIG_ZONE_DMA32))
- arm64_dma32_phys_limit = max_zone_phys(32);
- else
- arm64_dma32_phys_limit = PHYS_MASK + 1;
-
- reserve_crashkernel();
-
- reserve_elfcorehdr();
-
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
-
- dma_contiguous_reserve(arm64_dma32_phys_limit);
}
void __init bootmem_init(void)
@@ -471,86 +327,39 @@ void __init bootmem_init(void)
max_pfn = max_low_pfn = max;
min_low_pfn = min;
- arm64_numa_init();
+ arch_numa_init();
+
/*
- * Sparsemem tries to allocate bootmem in memory_present(), so must be
- * done after the fixed reservations.
+	 * must be done after arch_numa_init(), which calls numa_init() to
+	 * initialize node_online_map; hugetlb_cma_reserve() uses that map
+	 * when allocating the required CMA size across online nodes.
*/
- memblocks_present();
-
- sparse_init();
- zone_sizes_init(min, max);
-
- memblock_dump_all();
-}
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
+ arm64_hugetlb_cma_reserve();
+#endif
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
-static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
-{
- struct page *start_pg, *end_pg;
- unsigned long pg, pgend;
+ kvm_hyp_reserve();
/*
- * Convert start_pfn/end_pfn to a struct page pointer.
+ * sparse_init() tries to allocate memory from memblock, so must be
+ * done after the fixed reservations
*/
- start_pg = pfn_to_page(start_pfn - 1) + 1;
- end_pg = pfn_to_page(end_pfn - 1) + 1;
+ sparse_init();
+ zone_sizes_init();
/*
- * Convert to physical addresses, and round start upwards and end
- * downwards.
+ * Reserve the CMA area after arm64_dma_phys_limit was initialised.
*/
- pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
- pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
+ dma_contiguous_reserve(arm64_dma_phys_limit);
/*
- * If there are free pages between these, free the section of the
- * memmap array.
+ * request_standard_resources() depends on crashkernel's memory being
+ * reserved, so do it here.
*/
- if (pg < pgend)
- memblock_free(pg, pgend - pg);
-}
-
-/*
- * The mem_map array can get very big. Free the unused area of the memory map.
- */
-static void __init free_unused_memmap(void)
-{
- unsigned long start, prev_end = 0;
- struct memblock_region *reg;
-
- for_each_memblock(memory, reg) {
- start = __phys_to_pfn(reg->base);
-
-#ifdef CONFIG_SPARSEMEM
- /*
- * Take care not to free memmap entries that don't exist due
- * to SPARSEMEM sections which aren't present.
- */
- start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
-#endif
- /*
- * If we had a previous bank, and there is a space between the
- * current bank and the previous, free it.
- */
- if (prev_end && prev_end < start)
- free_memmap(prev_end, start);
-
- /*
- * Align up here since the VM subsystem insists that the
- * memmap entries are valid from the bank end aligned to
- * MAX_ORDER_NR_PAGES.
- */
- prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
- MAX_ORDER_NR_PAGES);
- }
+ arch_reserve_crashkernel();
-#ifdef CONFIG_SPARSEMEM
- if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
- free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
-#endif
+ memblock_dump_all();
}
-#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
/*
* mem_init() marks the free areas in the mem_map and tells us how much memory
@@ -559,22 +368,24 @@ static void __init free_unused_memmap(void)
*/
void __init mem_init(void)
{
- if (swiotlb_force == SWIOTLB_FORCE ||
- max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
- swiotlb_init(1);
- else
- swiotlb_force = SWIOTLB_NO_FORCE;
+ bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit);
+
+ if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && !swiotlb) {
+ /*
+ * If no bouncing needed for ZONE_DMA, reduce the swiotlb
+ * buffer for kmalloc() bouncing to 1MB per 1GB of RAM.
+ */
+ unsigned long size =
+ DIV_ROUND_UP(memblock_phys_mem_size(), 1024);
+ swiotlb_adjust_size(min(swiotlb_size_or_default(), size));
+ swiotlb = true;
+ }
- set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
+ swiotlb_init(swiotlb, SWIOTLB_VERBOSE);
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
- free_unused_memmap();
-#endif
/* this will put all unused low memory onto the freelists */
memblock_free_all();
- mem_init_print_info(NULL);
-
/*
* Check boundaries twice: Some fundamental inconsistencies can be
* detected at build time already.
@@ -583,6 +394,13 @@ void __init mem_init(void)
BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif
+ /*
+ * Selected page table levels should match when derived from
+ * scratch using the virtual address range and page size.
+ */
+ BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
+ CONFIG_PGTABLE_LEVELS);
+
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
extern int sysctl_overcommit_memory;
/*
@@ -603,30 +421,14 @@ void free_initmem(void)
* prevents the region from being reused for kernel modules, which
* is not supported by kallsyms.
*/
- unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
+ vunmap_range((u64)__init_begin, (u64)__init_end);
}
-/*
- * Dump out memory limit information on panic.
- */
-static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
+void dump_mem_limit(void)
{
if (memory_limit != PHYS_ADDR_MAX) {
pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
} else {
pr_emerg("Memory Limit: none\n");
}
- return 0;
-}
-
-static struct notifier_block mem_limit_notifier = {
- .notifier_call = dump_mem_limit,
-};
-
-static int __init register_mem_limit_dumper(void)
-{
- atomic_notifier_chain_register(&panic_notifier_list,
- &mem_limit_notifier);
- return 0;
}
-__initcall(register_mem_limit_dumper);
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 9be71bee902c..269f2f63ab7d 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -1,97 +1,24 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Based on arch/arm/mm/ioremap.c
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- * Hacked for ARM by Phil Blundell <philb@gnu.org>
- * Hacked to allow all architectures to build, and various cleanups
- * by Russell King
- * Copyright (C) 2012 ARM Ltd.
- */
-#include <linux/export.h>
#include <linux/mm.h>
-#include <linux/vmalloc.h>
#include <linux/io.h>
-#include <asm/fixmap.h>
-#include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
-
-static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
- pgprot_t prot, void *caller)
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+ unsigned long prot)
{
- unsigned long last_addr;
- unsigned long offset = phys_addr & ~PAGE_MASK;
- int err;
- unsigned long addr;
- struct vm_struct *area;
-
- /*
- * Page align the mapping address and size, taking account of any
- * offset.
- */
- phys_addr &= PAGE_MASK;
- size = PAGE_ALIGN(size + offset);
-
- /*
- * Don't allow wraparound, zero size or outside PHYS_MASK.
- */
- last_addr = phys_addr + size - 1;
- if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
- return NULL;
+ unsigned long last_addr = phys_addr + size - 1;
- /*
- * Don't allow RAM to be mapped.
- */
- if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
+ /* Don't allow outside PHYS_MASK */
+ if (last_addr & ~PHYS_MASK)
return NULL;
- area = get_vm_area_caller(size, VM_IOREMAP, caller);
- if (!area)
+ /* Don't allow RAM to be mapped. */
+ if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr))))
return NULL;
- addr = (unsigned long)area->addr;
- area->phys_addr = phys_addr;
- err = ioremap_page_range(addr, addr + size, phys_addr, prot);
- if (err) {
- vunmap((void *)addr);
- return NULL;
- }
-
- return (void __iomem *)(offset + addr);
-}
-
-void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
-{
- return __ioremap_caller(phys_addr, size, prot,
- __builtin_return_address(0));
+ return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
}
-EXPORT_SYMBOL(__ioremap);
-
-void iounmap(volatile void __iomem *io_addr)
-{
- unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
-
- /*
- * We could get an address outside vmalloc range in case
- * of ioremap_cache() reusing a RAM mapping.
- */
- if (is_vmalloc_addr((void *)addr))
- vunmap((void *)addr);
-}
-EXPORT_SYMBOL(iounmap);
-
-void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
-{
- /* For normal memory we already have a cacheable mapping. */
- if (pfn_valid(__phys_to_pfn(phys_addr)))
- return (void __iomem *)__phys_to_virt(phys_addr);
-
- return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
- __builtin_return_address(0));
-}
-EXPORT_SYMBOL(ioremap_cache);
+EXPORT_SYMBOL(ioremap_prot);
/*
* Must be called after early_fixmap_init
@@ -100,3 +27,11 @@ void __init early_ioremap_init(void)
{
early_ioremap_setup();
}
+
+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+ unsigned long flags)
+{
+ unsigned long pfn = PHYS_PFN(offset);
+
+ return pfn_is_map_memory(pfn);
+}
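
For context, a typical consumer of the slimmed-down ioremap_prot() path looks like the hypothetical driver fragment below. The base address and size are invented for illustration; since RAM is rejected via pfn_is_map_memory(), the call only succeeds for genuine MMIO:

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/printk.h>

    #define EXAMPLE_MMIO_BASE 0x09000000UL  /* assumed device register block */
    #define EXAMPLE_MMIO_SIZE 0x1000UL

    static int example_probe(void)
    {
        void __iomem *regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);

        if (!regs)
            return -ENOMEM;
        pr_info("reg0 = %#x\n", readl(regs));   /* first 32-bit register */
        iounmap(regs);
        return 0;
    }
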
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index f87a32484ea8..4c7ad574b946 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -18,10 +18,11 @@
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
/*
@@ -35,7 +36,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
- MEMBLOCK_ALLOC_KASAN, node);
+ MEMBLOCK_ALLOC_NOLEAKTRACE, node);
if (!p)
panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
__func__, PAGE_SIZE, PAGE_SIZE, node,
@@ -48,7 +49,8 @@ static phys_addr_t __init kasan_alloc_raw_page(int node)
{
void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
- MEMBLOCK_ALLOC_KASAN, node);
+ MEMBLOCK_ALLOC_NOLEAKTRACE,
+ node);
if (!p)
panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
__func__, PAGE_SIZE, PAGE_SIZE, node,
@@ -78,23 +80,23 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
phys_addr_t pmd_phys = early ?
__pa_symbol(kasan_early_shadow_pmd)
: kasan_alloc_zeroed_page(node);
- __pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
+ __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
}
return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}
-static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
+static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
bool early)
{
- if (pgd_none(READ_ONCE(*pgdp))) {
+ if (p4d_none(READ_ONCE(*p4dp))) {
phys_addr_t pud_phys = early ?
__pa_symbol(kasan_early_shadow_pud)
: kasan_alloc_zeroed_page(node);
- __pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
+ __p4d_populate(p4dp, pud_phys, P4D_TYPE_TABLE);
}
- return early ? pud_offset_kimg(pgdp, addr) : pud_offset(pgdp, addr);
+ return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
@@ -126,11 +128,11 @@ static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}
-static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
+static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
unsigned long end, int node, bool early)
{
unsigned long next;
- pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early);
+ pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);
do {
next = pud_addr_end(addr, end);
@@ -138,6 +140,18 @@ static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}
+static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
+ unsigned long end, int node, bool early)
+{
+ unsigned long next;
+ p4d_t *p4dp = p4d_offset(pgdp, addr);
+
+ do {
+ next = p4d_addr_end(addr, end);
+ kasan_pud_populate(p4dp, addr, next, node, early);
+ } while (p4dp++, addr = next, addr != end);
+}
+
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
int node, bool early)
{
@@ -147,7 +161,7 @@ static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
pgdp = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
- kasan_pud_populate(pgdp, addr, next, node, early);
+ kasan_p4d_populate(pgdp, addr, next, node, early);
} while (pgdp++, addr = next, addr != end);
}
@@ -156,6 +170,11 @@ asmlinkage void __init kasan_early_init(void)
{
BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
+ /*
+ * We cannot check the actual value of KASAN_SHADOW_START during build,
+ * as it depends on vabits_actual. As a best-effort approach, check
+ * potential values calculated based on VA_BITS and VA_BITS_MIN.
+ */
BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
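
The alignment checks above all rest on the linear shadow mapping shadow(addr) = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET, with a scale shift of 3 for generic KASAN (one shadow byte per 8 bytes of memory). A standalone illustration; the offset and address below are arbitrary examples, not a real configuration:

    #include <stdint.h>
    #include <stdio.h>

    #define KASAN_SHADOW_SCALE_SHIFT 3
    #define KASAN_SHADOW_OFFSET 0xdffffff800000000ULL   /* assumed */

    static uint64_t mem_to_shadow(uint64_t addr)
    {
        return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
    }

    int main(void)
    {
        uint64_t kaddr = 0xffff000010000000ULL; /* hypothetical kernel VA */

        printf("shadow(%#llx) = %#llx\n",
               (unsigned long long)kaddr,
               (unsigned long long)mem_to_shadow(kaddr));
        return 0;
    }
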
@@ -179,7 +198,7 @@ void __init kasan_copy_shadow(pgd_t *pgdir)
pgdp = pgd_offset_k(KASAN_SHADOW_START);
pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
- pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
+ pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
do {
set_pgd(pgdp_new, READ_ONCE(*pgdp));
} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
@@ -197,18 +216,20 @@ static void __init clear_pgds(unsigned long start,
set_pgd(pgd_offset_k(start), __pgd(0));
}
-void __init kasan_init(void)
+static void __init kasan_init_shadow(void)
{
u64 kimg_shadow_start, kimg_shadow_end;
- u64 mod_shadow_start, mod_shadow_end;
- struct memblock_region *reg;
- int i;
+ u64 mod_shadow_start;
+ u64 vmalloc_shadow_end;
+ phys_addr_t pa_start, pa_end;
+ u64 i;
- kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
- kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));
+ kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK;
+ kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));
mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
- mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
+
+ vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
/*
* We are going to perform proper setup of shadow memory.
@@ -219,25 +240,23 @@ void __init kasan_init(void)
*/
memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
dsb(ishst);
- cpu_replace_ttbr1(lm_alias(tmp_pg_dir));
+ cpu_replace_ttbr1(lm_alias(tmp_pg_dir), idmap_pg_dir);
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
- early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
+ early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));
kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
(void *)mod_shadow_start);
- kasan_populate_early_shadow((void *)kimg_shadow_end,
- (void *)KASAN_SHADOW_END);
- if (kimg_shadow_start > mod_shadow_end)
- kasan_populate_early_shadow((void *)mod_shadow_end,
- (void *)kimg_shadow_start);
+ BUILD_BUG_ON(VMALLOC_START != MODULES_END);
+ kasan_populate_early_shadow((void *)vmalloc_shadow_end,
+ (void *)KASAN_SHADOW_END);
- for_each_memblock(memory, reg) {
- void *start = (void *)__phys_to_virt(reg->base);
- void *end = (void *)__phys_to_virt(reg->base + reg->size);
+ for_each_mem_range(i, &pa_start, &pa_end) {
+ void *start = (void *)__phys_to_virt(pa_start);
+ void *end = (void *)__phys_to_virt(pa_end);
if (start >= end)
break;
@@ -257,9 +276,42 @@ void __init kasan_init(void)
PAGE_KERNEL_RO));
memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
- cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
+}
- /* At this point kasan is fully initialized. Enable error messages */
+static void __init kasan_init_depth(void)
+{
init_task.kasan_depth = 0;
- pr_info("KernelAddressSanitizer initialized\n");
}
+
+#ifdef CONFIG_KASAN_VMALLOC
+void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
+{
+ unsigned long shadow_start, shadow_end;
+
+ if (!is_vmalloc_or_module_addr(start))
+ return;
+
+ shadow_start = (unsigned long)kasan_mem_to_shadow(start);
+ shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
+ shadow_end = (unsigned long)kasan_mem_to_shadow(start + size);
+ shadow_end = ALIGN(shadow_end, PAGE_SIZE);
+ kasan_map_populate(shadow_start, shadow_end, NUMA_NO_NODE);
+}
+#endif
+
+void __init kasan_init(void)
+{
+ kasan_init_shadow();
+ kasan_init_depth();
+#if defined(CONFIG_KASAN_GENERIC)
+ /*
+ * Generic KASAN is now fully initialized.
+ * Software and Hardware Tag-Based modes still require
+ * kasan_init_sw_tags() and kasan_init_hw_tags() correspondingly.
+ */
+ pr_info("KernelAddressSanitizer initialized (generic)\n");
+#endif
+}
+
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 3028bacbc4e9..645fe60d000f 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -5,20 +5,34 @@
* Copyright (C) 2012 ARM Ltd.
*/
-#include <linux/elf.h>
-#include <linux/fs.h>
+#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/export.h>
-#include <linux/shm.h>
-#include <linux/sched/signal.h>
-#include <linux/sched/mm.h>
-#include <linux/io.h>
-#include <linux/personality.h>
-#include <linux/random.h>
+#include <linux/types.h>
+
+#include <asm/cpufeature.h>
+#include <asm/page.h>
-#include <asm/cputype.h>
+static pgprot_t protection_map[16] __ro_after_init = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_READONLY,
+ [VM_WRITE | VM_READ] = PAGE_READONLY,
+ /* PAGE_EXECONLY if Enhanced PAN */
+ [VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_WRITE] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ /* PAGE_EXECONLY if Enhanced PAN */
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
+};
/*
* You really shouldn't be using read() or write() on /dev/mem. This might go
@@ -48,23 +62,42 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
}
-#ifdef CONFIG_STRICT_DEVMEM
-
-#include <linux/ioport.h>
-
-/*
- * devmem_is_allowed() checks to see if /dev/mem access to a certain address
- * is valid. The argument is a physical page number. We mimic x86 here by
- * disallowing access to system RAM as well as device-exclusive MMIO regions.
- * This effectively disable read()/write() on /dev/mem.
- */
-int devmem_is_allowed(unsigned long pfn)
+static int __init adjust_protection_map(void)
{
- if (iomem_is_exclusive(pfn << PAGE_SHIFT))
- return 0;
- if (!page_is_ram(pfn))
- return 1;
+ /*
+ * With Enhanced PAN we can honour the execute-only permissions as
+ * there is no PAN override with such mappings.
+ */
+ if (cpus_have_cap(ARM64_HAS_EPAN)) {
+ protection_map[VM_EXEC] = PAGE_EXECONLY;
+ protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
+ }
+
return 0;
}
+arch_initcall(adjust_protection_map);
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ pteval_t prot = pgprot_val(protection_map[vm_flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
+
+ if (vm_flags & VM_ARM64_BTI)
+ prot |= PTE_GP;
-#endif
+ /*
+ * There are two conditions required for returning a Normal Tagged
+ * memory type: (1) the user requested it via PROT_MTE passed to
+ * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
+ * register (1) as VM_MTE in the vma->vm_flags and (2) as
+ * VM_MTE_ALLOWED. Note that the latter can only be set during the
+ * mmap() call since mprotect() does not accept MAP_* flags.
+ * Checking for VM_MTE only is sufficient since arch_validate_flags()
+ * does not permit (VM_MTE & !VM_MTE_ALLOWED).
+ */
+ if (vm_flags & VM_MTE)
+ prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
+
+ return __pgprot(prot);
+}
+EXPORT_SYMBOL(vm_get_page_prot);
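
A userspace model of the lookup above: the low four VM_* bits index the table, then BTI and MTE fold extra PTE bits into the result. All flag and bit values here are placeholders, not the real arm64 encodings (MTE in particular sets a memory-type field rather than a single bit):

    #include <stdint.h>
    #include <stdio.h>

    #define VM_READ   0x1
    #define VM_WRITE  0x2
    #define VM_EXEC   0x4
    #define VM_SHARED 0x8
    #define VM_MTE    0x100         /* placeholder flag values */
    #define VM_BTI    0x200

    #define PTE_GP     (1ULL << 50) /* placeholder PTE bits */
    #define PTE_TAGGED (1ULL << 55)

    static const uint64_t protection_map[16] = { 0 }; /* entries elided */

    static uint64_t vm_get_page_prot(unsigned long vm_flags)
    {
        uint64_t prot = protection_map[vm_flags & 0xf];

        if (vm_flags & VM_BTI)
            prot |= PTE_GP;         /* guarded page for BTI */
        if (vm_flags & VM_MTE)
            prot |= PTE_TAGGED;     /* Normal Tagged memory type */
        return prot;
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)
               vm_get_page_prot(VM_READ | VM_EXEC | VM_BTI));
        return 0;
    }
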
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5a3b15a14a7f..1ac7467d34c9 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -17,10 +17,14 @@
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
+#include <linux/memremap.h>
+#include <linux/memory.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
+#include <linux/set_memory.h>
+#include <linux/kfence.h>
#include <asm/barrier.h>
#include <asm/cputype.h>
@@ -34,19 +38,31 @@
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+#include <asm/kfence.h>
#define NO_BLOCK_MAPPINGS BIT(0)
#define NO_CONT_MAPPINGS BIT(1)
+#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */
-u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
-u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
+int idmap_t0sz __ro_after_init;
-u64 __section(".mmuoff.data.write") vabits_actual;
+#if VA_BITS > 48
+u64 vabits_actual __ro_after_init = VA_BITS_MIN;
EXPORT_SYMBOL(vabits_actual);
+#endif
u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);
+u32 __boot_cpu_mode[] = { BOOT_CPU_MODE_EL2, BOOT_CPU_MODE_EL1 };
+
+/*
+ * The booting CPU updates the failed status @__early_cpu_boot_status,
+ * with MMU turned off.
+ */
+long __section(".mmuoff.data.write") __early_cpu_boot_status;
+
/*
* Empty_zero_page is a special page that is used for zero-initialized data
* and COW.
@@ -54,11 +70,8 @@ EXPORT_SYMBOL(kimage_voffset);
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
-static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
-static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
-
static DEFINE_SPINLOCK(swapper_pgdir_lock);
+static DEFINE_MUTEX(fixmap_lock);
void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
@@ -79,7 +92,7 @@ void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
- if (!pfn_valid(pfn))
+ if (!pfn_is_map_memory(pfn))
return pgprot_noncached(vma_prot);
else if (file->f_flags & O_SYNC)
return pgprot_writecombine(vma_prot);
@@ -92,7 +105,8 @@ static phys_addr_t __init early_pgtable_alloc(int shift)
phys_addr_t phys;
void *ptr;
- phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+ phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
+ MEMBLOCK_ALLOC_NOLEAKTRACE);
if (!phys)
panic("Failed to allocate page table page\n");
@@ -114,18 +128,22 @@ static phys_addr_t __init early_pgtable_alloc(int shift)
return phys;
}
-static bool pgattr_change_is_safe(u64 old, u64 new)
+bool pgattr_change_is_safe(u64 old, u64 new)
{
/*
* The following mapping attributes may be updated in live
* kernel mappings without the need for break-before-make.
*/
- static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
+ pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
/* creating or taking down mappings is always safe */
- if (old == 0 || new == 0)
+ if (!pte_valid(__pte(old)) || !pte_valid(__pte(new)))
return true;
+ /* A live entry's pfn should not change */
+ if (pte_pfn(__pte(old)) != pte_pfn(__pte(new)))
+ return false;
+
/* live contiguous mappings may not be manipulated at all */
if ((old | new) & PTE_CONT)
return false;
@@ -134,6 +152,17 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
if (old & ~new & PTE_NG)
return false;
+ /*
+ * Changing the memory type between Normal and Normal-Tagged is safe
+ * since Tagged is considered a permission attribute from the
+ * perspective of mismatched attribute aliases.
+ */
+ if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
+ (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
+ ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
+ (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
+ mask |= PTE_ATTRINDX_MASK;
+
return ((old ^ new) & ~mask) == 0;
}
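
The rewritten pgattr_change_is_safe() boils down to "live entries may only differ in a whitelisted set of bits". A user-space model of that check, with hypothetical attribute bits standing in for the PTE bits:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define A_VALID  (1u << 0)
    #define A_RDONLY (1u << 1)
    #define A_PXN    (1u << 2)
    #define A_CONT   (1u << 3)

    static bool change_is_safe(uint32_t old, uint32_t new)
    {
        uint32_t mask = A_RDONLY | A_PXN;  /* updatable without BBM */

        if (!(old & A_VALID) || !(new & A_VALID))
            return true;               /* create/teardown is always safe */
        if ((old | new) & A_CONT)
            return false;              /* live contiguous: hands off */
        return ((old ^ new) & ~mask) == 0;  /* only masked bits may differ */
    }

    int main(void)
    {
        assert(change_is_safe(A_VALID, A_VALID | A_RDONLY));
        assert(!change_is_safe(A_VALID, A_VALID | A_CONT));
        return 0;
    }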
@@ -172,10 +201,14 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
BUG_ON(pmd_sect(pmd));
if (pmd_none(pmd)) {
+ pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN;
phys_addr_t pte_phys;
+
+ if (flags & NO_EXEC_MAPPINGS)
+ pmdval |= PMD_TABLE_PXN;
BUG_ON(!pgtable_alloc);
pte_phys = pgtable_alloc(PAGE_SHIFT);
- __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
+ __pmd_populate(pmdp, pte_phys, pmdval);
pmd = READ_ONCE(*pmdp);
}
BUG_ON(pmd_bad(pmd));
@@ -210,7 +243,7 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
next = pmd_addr_end(addr, end);
/* try section mapping first */
- if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+ if (((addr | next | phys) & ~PMD_MASK) == 0 &&
(flags & NO_BLOCK_MAPPINGS) == 0) {
pmd_set_huge(pmdp, phys, prot);
@@ -246,10 +279,14 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
*/
BUG_ON(pud_sect(pud));
if (pud_none(pud)) {
+ pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN;
phys_addr_t pmd_phys;
+
+ if (flags & NO_EXEC_MAPPINGS)
+ pudval |= PUD_TABLE_PXN;
BUG_ON(!pgtable_alloc);
pmd_phys = pgtable_alloc(PMD_SHIFT);
- __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
+ __pud_populate(pudp, pmd_phys, pudval);
pud = READ_ONCE(*pudp);
}
BUG_ON(pud_bad(pud));
@@ -270,18 +307,6 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
} while (addr = next, addr != end);
}
-static inline bool use_1G_block(unsigned long addr, unsigned long next,
- unsigned long phys)
-{
- if (PAGE_SHIFT != 12)
- return false;
-
- if (((addr | next | phys) & ~PUD_MASK) != 0)
- return false;
-
- return true;
-}
-
static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
phys_addr_t (*pgtable_alloc)(int),
@@ -289,18 +314,23 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
{
unsigned long next;
pud_t *pudp;
- pgd_t pgd = READ_ONCE(*pgdp);
+ p4d_t *p4dp = p4d_offset(pgdp, addr);
+ p4d_t p4d = READ_ONCE(*p4dp);
- if (pgd_none(pgd)) {
+ if (p4d_none(p4d)) {
+ p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN;
phys_addr_t pud_phys;
+
+ if (flags & NO_EXEC_MAPPINGS)
+ p4dval |= P4D_TABLE_PXN;
BUG_ON(!pgtable_alloc);
pud_phys = pgtable_alloc(PUD_SHIFT);
- __pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
- pgd = READ_ONCE(*pgdp);
+ __p4d_populate(p4dp, pud_phys, p4dval);
+ p4d = READ_ONCE(*p4dp);
}
- BUG_ON(pgd_bad(pgd));
+ BUG_ON(p4d_bad(p4d));
- pudp = pud_set_fixmap_offset(pgdp, addr);
+ pudp = pud_set_fixmap_offset(p4dp, addr);
do {
pud_t old_pud = READ_ONCE(*pudp);
@@ -309,7 +339,8 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
/*
* For 4K granule only, attempt to put down a 1GB block
*/
- if (use_1G_block(addr, next, phys) &&
+ if (pud_sect_supported() &&
+ ((addr | next | phys) & ~PUD_MASK) == 0 &&
(flags & NO_BLOCK_MAPPINGS) == 0) {
pud_set_huge(pudp, phys, prot);
@@ -332,14 +363,14 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
pud_clear_fixmap();
}
-static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
- unsigned long virt, phys_addr_t size,
- pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int),
- int flags)
+static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
+ unsigned long virt, phys_addr_t size,
+ pgprot_t prot,
+ phys_addr_t (*pgtable_alloc)(int),
+ int flags)
{
unsigned long addr, end, next;
- pgd_t *pgdp = pgd_offset_raw(pgdir, virt);
+ pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);
/*
* If the virtual and physical address don't have the same offset
@@ -360,6 +391,25 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
} while (pgdp++, addr = next, addr != end);
}
+static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
+ unsigned long virt, phys_addr_t size,
+ pgprot_t prot,
+ phys_addr_t (*pgtable_alloc)(int),
+ int flags)
+{
+ mutex_lock(&fixmap_lock);
+ __create_pgd_mapping_locked(pgdir, phys, virt, size, prot,
+ pgtable_alloc, flags);
+ mutex_unlock(&fixmap_lock);
+}
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+extern __alias(__create_pgd_mapping_locked)
+void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot,
+ phys_addr_t (*pgtable_alloc)(int), int flags);
+#endif
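
The extern __alias() declaration above gives create_kpti_ng_temp_pgd() the body of __create_pgd_mapping_locked() without routing the KPTI caller through the fixmap mutex. A standalone illustration of the underlying GCC alias attribute (the function names here are made up):

    #include <stdio.h>

    static void do_work_locked_variant(int v)
    {
        printf("work %d\n", v);
    }

    /* a second external linker name for the same definition */
    extern void do_work_special(int v)
        __attribute__((alias("do_work_locked_variant")));

    int main(void)
    {
        do_work_special(1);
        return 0;
    }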
+
static phys_addr_t __pgd_pgtable_alloc(int shift)
{
void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
@@ -373,6 +423,7 @@ static phys_addr_t __pgd_pgtable_alloc(int shift)
static phys_addr_t pgd_pgtable_alloc(int shift)
{
phys_addr_t pa = __pgd_pgtable_alloc(shift);
+ struct ptdesc *ptdesc = page_ptdesc(phys_to_page(pa));
/*
* Call proper page table ctor in case later we need to
@@ -380,12 +431,12 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
* this pre-allocated page table.
*
* We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
- * folded, and if so pgtable_pmd_page_ctor() becomes nop.
+ * folded, and if so pagetable_pte_ctor() becomes nop.
*/
if (shift == PAGE_SHIFT)
- BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa)));
+ BUG_ON(!pagetable_pte_ctor(ptdesc));
else if (shift == PMD_SHIFT)
- BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));
+ BUG_ON(!pagetable_pmd_ctor(ptdesc));
return pa;
}
@@ -395,10 +446,10 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
* without allocating new levels of table. Note that this permits the
* creation of new section or page entries.
*/
-static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
- phys_addr_t size, pgprot_t prot)
+void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot)
{
- if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
+ if (virt < PAGE_OFFSET) {
pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
&phys, virt);
return;
@@ -425,7 +476,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot)
{
- if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
+ if (virt < PAGE_OFFSET) {
pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
&phys, virt);
return;
@@ -450,20 +501,88 @@ void __init mark_linear_text_alias_ro(void)
/*
* Remove the write permissions from the linear alias of .text/.rodata
*/
- update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
- (unsigned long)__init_begin - (unsigned long)_text,
+ update_mapping_prot(__pa_symbol(_stext), (unsigned long)lm_alias(_stext),
+ (unsigned long)__init_begin - (unsigned long)_stext,
PAGE_KERNEL_RO);
}
+#ifdef CONFIG_KFENCE
+
+bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
+
+/* early_param() will be parsed before map_mem() below. */
+static int __init parse_kfence_early_init(char *arg)
+{
+ int val;
+
+ if (get_option(&arg, &val))
+ kfence_early_init = !!val;
+ return 0;
+}
+early_param("kfence.sample_interval", parse_kfence_early_init);
+
+static phys_addr_t __init arm64_kfence_alloc_pool(void)
+{
+ phys_addr_t kfence_pool;
+
+ if (!kfence_early_init)
+ return 0;
+
+ kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+ if (!kfence_pool) {
+ pr_err("failed to allocate kfence pool\n");
+ kfence_early_init = false;
+ return 0;
+ }
+
+ /* Temporarily mark as NOMAP. */
+ memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
+
+ return kfence_pool;
+}
+
+static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
+{
+ if (!kfence_pool)
+ return;
+
+ /* KFENCE pool needs page-level mapping. */
+ __map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
+ pgprot_tagged(PAGE_KERNEL),
+ NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+ memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
+ __kfence_pool = phys_to_virt(kfence_pool);
+}
+#else /* CONFIG_KFENCE */
+
+static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
+static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }
+
+#endif /* CONFIG_KFENCE */
+
static void __init map_mem(pgd_t *pgdp)
{
- phys_addr_t kernel_start = __pa_symbol(_text);
+ static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
+ phys_addr_t kernel_start = __pa_symbol(_stext);
phys_addr_t kernel_end = __pa_symbol(__init_begin);
- struct memblock_region *reg;
- int flags = 0;
+ phys_addr_t start, end;
+ phys_addr_t early_kfence_pool;
+ int flags = NO_EXEC_MAPPINGS;
+ u64 i;
- if (rodata_full || debug_pagealloc_enabled())
- flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+ /*
+ * Setting hierarchical PXNTable attributes on table entries covering
+ * the linear region is only possible if it is guaranteed that no table
+ * entries at any level are being shared between the linear region and
+ * the vmalloc region. Check whether this is true for the PGD level, in
+ * which case it is guaranteed to be true for all other levels as well.
+ */
+ BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
+
+ early_kfence_pool = arm64_kfence_alloc_pool();
+
+ if (can_set_direct_map())
+ flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
/*
* Take care not to create a writable alias for the
@@ -472,27 +591,22 @@ static void __init map_mem(pgd_t *pgdp)
* the following for-loop
*/
memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
-#ifdef CONFIG_KEXEC_CORE
- if (crashk_res.end)
- memblock_mark_nomap(crashk_res.start,
- resource_size(&crashk_res));
-#endif
/* map all the memory banks */
- for_each_memblock(memory, reg) {
- phys_addr_t start = reg->base;
- phys_addr_t end = start + reg->size;
-
+ for_each_mem_range(i, &start, &end) {
if (start >= end)
break;
- if (memblock_is_nomap(reg))
- continue;
-
- __map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
+ /*
+ * The linear map must allow reading/writing allocation tags if
+ * MTE is present. Otherwise, it has the same attributes as
+ * PAGE_KERNEL.
+ */
+ __map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
+ flags);
}
/*
- * Map the linear alias of the [_text, __init_begin) interval
+ * Map the linear alias of the [_stext, __init_begin) interval
* as non-executable now, and remove the write permission in
* mark_linear_text_alias_ro() below (which will be called after
* alternative patching has completed). This makes the contents
@@ -504,21 +618,7 @@ static void __init map_mem(pgd_t *pgdp)
__map_memblock(pgdp, kernel_start, kernel_end,
PAGE_KERNEL, NO_CONT_MAPPINGS);
memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
-
-#ifdef CONFIG_KEXEC_CORE
- /*
- * Use page-level mappings here so that we can shrink the region
- * in page granularity and put back unused memory to buddy system
- * through /sys/kernel/kexec_crash_size interface.
- */
- if (crashk_res.end) {
- __map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
- PAGE_KERNEL,
- NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
- memblock_clear_nomap(crashk_res.start,
- resource_size(&crashk_res));
- }
-#endif
+ arm64_kfence_map_pool(early_kfence_pool, pgdp);
}
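
The reworked map_mem() keeps the nomap bracketing trick: hide the kernel image (and, when enabled, the KFENCE pool), map every remaining bank, then map the hidden ranges with their special attributes and unhide them. A toy user-space model of that sequence (addresses and permission labels are made up):

    #include <stdbool.h>
    #include <stdio.h>

    struct region { unsigned long long base, size; bool nomap; };

    static struct region mem[] = {
        { 0x40000000ULL, 0x20000000ULL, false },
        { 0x80000000ULL, 0x00200000ULL, false },  /* holds the "kernel" */
    };

    #define NR_REGIONS (sizeof(mem) / sizeof(mem[0]))

    static void mark_nomap(unsigned long long base, bool nomap)
    {
        for (unsigned int i = 0; i < NR_REGIONS; i++)
            if (mem[i].base == base)
                mem[i].nomap = nomap;
    }

    static void map_range(unsigned long long base, unsigned long long size,
                          const char *prot)
    {
        printf("map [%#llx-%#llx) %s\n", base, base + size, prot);
    }

    int main(void)
    {
        /* hide the kernel image so the loop below skips it */
        mark_nomap(0x80000000ULL, true);

        for (unsigned int i = 0; i < NR_REGIONS; i++)
            if (!mem[i].nomap)
                map_range(mem[i].base, mem[i].size, "RW (tagged)");

        /* map the kernel's linear alias with restricted permissions */
        map_range(0x80000000ULL, 0x00200000ULL, "RO alias");
        mark_nomap(0x80000000ULL, false);
        return 0;
    }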
void mark_rodata_ro(void)
@@ -561,28 +661,20 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
vm_area_add_early(vma);
}
-static int __init parse_rodata(char *arg)
+static pgprot_t kernel_exec_prot(void)
{
- int ret = strtobool(arg, &rodata_enabled);
- if (!ret) {
- rodata_full = false;
- return 0;
- }
-
- /* permit 'full' in addition to boolean options */
- if (strcmp(arg, "full"))
- return -EINVAL;
-
- rodata_enabled = true;
- rodata_full = true;
- return 0;
+ return rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
}
-early_param("rodata", parse_rodata);
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
- pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
+ int i;
+
+ if (!arm64_kernel_unmapped_at_el0())
+ return 0;
+
+ pgprot_t prot = kernel_exec_prot();
phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
/* The trampoline is always mapped and can therefore be global */
@@ -590,18 +682,18 @@ static int __init map_entry_trampoline(void)
/* Map only the text into the trampoline page table */
memset(tramp_pg_dir, 0, PGD_SIZE);
- __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
- prot, __pgd_pgtable_alloc, 0);
+ __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
+ entry_tramp_text_size(), prot,
+ __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);
/* Map both the text and data into the kernel page table */
- __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- extern char __entry_tramp_data_start[];
+ for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
+ __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
+ pa_start + i * PAGE_SIZE, prot);
- __set_fixmap(FIX_ENTRY_TRAMP_DATA,
- __pa_symbol(__entry_tramp_data_start),
- PAGE_KERNEL_RO);
- }
+ if (IS_ENABLED(CONFIG_RELOCATABLE))
+ __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
+ pa_start + i * PAGE_SIZE, PAGE_KERNEL_RO);
return 0;
}
@@ -609,6 +701,22 @@ core_initcall(map_entry_trampoline);
#endif
/*
+ * Open coded check for BTI, only for use to determine configuration
+ * for early mappings, before the cpufeature code has run.
+ */
+static bool arm64_early_this_cpu_has_bti(void)
+{
+ u64 pfr1;
+
+ if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
+ return false;
+
+ pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
+ return cpuid_feature_extract_unsigned_field(pfr1,
+ ID_AA64PFR1_EL1_BT_SHIFT);
+}
+
+/*
* Create fine-grained mappings for the kernel.
*/
static void __init map_kernel(pgd_t *pgdp)
@@ -621,13 +729,21 @@ static void __init map_kernel(pgd_t *pgdp)
* mapping to install SW breakpoints. Allow this (only) when
* explicitly requested with rodata=off.
*/
- pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
+ pgprot_t text_prot = kernel_exec_prot();
+
+ /*
+ * If we have a CPU that supports BTI and a kernel built for
+ * BTI then mark the kernel executable text as guarded pages
+ * now so we don't have to rewrite the page tables later.
+ */
+ if (arm64_early_this_cpu_has_bti())
+ text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);
/*
* Only rodata will be remapped with different permissions later on,
* all other segments are allowed to use contiguous mappings.
*/
- map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
+ map_kernel_segment(pgdp, _stext, _etext, text_prot, &vmlinux_text, 0,
VM_NO_GUARD);
map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
&vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
@@ -637,320 +753,420 @@ static void __init map_kernel(pgd_t *pgdp)
&vmlinux_initdata, 0, VM_NO_GUARD);
map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
- if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) {
- /*
- * The fixmap falls in a separate pgd to the kernel, and doesn't
- * live in the carveout for the swapper_pg_dir. We can simply
- * re-use the existing dir for the fixmap.
- */
- set_pgd(pgd_offset_raw(pgdp, FIXADDR_START),
- READ_ONCE(*pgd_offset_k(FIXADDR_START)));
- } else if (CONFIG_PGTABLE_LEVELS > 3) {
- pgd_t *bm_pgdp;
- pud_t *bm_pudp;
+ fixmap_copy(pgdp);
+ kasan_copy_shadow(pgdp);
+}
+
+static void __init create_idmap(void)
+{
+ u64 start = __pa_symbol(__idmap_text_start);
+ u64 size = __pa_symbol(__idmap_text_end) - start;
+ pgd_t *pgd = idmap_pg_dir;
+ u64 pgd_phys;
+
+ /* check if we need an additional level of translation */
+ if (VA_BITS < 48 && idmap_t0sz < (64 - VA_BITS_MIN)) {
+ pgd_phys = early_pgtable_alloc(PAGE_SHIFT);
+ set_pgd(&idmap_pg_dir[start >> VA_BITS],
+ __pgd(pgd_phys | P4D_TYPE_TABLE));
+ pgd = __va(pgd_phys);
+ }
+ __create_pgd_mapping(pgd, start, start, size, PAGE_KERNEL_ROX,
+ early_pgtable_alloc, 0);
+
+ if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
+ extern u32 __idmap_kpti_flag;
+ u64 pa = __pa_symbol(&__idmap_kpti_flag);
+
/*
- * The fixmap shares its top level pgd entry with the kernel
- * mapping. This can really only occur when we are running
- * with 16k/4 levels, so we can simply reuse the pud level
- * entry instead.
+ * The KPTI G-to-nG conversion code needs a read-write mapping
+ * of its synchronization flag in the ID map.
*/
- BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- bm_pgdp = pgd_offset_raw(pgdp, FIXADDR_START);
- bm_pudp = pud_set_fixmap_offset(bm_pgdp, FIXADDR_START);
- pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
- pud_clear_fixmap();
- } else {
- BUG();
+ __create_pgd_mapping(pgd, pa, pa, sizeof(u32), PAGE_KERNEL,
+ early_pgtable_alloc, 0);
}
-
- kasan_copy_shadow(pgdp);
}
void __init paging_init(void)
{
pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));
+ extern pgd_t init_idmap_pg_dir[];
+
+ idmap_t0sz = 63UL - __fls(__pa_symbol(_end) | GENMASK(VA_BITS_MIN - 1, 0));
map_kernel(pgdp);
map_mem(pgdp);
pgd_clear_fixmap();
- cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir), init_idmap_pg_dir);
init_mm.pgd = swapper_pg_dir;
- memblock_free(__pa_symbol(init_pg_dir),
- __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
+ memblock_phys_free(__pa_symbol(init_pg_dir),
+ __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
memblock_allow_resize();
+
+ create_idmap();
}
-/*
- * Check whether a kernel address is valid (derived from arch/x86/).
- */
-int kern_addr_valid(unsigned long addr)
+#ifdef CONFIG_MEMORY_HOTPLUG
+static void free_hotplug_page_range(struct page *page, size_t size,
+ struct vmem_altmap *altmap)
{
- pgd_t *pgdp;
- pud_t *pudp, pud;
- pmd_t *pmdp, pmd;
- pte_t *ptep, pte;
-
- if ((((long)addr) >> VA_BITS) != -1UL)
- return 0;
+ if (altmap) {
+ vmem_altmap_free(altmap, size >> PAGE_SHIFT);
+ } else {
+ WARN_ON(PageReserved(page));
+ free_pages((unsigned long)page_address(page), get_order(size));
+ }
+}
- pgdp = pgd_offset_k(addr);
- if (pgd_none(READ_ONCE(*pgdp)))
- return 0;
+static void free_hotplug_pgtable_page(struct page *page)
+{
+ free_hotplug_page_range(page, PAGE_SIZE, NULL);
+}
- pudp = pud_offset(pgdp, addr);
- pud = READ_ONCE(*pudp);
- if (pud_none(pud))
- return 0;
+static bool pgtable_range_aligned(unsigned long start, unsigned long end,
+ unsigned long floor, unsigned long ceiling,
+ unsigned long mask)
+{
+ start &= mask;
+ if (start < floor)
+ return false;
- if (pud_sect(pud))
- return pfn_valid(pud_pfn(pud));
+ if (ceiling) {
+ ceiling &= mask;
+ if (!ceiling)
+ return false;
+ }
- pmdp = pmd_offset(pudp, addr);
- pmd = READ_ONCE(*pmdp);
- if (pmd_none(pmd))
- return 0;
+ if (end - 1 > ceiling - 1)
+ return false;
+ return true;
+}
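
pgtable_range_aligned() is a pure predicate, so it can be exercised directly; a user-space copy with a couple of sanity checks (the mask value is an arbitrary stand-in for PMD_MASK and friends):

    #include <assert.h>
    #include <stdbool.h>

    static bool range_aligned(unsigned long start, unsigned long end,
                              unsigned long floor, unsigned long ceiling,
                              unsigned long mask)
    {
        start &= mask;
        if (start < floor)
            return false;
        if (ceiling) {
            ceiling &= mask;
            if (!ceiling)
                return false;
        }
        if (end - 1 > ceiling - 1)
            return false;
        return true;
    }

    int main(void)
    {
        unsigned long mask = ~0xfffffUL;    /* say a table covers 1MiB */

        /* span covers a whole aligned block inside [floor, ceiling) */
        assert(range_aligned(0x100000, 0x200000, 0x100000, 0x200000, mask));
        /* aligned-down start would fall below the floor: not free-able */
        assert(!range_aligned(0x180000, 0x200000, 0x170000, 0x200000, mask));
        return 0;
    }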
- if (pmd_sect(pmd))
- return pfn_valid(pmd_pfn(pmd));
+static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
+ unsigned long end, bool free_mapped,
+ struct vmem_altmap *altmap)
+{
+ pte_t *ptep, pte;
- ptep = pte_offset_kernel(pmdp, addr);
- pte = READ_ONCE(*ptep);
- if (pte_none(pte))
- return 0;
+ do {
+ ptep = pte_offset_kernel(pmdp, addr);
+ pte = READ_ONCE(*ptep);
+ if (pte_none(pte))
+ continue;
- return pfn_valid(pte_pfn(pte));
-}
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-#if !ARM64_SWAPPER_USES_SECTION_MAPS
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
- struct vmem_altmap *altmap)
-{
- return vmemmap_populate_basepages(start, end, node);
+ WARN_ON(!pte_present(pte));
+ pte_clear(&init_mm, addr, ptep);
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+ if (free_mapped)
+ free_hotplug_page_range(pte_page(pte),
+ PAGE_SIZE, altmap);
+ } while (addr += PAGE_SIZE, addr < end);
}
-#else /* !ARM64_SWAPPER_USES_SECTION_MAPS */
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
- struct vmem_altmap *altmap)
+
+static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
+ unsigned long end, bool free_mapped,
+ struct vmem_altmap *altmap)
{
- unsigned long addr = start;
unsigned long next;
- pgd_t *pgdp;
- pud_t *pudp;
- pmd_t *pmdp;
+ pmd_t *pmdp, pmd;
do {
next = pmd_addr_end(addr, end);
+ pmdp = pmd_offset(pudp, addr);
+ pmd = READ_ONCE(*pmdp);
+ if (pmd_none(pmd))
+ continue;
- pgdp = vmemmap_pgd_populate(addr, node);
- if (!pgdp)
- return -ENOMEM;
+ WARN_ON(!pmd_present(pmd));
+ if (pmd_sect(pmd)) {
+ pmd_clear(pmdp);
- pudp = vmemmap_pud_populate(pgdp, addr, node);
- if (!pudp)
- return -ENOMEM;
+ /*
+ * One TLBI should be sufficient here as the PMD_SIZE
+ * range is mapped with a single block entry.
+ */
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+ if (free_mapped)
+ free_hotplug_page_range(pmd_page(pmd),
+ PMD_SIZE, altmap);
+ continue;
+ }
+ WARN_ON(!pmd_table(pmd));
+ unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap);
+ } while (addr = next, addr < end);
+}
- pmdp = pmd_offset(pudp, addr);
- if (pmd_none(READ_ONCE(*pmdp))) {
- void *p = NULL;
+static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
+ unsigned long end, bool free_mapped,
+ struct vmem_altmap *altmap)
+{
+ unsigned long next;
+ pud_t *pudp, pud;
- p = vmemmap_alloc_block_buf(PMD_SIZE, node);
- if (!p)
- return -ENOMEM;
+ do {
+ next = pud_addr_end(addr, end);
+ pudp = pud_offset(p4dp, addr);
+ pud = READ_ONCE(*pudp);
+ if (pud_none(pud))
+ continue;
- pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
- } else
- vmemmap_verify((pte_t *)pmdp, node, addr, next);
- } while (addr = next, addr != end);
+ WARN_ON(!pud_present(pud));
+ if (pud_sect(pud)) {
+ pud_clear(pudp);
- return 0;
-}
-#endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */
-void vmemmap_free(unsigned long start, unsigned long end,
- struct vmem_altmap *altmap)
-{
+ /*
+ * One TLBI should be sufficient here as the PUD_SIZE
+ * range is mapped with a single block entry.
+ */
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+ if (free_mapped)
+ free_hotplug_page_range(pud_page(pud),
+ PUD_SIZE, altmap);
+ continue;
+ }
+ WARN_ON(!pud_table(pud));
+ unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap);
+ } while (addr = next, addr < end);
}
-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
-static inline pud_t * fixmap_pud(unsigned long addr)
+static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
+ unsigned long end, bool free_mapped,
+ struct vmem_altmap *altmap)
{
- pgd_t *pgdp = pgd_offset_k(addr);
- pgd_t pgd = READ_ONCE(*pgdp);
+ unsigned long next;
+ p4d_t *p4dp, p4d;
- BUG_ON(pgd_none(pgd) || pgd_bad(pgd));
+ do {
+ next = p4d_addr_end(addr, end);
+ p4dp = p4d_offset(pgdp, addr);
+ p4d = READ_ONCE(*p4dp);
+ if (p4d_none(p4d))
+ continue;
- return pud_offset_kimg(pgdp, addr);
+ WARN_ON(!p4d_present(p4d));
+ unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap);
+ } while (addr = next, addr < end);
}
-static inline pmd_t * fixmap_pmd(unsigned long addr)
+static void unmap_hotplug_range(unsigned long addr, unsigned long end,
+ bool free_mapped, struct vmem_altmap *altmap)
{
- pud_t *pudp = fixmap_pud(addr);
- pud_t pud = READ_ONCE(*pudp);
+ unsigned long next;
+ pgd_t *pgdp, pgd;
- BUG_ON(pud_none(pud) || pud_bad(pud));
+ /*
+ * altmap can only be used as backing memory for the vmemmap
+ * mapping. If the backing memory itself is not being freed,
+ * altmap is irrelevant. Warn about this inconsistency when
+ * encountered.
+ */
+ WARN_ON(!free_mapped && altmap);
- return pmd_offset_kimg(pudp, addr);
-}
+ do {
+ next = pgd_addr_end(addr, end);
+ pgdp = pgd_offset_k(addr);
+ pgd = READ_ONCE(*pgdp);
+ if (pgd_none(pgd))
+ continue;
-static inline pte_t * fixmap_pte(unsigned long addr)
-{
- return &bm_pte[pte_index(addr)];
+ WARN_ON(!pgd_present(pgd));
+ unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
+ } while (addr = next, addr < end);
}
-/*
- * The p*d_populate functions call virt_to_phys implicitly so they can't be used
- * directly on kernel symbols (bm_p*d). This function is called too early to use
- * lm_alias so __p*d_populate functions must be used to populate with the
- * physical address from __pa_symbol.
- */
-void __init early_fixmap_init(void)
+static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
+ unsigned long end, unsigned long floor,
+ unsigned long ceiling)
{
- pgd_t *pgdp, pgd;
- pud_t *pudp;
- pmd_t *pmdp;
- unsigned long addr = FIXADDR_START;
+ pte_t *ptep, pte;
+ unsigned long i, start = addr;
+
+ do {
+ ptep = pte_offset_kernel(pmdp, addr);
+ pte = READ_ONCE(*ptep);
- pgdp = pgd_offset_k(addr);
- pgd = READ_ONCE(*pgdp);
- if (CONFIG_PGTABLE_LEVELS > 3 &&
- !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
/*
- * We only end up here if the kernel mapping and the fixmap
- * share the top level pgd entry, which should only happen on
- * 16k/4 levels configurations.
+ * This is just a sanity check which verifies that the
+ * pte entries have been cleared by the earlier unmap loops.
*/
- BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- pudp = pud_offset_kimg(pgdp, addr);
- } else {
- if (pgd_none(pgd))
- __pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
- pudp = fixmap_pud(addr);
- }
- if (pud_none(READ_ONCE(*pudp)))
- __pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
- pmdp = fixmap_pmd(addr);
- __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
+ WARN_ON(!pte_none(pte));
+ } while (addr += PAGE_SIZE, addr < end);
+
+ if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
+ return;
/*
- * The boot-ioremap range spans multiple pmds, for which
- * we are not prepared:
+ * Check whether we can free the pte page if the rest of the
+ * entries are empty. Overlap with other regions has been
+ * handled by the floor/ceiling check.
*/
- BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
- != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
-
- if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
- || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
- WARN_ON(1);
- pr_warn("pmdp %p != %p, %p\n",
- pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
- fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
- pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
- fix_to_virt(FIX_BTMAP_BEGIN));
- pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
- fix_to_virt(FIX_BTMAP_END));
-
- pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
- pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
+ ptep = pte_offset_kernel(pmdp, 0UL);
+ for (i = 0; i < PTRS_PER_PTE; i++) {
+ if (!pte_none(READ_ONCE(ptep[i])))
+ return;
}
+
+ pmd_clear(pmdp);
+ __flush_tlb_kernel_pgtable(start);
+ free_hotplug_pgtable_page(virt_to_page(ptep));
}
-/*
- * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
- * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
- */
-void __set_fixmap(enum fixed_addresses idx,
- phys_addr_t phys, pgprot_t flags)
+static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
+ unsigned long end, unsigned long floor,
+ unsigned long ceiling)
{
- unsigned long addr = __fix_to_virt(idx);
- pte_t *ptep;
+ pmd_t *pmdp, pmd;
+ unsigned long i, next, start = addr;
- BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
+ do {
+ next = pmd_addr_end(addr, end);
+ pmdp = pmd_offset(pudp, addr);
+ pmd = READ_ONCE(*pmdp);
+ if (pmd_none(pmd))
+ continue;
- ptep = fixmap_pte(addr);
+ WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
+ free_empty_pte_table(pmdp, addr, next, floor, ceiling);
+ } while (addr = next, addr < end);
- if (pgprot_val(flags)) {
- set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
- } else {
- pte_clear(&init_mm, addr, ptep);
- flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
+ if (CONFIG_PGTABLE_LEVELS <= 2)
+ return;
+
+ if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
+ return;
+
+ /*
+ * Check whether we can free the pmd page if the rest of the
+ * entries are empty. Overlap with other regions has been
+ * handled by the floor/ceiling check.
+ */
+ pmdp = pmd_offset(pudp, 0UL);
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ if (!pmd_none(READ_ONCE(pmdp[i])))
+ return;
}
+
+ pud_clear(pudp);
+ __flush_tlb_kernel_pgtable(start);
+ free_hotplug_pgtable_page(virt_to_page(pmdp));
}
-void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
+static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
+ unsigned long end, unsigned long floor,
+ unsigned long ceiling)
{
- const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
- int offset;
- void *dt_virt;
+ pud_t *pudp, pud;
+ unsigned long i, next, start = addr;
- /*
- * Check whether the physical FDT address is set and meets the minimum
- * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
- * at least 8 bytes so that we can always access the magic and size
- * fields of the FDT header after mapping the first chunk, double check
- * here if that is indeed the case.
- */
- BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
- if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
- return NULL;
+ do {
+ next = pud_addr_end(addr, end);
+ pudp = pud_offset(p4dp, addr);
+ pud = READ_ONCE(*pudp);
+ if (pud_none(pud))
+ continue;
+
+ WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud));
+ free_empty_pmd_table(pudp, addr, next, floor, ceiling);
+ } while (addr = next, addr < end);
+
+ if (CONFIG_PGTABLE_LEVELS <= 3)
+ return;
+
+ if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
+ return;
/*
- * Make sure that the FDT region can be mapped without the need to
- * allocate additional translation table pages, so that it is safe
- * to call create_mapping_noalloc() this early.
- *
- * On 64k pages, the FDT will be mapped using PTEs, so we need to
- * be in the same PMD as the rest of the fixmap.
- * On 4k pages, we'll use section mappings for the FDT so we only
- * have to be in the same PUD.
+ * Check whether we can free the pud page if the rest of the
+ * entries are empty. Overlap with other regions has been
+ * handled by the floor/ceiling check.
*/
- BUILD_BUG_ON(dt_virt_base % SZ_2M);
+ pudp = pud_offset(p4dp, 0UL);
+ for (i = 0; i < PTRS_PER_PUD; i++) {
+ if (!pud_none(READ_ONCE(pudp[i])))
+ return;
+ }
- BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
- __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);
+ p4d_clear(p4dp);
+ __flush_tlb_kernel_pgtable(start);
+ free_hotplug_pgtable_page(virt_to_page(pudp));
+}
- offset = dt_phys % SWAPPER_BLOCK_SIZE;
- dt_virt = (void *)dt_virt_base + offset;
+static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
+ unsigned long end, unsigned long floor,
+ unsigned long ceiling)
+{
+ unsigned long next;
+ p4d_t *p4dp, p4d;
- /* map the first chunk so we can read the size from the header */
- create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
- dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
+ do {
+ next = p4d_addr_end(addr, end);
+ p4dp = p4d_offset(pgdp, addr);
+ p4d = READ_ONCE(*p4dp);
+ if (p4d_none(p4d))
+ continue;
- if (fdt_magic(dt_virt) != FDT_MAGIC)
- return NULL;
+ WARN_ON(!p4d_present(p4d));
+ free_empty_pud_table(p4dp, addr, next, floor, ceiling);
+ } while (addr = next, addr < end);
+}
- *size = fdt_totalsize(dt_virt);
- if (*size > MAX_FDT_SIZE)
- return NULL;
+static void free_empty_tables(unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
+{
+ unsigned long next;
+ pgd_t *pgdp, pgd;
- if (offset + *size > SWAPPER_BLOCK_SIZE)
- create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
- round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);
+ do {
+ next = pgd_addr_end(addr, end);
+ pgdp = pgd_offset_k(addr);
+ pgd = READ_ONCE(*pgdp);
+ if (pgd_none(pgd))
+ continue;
- return dt_virt;
+ WARN_ON(!pgd_present(pgd));
+ free_empty_p4d_table(pgdp, addr, next, floor, ceiling);
+ } while (addr = next, addr < end);
}
+#endif
-int __init arch_ioremap_p4d_supported(void)
+void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node,
+ unsigned long addr, unsigned long next)
{
- return 0;
+ pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
}
-int __init arch_ioremap_pud_supported(void)
+int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
+ unsigned long addr, unsigned long next)
{
- /*
- * Only 4k granule supports level 1 block mappings.
- * SW table walks can't handle removal of intermediate entries.
- */
- return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
- !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
+ vmemmap_verify((pte_t *)pmdp, node, addr, next);
+ return 1;
}
-int __init arch_ioremap_pmd_supported(void)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+ struct vmem_altmap *altmap)
+{
+ WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
+
+ if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES))
+ return vmemmap_populate_basepages(start, end, node, altmap);
+ else
+ return vmemmap_populate_hugepages(start, end, node, altmap);
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+void vmemmap_free(unsigned long start, unsigned long end,
+ struct vmem_altmap *altmap)
{
- /* See arch_ioremap_pud_supported() */
- return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
+ WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
+
+ unmap_hotplug_range(start, end, true, altmap);
+ free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
}
+#endif /* CONFIG_MEMORY_HOTPLUG */
int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
@@ -1043,43 +1259,230 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
return 1;
}
-int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
+#ifdef CONFIG_MEMORY_HOTPLUG
+static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
{
- return 0; /* Don't attempt a block mapping */
+ unsigned long end = start + size;
+
+ WARN_ON(pgdir != init_mm.pgd);
+ WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));
+
+ unmap_hotplug_range(start, end, false, NULL);
+ free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
+}
+
+struct range arch_get_mappable_range(void)
+{
+ struct range mhp_range;
+ u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
+ u64 end_linear_pa = __pa(PAGE_END - 1);
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ /*
+ * Check for a wrap: with a randomized linear mapping, the
+ * start physical address can actually be bigger than the
+ * end physical address. In this case set start to zero,
+ * because the [0, end_linear_pa] range must still be able
+ * to cover all addressable physical addresses.
+ */
+ if (start_linear_pa > end_linear_pa)
+ start_linear_pa = 0;
+ }
+
+ WARN_ON(start_linear_pa > end_linear_pa);
+
+ /*
+ * The linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)],
+ * including both its end points but excluding PAGE_END. The maximum
+ * physical range which can be mapped inside this linear mapping
+ * range must also be derived from its end points.
+ */
+ mhp_range.start = start_linear_pa;
+ mhp_range.end = end_linear_pa;
+
+ return mhp_range;
}
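
The wrap handling above is plain unsigned arithmetic; a sketch with made-up physical addresses showing the fallback to a [0, end] range:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* hypothetical physical addresses of the linear map's ends */
        uint64_t start_linear_pa = 0x900000000ULL; /* randomized start */
        uint64_t end_linear_pa   = 0x100000000ULL; /* wrapped below it */

        if (start_linear_pa > end_linear_pa)
            start_linear_pa = 0;    /* fall back to [0, end] */

        printf("hotplug range: [%#llx, %#llx]\n",
               (unsigned long long)start_linear_pa,
               (unsigned long long)end_linear_pa);
        return 0;
    }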
-#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
- struct mhp_restrictions *restrictions)
+ struct mhp_params *params)
{
- int flags = 0;
+ int ret, flags = NO_EXEC_MAPPINGS;
- if (rodata_full || debug_pagealloc_enabled())
- flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+ VM_BUG_ON(!mhp_range_allowed(start, size, true));
+
+ if (can_set_direct_map())
+ flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
- size, PAGE_KERNEL, __pgd_pgtable_alloc, flags);
+ size, params->pgprot, __pgd_pgtable_alloc,
+ flags);
memblock_clear_nomap(start, size);
- return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
- restrictions);
+ ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
+ params);
+ if (ret)
+ __remove_pgd_mapping(swapper_pg_dir,
+ __phys_to_virt(start), size);
+ else {
+ max_pfn = PFN_UP(start + size);
+ max_low_pfn = max_pfn;
+ }
+
+ return ret;
}
-void arch_remove_memory(int nid, u64 start, u64 size,
- struct vmem_altmap *altmap)
+
+void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- struct zone *zone;
+
+ __remove_pages(start_pfn, nr_pages, altmap);
+ __remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
+}
+
+/*
+ * This memory hotplug notifier helps prevent boot memory from being
+ * inadvertently removed, as it blocks the pfn range offlining process
+ * in __offline_pages(). Hence this prevents both the offlining and the
+ * removal of boot memory, which is initially always online. In the
+ * future, if and when boot memory can be removed, this notifier should
+ * be dropped and free_hotplug_page_range() should handle any reserved
+ * pages allocated during boot.
+ */
+static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct mem_section *ms;
+ struct memory_notify *arg = data;
+ unsigned long end_pfn = arg->start_pfn + arg->nr_pages;
+ unsigned long pfn = arg->start_pfn;
+
+ if ((action != MEM_GOING_OFFLINE) && (action != MEM_OFFLINE))
+ return NOTIFY_OK;
+
+ for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ unsigned long start = PFN_PHYS(pfn);
+ unsigned long end = start + (1UL << PA_SECTION_SHIFT);
+
+ ms = __pfn_to_section(pfn);
+ if (!early_section(ms))
+ continue;
+
+ if (action == MEM_GOING_OFFLINE) {
+ /*
+ * Boot memory removal is not supported. Prevent
+ * it by blocking any attempted offline request
+ * for the boot memory and just report it.
+ */
+ pr_warn("Boot memory [%lx %lx] offlining attempted\n", start, end);
+ return NOTIFY_BAD;
+ } else if (action == MEM_OFFLINE) {
+ /*
+ * This should never have happened. Boot memory
+ * offlining should have been prevented by this
+ * very notifier. Some memory removal procedure
+ * has probably changed, which would then require
+ * further debugging.
+ */
+ pr_err("Boot memory [%lx %lx] offlined\n", start, end);
+
+ /*
+ * Core memory hotplug does not process a return
+ * code from the notifier for MEM_OFFLINE events.
+ * The error condition has been reported. Return
+ * from here as if ignored.
+ */
+ return NOTIFY_DONE;
+ }
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block prevent_bootmem_remove_nb = {
+ .notifier_call = prevent_bootmem_remove_notifier,
+};
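
For reference, the registration half lives in prevent_bootmem_remove_init() further below; a minimal module-style sketch of the same notifier pattern might look as follows (the veto condition is a placeholder, not anything the kernel does):

    #include <linux/memory.h>
    #include <linux/module.h>
    #include <linux/notifier.h>

    static int my_mem_notifier(struct notifier_block *nb,
                               unsigned long action, void *data)
    {
        struct memory_notify *arg = data;

        /* veto offlining of a pfn range we care about (placeholder) */
        if (action == MEM_GOING_OFFLINE && arg->start_pfn == 0)
            return NOTIFY_BAD;
        return NOTIFY_OK;
    }

    static struct notifier_block my_mem_nb = {
        .notifier_call = my_mem_notifier,
    };

    static int __init my_init(void)
    {
        return register_memory_notifier(&my_mem_nb);
    }

    static void __exit my_exit(void)
    {
        unregister_memory_notifier(&my_mem_nb);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");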
+
+/*
+ * This ensures that boot memory sections on the platform are online
+ * from early boot. Memory sections cannot be prevented from being
+ * offlined if, for some reason, they are not online to begin with.
+ * This helps validate the basic assumption on which the above memory
+ * event notifier relies to prevent boot memory section offlining and
+ * its possible removal.
+ */
+static void validate_bootmem_online(void)
+{
+ phys_addr_t start, end, addr;
+ struct mem_section *ms;
+ u64 i;
/*
- * FIXME: Cleanup page tables (also in arch_add_memory() in case
- * adding fails). Until then, this function should only be used
- * during memory hotplug (adding memory), not for memory
- * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be
- * unlocked yet.
+ * Scanning across all memblock regions might be
+ * expensive on systems with large memory. Hence
+ * enable this validation only with DEBUG_VM.
*/
- zone = page_zone(pfn_to_page(start_pfn));
- __remove_pages(zone, start_pfn, nr_pages, altmap);
+ if (!IS_ENABLED(CONFIG_DEBUG_VM))
+ return;
+
+ for_each_mem_range(i, &start, &end) {
+ for (addr = start; addr < end; addr += (1UL << PA_SECTION_SHIFT)) {
+ ms = __pfn_to_section(PHYS_PFN(addr));
+
+ /*
+ * All memory ranges in the system at this point
+ * should have been marked as early sections.
+ */
+ WARN_ON(!early_section(ms));
+
+ /*
+ * The memory notifier mechanism here, which prevents
+ * boot memory offlining, depends on each early memory
+ * section on the system being initially online.
+ * Otherwise a memory section which is already offline
+ * will be overlooked and can be removed completely.
+ * Call out such sections.
+ */
+ if (!online_section(ms))
+ pr_err("Boot memory [%llx %llx] is offline, can be removed\n",
+ addr, addr + (1UL << PA_SECTION_SHIFT));
+ }
+ }
}
+
+static int __init prevent_bootmem_remove_init(void)
+{
+ int ret = 0;
+
+ if (!IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
+ return ret;
+
+ validate_bootmem_online();
+ ret = register_memory_notifier(&prevent_bootmem_remove_nb);
+ if (ret)
+ pr_err("%s: Notifier registration failed %d\n", __func__, ret);
+
+ return ret;
+}
+early_initcall(prevent_bootmem_remove_init);
#endif
+
+pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+ if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
+ /*
+ * Break-before-make (BBM) is required for all user space mappings
+ * when the permission changes from executable to non-executable
+ * in cases where the CPU is affected by erratum #2645198.
+ */
+ if (pte_user_exec(READ_ONCE(*ptep)))
+ return ptep_clear_flush(vma, addr, ptep);
+ }
+ return ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+ pte_t old_pte, pte_t pte)
+{
+ set_pte_at(vma->vm_mm, addr, ptep, pte);
+}
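
A sketch of how a caller, shaped like the generic mprotect() path, would bracket a pte update with this start/commit pair (simplified; the real callers live in core mm code):

    #include <linux/mm.h>
    #include <linux/pgtable.h>

    /*
     * The start/commit pair brackets the pte modification so that an
     * affected CPU can insert the break-before-make sequence transparently.
     */
    static void change_one_pte(struct vm_area_struct *vma, unsigned long addr,
                               pte_t *ptep, pgprot_t newprot)
    {
        pte_t old_pte, pte;

        old_pte = ptep_modify_prot_start(vma, addr, ptep);
        pte = pte_modify(old_pte, newprot);
        ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
    }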
diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
new file mode 100644
index 000000000000..a31833e3ddc5
--- /dev/null
+++ b/arch/arm64/mm/mteswap.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/pagemap.h>
+#include <linux/xarray.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <asm/mte.h>
+
+static DEFINE_XARRAY(mte_pages);
+
+void *mte_allocate_tag_storage(void)
+{
+	/* tag granule is 16 bytes, 2 tags stored per byte */
+ return kmalloc(MTE_PAGE_TAG_STORAGE, GFP_KERNEL);
+}
+
+void mte_free_tag_storage(char *storage)
+{
+ kfree(storage);
+}
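
The sizing comment above is easy to verify: with 4K pages and one tag per 16-byte granule, two tags per byte gives 128 bytes of storage per page. Spelled out as a tiny user-space check (the 4K page size is an assumption):

    #include <stdio.h>

    int main(void)
    {
        unsigned page_size = 4096;           /* assuming 4K pages */
        unsigned granule = 16;               /* one tag per 16-byte granule */
        unsigned tags = page_size / granule; /* 256 tags per page */
        unsigned bytes = tags / 2;           /* 2 tags stored per byte */

        printf("tag storage per page: %u bytes\n", bytes);  /* 128 */
        return 0;
    }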
+
+int mte_save_tags(struct page *page)
+{
+ void *tag_storage, *ret;
+
+ if (!page_mte_tagged(page))
+ return 0;
+
+ tag_storage = mte_allocate_tag_storage();
+ if (!tag_storage)
+ return -ENOMEM;
+
+ mte_save_page_tags(page_address(page), tag_storage);
+
+ /* lookup the swap entry.val from the page */
+ ret = xa_store(&mte_pages, page_swap_entry(page).val, tag_storage,
+ GFP_KERNEL);
+ if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
+ mte_free_tag_storage(tag_storage);
+ return xa_err(ret);
+ } else if (ret) {
+ /* Entry is being replaced, free the old entry */
+ mte_free_tag_storage(ret);
+ }
+
+ return 0;
+}
+
+void mte_restore_tags(swp_entry_t entry, struct page *page)
+{
+ void *tags = xa_load(&mte_pages, entry.val);
+
+ if (!tags)
+ return;
+
+ if (try_page_mte_tagging(page)) {
+ mte_restore_page_tags(page_address(page), tags);
+ set_page_mte_tagged(page);
+ }
+}
+
+void mte_invalidate_tags(int type, pgoff_t offset)
+{
+ swp_entry_t entry = swp_entry(type, offset);
+ void *tags = xa_erase(&mte_pages, entry.val);
+
+ mte_free_tag_storage(tags);
+}
+
+void mte_invalidate_tags_area(int type)
+{
+ swp_entry_t entry = swp_entry(type, 0);
+ swp_entry_t last_entry = swp_entry(type + 1, 0);
+ void *tags;
+
+ XA_STATE(xa_state, &mte_pages, entry.val);
+
+ xa_lock(&mte_pages);
+ xas_for_each(&xa_state, tags, last_entry.val - 1) {
+ __xa_erase(&mte_pages, xa_state.xa_index);
+ mte_free_tag_storage(tags);
+ }
+ xa_unlock(&mte_pages);
+}
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
deleted file mode 100644
index 4decf1659700..000000000000
--- a/arch/arm64/mm/numa.c
+++ /dev/null
@@ -1,470 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * NUMA support, based on the x86 implementation.
- *
- * Copyright (C) 2015 Cavium Inc.
- * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com>
- */
-
-#define pr_fmt(fmt) "NUMA: " fmt
-
-#include <linux/acpi.h>
-#include <linux/memblock.h>
-#include <linux/module.h>
-#include <linux/of.h>
-
-#include <asm/acpi.h>
-#include <asm/sections.h>
-
-struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL(node_data);
-nodemask_t numa_nodes_parsed __initdata;
-static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };
-
-static int numa_distance_cnt;
-static u8 *numa_distance;
-bool numa_off;
-
-static __init int numa_parse_early_param(char *opt)
-{
- if (!opt)
- return -EINVAL;
- if (str_has_prefix(opt, "off"))
- numa_off = true;
-
- return 0;
-}
-early_param("numa", numa_parse_early_param);
-
-cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
-EXPORT_SYMBOL(node_to_cpumask_map);
-
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-
-/*
- * Returns a pointer to the bitmask of CPUs on Node 'node'.
- */
-const struct cpumask *cpumask_of_node(int node)
-{
- if (WARN_ON(node >= nr_node_ids))
- return cpu_none_mask;
-
- if (WARN_ON(node_to_cpumask_map[node] == NULL))
- return cpu_online_mask;
-
- return node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(cpumask_of_node);
-
-#endif
-
-static void numa_update_cpu(unsigned int cpu, bool remove)
-{
- int nid = cpu_to_node(cpu);
-
- if (nid == NUMA_NO_NODE)
- return;
-
- if (remove)
- cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]);
- else
- cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
-}
-
-void numa_add_cpu(unsigned int cpu)
-{
- numa_update_cpu(cpu, false);
-}
-
-void numa_remove_cpu(unsigned int cpu)
-{
- numa_update_cpu(cpu, true);
-}
-
-void numa_clear_node(unsigned int cpu)
-{
- numa_remove_cpu(cpu);
- set_cpu_numa_node(cpu, NUMA_NO_NODE);
-}
-
-/*
- * Allocate node_to_cpumask_map based on number of available nodes
- * Requires node_possible_map to be valid.
- *
- * Note: cpumask_of_node() is not valid until after this is done.
- * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
- */
-static void __init setup_node_to_cpumask_map(void)
-{
- int node;
-
- /* setup nr_node_ids if not done yet */
- if (nr_node_ids == MAX_NUMNODES)
- setup_nr_node_ids();
-
- /* allocate and clear the mapping */
- for (node = 0; node < nr_node_ids; node++) {
- alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
- cpumask_clear(node_to_cpumask_map[node]);
- }
-
- /* cpumask_of_node() will now work */
- pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
-}
-
-/*
- * Set the cpu to node and mem mapping
- */
-void numa_store_cpu_info(unsigned int cpu)
-{
- set_cpu_numa_node(cpu, cpu_to_node_map[cpu]);
-}
-
-void __init early_map_cpu_to_node(unsigned int cpu, int nid)
-{
- /* fallback to node 0 */
- if (nid < 0 || nid >= MAX_NUMNODES || numa_off)
- nid = 0;
-
- cpu_to_node_map[cpu] = nid;
-
- /*
- * We should set the numa node of cpu0 as soon as possible, because it
- * has already been set up online before. cpu_to_node(0) will soon be
- * called.
- */
- if (!cpu)
- set_cpu_numa_node(cpu, nid);
-}
-
-#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(__per_cpu_offset);
-
-static int __init early_cpu_to_node(int cpu)
-{
- return cpu_to_node_map[cpu];
-}
-
-static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
-{
- return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
-}
-
-static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
- size_t align)
-{
- int nid = early_cpu_to_node(cpu);
-
- return memblock_alloc_try_nid(size, align,
- __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
-}
-
-static void __init pcpu_fc_free(void *ptr, size_t size)
-{
- memblock_free_early(__pa(ptr), size);
-}
-
-void __init setup_per_cpu_areas(void)
-{
- unsigned long delta;
- unsigned int cpu;
- int rc;
-
- /*
- * Always reserve area for module percpu variables. That's
- * what the legacy allocator did.
- */
- rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
- PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
- pcpu_cpu_distance,
- pcpu_fc_alloc, pcpu_fc_free);
- if (rc < 0)
- panic("Failed to initialize percpu areas.");
-
- delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
- for_each_possible_cpu(cpu)
- __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
-}
-#endif
-
-/**
- * numa_add_memblk() - Set node id to memblk
- * @nid: NUMA node ID of the new memblk
- * @start: Start address of the new memblk
- * @end: End address of the new memblk
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int __init numa_add_memblk(int nid, u64 start, u64 end)
-{
- int ret;
-
- ret = memblock_set_node(start, (end - start), &memblock.memory, nid);
- if (ret < 0) {
- pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n",
- start, (end - 1), nid);
- return ret;
- }
-
- node_set(nid, numa_nodes_parsed);
- return ret;
-}
-
-/*
- * Initialize NODE_DATA for a node on the local memory
- */
-static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
-{
- const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
- u64 nd_pa;
- void *nd;
- int tnid;
-
- if (start_pfn >= end_pfn)
- pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
-
- nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
- if (!nd_pa)
- panic("Cannot allocate %zu bytes for node %d data\n",
- nd_size, nid);
-
- nd = __va(nd_pa);
-
- /* report and initialize */
- pr_info("NODE_DATA [mem %#010Lx-%#010Lx]\n",
- nd_pa, nd_pa + nd_size - 1);
- tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
- if (tnid != nid)
- pr_info("NODE_DATA(%d) on node %d\n", nid, tnid);
-
- node_data[nid] = nd;
- memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
- NODE_DATA(nid)->node_id = nid;
- NODE_DATA(nid)->node_start_pfn = start_pfn;
- NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
-}
-
-/*
- * numa_free_distance
- *
- * The current table is freed.
- */
-void __init numa_free_distance(void)
-{
- size_t size;
-
- if (!numa_distance)
- return;
-
- size = numa_distance_cnt * numa_distance_cnt *
- sizeof(numa_distance[0]);
-
- memblock_free(__pa(numa_distance), size);
- numa_distance_cnt = 0;
- numa_distance = NULL;
-}
-
-/*
- * Create a new NUMA distance table.
- */
-static int __init numa_alloc_distance(void)
-{
- size_t size;
- u64 phys;
- int i, j;
-
- size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
- phys = memblock_find_in_range(0, PFN_PHYS(max_pfn),
- size, PAGE_SIZE);
- if (WARN_ON(!phys))
- return -ENOMEM;
-
- memblock_reserve(phys, size);
-
- numa_distance = __va(phys);
- numa_distance_cnt = nr_node_ids;
-
- /* fill with the default distances */
- for (i = 0; i < numa_distance_cnt; i++)
- for (j = 0; j < numa_distance_cnt; j++)
- numa_distance[i * numa_distance_cnt + j] = i == j ?
- LOCAL_DISTANCE : REMOTE_DISTANCE;
-
- pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt);
-
- return 0;
-}
-
-/**
- * numa_set_distance() - Set inter node NUMA distance from node to node.
- * @from: the 'from' node to set distance
- * @to: the 'to' node to set distance
- * @distance: NUMA distance
- *
- * Set the distance from node @from to @to to @distance.
- * If distance table doesn't exist, a warning is printed.
- *
- * If @from or @to is higher than the highest known node or lower than zero
- * or @distance doesn't make sense, the call is ignored.
- */
-void __init numa_set_distance(int from, int to, int distance)
-{
- if (!numa_distance) {
- pr_warn_once("Warning: distance table not allocated yet\n");
- return;
- }
-
- if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
- from < 0 || to < 0) {
- pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
- from, to, distance);
- return;
- }
-
- if ((u8)distance != distance ||
- (from == to && distance != LOCAL_DISTANCE)) {
- pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
- from, to, distance);
- return;
- }
-
- numa_distance[from * numa_distance_cnt + to] = distance;
-}
-
-/*
- * Return NUMA distance @from to @to
- */
-int __node_distance(int from, int to)
-{
- if (from >= numa_distance_cnt || to >= numa_distance_cnt)
- return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
- return numa_distance[from * numa_distance_cnt + to];
-}
-EXPORT_SYMBOL(__node_distance);
-
-static int __init numa_register_nodes(void)
-{
- int nid;
- struct memblock_region *mblk;
-
- /* Check that valid nid is set to memblks */
- for_each_memblock(memory, mblk)
- if (mblk->nid == NUMA_NO_NODE || mblk->nid >= MAX_NUMNODES) {
- pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
- mblk->nid, mblk->base,
- mblk->base + mblk->size - 1);
- return -EINVAL;
- }
-
- /* Finally register nodes. */
- for_each_node_mask(nid, numa_nodes_parsed) {
- unsigned long start_pfn, end_pfn;
-
- get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
- setup_node_data(nid, start_pfn, end_pfn);
- node_set_online(nid);
- }
-
- /* Setup online nodes to actual nodes*/
- node_possible_map = numa_nodes_parsed;
-
- return 0;
-}
-
-static int __init numa_init(int (*init_func)(void))
-{
- int ret;
-
- nodes_clear(numa_nodes_parsed);
- nodes_clear(node_possible_map);
- nodes_clear(node_online_map);
-
- ret = numa_alloc_distance();
- if (ret < 0)
- return ret;
-
- ret = init_func();
- if (ret < 0)
- goto out_free_distance;
-
- if (nodes_empty(numa_nodes_parsed)) {
- pr_info("No NUMA configuration found\n");
- ret = -EINVAL;
- goto out_free_distance;
- }
-
- ret = numa_register_nodes();
- if (ret < 0)
- goto out_free_distance;
-
- setup_node_to_cpumask_map();
-
- return 0;
-out_free_distance:
- numa_free_distance();
- return ret;
-}
-
-/**
- * dummy_numa_init() - Fallback dummy NUMA init
- *
- * Used if there's no underlying NUMA architecture, NUMA initialization
- * fails, or NUMA is disabled on the command line.
- *
- * Must online at least one node (node 0) and add memory blocks that cover all
- * allowed memory. It is unlikely that this function fails.
- *
- * Return: 0 on success, -errno on failure.
- */
-static int __init dummy_numa_init(void)
-{
- int ret;
- struct memblock_region *mblk;
-
- if (numa_off)
- pr_info("NUMA disabled\n"); /* Forced off on command line. */
- pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n",
- memblock_start_of_DRAM(), memblock_end_of_DRAM() - 1);
-
- for_each_memblock(memory, mblk) {
- ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size);
- if (!ret)
- continue;
-
- pr_err("NUMA init failed\n");
- return ret;
- }
-
- numa_off = true;
- return 0;
-}
-
-/**
- * arm64_numa_init() - Initialize NUMA
- *
- * Try each configured NUMA initialization method until one succeeds. The
- * last fallback is dummy single node config encompassing whole memory.
- */
-void __init arm64_numa_init(void)
-{
- if (!numa_off) {
- if (!acpi_disabled && !numa_init(arm64_acpi_numa_init))
- return;
- if (acpi_disabled && !numa_init(of_numa_init))
- return;
- }
-
- numa_init(dummy_numa_init);
-}
-
-/*
- * We hope that we will be hotplugging memory on nodes we already know about,
- * such that acpi_get_node() succeeds and we never fall back to this...
- */
-int memory_add_physaddr_to_nid(u64 addr)
-{
- pr_warn("Unknown node for memory at 0x%llx, assuming node 0\n", addr);
- return 0;
-}
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 9ce7bd9d4d9c..924843f1f661 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -8,9 +8,10 @@
#include <linux/sched.h>
#include <linux/vmalloc.h>
-#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
+#include <asm/kfence.h>
struct page_change_data {
pgprot_t set_mask;
@@ -19,6 +20,19 @@ struct page_change_data {
bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);
+bool can_set_direct_map(void)
+{
+ /*
+	 * rodata_full and DEBUG_PAGEALLOC require the linear map to be
+ * mapped at page granularity, so that it is possible to
+ * protect/unprotect single pages.
+ *
+ * KFENCE pool requires page-granular mapping if initialized late.
+ */
+ return rodata_full || debug_pagealloc_enabled() ||
+ arm64_kfence_can_set_direct_map();
+}
+
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
struct page_change_data *cdata = data;
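A minimal usage sketch of the new predicate (hide_page_from_linear_map() is an illustrative name; set_direct_map_invalid_noflush() is the interface updated further down in this file):

	#include <linux/set_memory.h>
	#include <linux/mm.h>

	/* Drop one page's linear-map alias, if the map is page-granular. */
	static int hide_page_from_linear_map(struct page *page)
	{
		if (!can_set_direct_map())
			return 0;	/* block mappings: single pages can't change */
		/* Clears PTE_VALID on the alias; the caller still owns the
		 * TLB flush, e.g. via flush_tlb_kernel_range(). */
		return set_direct_map_invalid_noflush(page);
	}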
@@ -54,7 +68,7 @@ static int change_memory_common(unsigned long addr, int numpages,
pgprot_t set_mask, pgprot_t clear_mask)
{
unsigned long start = addr;
- unsigned long size = PAGE_SIZE*numpages;
+ unsigned long size = PAGE_SIZE * numpages;
unsigned long end = start + size;
struct vm_struct *area;
int i;
@@ -80,7 +94,7 @@ static int change_memory_common(unsigned long addr, int numpages,
*/
area = find_vm_area((void *)addr);
if (!area ||
- end > (unsigned long)area->addr + area->size ||
+ end > (unsigned long)kasan_reset_tag(area->addr) + area->size ||
!(area->flags & VM_ALLOC))
return -EINVAL;
@@ -126,13 +140,13 @@ int set_memory_nx(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(PTE_PXN),
- __pgprot(0));
+ __pgprot(PTE_MAYBE_GP));
}
int set_memory_x(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
- __pgprot(0),
+ __pgprot(PTE_MAYBE_GP),
__pgprot(PTE_PXN));
}
@@ -155,7 +169,7 @@ int set_direct_map_invalid_noflush(struct page *page)
.clear_mask = __pgprot(PTE_VALID),
};
- if (!rodata_full)
+ if (!can_set_direct_map())
return 0;
return apply_to_page_range(&init_mm,
@@ -170,7 +184,7 @@ int set_direct_map_default_noflush(struct page *page)
.clear_mask = __pgprot(PTE_RDONLY),
};
- if (!rodata_full)
+ if (!can_set_direct_map())
return 0;
return apply_to_page_range(&init_mm,
@@ -178,18 +192,19 @@ int set_direct_map_default_noflush(struct page *page)
PAGE_SIZE, change_page_range, &data);
}
+#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
- if (!debug_pagealloc_enabled() && !rodata_full)
+ if (!can_set_direct_map())
return;
set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
/*
* This function is used to determine if a linear map page has been marked as
- * not-valid. Walk the page table and check the PTE_VALID bit. This is based
- * on kern_addr_valid(), which almost does what we need.
+ * not-valid. Walk the page table and check the PTE_VALID bit.
*
* Because this is only called on the kernel linear map, p?d_sect() implies
* p?d_present(). When debug_pagealloc is enabled, sections mappings are
@@ -198,19 +213,24 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
bool kernel_page_present(struct page *page)
{
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp, pud;
pmd_t *pmdp, pmd;
pte_t *ptep;
unsigned long addr = (unsigned long)page_address(page);
- if (!debug_pagealloc_enabled() && !rodata_full)
+ if (!can_set_direct_map())
return true;
pgdp = pgd_offset_k(addr);
if (pgd_none(READ_ONCE(*pgdp)))
return false;
- pudp = pud_offset(pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ if (p4d_none(READ_ONCE(*p4dp)))
+ return false;
+
+ pudp = pud_offset(p4dp, addr);
pud = READ_ONCE(*pudp);
if (pud_none(pud))
return false;
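The remainder of the walk (below this hunk) descends the same way; a sketch of the missing levels, assuming the usual p?d_sect() and pte_valid() helpers:

	if (pud_sect(pud))
		return true;		/* section mapping implies present */

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));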
diff --git a/arch/arm64/mm/physaddr.c b/arch/arm64/mm/physaddr.c
index 67a9ba9eaa96..cde44c13dda1 100644
--- a/arch/arm64/mm/physaddr.c
+++ b/arch/arm64/mm/physaddr.c
@@ -9,7 +9,7 @@
phys_addr_t __virt_to_phys(unsigned long x)
{
- WARN(!__is_lm_address(x),
+ WARN(!__is_lm_address(__tag_reset(x)),
"virt_to_phys used for non-linear address: %pK (%pS)\n",
(void *)x,
(void *)x);
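The added __tag_reset() matters once pointer tags are in use (KASAN_SW_TAGS or MTE): the top byte of a linear-map pointer may be non-zero, so the check must run on the untagged address. An illustration with a made-up value:

	void *p = kmalloc(16, GFP_KERNEL);	/* tagged, e.g. 0xf6ffff80091a2000 */
	phys_addr_t pa = virt_to_phys(p);	/* __is_lm_address() must see the
						 * untagged 0xffffff80091a2000 */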
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index a1e0592d1fbc..f66c37a1610e 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -9,13 +9,18 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
+#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
-#include <asm/pgtable.h>
+#include <asm/kernel-pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
+#include <asm/smp.h>
+#include <asm/sysreg.h>
#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K
@@ -37,20 +42,43 @@
#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
#ifdef CONFIG_KASAN_SW_TAGS
-#define TCR_KASAN_FLAGS TCR_TBI1
+#define TCR_KASAN_SW_FLAGS TCR_TBI1 | TCR_TBID1
#else
-#define TCR_KASAN_FLAGS 0
+#define TCR_KASAN_SW_FLAGS 0
#endif
-#define MAIR(attr, mt) ((attr) << ((mt) * 8))
+#ifdef CONFIG_KASAN_HW_TAGS
+#define TCR_MTE_FLAGS TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
+#elif defined(CONFIG_ARM64_MTE)
+/*
+ * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
+ * TBI being enabled at EL1.
+ */
+#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
+#else
+#define TCR_MTE_FLAGS 0
+#endif
+
+/*
+ * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
+ * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
+ */
+#define MAIR_EL1_SET \
+ (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \
+ MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \
+ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \
+ MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \
+ MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))
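For reference, MAIR_ATTRIDX() places the 8-bit attribute in the byte selected by the memory-type index, so each MT_* above owns one byte of the 64-bit register; a restatement (the MT_NORMAL value is illustrative):

	#define MAIR_ATTRIDX(attr, idx)	((u64)(attr) << ((idx) * 8))

	/* e.g. if MT_NORMAL == 4, MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL)
	 * places 0xff (Normal Write-Back) in bits 39:32 of MAIR_EL1. */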
#ifdef CONFIG_CPU_PM
/**
* cpu_do_suspend - save CPU registers context
*
* x0: virtual address of context pointer
+ *
+ * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
*/
-ENTRY(cpu_do_suspend)
+SYM_FUNC_START(cpu_do_suspend)
mrs x2, tpidr_el0
mrs x3, tpidrro_el0
mrs x4, contextidr_el1
@@ -61,11 +89,7 @@ ENTRY(cpu_do_suspend)
mrs x9, mdscr_el1
mrs x10, oslsr_el1
mrs x11, sctlr_el1
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- mrs x12, tpidr_el1
-alternative_else
- mrs x12, tpidr_el2
-alternative_endif
+ get_this_cpu_offset x12
mrs x13, sp_el0
stp x2, x3, [x0]
stp x4, x5, [x0, #16]
@@ -73,22 +97,33 @@ alternative_endif
stp x8, x9, [x0, #48]
stp x10, x11, [x0, #64]
stp x12, x13, [x0, #80]
+ /*
+ * Save x18 as it may be used as a platform register, e.g. by shadow
+ * call stack.
+ */
+ str x18, [x0, #96]
ret
-ENDPROC(cpu_do_suspend)
+SYM_FUNC_END(cpu_do_suspend)
/**
* cpu_do_resume - restore CPU register context
*
* x0: Address of context pointer
*/
- .pushsection ".idmap.text", "awx"
-ENTRY(cpu_do_resume)
+SYM_FUNC_START(cpu_do_resume)
ldp x2, x3, [x0]
ldp x4, x5, [x0, #16]
ldp x6, x8, [x0, #32]
ldp x9, x10, [x0, #48]
ldp x11, x12, [x0, #64]
ldp x13, x14, [x0, #80]
+ /*
+ * Restore x18, as it may be used as a platform register, and clear
+ * the buffer to minimize the risk of exposure when used for shadow
+ * call stack.
+ */
+ ldr x18, [x0, #96]
+ str xzr, [x0, #96]
msr tpidr_el0, x2
msr tpidrro_el0, x3
msr contextidr_el1, x4
@@ -111,11 +146,7 @@ ENTRY(cpu_do_resume)
msr mdscr_el1, x10
msr sctlr_el1, x12
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- msr tpidr_el1, x13
-alternative_else
- msr tpidr_el2, x13
-alternative_endif
+ set_this_cpu_offset x13
msr sp_el0, x14
/*
* Restore oslsr_el1 by writing oslar_el1
@@ -124,49 +155,22 @@ alternative_endif
ubfx x11, x11, #1, #1
msr oslar_el1, x11
reset_pmuserenr_el0 x0 // Disable PMU access from EL0
+ reset_amuserenr_el0 x0 // Disable AMU access from EL0
alternative_if ARM64_HAS_RAS_EXTN
msr_s SYS_DISR_EL1, xzr
alternative_else_nop_endif
+ ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
isb
ret
-ENDPROC(cpu_do_resume)
- .popsection
+SYM_FUNC_END(cpu_do_resume)
#endif
-/*
- * cpu_do_switch_mm(pgd_phys, tsk)
- *
- * Set the translation table base pointer to be pgd_phys.
- *
- * - pgd_phys - physical address of new TTB
- */
-ENTRY(cpu_do_switch_mm)
- mrs x2, ttbr1_el1
- mmid x1, x1 // get mm->context.id
- phys_to_ttbr x3, x0
-
-alternative_if ARM64_HAS_CNP
- cbz x1, 1f // skip CNP for reserved ASID
- orr x3, x3, #TTBR_CNP_BIT
-1:
-alternative_else_nop_endif
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
- bfi x3, x1, #48, #16 // set the ASID field in TTBR0
-#endif
- bfi x2, x1, #48, #16 // set the ASID
- msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set)
- isb
- msr ttbr0_el1, x3 // now update TTBR0
- isb
- b post_ttbr_update_workaround // Back to C code...
-ENDPROC(cpu_do_switch_mm)
-
- .pushsection ".idmap.text", "awx"
+ .pushsection ".idmap.text", "a"
.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
- adrp \tmp1, empty_zero_page
+ adrp \tmp1, reserved_pg_dir
phys_to_ttbr \tmp2, \tmp1
offset_ttbr1 \tmp2, \tmp1
msr ttbr1_el1, \tmp2
@@ -182,50 +186,76 @@ ENDPROC(cpu_do_switch_mm)
* This is the low-level counterpart to cpu_replace_ttbr1, and should not be
* called by anything else. It can only be executed from a TTBR0 mapping.
*/
-ENTRY(idmap_cpu_replace_ttbr1)
- save_and_disable_daif flags=x2
-
+SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
__idmap_cpu_set_reserved_ttbr1 x1, x3
offset_ttbr1 x0, x3
msr ttbr1_el1, x0
isb
- restore_daif x2
-
ret
-ENDPROC(idmap_cpu_replace_ttbr1)
+SYM_FUNC_END(idmap_cpu_replace_ttbr1)
.popsection
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
- .pushsection ".idmap.text", "awx"
-
- .macro __idmap_kpti_get_pgtable_ent, type
- dc cvac, cur_\()\type\()p // Ensure any existing dirty
- dmb sy // lines are written back before
- ldr \type, [cur_\()\type\()p] // loading the entry
- tbz \type, #0, skip_\()\type // Skip invalid and
- tbnz \type, #11, skip_\()\type // non-global entries
- .endm
- .macro __idmap_kpti_put_pgtable_ent_ng, type
+#define KPTI_NG_PTE_FLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS | PTE_WRITE)
+
+ .pushsection ".idmap.text", "a"
+
+ .macro kpti_mk_tbl_ng, type, num_entries
+ add end_\type\()p, cur_\type\()p, #\num_entries * 8
+.Ldo_\type:
+ ldr \type, [cur_\type\()p] // Load the entry
+ tbz \type, #0, .Lnext_\type // Skip invalid and
+ tbnz \type, #11, .Lnext_\type // non-global entries
orr \type, \type, #PTE_NG // Same bit for blocks and pages
- str \type, [cur_\()\type\()p] // Update the entry and ensure
- dmb sy // that it is visible to all
- dc civac, cur_\()\type\()p // CPUs.
+ str \type, [cur_\type\()p] // Update the entry
+ .ifnc \type, pte
+ tbnz \type, #1, .Lderef_\type
+ .endif
+.Lnext_\type:
+ add cur_\type\()p, cur_\type\()p, #8
+ cmp cur_\type\()p, end_\type\()p
+ b.ne .Ldo_\type
+ .endm
+
+ /*
+ * Dereference the current table entry and map it into the temporary
+ * fixmap slot associated with the current level.
+ */
+ .macro kpti_map_pgtbl, type, level
+ str xzr, [temp_pte, #8 * (\level + 1)] // break before make
+ dsb nshst
+ add pte, temp_pte, #PAGE_SIZE * (\level + 1)
+ lsr pte, pte, #12
+ tlbi vaae1, pte
+ dsb nsh
+ isb
+
+ phys_to_pte pte, cur_\type\()p
+ add cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 1)
+ orr pte, pte, pte_flags
+ str pte, [temp_pte, #8 * (\level + 1)]
+ dsb nshst
.endm
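A C model of these two macros may help; this is a sketch only (map_level() stands in for the kpti_map_pgtbl fixmap dance, and num_entries is PTRS_PER_*, i.e. 512 at every level with 4K pages):

	#define DESC_VALID	(1UL << 0)
	#define DESC_TABLE	(1UL << 1)	/* meaningful above the PTE level */
	#define DESC_NG		(1UL << 11)	/* nG: same bit for blocks and pages */

	static void mk_tbl_ng(u64 *tbl, int num_entries, int level)
	{
		for (int i = 0; i < num_entries; i++) {
			u64 e = tbl[i];

			if (!(e & DESC_VALID) || (e & DESC_NG))
				continue;	/* skip invalid and non-global entries */
			tbl[i] = e | DESC_NG;
			if (level < 3 && (e & DESC_TABLE))
				mk_tbl_ng(map_level(e, level + 1),
					  num_entries, level + 1);
		}
	}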
/*
- * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper)
+ * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
+ * unsigned long temp_pte_va)
*
* Called exactly once from stop_machine context by each CPU found during boot.
*/
-__idmap_kpti_flag:
- .long 1
-ENTRY(idmap_kpti_install_ng_mappings)
+ .pushsection ".data", "aw", %progbits
+SYM_DATA(__idmap_kpti_flag, .long 1)
+ .popsection
+
+SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
cpu .req w0
+ temp_pte .req x0
num_cpus .req w1
- swapper_pa .req x2
+ pte_flags .req x1
+ temp_pgd_phys .req x2
swapper_ttb .req x3
flag_ptr .req x4
cur_pgdp .req x5
@@ -233,234 +263,182 @@ ENTRY(idmap_kpti_install_ng_mappings)
pgd .req x7
cur_pudp .req x8
end_pudp .req x9
- pud .req x10
cur_pmdp .req x11
end_pmdp .req x12
- pmd .req x13
cur_ptep .req x14
end_ptep .req x15
pte .req x16
+ valid .req x17
+ mov x5, x3 // preserve temp_pte arg
mrs swapper_ttb, ttbr1_el1
- restore_ttbr1 swapper_ttb
- adr flag_ptr, __idmap_kpti_flag
+ adr_l flag_ptr, __idmap_kpti_flag
cbnz cpu, __idmap_kpti_secondary
/* We're the boot CPU. Wait for the others to catch up */
sevl
1: wfe
- ldaxr w18, [flag_ptr]
- eor w18, w18, num_cpus
- cbnz w18, 1b
-
- /* We need to walk swapper, so turn off the MMU. */
- pre_disable_mmu_workaround
- mrs x18, sctlr_el1
- bic x18, x18, #SCTLR_ELx_M
- msr sctlr_el1, x18
+ ldaxr w17, [flag_ptr]
+ eor w17, w17, num_cpus
+ cbnz w17, 1b
+
+ /* Switch to the temporary page tables on this CPU only */
+ __idmap_cpu_set_reserved_ttbr1 x8, x9
+ offset_ttbr1 temp_pgd_phys, x8
+ msr ttbr1_el1, temp_pgd_phys
isb
+ mov temp_pte, x5
+ mov_q pte_flags, KPTI_NG_PTE_FLAGS
+
/* Everybody is enjoying the idmap, so we can rewrite swapper. */
/* PGD */
- mov cur_pgdp, swapper_pa
- add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
-do_pgd: __idmap_kpti_get_pgtable_ent pgd
- tbnz pgd, #1, walk_puds
-next_pgd:
- __idmap_kpti_put_pgtable_ent_ng pgd
-skip_pgd:
- add cur_pgdp, cur_pgdp, #8
- cmp cur_pgdp, end_pgdp
- b.ne do_pgd
-
- /* Publish the updated tables and nuke all the TLBs */
- dsb sy
- tlbi vmalle1is
- dsb ish
- isb
+ adrp cur_pgdp, swapper_pg_dir
+ kpti_map_pgtbl pgd, 0
+ kpti_mk_tbl_ng pgd, PTRS_PER_PGD
- /* We're done: fire up the MMU again */
- mrs x18, sctlr_el1
- orr x18, x18, #SCTLR_ELx_M
- msr sctlr_el1, x18
- isb
+ /* Ensure all the updated entries are visible to secondary CPUs */
+ dsb ishst
- /*
- * Invalidate the local I-cache so that any instructions fetched
- * speculatively from the PoC are discarded, since they may have
- * been dynamically patched at the PoU.
- */
- ic iallu
- dsb nsh
+ /* We're done: fire up swapper_pg_dir again */
+ __idmap_cpu_set_reserved_ttbr1 x8, x9
+ msr ttbr1_el1, swapper_ttb
isb
/* Set the flag to zero to indicate that we're all done */
str wzr, [flag_ptr]
ret
+.Lderef_pgd:
/* PUD */
-walk_puds:
- .if CONFIG_PGTABLE_LEVELS > 3
+ .if CONFIG_PGTABLE_LEVELS > 3
+ pud .req x10
pte_to_phys cur_pudp, pgd
- add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
-do_pud: __idmap_kpti_get_pgtable_ent pud
- tbnz pud, #1, walk_pmds
-next_pud:
- __idmap_kpti_put_pgtable_ent_ng pud
-skip_pud:
- add cur_pudp, cur_pudp, 8
- cmp cur_pudp, end_pudp
- b.ne do_pud
- b next_pgd
- .else /* CONFIG_PGTABLE_LEVELS <= 3 */
- mov pud, pgd
- b walk_pmds
-next_pud:
- b next_pgd
+ kpti_map_pgtbl pud, 1
+ kpti_mk_tbl_ng pud, PTRS_PER_PUD
+ b .Lnext_pgd
+ .else /* CONFIG_PGTABLE_LEVELS <= 3 */
+ pud .req pgd
+ .set .Lnext_pud, .Lnext_pgd
.endif
+.Lderef_pud:
/* PMD */
-walk_pmds:
- .if CONFIG_PGTABLE_LEVELS > 2
+ .if CONFIG_PGTABLE_LEVELS > 2
+ pmd .req x13
pte_to_phys cur_pmdp, pud
- add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
-do_pmd: __idmap_kpti_get_pgtable_ent pmd
- tbnz pmd, #1, walk_ptes
-next_pmd:
- __idmap_kpti_put_pgtable_ent_ng pmd
-skip_pmd:
- add cur_pmdp, cur_pmdp, #8
- cmp cur_pmdp, end_pmdp
- b.ne do_pmd
- b next_pud
- .else /* CONFIG_PGTABLE_LEVELS <= 2 */
- mov pmd, pud
- b walk_ptes
-next_pmd:
- b next_pud
+ kpti_map_pgtbl pmd, 2
+ kpti_mk_tbl_ng pmd, PTRS_PER_PMD
+ b .Lnext_pud
+ .else /* CONFIG_PGTABLE_LEVELS <= 2 */
+ pmd .req pgd
+ .set .Lnext_pmd, .Lnext_pgd
.endif
+.Lderef_pmd:
/* PTE */
-walk_ptes:
pte_to_phys cur_ptep, pmd
- add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
-do_pte: __idmap_kpti_get_pgtable_ent pte
- __idmap_kpti_put_pgtable_ent_ng pte
-skip_pte:
- add cur_ptep, cur_ptep, #8
- cmp cur_ptep, end_ptep
- b.ne do_pte
- b next_pmd
+ kpti_map_pgtbl pte, 3
+ kpti_mk_tbl_ng pte, PTRS_PER_PTE
+ b .Lnext_pmd
+
+ .unreq cpu
+ .unreq temp_pte
+ .unreq num_cpus
+ .unreq pte_flags
+ .unreq temp_pgd_phys
+ .unreq cur_pgdp
+ .unreq end_pgdp
+ .unreq pgd
+ .unreq cur_pudp
+ .unreq end_pudp
+ .unreq pud
+ .unreq cur_pmdp
+ .unreq end_pmdp
+ .unreq pmd
+ .unreq cur_ptep
+ .unreq end_ptep
+ .unreq pte
+ .unreq valid
/* Secondary CPUs end up here */
__idmap_kpti_secondary:
/* Uninstall swapper before surgery begins */
- __idmap_cpu_set_reserved_ttbr1 x18, x17
+ __idmap_cpu_set_reserved_ttbr1 x16, x17
	/* Increment the flag to let the boot CPU know we're ready */
-1: ldxr w18, [flag_ptr]
- add w18, w18, #1
- stxr w17, w18, [flag_ptr]
+1: ldxr w16, [flag_ptr]
+ add w16, w16, #1
+ stxr w17, w16, [flag_ptr]
cbnz w17, 1b
/* Wait for the boot CPU to finish messing around with swapper */
sevl
1: wfe
- ldxr w18, [flag_ptr]
- cbnz w18, 1b
+ ldxr w16, [flag_ptr]
+ cbnz w16, 1b
/* All done, act like nothing happened */
- offset_ttbr1 swapper_ttb, x18
msr ttbr1_el1, swapper_ttb
isb
ret
- .unreq cpu
- .unreq num_cpus
- .unreq swapper_pa
.unreq swapper_ttb
.unreq flag_ptr
- .unreq cur_pgdp
- .unreq end_pgdp
- .unreq pgd
- .unreq cur_pudp
- .unreq end_pudp
- .unreq pud
- .unreq cur_pmdp
- .unreq end_pmdp
- .unreq pmd
- .unreq cur_ptep
- .unreq end_ptep
- .unreq pte
-ENDPROC(idmap_kpti_install_ng_mappings)
+SYM_FUNC_END(idmap_kpti_install_ng_mappings)
.popsection
#endif
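Stepping back, the rendezvous around __idmap_kpti_flag is easier to follow in C; a rough model (atomic_t stands in for the ldxr/stxr loops, the helper names are illustrative):

	static atomic_t kpti_flag = ATOMIC_INIT(1);	/* boot CPU counts itself */

	void kpti_secondary(void)
	{
		install_reserved_ttbr1();	/* uninstall swapper */
		atomic_inc(&kpti_flag);		/* check in with the boot CPU */
		while (atomic_read(&kpti_flag))
			cpu_relax();		/* wait for the rewrite to finish */
		install_swapper_ttbr1();	/* act like nothing happened */
	}

	void kpti_boot_cpu(int num_cpus)
	{
		while (atomic_read(&kpti_flag) != num_cpus)
			cpu_relax();		/* wait for every secondary */
		remap_swapper_with_ng();	/* via the temporary pgd above */
		atomic_set(&kpti_flag, 0);	/* release everyone */
	}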
/*
* __cpu_setup
*
- * Initialise the processor for turning the MMU on. Return in x0 the
- * value of the SCTLR_EL1 register.
+ * Initialise the processor for turning the MMU on.
+ *
+ * Input:
+ * x0 - actual number of VA bits (ignored unless VA_BITS > 48)
+ * Output:
+ * Return in x0 the value of the SCTLR_EL1 register.
*/
- .pushsection ".idmap.text", "awx"
-ENTRY(__cpu_setup)
+ .pushsection ".idmap.text", "a"
+SYM_FUNC_START(__cpu_setup)
tlbi vmalle1 // Invalidate local TLB
dsb nsh
- mov x0, #3 << 20
- msr cpacr_el1, x0 // Enable FP/ASIMD
- mov x0, #1 << 12 // Reset mdscr_el1 and disable
- msr mdscr_el1, x0 // access to the DCC from EL0
+ msr cpacr_el1, xzr // Reset cpacr_el1
+ mov x1, #1 << 12 // Reset mdscr_el1 and disable
+ msr mdscr_el1, x1 // access to the DCC from EL0
isb // Unmask debug exceptions now,
enable_dbg // since this is per-cpu
- reset_pmuserenr_el0 x0 // Disable PMU access from EL0
- /*
- * Memory region attributes for LPAE:
- *
- * n = AttrIndx[2:0]
- * n MAIR
- * DEVICE_nGnRnE 000 00000000
- * DEVICE_nGnRE 001 00000100
- * DEVICE_GRE 010 00001100
- * NORMAL_NC 011 01000100
- * NORMAL 100 11111111
- * NORMAL_WT 101 10111011
- */
- ldr x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
- MAIR(0x04, MT_DEVICE_nGnRE) | \
- MAIR(0x0c, MT_DEVICE_GRE) | \
- MAIR(0x44, MT_NORMAL_NC) | \
- MAIR(0xff, MT_NORMAL) | \
- MAIR(0xbb, MT_NORMAL_WT)
- msr mair_el1, x5
- /*
- * Prepare SCTLR
- */
- mov_q x0, SCTLR_EL1_SET
+ reset_pmuserenr_el0 x1 // Disable PMU access from EL0
+ reset_amuserenr_el0 x1 // Disable AMU access from EL0
+
/*
- * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
- * both user and kernel.
+ * Default values for VMSA control registers. These will be adjusted
+ * below depending on detected CPU features.
*/
- ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
+ mair .req x17
+ tcr .req x16
+ mov_q mair, MAIR_EL1_SET
+ mov_q tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
- TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
- tcr_clear_errata_bits x10, x9, x5
+ TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
+
+ tcr_clear_errata_bits tcr, x9, x5
#ifdef CONFIG_ARM64_VA_BITS_52
- ldr_l x9, vabits_actual
- sub x9, xzr, x9
+ sub x9, xzr, x0
add x9, x9, #64
- tcr_set_t1sz x10, x9
+ tcr_set_t1sz tcr, x9
#else
- ldr_l x9, idmap_t0sz
+ idmap_get_t0sz x9
#endif
- tcr_set_t0sz x10, x9
+ tcr_set_t0sz tcr, x9
/*
* Set the IPS bits in TCR_EL1.
*/
- tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
+ tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
/*
* Enable hardware update of the Access Flags bit.
@@ -468,11 +446,34 @@ ENTRY(__cpu_setup)
* via capabilities.
*/
mrs x9, ID_AA64MMFR1_EL1
- and x9, x9, #0xf
+ and x9, x9, ID_AA64MMFR1_EL1_HAFDBS_MASK
cbz x9, 1f
- orr x10, x10, #TCR_HA // hardware Access flag update
+ orr tcr, tcr, #TCR_HA // hardware Access flag update
1:
#endif /* CONFIG_ARM64_HW_AFDBM */
- msr tcr_el1, x10
+ msr mair_el1, mair
+ msr tcr_el1, tcr
+
+ mrs_s x1, SYS_ID_AA64MMFR3_EL1
+ ubfx x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
+ cbz x1, .Lskip_indirection
+
+ mov_q x0, PIE_E0
+ msr REG_PIRE0_EL1, x0
+ mov_q x0, PIE_E1
+ msr REG_PIR_EL1, x0
+
+ mov x0, TCR2_EL1x_PIE
+ msr REG_TCR2_EL1, x0
+
+.Lskip_indirection:
+
+ /*
+ * Prepare SCTLR
+ */
+ mov_q x0, INIT_SCTLR_EL1_MMU_ON
ret // return to head.S
-ENDPROC(__cpu_setup)
+
+ .unreq mair
+ .unreq tcr
+SYM_FUNC_END(__cpu_setup)
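One arithmetic detail worth spelling out: the TxSZ fields encode each VA range as 64 minus its width. Restating the header macros (offsets as in pgtable-hwdef.h):

	#define TCR_T0SZ_OFFSET	0
	#define TCR_T1SZ_OFFSET	16
	#define TCR_T0SZ(x)	((64UL - (x)) << TCR_T0SZ_OFFSET)
	#define TCR_T1SZ(x)	((64UL - (x)) << TCR_T1SZ_OFFSET)

	/* VA_BITS == 48 gives T0SZ == T1SZ == 16; in the 52-bit VA case the
	 * code above instead computes T1SZ == 64 - x0 from the detected width. */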
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/ptdump.c
index 0a920b538a89..e305b6593c4e 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -15,13 +15,13 @@
#include <linux/io.h>
#include <linux/init.h>
#include <linux/mm.h>
+#include <linux/ptdump.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/memory.h>
-#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptdump.h>
@@ -29,7 +29,7 @@
enum address_markers_idx {
PAGE_OFFSET_NR = 0,
PAGE_END_NR,
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
KASAN_START_NR,
#endif
};
@@ -37,7 +37,7 @@ enum address_markers_idx {
static struct addr_marker address_markers[] = {
{ PAGE_OFFSET, "Linear Mapping start" },
{ 0 /* PAGE_END */, "Linear Mapping end" },
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
{ 0 /* KASAN_SHADOW_START */, "Kasan shadow start" },
{ KASAN_SHADOW_END, "Kasan shadow end" },
#endif
@@ -45,14 +45,12 @@ static struct addr_marker address_markers[] = {
{ MODULES_END, "Modules end" },
{ VMALLOC_START, "vmalloc() area" },
{ VMALLOC_END, "vmalloc() end" },
- { FIXADDR_START, "Fixmap start" },
+ { FIXADDR_TOT_START, "Fixmap start" },
{ FIXADDR_TOP, "Fixmap end" },
{ PCI_IO_START, "PCI I/O start" },
{ PCI_IO_END, "PCI I/O end" },
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
{ VMEMMAP_START, "vmemmap start" },
{ VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" },
-#endif
{ -1, NULL },
};
@@ -75,10 +73,11 @@ static struct addr_marker address_markers[] = {
* dumps out a description of the range.
*/
struct pg_state {
+ struct ptdump_state ptdump;
struct seq_file *seq;
const struct addr_marker *marker;
unsigned long start_address;
- unsigned level;
+ int level;
u64 current_prot;
bool check_wx;
unsigned long wx_pages;
@@ -144,6 +143,11 @@ static const struct prot_bits pte_bits[] = {
.set = "UXN",
.clear = " ",
}, {
+ .mask = PTE_GP,
+ .val = PTE_GP,
+ .set = "GP",
+ .clear = " ",
+ }, {
.mask = PTE_ATTRINDX_MASK,
.val = PTE_ATTRINDX(MT_DEVICE_nGnRnE),
.set = "DEVICE/nGnRnE",
@@ -153,16 +157,16 @@ static const struct prot_bits pte_bits[] = {
.set = "DEVICE/nGnRE",
}, {
.mask = PTE_ATTRINDX_MASK,
- .val = PTE_ATTRINDX(MT_DEVICE_GRE),
- .set = "DEVICE/GRE",
- }, {
- .mask = PTE_ATTRINDX_MASK,
.val = PTE_ATTRINDX(MT_NORMAL_NC),
.set = "MEM/NORMAL-NC",
}, {
.mask = PTE_ATTRINDX_MASK,
.val = PTE_ATTRINDX(MT_NORMAL),
.set = "MEM/NORMAL",
+ }, {
+ .mask = PTE_ATTRINDX_MASK,
+ .val = PTE_ATTRINDX(MT_NORMAL_TAGGED),
+ .set = "MEM/NORMAL-TAGGED",
}
};
@@ -174,11 +178,14 @@ struct pg_level {
};
static struct pg_level pg_level[] = {
- {
- }, { /* pgd */
+ { /* pgd */
.name = "PGD",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
+ }, { /* p4d */
+ .name = "P4D",
+ .bits = pte_bits,
+ .num = ARRAY_SIZE(pte_bits),
}, { /* pud */
.name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
.bits = pte_bits,
@@ -241,13 +248,17 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}
-static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
- u64 val)
+static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
+ u64 val)
{
+ struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
static const char units[] = "KMGTPE";
- u64 prot = val & pg_level[level].mask;
+ u64 prot = 0;
- if (!st->level) {
+ if (level >= 0)
+ prot = val & pg_level[level].mask;
+
+ if (st->level == -1) {
st->level = level;
st->current_prot = prot;
st->start_address = addr;
@@ -260,21 +271,22 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
if (st->current_prot) {
note_prot_uxn(st, addr);
note_prot_wx(st, addr);
- pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx ",
+ }
+
+ pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx ",
st->start_address, addr);
- delta = (addr - st->start_address) >> 10;
- while (!(delta & 1023) && unit[1]) {
- delta >>= 10;
- unit++;
- }
- pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
- pg_level[st->level].name);
- if (pg_level[st->level].bits)
- dump_prot(st, pg_level[st->level].bits,
- pg_level[st->level].num);
- pt_dump_seq_puts(st->seq, "\n");
+ delta = (addr - st->start_address) >> 10;
+ while (!(delta & 1023) && unit[1]) {
+ delta >>= 10;
+ unit++;
}
+ pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
+ pg_level[st->level].name);
+ if (st->current_prot && pg_level[st->level].bits)
+ dump_prot(st, pg_level[st->level].bits,
+ pg_level[st->level].num);
+ pt_dump_seq_puts(st->seq, "\n");
if (addr >= st->marker[1].start_address) {
st->marker++;
@@ -293,88 +305,31 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
}
-static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start,
- unsigned long end)
+void ptdump_walk(struct seq_file *s, struct ptdump_info *info)
{
- unsigned long addr = start;
- pte_t *ptep = pte_offset_kernel(pmdp, start);
+ unsigned long end = ~0UL;
+ struct pg_state st;
- do {
- note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
- } while (ptep++, addr += PAGE_SIZE, addr != end);
-}
+ if (info->base_addr < TASK_SIZE_64)
+ end = TASK_SIZE_64;
-static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start,
- unsigned long end)
-{
- unsigned long next, addr = start;
- pmd_t *pmdp = pmd_offset(pudp, start);
-
- do {
- pmd_t pmd = READ_ONCE(*pmdp);
- next = pmd_addr_end(addr, end);
-
- if (pmd_none(pmd) || pmd_sect(pmd)) {
- note_page(st, addr, 3, pmd_val(pmd));
- } else {
- BUG_ON(pmd_bad(pmd));
- walk_pte(st, pmdp, addr, next);
- }
- } while (pmdp++, addr = next, addr != end);
-}
-
-static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start,
- unsigned long end)
-{
- unsigned long next, addr = start;
- pud_t *pudp = pud_offset(pgdp, start);
-
- do {
- pud_t pud = READ_ONCE(*pudp);
- next = pud_addr_end(addr, end);
-
- if (pud_none(pud) || pud_sect(pud)) {
- note_page(st, addr, 2, pud_val(pud));
- } else {
- BUG_ON(pud_bad(pud));
- walk_pmd(st, pudp, addr, next);
- }
- } while (pudp++, addr = next, addr != end);
-}
-
-static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
- unsigned long start)
-{
- unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0;
- unsigned long next, addr = start;
- pgd_t *pgdp = pgd_offset(mm, start);
-
- do {
- pgd_t pgd = READ_ONCE(*pgdp);
- next = pgd_addr_end(addr, end);
-
- if (pgd_none(pgd)) {
- note_page(st, addr, 1, pgd_val(pgd));
- } else {
- BUG_ON(pgd_bad(pgd));
- walk_pud(st, pgdp, addr, next);
- }
- } while (pgdp++, addr = next, addr != end);
-}
-
-void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
-{
- struct pg_state st = {
- .seq = m,
+ st = (struct pg_state){
+ .seq = s,
.marker = info->markers,
+ .level = -1,
+ .ptdump = {
+ .note_page = note_page,
+ .range = (struct ptdump_range[]){
+ {info->base_addr, end},
+ {0, 0}
+ }
+ }
};
- walk_pgd(&st, info->mm, info->base_addr);
-
- note_page(&st, 0, 0, 0);
+ ptdump_walk_pgd(&st.ptdump, info->mm, NULL);
}
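For context, a caller hands ptdump_walk() a ptdump_info describing the mm, marker table and base address; the registration in this file looks roughly like:

	static struct ptdump_info kernel_ptdump_info = {
		.mm		= &init_mm,
		.markers	= address_markers,
		.base_addr	= PAGE_OFFSET,
	};

	/* e.g. from ptdump_init(): */
	ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");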
-static void ptdump_initialize(void)
+static void __init ptdump_initialize(void)
{
unsigned i, j;
@@ -398,11 +353,19 @@ void ptdump_check_wx(void)
{ 0, NULL},
{ -1, NULL},
},
+ .level = -1,
.check_wx = true,
+ .ptdump = {
+ .note_page = note_page,
+ .range = (struct ptdump_range[]) {
+ {PAGE_OFFSET, ~0UL},
+ {0, 0}
+ }
+ }
};
- walk_pgd(&st, &init_mm, PAGE_OFFSET);
- note_page(&st, 0, 0, 0);
+ ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
+
if (st.wx_pages || st.uxn_pages)
pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
st.wx_pages, st.uxn_pages);
@@ -410,10 +373,10 @@ void ptdump_check_wx(void)
pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}
-static int ptdump_init(void)
+static int __init ptdump_init(void)
{
address_markers[PAGE_END_NR].start_address = PAGE_END;
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START;
#endif
ptdump_initialize();
diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c
index 064163f25592..68bf1a125502 100644
--- a/arch/arm64/mm/ptdump_debugfs.c
+++ b/arch/arm64/mm/ptdump_debugfs.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
+#include <linux/memory_hotplug.h>
#include <linux/seq_file.h>
#include <asm/ptdump.h>
@@ -7,12 +8,15 @@
static int ptdump_show(struct seq_file *m, void *v)
{
struct ptdump_info *info = m->private;
- ptdump_walk_pgd(m, info);
+
+ get_online_mems();
+ ptdump_walk(m, info);
+ put_online_mems();
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ptdump);
-void ptdump_debugfs_register(struct ptdump_info *info, const char *name)
+void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name)
{
debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
}
diff --git a/arch/arm64/mm/trans_pgd-asm.S b/arch/arm64/mm/trans_pgd-asm.S
new file mode 100644
index 000000000000..021c31573bcb
--- /dev/null
+++ b/arch/arm64/mm/trans_pgd-asm.S
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * Copyright (c) 2021, Microsoft Corporation.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/kvm_asm.h>
+
+.macro invalid_vector label
+SYM_CODE_START_LOCAL(\label)
+ .align 7
+ b \label
+SYM_CODE_END(\label)
+.endm
+
+.macro el1_sync_vector
+SYM_CODE_START_LOCAL(el1_sync)
+ .align 7
+ cmp x0, #HVC_SET_VECTORS /* Called from hibernate */
+ b.ne 1f
+ msr vbar_el2, x1
+ mov x0, xzr
+ eret
+1: cmp x0, #HVC_SOFT_RESTART /* Called from kexec */
+ b.ne 2f
+ mov x0, x2
+ mov x2, x4
+ mov x4, x1
+ mov x1, x3
+ br x4
+2: /* Unexpected argument, set an error */
+ mov_q x0, HVC_STUB_ERR
+ eret
+SYM_CODE_END(el1_sync)
+.endm
+
+SYM_CODE_START(trans_pgd_stub_vectors)
+ invalid_vector hyp_stub_el2t_sync_invalid // Synchronous EL2t
+ invalid_vector hyp_stub_el2t_irq_invalid // IRQ EL2t
+ invalid_vector hyp_stub_el2t_fiq_invalid // FIQ EL2t
+ invalid_vector hyp_stub_el2t_error_invalid // Error EL2t
+
+ invalid_vector hyp_stub_el2h_sync_invalid // Synchronous EL2h
+ invalid_vector hyp_stub_el2h_irq_invalid // IRQ EL2h
+ invalid_vector hyp_stub_el2h_fiq_invalid // FIQ EL2h
+ invalid_vector hyp_stub_el2h_error_invalid // Error EL2h
+
+ el1_sync_vector // Synchronous 64-bit EL1
+ invalid_vector hyp_stub_el1_irq_invalid // IRQ 64-bit EL1
+ invalid_vector hyp_stub_el1_fiq_invalid // FIQ 64-bit EL1
+ invalid_vector hyp_stub_el1_error_invalid // Error 64-bit EL1
+
+ invalid_vector hyp_stub_32b_el1_sync_invalid // Synchronous 32-bit EL1
+ invalid_vector hyp_stub_32b_el1_irq_invalid // IRQ 32-bit EL1
+ invalid_vector hyp_stub_32b_el1_fiq_invalid // FIQ 32-bit EL1
+ invalid_vector hyp_stub_32b_el1_error_invalid // Error 32-bit EL1
+ .align 11
+SYM_INNER_LABEL(__trans_pgd_stub_vectors_end, SYM_L_LOCAL)
+SYM_CODE_END(trans_pgd_stub_vectors)
+
+# Check that trans_pgd_stub_vectors didn't overflow
+.org . - (__trans_pgd_stub_vectors_end - trans_pgd_stub_vectors) + SZ_2K
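A note on the final .org: each vector slot is .align 7 (128 bytes) and there are 16 slots, so the table must be exactly 16 * 128 = SZ_2K bytes. If it ever grew past that, the .org expression would move the location counter backwards and the assembler would reject the file at build time.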
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
new file mode 100644
index 000000000000..7b14df3c6477
--- /dev/null
+++ b/arch/arm64/mm/trans_pgd.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Transitional page tables for kexec and hibernate
+ *
+ * This file is derived from arch/arm64/kernel/hibernate.c.
+ *
+ * Copyright (c) 2021, Microsoft Corporation.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ *
+ */
+
+/*
+ * Transitional tables are used while the system transfers from one world to
+ * another, such as during hibernate restore and kexec reboots. During these
+ * phases one cannot rely on the live page tables, because hibernate and kexec
+ * can overwrite them mid-transition.
+ */
+
+#include <asm/trans_pgd.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <linux/suspend.h>
+#include <linux/bug.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/kfence.h>
+
+static void *trans_alloc(struct trans_pgd_info *info)
+{
+ return info->trans_alloc_page(info->trans_alloc_arg);
+}
+
+static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
+{
+ pte_t pte = READ_ONCE(*src_ptep);
+
+ if (pte_valid(pte)) {
+ /*
+ * Resume will overwrite areas that may be marked
+ * read only (code, rodata). Clear the RDONLY bit from
+ * the temporary mappings we use during restore.
+ */
+ set_pte(dst_ptep, pte_mkwrite_novma(pte));
+ } else if ((debug_pagealloc_enabled() ||
+ is_kfence_address((void *)addr)) && !pte_none(pte)) {
+ /*
+ * debug_pagealloc will have removed the PTE_VALID bit if
+ * the page isn't in use by the resume kernel. It may have
+ * been in use by the original kernel, in which case we need
+ * to put it back in our copy to do the restore.
+ *
+ * Before marking this entry valid, check that the pfn is
+ * backed by a valid page.
+ */
+ BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+ set_pte(dst_ptep, pte_mkpresent(pte_mkwrite_novma(pte)));
+ }
+}
+
+static int copy_pte(struct trans_pgd_info *info, pmd_t *dst_pmdp,
+ pmd_t *src_pmdp, unsigned long start, unsigned long end)
+{
+ pte_t *src_ptep;
+ pte_t *dst_ptep;
+ unsigned long addr = start;
+
+ dst_ptep = trans_alloc(info);
+ if (!dst_ptep)
+ return -ENOMEM;
+ pmd_populate_kernel(NULL, dst_pmdp, dst_ptep);
+ dst_ptep = pte_offset_kernel(dst_pmdp, start);
+
+ src_ptep = pte_offset_kernel(src_pmdp, start);
+ do {
+ _copy_pte(dst_ptep, src_ptep, addr);
+ } while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);
+
+ return 0;
+}
+
+static int copy_pmd(struct trans_pgd_info *info, pud_t *dst_pudp,
+ pud_t *src_pudp, unsigned long start, unsigned long end)
+{
+ pmd_t *src_pmdp;
+ pmd_t *dst_pmdp;
+ unsigned long next;
+ unsigned long addr = start;
+
+ if (pud_none(READ_ONCE(*dst_pudp))) {
+ dst_pmdp = trans_alloc(info);
+ if (!dst_pmdp)
+ return -ENOMEM;
+ pud_populate(NULL, dst_pudp, dst_pmdp);
+ }
+ dst_pmdp = pmd_offset(dst_pudp, start);
+
+ src_pmdp = pmd_offset(src_pudp, start);
+ do {
+ pmd_t pmd = READ_ONCE(*src_pmdp);
+
+ next = pmd_addr_end(addr, end);
+ if (pmd_none(pmd))
+ continue;
+ if (pmd_table(pmd)) {
+ if (copy_pte(info, dst_pmdp, src_pmdp, addr, next))
+ return -ENOMEM;
+ } else {
+ set_pmd(dst_pmdp,
+ __pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
+ }
+ } while (dst_pmdp++, src_pmdp++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int copy_pud(struct trans_pgd_info *info, p4d_t *dst_p4dp,
+ p4d_t *src_p4dp, unsigned long start,
+ unsigned long end)
+{
+ pud_t *dst_pudp;
+ pud_t *src_pudp;
+ unsigned long next;
+ unsigned long addr = start;
+
+ if (p4d_none(READ_ONCE(*dst_p4dp))) {
+ dst_pudp = trans_alloc(info);
+ if (!dst_pudp)
+ return -ENOMEM;
+ p4d_populate(NULL, dst_p4dp, dst_pudp);
+ }
+ dst_pudp = pud_offset(dst_p4dp, start);
+
+ src_pudp = pud_offset(src_p4dp, start);
+ do {
+ pud_t pud = READ_ONCE(*src_pudp);
+
+ next = pud_addr_end(addr, end);
+ if (pud_none(pud))
+ continue;
+ if (pud_table(pud)) {
+ if (copy_pmd(info, dst_pudp, src_pudp, addr, next))
+ return -ENOMEM;
+ } else {
+ set_pud(dst_pudp,
+ __pud(pud_val(pud) & ~PUD_SECT_RDONLY));
+ }
+ } while (dst_pudp++, src_pudp++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int copy_p4d(struct trans_pgd_info *info, pgd_t *dst_pgdp,
+ pgd_t *src_pgdp, unsigned long start,
+ unsigned long end)
+{
+ p4d_t *dst_p4dp;
+ p4d_t *src_p4dp;
+ unsigned long next;
+ unsigned long addr = start;
+
+ dst_p4dp = p4d_offset(dst_pgdp, start);
+ src_p4dp = p4d_offset(src_pgdp, start);
+ do {
+ next = p4d_addr_end(addr, end);
+ if (p4d_none(READ_ONCE(*src_p4dp)))
+ continue;
+ if (copy_pud(info, dst_p4dp, src_p4dp, addr, next))
+ return -ENOMEM;
+ } while (dst_p4dp++, src_p4dp++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int copy_page_tables(struct trans_pgd_info *info, pgd_t *dst_pgdp,
+ unsigned long start, unsigned long end)
+{
+ unsigned long next;
+ unsigned long addr = start;
+ pgd_t *src_pgdp = pgd_offset_k(start);
+
+ dst_pgdp = pgd_offset_pgd(dst_pgdp, start);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(READ_ONCE(*src_pgdp)))
+ continue;
+ if (copy_p4d(info, dst_pgdp, src_pgdp, addr, next))
+ return -ENOMEM;
+ } while (dst_pgdp++, src_pgdp++, addr = next, addr != end);
+
+ return 0;
+}
+
+/*
+ * Create a trans_pgd and copy the linear map.
+ * info: contains the allocator and its argument
+ * dst_pgdp: the new page table that is created, and into which the map is copied.
+ * start: Start of the interval (inclusive).
+ * end: End of the interval (exclusive).
+ *
+ * Returns 0 on success, and -ENOMEM on failure.
+ */
+int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **dst_pgdp,
+ unsigned long start, unsigned long end)
+{
+ int rc;
+ pgd_t *trans_pgd = trans_alloc(info);
+
+ if (!trans_pgd) {
+ pr_err("Failed to allocate memory for temporary page tables.\n");
+ return -ENOMEM;
+ }
+
+ rc = copy_page_tables(info, trans_pgd, start, end);
+ if (!rc)
+ *dst_pgdp = trans_pgd;
+
+ return rc;
+}
+
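As a usage sketch, hibernate builds its temporary tables in roughly this shape (get_safe_page() is hibernate's real allocator; the wrapper name is illustrative):

	static void *hibernate_page_alloc(void *arg)
	{
		return (void *)get_safe_page((__force gfp_t)(unsigned long)arg);
	}

	static struct trans_pgd_info trans_info = {
		.trans_alloc_page	= hibernate_page_alloc,
		.trans_alloc_arg	= (__force void *)GFP_ATOMIC,
	};

	pgd_t *tmp_pg_dir;
	int rc = trans_pgd_create_copy(&trans_info, &tmp_pg_dir,
				       PAGE_OFFSET, PAGE_END);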
+/*
+ * The page we want to idmap may be outside the range covered by VA_BITS that
+ * can be built using the kernel's p?d_populate() helpers. As a one off, for a
+ * single page, we build these page tables bottom up and just assume that will
+ * need the maximum T0SZ.
+ *
+ * Returns 0 on success, and -ENOMEM on failure.
+ * On success, trans_ttbr0 points to the page tables containing the idmapped
+ * page, and t0sz is set to the maximum T0SZ for this page.
+ */
+int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
+ unsigned long *t0sz, void *page)
+{
+ phys_addr_t dst_addr = virt_to_phys(page);
+ unsigned long pfn = __phys_to_pfn(dst_addr);
+ int max_msb = (dst_addr & GENMASK(52, 48)) ? 51 : 47;
+ int bits_mapped = PAGE_SHIFT - 4;
+ unsigned long level_mask, prev_level_entry, *levels[4];
+ int this_level, index, level_lsb, level_msb;
+
+ dst_addr &= PAGE_MASK;
+ prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_ROX));
+
+ for (this_level = 3; this_level >= 0; this_level--) {
+ levels[this_level] = trans_alloc(info);
+ if (!levels[this_level])
+ return -ENOMEM;
+
+ level_lsb = ARM64_HW_PGTABLE_LEVEL_SHIFT(this_level);
+ level_msb = min(level_lsb + bits_mapped, max_msb);
+ level_mask = GENMASK_ULL(level_msb, level_lsb);
+
+ index = (dst_addr & level_mask) >> level_lsb;
+ *(levels[this_level] + index) = prev_level_entry;
+
+ pfn = virt_to_pfn(levels[this_level]);
+ prev_level_entry = pte_val(pfn_pte(pfn,
+ __pgprot(PMD_TYPE_TABLE)));
+
+ if (level_msb == max_msb)
+ break;
+ }
+
+ *trans_ttbr0 = phys_to_ttbr(__pfn_to_phys(pfn));
+ *t0sz = TCR_T0SZ(max_msb + 1);
+
+ return 0;
+}
+
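A worked example, assuming 4K pages (PAGE_SHIFT == 12) and a physical address below 48 bits: bits_mapped is 8 and level_msb is inclusive, so each level resolves 9 address bits. The loop installs one entry per level, covering bits 20:12, 29:21, 38:30, then 47:39; at level 0, level_msb reaches max_msb == 47, the loop breaks, and *t0sz becomes TCR_T0SZ(48).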
+/*
+ * Create a copy of the vector table so we can call HVC_SET_VECTORS or
+ * HVC_SOFT_RESTART from contexts where the table may be overwritten.
+ */
+int trans_pgd_copy_el2_vectors(struct trans_pgd_info *info,
+ phys_addr_t *el2_vectors)
+{
+ void *hyp_stub = trans_alloc(info);
+
+ if (!hyp_stub)
+ return -ENOMEM;
+ *el2_vectors = virt_to_phys(hyp_stub);
+ memcpy(hyp_stub, &trans_pgd_stub_vectors, ARM64_VECTOR_TABLE_LEN);
+ caches_clean_inval_pou((unsigned long)hyp_stub,
+ (unsigned long)hyp_stub +
+ ARM64_VECTOR_TABLE_LEN);
+ dcache_clean_inval_poc((unsigned long)hyp_stub,
+ (unsigned long)hyp_stub +
+ ARM64_VECTOR_TABLE_LEN);
+
+ return 0;
+}
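The two maintenance calls serve different consumers: caches_clean_inval_pou() keeps the instruction side coherent while the MMU is still on, and the extra clean to the point of coherency covers the case where the stub is later fetched with the MMU and caches off, as happens across kexec.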