Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/cache.S        |  41
-rw-r--r--  arch/arm64/mm/copypage.c     |   9
-rw-r--r--  arch/arm64/mm/dma-mapping.c  |  19
-rw-r--r--  arch/arm64/mm/extable.c      |  10
-rw-r--r--  arch/arm64/mm/fault.c        |   1
-rw-r--r--  arch/arm64/mm/hugetlbpage.c  |  10
-rw-r--r--  arch/arm64/mm/init.c         |   4
-rw-r--r--  arch/arm64/mm/ioremap.c      |  90
-rw-r--r--  arch/arm64/mm/mmu.c          |  23
-rw-r--r--  arch/arm64/mm/mteswap.c      |   9
-rw-r--r--  arch/arm64/mm/proc.S         | 173
11 files changed, 124 insertions(+), 265 deletions(-)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 21c907987080..081058d4e436 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -194,44 +194,3 @@ SYM_FUNC_START(__pi_dcache_clean_pop)
ret
SYM_FUNC_END(__pi_dcache_clean_pop)
SYM_FUNC_ALIAS(dcache_clean_pop, __pi_dcache_clean_pop)
-
-/*
- * __dma_flush_area(start, size)
- *
- * clean & invalidate D / U line
- *
- * - start - virtual start address of region
- * - size - size in question
- */
-SYM_FUNC_START(__pi___dma_flush_area)
- add x1, x0, x1
- dcache_by_line_op civac, sy, x0, x1, x2, x3
- ret
-SYM_FUNC_END(__pi___dma_flush_area)
-SYM_FUNC_ALIAS(__dma_flush_area, __pi___dma_flush_area)
-
-/*
- * __dma_map_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-SYM_FUNC_START(__pi___dma_map_area)
- add x1, x0, x1
- b __pi_dcache_clean_poc
-SYM_FUNC_END(__pi___dma_map_area)
-SYM_FUNC_ALIAS(__dma_map_area, __pi___dma_map_area)
-
-/*
- * __dma_unmap_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-SYM_FUNC_START(__pi___dma_unmap_area)
- add x1, x0, x1
- cmp w2, #DMA_TO_DEVICE
- b.ne __pi_dcache_inval_poc
- ret
-SYM_FUNC_END(__pi___dma_unmap_area)
-SYM_FUNC_ALIAS(__dma_unmap_area, __pi___dma_unmap_area)
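The three helpers removed above were thin wrappers around the generic cache-maintenance routines. As a point of reference, a rough C rendering of the deleted __dma_unmap_area() logic (illustrative only; the real replacement is the open-coded arch_sync_dma_for_cpu() in the dma-mapping.c hunk below):

	#include <linux/dma-direction.h>
	#include <asm/cacheflush.h>

	/* Sketch of what the deleted __dma_unmap_area() assembly did, in C. */
	static inline void dma_unmap_area_equiv(void *start, size_t size,
						enum dma_data_direction dir)
	{
		unsigned long va = (unsigned long)start;

		if (dir == DMA_TO_DEVICE)
			return;				/* nothing to invalidate */

		dcache_inval_poc(va, va + size);	/* invalidate to PoC */
	}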
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 0dea80bf6de4..24913271e898 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -23,15 +23,6 @@ void copy_highpage(struct page *to, struct page *from)
if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
set_bit(PG_mte_tagged, &to->flags);
- page_kasan_tag_reset(to);
- /*
- * We need smp_wmb() in between setting the flags and clearing the
- * tags because if another thread reads page->flags and builds a
- * tagged address out of it, there is an actual dependency to the
- * memory access, but on the current thread we do not guarantee that
- * the new page->flags are visible before the tags were updated.
- */
- smp_wmb();
mte_copy_page_tags(kto, kfrom);
}
}
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 6099c81b9322..599cf81f5685 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -14,20 +14,29 @@
#include <asm/xen/xen-ops.h>
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
- __dma_map_area(phys_to_virt(paddr), size, dir);
+ unsigned long start = (unsigned long)phys_to_virt(paddr);
+
+ dcache_clean_poc(start, start + size);
}
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
- __dma_unmap_area(phys_to_virt(paddr), size, dir);
+ unsigned long start = (unsigned long)phys_to_virt(paddr);
+
+ if (dir == DMA_TO_DEVICE)
+ return;
+
+ dcache_inval_poc(start, start + size);
}
void arch_dma_prep_coherent(struct page *page, size_t size)
{
- __dma_flush_area(page_address(page), size);
+ unsigned long start = (unsigned long)page_address(page);
+
+ dcache_clean_inval_poc(start, start + size);
}
#ifdef CONFIG_IOMMU_DMA
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index 489455309695..228d681a8715 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -16,13 +16,6 @@ get_ex_fixup(const struct exception_table_entry *ex)
return ((unsigned long)&ex->fixup + ex->fixup);
}
-static bool ex_handler_fixup(const struct exception_table_entry *ex,
- struct pt_regs *regs)
-{
- regs->pc = get_ex_fixup(ex);
- return true;
-}
-
static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
@@ -72,11 +65,10 @@ bool fixup_exception(struct pt_regs *regs)
return false;
switch (ex->type) {
- case EX_TYPE_FIXUP:
- return ex_handler_fixup(ex, regs);
case EX_TYPE_BPF:
return ex_handler_bpf(ex, regs);
case EX_TYPE_UACCESS_ERR_ZERO:
+ case EX_TYPE_KACCESS_ERR_ZERO:
return ex_handler_uaccess_err_zero(ex, regs);
case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
return ex_handler_load_unaligned_zeropad(ex, regs);
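Note that the new EX_TYPE_KACCESS_ERR_ZERO case above is routed to the existing uaccess handler, since both encodings carry an error register and a zero register in ex->data. A hedged sketch of the shape such a handler takes, assuming the EX_DATA_REG_ERR/EX_DATA_REG_ZERO field encoding from <asm/asm-extable.h> (not shown in this diff):

	static bool handle_err_zero_fixup(const struct exception_table_entry *ex,
					  struct pt_regs *regs)
	{
		int reg_err  = FIELD_GET(EX_DATA_REG_ERR, ex->data);
		int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);

		pt_regs_write_reg(regs, reg_err, -EFAULT);	/* report -EFAULT */
		pt_regs_write_reg(regs, reg_zero, 0);		/* clear the data register */

		regs->pc = get_ex_fixup(ex);			/* resume at the fixup */
		return true;
	}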
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c5e11768e5c1..cdf3ffa0c223 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -927,6 +927,5 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
void tag_clear_highpage(struct page *page)
{
mte_zero_clear_page_tags(page_address(page));
- page_kasan_tag_reset(page);
set_bit(PG_mte_tagged, &page->flags);
}
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index e2a5ec9fdc0d..8eab05367549 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -100,16 +100,6 @@ int pud_huge(pud_t pud)
#endif
}
-/*
- * Select all bits except the pfn
- */
-static inline pgprot_t pte_pgprot(pte_t pte)
-{
- unsigned long pfn = pte_pfn(pte);
-
- return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
-}
-
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, size_t *pgsize)
{
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 339ee84e5a61..b6ef26fc8ebe 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -389,7 +389,7 @@ void __init arm64_memblock_init(void)
early_init_fdt_scan_reserved_mem();
- if (!IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32))
+ if (!defer_reserve_crashkernel())
reserve_crashkernel();
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
@@ -438,7 +438,7 @@ void __init bootmem_init(void)
* request_standard_resources() depends on crashkernel's memory being
* reserved, so do it here.
*/
- if (IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32))
+ if (defer_reserve_crashkernel())
reserve_crashkernel();
memblock_dump_all();
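Both init.c hunks replace the open-coded zone test with defer_reserve_crashkernel(). A minimal sketch of that helper, inferred directly from the two call sites (the actual definition is assumed to live in an arm64 header such as <asm/memory.h>):

	/* Assumed definition; equivalent to the condition it replaces above. */
	static inline bool defer_reserve_crashkernel(void)
	{
		return IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32);
	}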
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index b21f91cd830d..c5af103d4ad4 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -1,96 +1,22 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Based on arch/arm/mm/ioremap.c
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- * Hacked for ARM by Phil Blundell <philb@gnu.org>
- * Hacked to allow all architectures to build, and various cleanups
- * by Russell King
- * Copyright (C) 2012 ARM Ltd.
- */
-#include <linux/export.h>
#include <linux/mm.h>
-#include <linux/vmalloc.h>
#include <linux/io.h>
-#include <asm/fixmap.h>
-#include <asm/tlbflush.h>
-
-static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
- pgprot_t prot, void *caller)
+bool ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot)
{
- unsigned long last_addr;
- unsigned long offset = phys_addr & ~PAGE_MASK;
- int err;
- unsigned long addr;
- struct vm_struct *area;
+ unsigned long last_addr = phys_addr + size - 1;
- /*
- * Page align the mapping address and size, taking account of any
- * offset.
- */
- phys_addr &= PAGE_MASK;
- size = PAGE_ALIGN(size + offset);
+ /* Don't allow outside PHYS_MASK */
+ if (last_addr & ~PHYS_MASK)
+ return false;
- /*
- * Don't allow wraparound, zero size or outside PHYS_MASK.
- */
- last_addr = phys_addr + size - 1;
- if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
- return NULL;
-
- /*
- * Don't allow RAM to be mapped.
- */
+ /* Don't allow RAM to be mapped. */
if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr))))
- return NULL;
-
- area = get_vm_area_caller(size, VM_IOREMAP, caller);
- if (!area)
- return NULL;
- addr = (unsigned long)area->addr;
- area->phys_addr = phys_addr;
-
- err = ioremap_page_range(addr, addr + size, phys_addr, prot);
- if (err) {
- vunmap((void *)addr);
- return NULL;
- }
-
- return (void __iomem *)(offset + addr);
-}
-
-void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
-{
- return __ioremap_caller(phys_addr, size, prot,
- __builtin_return_address(0));
-}
-EXPORT_SYMBOL(__ioremap);
-
-void iounmap(volatile void __iomem *io_addr)
-{
- unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
-
- /*
- * We could get an address outside vmalloc range in case
- * of ioremap_cache() reusing a RAM mapping.
- */
- if (is_vmalloc_addr((void *)addr))
- vunmap((void *)addr);
-}
-EXPORT_SYMBOL(iounmap);
-
-void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
-{
- /* For normal memory we already have a cacheable mapping. */
- if (pfn_is_map_memory(__phys_to_pfn(phys_addr)))
- return (void __iomem *)__phys_to_virt(phys_addr);
+ return false;
- return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
- __builtin_return_address(0));
+ return true;
}
-EXPORT_SYMBOL(ioremap_cache);
/*
* Must be called after early_fixmap_init
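With __ioremap(), ioremap_cache() and iounmap() deleted from this file, arm64 falls back to the generic ioremap implementation and only supplies the ioremap_allowed() veto hook above. A simplified sketch of how the generic path is expected to consult that hook (not the verbatim mm/ioremap.c code):

	void __iomem *generic_ioremap_sketch(phys_addr_t phys_addr, size_t size,
					     unsigned long prot)
	{
		unsigned long offset = phys_addr & ~PAGE_MASK;
		phys_addr_t last_addr = phys_addr + size - 1;
		struct vm_struct *area;
		unsigned long vaddr;

		/* Reject wraparound and zero size before asking the architecture. */
		if (!size || last_addr < phys_addr)
			return NULL;

		phys_addr &= PAGE_MASK;
		size = PAGE_ALIGN(size + offset);

		/* Architecture veto: arm64 rejects out-of-range and RAM addresses. */
		if (!ioremap_allowed(phys_addr, size, prot))
			return NULL;

		area = get_vm_area_caller(size, VM_IOREMAP, __builtin_return_address(0));
		if (!area)
			return NULL;
		vaddr = (unsigned long)area->addr;
		area->phys_addr = phys_addr;

		if (ioremap_page_range(vaddr, vaddr + size, phys_addr, __pgprot(prot))) {
			free_vm_area(area);
			return NULL;
		}

		return (void __iomem *)(vaddr + offset);
	}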
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 05d77f2342a9..db7c4e6ae57b 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -400,6 +400,13 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
} while (pgdp++, addr = next, addr != end);
}
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+extern __alias(__create_pgd_mapping)
+void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot,
+ phys_addr_t (*pgtable_alloc)(int), int flags);
+#endif
+
static phys_addr_t __pgd_pgtable_alloc(int shift)
{
void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
@@ -541,8 +548,7 @@ static void __init map_mem(pgd_t *pgdp)
#ifdef CONFIG_KEXEC_CORE
if (crash_mem_map) {
- if (IS_ENABLED(CONFIG_ZONE_DMA) ||
- IS_ENABLED(CONFIG_ZONE_DMA32))
+ if (defer_reserve_crashkernel())
flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
else if (crashk_res.end)
memblock_mark_nomap(crashk_res.start,
@@ -583,8 +589,7 @@ static void __init map_mem(pgd_t *pgdp)
* through /sys/kernel/kexec_crash_size interface.
*/
#ifdef CONFIG_KEXEC_CORE
- if (crash_mem_map &&
- !IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32)) {
+ if (crash_mem_map && !defer_reserve_crashkernel()) {
if (crashk_res.end) {
__map_memblock(pgdp, crashk_res.start,
crashk_res.end + 1,
@@ -677,13 +682,9 @@ static int __init map_entry_trampoline(void)
__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
pa_start + i * PAGE_SIZE, prot);
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- extern char __entry_tramp_data_start[];
-
- __set_fixmap(FIX_ENTRY_TRAMP_DATA,
- __pa_symbol(__entry_tramp_data_start),
- PAGE_KERNEL_RO);
- }
+ if (IS_ENABLED(CONFIG_RELOCATABLE))
+ __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
+ pa_start + i * PAGE_SIZE, PAGE_KERNEL_RO);
return 0;
}
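The create_kpti_ng_temp_pgd() declaration added above relies on __alias() to expose the static __create_pgd_mapping() under a second linkage name, so the KPTI rework can call it without making the helper global. A minimal, generic illustration of the pattern, with hypothetical names:

	#include <linux/compiler_attributes.h>	/* __alias() */

	static int do_internal_work(int x)
	{
		return x + 1;
	}

	/* Second symbol resolving to the same code, no wrapper call needed. */
	extern int exported_work_alias(int x) __alias(do_internal_work);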
diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
index a9e50e930484..4334dec93bd4 100644
--- a/arch/arm64/mm/mteswap.c
+++ b/arch/arm64/mm/mteswap.c
@@ -53,15 +53,6 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page)
if (!tags)
return false;
- page_kasan_tag_reset(page);
- /*
- * We need smp_wmb() in between setting the flags and clearing the
- * tags because if another thread reads page->flags and builds a
- * tagged address out of it, there is an actual dependency to the
- * memory access, but on the current thread we do not guarantee that
- * the new page->flags are visible before the tags were updated.
- */
- smp_wmb();
mte_restore_page_tags(page_address(page), tags);
return true;
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 9eb490effb7f..7837a69524c5 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -14,6 +14,7 @@
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
+#include <asm/kernel-pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
@@ -200,25 +201,51 @@ SYM_FUNC_END(idmap_cpu_replace_ttbr1)
.popsection
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+
+#define KPTI_NG_PTE_FLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
+
.pushsection ".idmap.text", "awx"
- .macro __idmap_kpti_get_pgtable_ent, type
- dc cvac, cur_\()\type\()p // Ensure any existing dirty
- dmb sy // lines are written back before
- ldr \type, [cur_\()\type\()p] // loading the entry
- tbz \type, #0, skip_\()\type // Skip invalid and
- tbnz \type, #11, skip_\()\type // non-global entries
+ .macro kpti_mk_tbl_ng, type, num_entries
+ add end_\type\()p, cur_\type\()p, #\num_entries * 8
+.Ldo_\type:
+ ldr \type, [cur_\type\()p] // Load the entry
+ tbz \type, #0, .Lnext_\type // Skip invalid and
+ tbnz \type, #11, .Lnext_\type // non-global entries
+ orr \type, \type, #PTE_NG // Same bit for blocks and pages
+ str \type, [cur_\type\()p] // Update the entry
+ .ifnc \type, pte
+ tbnz \type, #1, .Lderef_\type
+ .endif
+.Lnext_\type:
+ add cur_\type\()p, cur_\type\()p, #8
+ cmp cur_\type\()p, end_\type\()p
+ b.ne .Ldo_\type
.endm
- .macro __idmap_kpti_put_pgtable_ent_ng, type
- orr \type, \type, #PTE_NG // Same bit for blocks and pages
- str \type, [cur_\()\type\()p] // Update the entry and ensure
- dmb sy // that it is visible to all
- dc civac, cur_\()\type\()p // CPUs.
+ /*
+ * Dereference the current table entry and map it into the temporary
+ * fixmap slot associated with the current level.
+ */
+ .macro kpti_map_pgtbl, type, level
+ str xzr, [temp_pte, #8 * (\level + 1)] // break before make
+ dsb nshst
+ add pte, temp_pte, #PAGE_SIZE * (\level + 1)
+ lsr pte, pte, #12
+ tlbi vaae1, pte
+ dsb nsh
+ isb
+
+ phys_to_pte pte, cur_\type\()p
+ add cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 1)
+ orr pte, pte, pte_flags
+ str pte, [temp_pte, #8 * (\level + 1)]
+ dsb nshst
.endm
/*
- * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper)
+ * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
+ * unsigned long temp_pte_va)
*
* Called exactly once from stop_machine context by each CPU found during boot.
*/
@@ -228,8 +255,10 @@ SYM_DATA(__idmap_kpti_flag, .long 1)
SYM_FUNC_START(idmap_kpti_install_ng_mappings)
cpu .req w0
+ temp_pte .req x0
num_cpus .req w1
- swapper_pa .req x2
+ pte_flags .req x1
+ temp_pgd_phys .req x2
swapper_ttb .req x3
flag_ptr .req x4
cur_pgdp .req x5
@@ -237,16 +266,15 @@ SYM_FUNC_START(idmap_kpti_install_ng_mappings)
pgd .req x7
cur_pudp .req x8
end_pudp .req x9
- pud .req x10
cur_pmdp .req x11
end_pmdp .req x12
- pmd .req x13
cur_ptep .req x14
end_ptep .req x15
pte .req x16
+ valid .req x17
+ mov x5, x3 // preserve temp_pte arg
mrs swapper_ttb, ttbr1_el1
- restore_ttbr1 swapper_ttb
adr_l flag_ptr, __idmap_kpti_flag
cbnz cpu, __idmap_kpti_secondary
@@ -258,98 +286,71 @@ SYM_FUNC_START(idmap_kpti_install_ng_mappings)
eor w17, w17, num_cpus
cbnz w17, 1b
- /* We need to walk swapper, so turn off the MMU. */
- pre_disable_mmu_workaround
- mrs x17, sctlr_el1
- bic x17, x17, #SCTLR_ELx_M
- msr sctlr_el1, x17
+ /* Switch to the temporary page tables on this CPU only */
+ __idmap_cpu_set_reserved_ttbr1 x8, x9
+ offset_ttbr1 temp_pgd_phys, x8
+ msr ttbr1_el1, temp_pgd_phys
isb
+ mov temp_pte, x5
+ mov pte_flags, #KPTI_NG_PTE_FLAGS
+
/* Everybody is enjoying the idmap, so we can rewrite swapper. */
/* PGD */
- mov cur_pgdp, swapper_pa
- add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
-do_pgd: __idmap_kpti_get_pgtable_ent pgd
- tbnz pgd, #1, walk_puds
-next_pgd:
- __idmap_kpti_put_pgtable_ent_ng pgd
-skip_pgd:
- add cur_pgdp, cur_pgdp, #8
- cmp cur_pgdp, end_pgdp
- b.ne do_pgd
-
- /* Publish the updated tables and nuke all the TLBs */
- dsb sy
- tlbi vmalle1is
- dsb ish
- isb
+ adrp cur_pgdp, swapper_pg_dir
+ kpti_map_pgtbl pgd, 0
+ kpti_mk_tbl_ng pgd, PTRS_PER_PGD
- /* We're done: fire up the MMU again */
- mrs x17, sctlr_el1
- orr x17, x17, #SCTLR_ELx_M
- set_sctlr_el1 x17
+ /* Ensure all the updated entries are visible to secondary CPUs */
+ dsb ishst
+
+ /* We're done: fire up swapper_pg_dir again */
+ __idmap_cpu_set_reserved_ttbr1 x8, x9
+ msr ttbr1_el1, swapper_ttb
+ isb
/* Set the flag to zero to indicate that we're all done */
str wzr, [flag_ptr]
ret
+.Lderef_pgd:
/* PUD */
-walk_puds:
- .if CONFIG_PGTABLE_LEVELS > 3
+ .if CONFIG_PGTABLE_LEVELS > 3
+ pud .req x10
pte_to_phys cur_pudp, pgd
- add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
-do_pud: __idmap_kpti_get_pgtable_ent pud
- tbnz pud, #1, walk_pmds
-next_pud:
- __idmap_kpti_put_pgtable_ent_ng pud
-skip_pud:
- add cur_pudp, cur_pudp, 8
- cmp cur_pudp, end_pudp
- b.ne do_pud
- b next_pgd
- .else /* CONFIG_PGTABLE_LEVELS <= 3 */
- mov pud, pgd
- b walk_pmds
-next_pud:
- b next_pgd
+ kpti_map_pgtbl pud, 1
+ kpti_mk_tbl_ng pud, PTRS_PER_PUD
+ b .Lnext_pgd
+ .else /* CONFIG_PGTABLE_LEVELS <= 3 */
+ pud .req pgd
+ .set .Lnext_pud, .Lnext_pgd
.endif
+.Lderef_pud:
/* PMD */
-walk_pmds:
- .if CONFIG_PGTABLE_LEVELS > 2
+ .if CONFIG_PGTABLE_LEVELS > 2
+ pmd .req x13
pte_to_phys cur_pmdp, pud
- add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
-do_pmd: __idmap_kpti_get_pgtable_ent pmd
- tbnz pmd, #1, walk_ptes
-next_pmd:
- __idmap_kpti_put_pgtable_ent_ng pmd
-skip_pmd:
- add cur_pmdp, cur_pmdp, #8
- cmp cur_pmdp, end_pmdp
- b.ne do_pmd
- b next_pud
- .else /* CONFIG_PGTABLE_LEVELS <= 2 */
- mov pmd, pud
- b walk_ptes
-next_pmd:
- b next_pud
+ kpti_map_pgtbl pmd, 2
+ kpti_mk_tbl_ng pmd, PTRS_PER_PMD
+ b .Lnext_pud
+ .else /* CONFIG_PGTABLE_LEVELS <= 2 */
+ pmd .req pgd
+ .set .Lnext_pmd, .Lnext_pgd
.endif
+.Lderef_pmd:
/* PTE */
-walk_ptes:
pte_to_phys cur_ptep, pmd
- add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
-do_pte: __idmap_kpti_get_pgtable_ent pte
- __idmap_kpti_put_pgtable_ent_ng pte
-skip_pte:
- add cur_ptep, cur_ptep, #8
- cmp cur_ptep, end_ptep
- b.ne do_pte
- b next_pmd
+ kpti_map_pgtbl pte, 3
+ kpti_mk_tbl_ng pte, PTRS_PER_PTE
+ b .Lnext_pmd
.unreq cpu
+ .unreq temp_pte
.unreq num_cpus
- .unreq swapper_pa
+ .unreq pte_flags
+ .unreq temp_pgd_phys
.unreq cur_pgdp
.unreq end_pgdp
.unreq pgd
@@ -362,6 +363,7 @@ skip_pte:
.unreq cur_ptep
.unreq end_ptep
.unreq pte
+ .unreq valid
/* Secondary CPUs end up here */
__idmap_kpti_secondary:
@@ -381,7 +383,6 @@ __idmap_kpti_secondary:
cbnz w16, 1b
/* All done, act like nothing happened */
- offset_ttbr1 swapper_ttb, x16
msr ttbr1_el1, swapper_ttb
isb
ret
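For context, a purely illustrative sketch (not the actual cpufeature.c caller) of how the boot CPU would invoke the rewritten routine through its idmap alias, following the new four-argument prototype documented above; allocation of the temporary pgd and PTE window is omitted:

	typedef void (kpti_remap_fn)(int cpu, int num_secondaries,
				     phys_addr_t temp_pgd_pa, unsigned long temp_pte_va);
	extern kpti_remap_fn idmap_kpti_install_ng_mappings;

	/* Hypothetical helper; argument setup is assumed to happen elsewhere. */
	static void call_kpti_remap(int cpu, int num_secondaries,
				    phys_addr_t temp_pgd_pa, unsigned long temp_pte_va)
	{
		kpti_remap_fn *remap_fn;

		/* The routine lives in .idmap.text, so call its physical alias. */
		remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);

		cpu_install_idmap();
		remap_fn(cpu, num_secondaries, temp_pgd_pa, temp_pte_va);
		cpu_uninstall_idmap();
	}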