From 14c127c957c1c6070647c171e72f06e0db275ebf Mon Sep 17 00:00:00 2001
From: Steve Capper
Date: Wed, 7 Aug 2019 16:55:14 +0100
Subject: arm64: mm: Flip kernel VA space

In order to allow for a KASAN shadow that changes size at boot time, one
must fix the KASAN_SHADOW_END for both 48 & 52-bit VAs and "grow" the
start address. Also, it is highly desirable to maintain the same function
addresses in the kernel .text between VA sizes. Both of these requirements
necessitate flipping the kernel address space halves such that the direct
linear map occupies the lower addresses.

This patch puts the direct linear map in the lower addresses of the kernel
VA range and everything else in the higher ranges.

We need to adjust:
 *) KASAN shadow region placement logic,
 *) KASAN_SHADOW_OFFSET computation logic,
 *) virt_to_phys, phys_to_virt checks,
 *) page table dumper.

These are all small changes that need to take place atomically, so they
are bundled into this commit.

As part of the re-arrangement, a 2MB guard region (to preserve alignment
for the fixed map) is added after the vmemmap; otherwise the vmemmap could
intersect with IS_ERR pointers.

Reviewed-by: Catalin Marinas
Signed-off-by: Steve Capper
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/pgtable.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64/include/asm/pgtable.h')

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 5fdcfe237338..046b811309bb 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -21,7 +21,7 @@
  *	and fixed mappings
  */
 #define VMALLOC_START		(MODULES_END)
-#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
+#define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
 
--
cgit
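To make the flipped arrangement easier to picture, here is a small standalone sketch (not taken from the kernel) that prints where the linear map and the new VMALLOC_END land for a 48-bit VA configuration on an LP64 host. The PUD_SIZE, VMEMMAP_SIZE and 64-byte struct page values are simplifying assumptions chosen for illustration; only the VMALLOC_END expression mirrors the hunk above.

/*
 * Standalone illustration of the flipped 48-bit kernel VA layout.
 * The constants below are simplified assumptions, not copies of the
 * arm64 headers; VMALLOC_END matches the new definition in the patch.
 * Assumes an LP64 host (64-bit unsigned long).
 */
#include <stdio.h>

#define VA_BITS		48
#define PAGE_SHIFT	12
#define PUD_SIZE	(1UL << 30)		/* 1GiB with 4K pages (assumed) */
#define SZ_64K		0x10000UL
#define STRUCT_PAGE_SZ	64UL			/* assumed sizeof(struct page) */

/* The linear map now starts at the bottom of the kernel (TTBR1) range... */
#define PAGE_OFFSET	(-(1UL << VA_BITS))
/* ...and ends half-way up (called VA_START by this patch, PAGE_END later). */
#define PAGE_END	(-(1UL << (VA_BITS - 1)))
/* One struct page per linear-map page: a rough stand-in for VMEMMAP_SIZE. */
#define VMEMMAP_SIZE	((1UL << (VA_BITS - 1 - PAGE_SHIFT)) * STRUCT_PAGE_SZ)
/* Same shape as the hunk above: counted back from the top of the VA space. */
#define VMALLOC_END	(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

int main(void)
{
	printf("linear map : 0x%016lx - 0x%016lx\n", PAGE_OFFSET, PAGE_END);
	printf("VMALLOC_END: 0x%016lx\n", VMALLOC_END);
	return 0;
}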
From c8b6d2ccf9b10ce872cdea037f9685804440bb7e Mon Sep 17 00:00:00 2001
From: Steve Capper
Date: Wed, 7 Aug 2019 16:55:20 +0100
Subject: arm64: mm: Separate out vmemmap

vmemmap is a preprocessor definition that depends on a variable,
memstart_addr. In a later patch we will need to expand the size of the
VMEMMAP region and optionally modify vmemmap depending upon whether or
not hardware support is available for 52-bit virtual addresses.

This patch changes vmemmap to be a variable. As the old definition
depended on a variable load, this should not affect performance
noticeably.

Signed-off-by: Steve Capper
Reviewed-by: Catalin Marinas
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/pgtable.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm64/include/asm/pgtable.h')

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 046b811309bb..4a695b9ee0f0 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -23,8 +23,6 @@
 #define VMALLOC_START		(MODULES_END)
 #define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
-#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
-
 #define FIRST_USER_ADDRESS	0UL
 
 #ifndef __ASSEMBLY__
@@ -35,6 +33,8 @@
 #include
 #include
 
+extern struct page *vmemmap;
+
 extern void __pte_error(const char *file, int line, unsigned long val);
 extern void __pmd_error(const char *file, int line, unsigned long val);
 extern void __pud_error(const char *file, int line, unsigned long val);
--
cgit
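The deleted macro encodes the usual sparsemem-vmemmap identity that pfn_to_page(pfn) is vmemmap + pfn, with vmemmap biased backwards by the first PFN of RAM. The toy userspace program below only demonstrates that arithmetic; the struct page stand-in, the array and the memstart_addr value are invented, and the real one-time assignment of the new variable lives in the arm64 mm init code, outside this diff.

/*
 * Toy model of the vmemmap arithmetic: a stand-in struct page array plays
 * the VMEMMAP region and memstart_addr is an invented DRAM base. Only the
 * vmemmap expression itself mirrors the macro removed by this patch.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12

struct page { unsigned long flags; };			/* stand-in for struct page */

static struct page region[256];				/* pretend VMEMMAP region */
#define VMEMMAP_START	((unsigned long)region)

static unsigned long memstart_addr = 0x80000000UL;	/* invented DRAM base */
static struct page *vmemmap;				/* the new variable */

int main(void)
{
	/*
	 * The one-time assignment the old #define used to perform on every
	 * use. The biased pointer is out of range until a pfn is added back,
	 * exactly as in the kernel.
	 */
	vmemmap = (struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT);

	/* pfn_to_page(pfn) is vmemmap + pfn: the first RAM page maps to region[0]. */
	unsigned long first_pfn = memstart_addr >> PAGE_SHIFT;
	assert(vmemmap + first_pfn == &region[0]);
	assert(vmemmap + first_pfn + 5 == &region[5]);

	printf("pfn 0x%lx -> struct page %p\n", first_pfn, (void *)&vmemmap[first_pfn]);
	return 0;
}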
From 77ad4ce69321abbe26ec92b2a2691a66531eb688 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Wed, 14 Aug 2019 14:28:48 +0100
Subject: arm64: memory: rename VA_START to PAGE_END

Prior to commit:

  14c127c957c1c607 ("arm64: mm: Flip kernel VA space")

... VA_START described the start of the TTBR1 address space for a given
VA size described by VA_BITS, where all kernel mappings began.

Since that commit, VA_START described a portion midway through the
address space, where the linear map ends and other kernel mappings begin.

To avoid confusion, let's rename VA_START to PAGE_END, making it clear
that it's not the start of the TTBR1 address space and implying that it's
related to PAGE_OFFSET. Comments and other mnemonics are updated
accordingly, along with a typo fix in the description of VMEMMAP_SIZE.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Cc: Catalin Marinas
Tested-by: Steve Capper
Reviewed-by: Steve Capper
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/pgtable.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm64/include/asm/pgtable.h')

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 4a695b9ee0f0..979e24fadf35 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -856,8 +856,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 
 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
-#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
-#define kc_offset_to_vaddr(o)	((o) | VA_START)
+#define kc_vaddr_to_offset(v)	((v) & ~PAGE_END)
+#define kc_offset_to_vaddr(o)	((o) | PAGE_END)
 
 #ifdef CONFIG_ARM64_PA_BITS_52
 #define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
--
cgit

From d0b7a302d58abe24ed0f32a0672dd4c356bb73db Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 22 Aug 2019 14:58:37 +0100
Subject: Revert "arm64: Remove unnecessary ISBs from set_{pte,pmd,pud}"

This reverts commit 24fe1b0efad4fcdd32ce46cffeab297f22581707.

Commit 24fe1b0efad4fcdd ("arm64: Remove unnecessary ISBs from
set_{pte,pmd,pud}") removed ISB instructions immediately following updates
to the page table, on the grounds that they are not required by the
architecture and a DSB alone is sufficient to ensure that subsequent data
accesses use the new translation:

  DDI0487E_a, B2-128:

  | ... no instruction that appears in program order after the DSB
  | instruction can alter any state of the system or perform any part of
  | its functionality until the DSB completes other than:
  |
  | * Being fetched from memory and decoded
  | * Reading the general-purpose, SIMD and floating-point,
  |   Special-purpose, or System registers that are directly or indirectly
  |   read without causing side-effects.

However, the same document also states the following:

  DDI0487E_a, B2-125:

  | DMB and DSB instructions affect reads and writes to the memory system
  | generated by Load/Store instructions and data or unified cache
  | maintenance instructions being executed by the PE. Instruction fetches
  | or accesses caused by a hardware translation table access are not
  | explicit accesses.

which appears to claim that the DSB alone is insufficient.

Unfortunately, some CPU designers have followed the second clause above,
whereas in Linux we've been relying on the first. This means that our
mapping sequence:

	MOV	X0, <valid pte>
	STR	X0, [Xptep]	// Store new PTE to page table
	DSB	ISHST
	LDR	X1, [X2]	// Translates using the new PTE

can actually raise a translation fault on the load instruction because the
translation can be performed speculatively before the page table update
and then marked as "faulting" by the CPU. For user PTEs, this is ok
because we can handle the spurious fault, but for kernel PTEs and
intermediate table entries this results in a panic().

Revert the offending commit to reintroduce the missing barriers.

Cc:
Fixes: 24fe1b0efad4fcdd ("arm64: Remove unnecessary ISBs from set_{pte,pmd,pud}")
Reviewed-by: Mark Rutland
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/pgtable.h | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

(limited to 'arch/arm64/include/asm/pgtable.h')

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 5fdcfe237338..feda7294320c 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -220,8 +220,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
 	 * or update_mmu_cache() have the necessary barriers.
 	 */
-	if (pte_valid_not_user(pte))
+	if (pte_valid_not_user(pte)) {
 		dsb(ishst);
+		isb();
+	}
 }
 
 extern void __sync_icache_dcache(pte_t pteval);
@@ -481,8 +483,10 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 
 	WRITE_ONCE(*pmdp, pmd);
 
-	if (pmd_valid(pmd))
+	if (pmd_valid(pmd)) {
 		dsb(ishst);
+		isb();
+	}
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
@@ -540,8 +544,10 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
 
 	WRITE_ONCE(*pudp, pud);
 
-	if (pud_valid(pud))
+	if (pud_valid(pud)) {
 		dsb(ishst);
+		isb();
+	}
 }
 
 static inline void pud_clear(pud_t *pudp)
--
cgit
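To relate the reinstated barriers back to the failing sequence quoted above, here is an annotated, non-standalone sketch from a hypothetical caller's point of view; example_map_kernel_page() is an invented name, while set_pte(), dsb() and isb() are the kernel primitives touched by this revert.

/*
 * Illustration only (kernel context, not standalone): maps the quoted
 * MOV/STR/DSB/LDR sequence onto a hypothetical kernel caller.
 */
static void example_map_kernel_page(pte_t *ptep, pte_t pte, unsigned long *vaddr)
{
	/* STR X0, [Xptep]; DSB ISHST; ISB -- what set_pte() does after this revert. */
	set_pte(ptep, pte);

	/*
	 * Without the ISB, the CPU may already have speculatively translated
	 * 'vaddr' through the old (invalid) entry and flagged the access as
	 * faulting; this load (the LDR above) could then take a spurious
	 * translation fault, which is fatal for kernel addresses. The ISB
	 * discards that speculative work so the walk observes the new PTE.
	 */
	unsigned long val = *vaddr;
	(void)val;
}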
From eb6a4dcce33925ac95023bbe5199474f8db40ba7 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 23 Aug 2019 13:03:55 +0100
Subject: arm64: mm: Add ISB instruction to set_pgd()

Commit 6a4cbd63c25a ("Revert "arm64: Remove unnecessary ISBs from
set_{pte,pmd,pud}"") reintroduced ISB instructions to some of our page
table setter functions in light of a recent clarification to the Armv8
architecture. Although 'set_pgd()' isn't currently used to update a live
page table, add the ISB instruction there too for consistency with the
other macros and to provide some future-proofing if we use it on live
tables in the future.

Reported-by: Mark Rutland
Reviewed-by: Mark Rutland
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/pgtable.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm64/include/asm/pgtable.h')

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index feda7294320c..2faa77635942 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -605,6 +605,7 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
 
 	WRITE_ONCE(*pgdp, pgd);
 	dsb(ishst);
+	isb();
 }
 
 static inline void pgd_clear(pgd_t *pgdp)
--
cgit
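Taken together, the last two patches leave all of the table setters with the same shape. The snippet below is a distilled illustration of that common pattern, spelling out what each barrier is for; set_table_entry() is an invented name, and the real pte variant additionally restricts the barriers to valid kernel mappings.

/*
 * Distilled common shape of set_pte()/set_pmd()/set_pud()/set_pgd() after
 * this series -- illustrative, not a verbatim copy of any kernel function.
 */
static inline void set_table_entry(u64 *entryp, u64 entry)
{
	WRITE_ONCE(*entryp, entry);	/* single-copy atomic store of the entry */
	dsb(ishst);			/* make the store visible to the table walker
					 * before subsequent memory accesses */
	isb();				/* stop later instructions from relying on a
					 * stale, speculatively-formed translation */
}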