Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/cache.S        |   4
-rw-r--r--  arch/arm64/mm/context.c      |  67
-rw-r--r--  arch/arm64/mm/dma-mapping.c  |  54
-rw-r--r--  arch/arm64/mm/fault.c        | 131
-rw-r--r--  arch/arm64/mm/init.c         |  62
-rw-r--r--  arch/arm64/mm/mmu.c          |  47
-rw-r--r--  arch/arm64/mm/pgd.c          |   8
-rw-r--r--  arch/arm64/mm/proc.S         |  66
8 files changed, 214 insertions(+), 225 deletions(-)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 7f1dbe962cf5..91464e7f77cc 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -49,7 +49,7 @@ ENTRY(flush_icache_range)
  *	- end     - virtual end address of region
  */
 ENTRY(__flush_cache_user_range)
-	uaccess_ttbr0_enable x2, x3
+	uaccess_ttbr0_enable x2, x3, x4
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	bic	x4, x0, x3
@@ -72,7 +72,7 @@ USER(9f, ic	ivau, x4 )		// invalidate I line PoU
 	isb
 	mov	x0, #0
 1:
-	uaccess_ttbr0_disable x1
+	uaccess_ttbr0_disable x1, x2
 	ret
 9:
 	mov	x0, #-EFAULT
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 6f4017046323..301417ae2ba8 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -39,7 +39,16 @@ static cpumask_t tlb_flush_pending;
 #define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
 #define ASID_FIRST_VERSION	(1UL << asid_bits)
-#define NUM_USER_ASIDS		ASID_FIRST_VERSION
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
+#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
+#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
+#else
+#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
+#define asid2idx(asid)		((asid) & ~ASID_MASK)
+#define idx2asid(idx)		asid2idx(idx)
+#endif
 
 /* Get the ASIDBits supported by the current CPU */
 static u32 get_cpu_asid_bits(void)
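Editorial note: the even/odd pairing introduced by these macros is easiest to see as plain arithmetic. Below is a minimal stand-alone C sketch of asid2idx()/idx2asid() (an illustration, not part of the patch; 16-bit ASIDs are assumed). Allocator index n maps to ASID 2n, so every allocated ASID leaves its odd neighbour unused, which is why NUM_USER_ASIDS is halved when the kernel is unmapped at EL0.

#include <assert.h>
#include <stdio.h>

#define ASID_BITS	16				/* assumed for the example */
#define ASID_MASK	(~((1UL << ASID_BITS) - 1))
#define NUM_USER_ASIDS	(1UL << (ASID_BITS - 1))	/* ASID_FIRST_VERSION >> 1 */

/* Same definitions as the CONFIG_UNMAP_KERNEL_AT_EL0 branch above. */
#define asid2idx(asid)	(((asid) & ~ASID_MASK) >> 1)
#define idx2asid(idx)	(((idx) << 1) & ~ASID_MASK)

int main(void)
{
	unsigned long idx;

	for (idx = 1; idx < 4; idx++) {
		printf("index %lu -> ASID %lu\n", idx, idx2asid(idx));	/* 2, 4, 6 */
		assert(asid2idx(idx2asid(idx)) == idx);			/* round-trips */
	}
	printf("%lu user ASIDs available\n", NUM_USER_ASIDS);		/* 32768 */
	return 0;
}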
@@ -79,13 +88,6 @@ void verify_cpu_asid_bits(void)
 	}
 }
 
-static void set_reserved_asid_bits(void)
-{
-	if (IS_ENABLED(CONFIG_QCOM_FALKOR_ERRATUM_1003) &&
-	    cpus_have_const_cap(ARM64_WORKAROUND_QCOM_FALKOR_E1003))
-		__set_bit(FALKOR_RESERVED_ASID, asid_map);
-}
-
 static void flush_context(unsigned int cpu)
 {
 	int i;
@@ -94,8 +96,6 @@ static void flush_context(unsigned int cpu)
 	/* Update the list of reserved ASIDs and the ASID bitmap. */
 	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
 
-	set_reserved_asid_bits();
-
 	for_each_possible_cpu(i) {
 		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
 		/*
@@ -107,7 +107,7 @@ static void flush_context(unsigned int cpu)
 		 */
 		if (asid == 0)
 			asid = per_cpu(reserved_asids, i);
-		__set_bit(asid & ~ASID_MASK, asid_map);
+		__set_bit(asid2idx(asid), asid_map);
 		per_cpu(reserved_asids, i) = asid;
 	}
 
@@ -162,16 +162,16 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 		 * We had a valid ASID in a previous life, so try to re-use
 		 * it if possible.
 		 */
-		asid &= ~ASID_MASK;
-		if (!__test_and_set_bit(asid, asid_map))
+		if (!__test_and_set_bit(asid2idx(asid), asid_map))
 			return newasid;
 	}
 
 	/*
 	 * Allocate a free ASID. If we can't find one, take a note of the
-	 * currently active ASIDs and mark the TLBs as requiring flushes.
-	 * We always count from ASID #1, as we use ASID #0 when setting a
-	 * reserved TTBR0 for the init_mm.
+	 * currently active ASIDs and mark the TLBs as requiring flushes.  We
+	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
+	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
+	 * pairs.
 	 */
 	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
 	if (asid != NUM_USER_ASIDS)
@@ -188,32 +188,35 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 set_asid:
 	__set_bit(asid, asid_map);
 	cur_idx = asid;
-	return asid | generation;
+	return idx2asid(asid) | generation;
 }
 
 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 {
 	unsigned long flags;
-	u64 asid;
+	u64 asid, old_active_asid;
 
 	asid = atomic64_read(&mm->context.id);
 
 	/*
 	 * The memory ordering here is subtle.
-	 * If our ASID matches the current generation, then we update
-	 * our active_asids entry with a relaxed xchg. Racing with a
-	 * concurrent rollover means that either:
+	 * If our active_asids is non-zero and the ASID matches the current
+	 * generation, then we update the active_asids entry with a relaxed
+	 * cmpxchg. Racing with a concurrent rollover means that either:
 	 *
-	 * - We get a zero back from the xchg and end up waiting on the
+	 * - We get a zero back from the cmpxchg and end up waiting on the
 	 *   lock. Taking the lock synchronises with the rollover and so
 	 *   we are forced to see the updated generation.
 	 *
-	 * - We get a valid ASID back from the xchg, which means the
+	 * - We get a valid ASID back from the cmpxchg, which means the
 	 *   relaxed xchg in flush_context will treat us as reserved
 	 *   because atomic RmWs are totally ordered for a given location.
 	 */
-	if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
-	    && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
+	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
+	if (old_active_asid &&
+	    !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
+	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
+				     old_active_asid, asid))
 		goto switch_mm_fastpath;
 
 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
@@ -231,6 +234,9 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
 switch_mm_fastpath:
+
+	arm64_apply_bp_hardening();
+
 	/*
 	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
 	 * emulating PAN.
@@ -239,6 +245,15 @@ switch_mm_fastpath:
 		cpu_switch_mm(mm->pgd, mm);
 }
 
+/* Errata workaround post TTBRx_EL1 update. */
+asmlinkage void post_ttbr_update_workaround(void)
+{
+	asm(ALTERNATIVE("nop; nop; nop",
+			"ic iallu; dsb nsh; isb",
+			ARM64_WORKAROUND_CAVIUM_27456,
+			CONFIG_CAVIUM_ERRATUM_27456));
+}
+
 static int asids_init(void)
 {
 	asid_bits = get_cpu_asid_bits();
@@ -254,8 +269,6 @@ static int asids_init(void)
 		panic("Failed to allocate bitmap for %lu ASIDs\n",
 		      NUM_USER_ASIDS);
 
-	set_reserved_asid_bits();
-
 	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
 	return 0;
 }
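Editorial note: for readability, here is the fast-path pattern from check_and_switch_context() above as a stand-alone C11 sketch (an illustration only; C11 atomics stand in for the kernel's atomic64 API, and the generation check is omitted). The relaxed cmpxchg refuses to overwrite a zero written by a concurrent rollover, which is what forces the racing CPU onto the slow path under cpu_asid_lock.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t active_asid;		/* per-CPU in the real code */

/*
 * Returns true if the fast path may be taken: the slot was non-zero and still
 * held the value we read.  A rollover that zeroed (or changed) the slot makes
 * the cmpxchg fail, and the caller must take the slow path.
 */
static bool asid_fastpath(uint64_t asid)
{
	uint64_t old = atomic_load_explicit(&active_asid, memory_order_relaxed);

	return old != 0 &&
	       atomic_compare_exchange_strong_explicit(&active_asid, &old, asid,
						       memory_order_relaxed,
						       memory_order_relaxed);
}

int main(void)
{
	atomic_store(&active_asid, 2);		/* pretend ASID 2 is live */
	return asid_fastpath(2) ? 0 : 1;	/* fast path taken        */
}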
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index b45c5bcaeccb..a96ec0181818 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -24,7 +24,7 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/genalloc.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
 #include <linux/dma-contiguous.h>
 #include <linux/vmalloc.h>
 #include <linux/swiotlb.h>
@@ -91,46 +91,6 @@ static int __free_from_pool(void *start, size_t size)
 	return 1;
 }
 
-static void *__dma_alloc_coherent(struct device *dev, size_t size,
-				  dma_addr_t *dma_handle, gfp_t flags,
-				  unsigned long attrs)
-{
-	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
-	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
-		flags |= GFP_DMA;
-	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
-		struct page *page;
-		void *addr;
-
-		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-						 get_order(size), flags);
-		if (!page)
-			return NULL;
-
-		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-		addr = page_address(page);
-		memset(addr, 0, size);
-		return addr;
-	} else {
-		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
-	}
-}
-
-static void __dma_free_coherent(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle,
-				unsigned long attrs)
-{
-	bool freed;
-	phys_addr_t paddr = dma_to_phys(dev, dma_handle);
-
-	freed = dma_release_from_contiguous(dev,
-					phys_to_page(paddr),
-					size >> PAGE_SHIFT);
-	if (!freed)
-		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
 static void *__dma_alloc(struct device *dev, size_t size,
 			 dma_addr_t *dma_handle, gfp_t flags,
 			 unsigned long attrs)
@@ -152,7 +112,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 		return addr;
 	}
 
-	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
+	ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);
 	if (!ptr)
 		goto no_mem;
 
@@ -173,7 +133,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 	return coherent_ptr;
 
 no_map:
-	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
+	swiotlb_free(dev, size, ptr, *dma_handle, attrs);
 no_mem:
 	return NULL;
 }
@@ -191,7 +151,7 @@ static void __dma_free(struct device *dev, size_t size,
 			return;
 		vunmap(vaddr);
 	}
-	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
+	swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs);
 }
 
 static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
@@ -368,7 +328,7 @@ static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
 	return 0;
 }
 
-static const struct dma_map_ops swiotlb_dma_ops = {
+static const struct dma_map_ops arm64_swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,
 	.mmap = __swiotlb_mmap,
@@ -397,7 +357,7 @@ static int __init atomic_pool_init(void)
 		page = dma_alloc_from_contiguous(NULL, nr_pages,
 						 pool_size_order, GFP_KERNEL);
 	else
-		page = alloc_pages(GFP_DMA, pool_size_order);
+		page = alloc_pages(GFP_DMA32, pool_size_order);
 
 	if (page) {
 		int ret;
@@ -923,7 +883,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
 	if (!dev->dma_ops)
-		dev->dma_ops = &swiotlb_dma_ops;
+		dev->dma_ops = &arm64_swiotlb_dma_ops;
 
 	dev->archdata.dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 9b7f89df49db..ce441d29e7f6 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -596,7 +596,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 
 	info.si_signo = SIGBUS;
 	info.si_errno = 0;
-	info.si_code = 0;
+	info.si_code = BUS_FIXME;
 	if (esr & ESR_ELx_FnV)
 		info.si_addr = NULL;
 	else
@@ -607,70 +607,70 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 }
 
 static const struct fault_info fault_info[] = {
-	{ do_bad, SIGBUS, 0, "ttbr address size fault" },
-	{ do_bad, SIGBUS, 0, "level 1 address size fault" },
-	{ do_bad, SIGBUS, 0, "level 2 address size fault" },
-	{ do_bad, SIGBUS, 0, "level 3 address size fault" },
+	{ do_bad, SIGBUS, BUS_FIXME, "ttbr address size fault" },
+	{ do_bad, SIGBUS, BUS_FIXME, "level 1 address size fault" },
+	{ do_bad, SIGBUS, BUS_FIXME, "level 2 address size fault" },
+	{ do_bad, SIGBUS, BUS_FIXME, "level 3 address size fault" },
 	{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" },
 	{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
 	{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
 	{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
-	{ do_bad, SIGBUS, 0, "unknown 8" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 8" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
-	{ do_bad, SIGBUS, 0, "unknown 12" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 12" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
-	{ do_sea, SIGBUS, 0, "synchronous external abort" },
-	{ do_bad, SIGBUS, 0, "unknown 17" },
-	{ do_bad, SIGBUS, 0, "unknown 18" },
-	{ do_bad, SIGBUS, 0, "unknown 19" },
-	{ do_sea, SIGBUS, 0, "level 0 (translation table walk)" },
-	{ do_sea, SIGBUS, 0, "level 1 (translation table walk)" },
-	{ do_sea, SIGBUS, 0, "level 2 (translation table walk)" },
-	{ do_sea, SIGBUS, 0, "level 3 (translation table walk)" },
-	{ do_sea, SIGBUS, 0, "synchronous parity or ECC error" },	// Reserved when RAS is implemented
-	{ do_bad, SIGBUS, 0, "unknown 25" },
-	{ do_bad, SIGBUS, 0, "unknown 26" },
-	{ do_bad, SIGBUS, 0, "unknown 27" },
-	{ do_sea, SIGBUS, 0, "level 0 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
-	{ do_sea, SIGBUS, 0, "level 1 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
-	{ do_sea, SIGBUS, 0, "level 2 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
-	{ do_sea, SIGBUS, 0, "level 3 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
-	{ do_bad, SIGBUS, 0, "unknown 32" },
+	{ do_sea, SIGBUS, BUS_FIXME, "synchronous external abort" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 17" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 18" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 19" },
+	{ do_sea, SIGBUS, BUS_FIXME, "level 0 (translation table walk)" },
+	{ do_sea, SIGBUS, BUS_FIXME, "level 1 (translation table walk)" },
+	{ do_sea, SIGBUS, BUS_FIXME, "level 2 (translation table walk)" },
+	{ do_sea, SIGBUS, BUS_FIXME, "level 3 (translation table walk)" },
+	{ do_sea, SIGBUS, BUS_FIXME, "synchronous parity or ECC error" },	// Reserved when RAS is implemented
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 25" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 26" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 27" },
+	{ do_sea, SIGBUS, BUS_FIXME, "level 0 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
+	{ do_sea, SIGBUS, BUS_FIXME, "level 1 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
+	{ do_sea, SIGBUS, BUS_FIXME, "level 2 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
+	{ do_sea, SIGBUS, BUS_FIXME, "level 3 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 32" },
 	{ do_alignment_fault, SIGBUS, BUS_ADRALN, "alignment fault" },
-	{ do_bad, SIGBUS, 0, "unknown 34" },
-	{ do_bad, SIGBUS, 0, "unknown 35" },
-	{ do_bad, SIGBUS, 0, "unknown 36" },
-	{ do_bad, SIGBUS, 0, "unknown 37" },
-	{ do_bad, SIGBUS, 0, "unknown 38" },
-	{ do_bad, SIGBUS, 0, "unknown 39" },
-	{ do_bad, SIGBUS, 0, "unknown 40" },
-	{ do_bad, SIGBUS, 0, "unknown 41" },
-	{ do_bad, SIGBUS, 0, "unknown 42" },
-	{ do_bad, SIGBUS, 0, "unknown 43" },
-	{ do_bad, SIGBUS, 0, "unknown 44" },
-	{ do_bad, SIGBUS, 0, "unknown 45" },
-	{ do_bad, SIGBUS, 0, "unknown 46" },
-	{ do_bad, SIGBUS, 0, "unknown 47" },
-	{ do_bad, SIGBUS, 0, "TLB conflict abort" },
-	{ do_bad, SIGBUS, 0, "Unsupported atomic hardware update fault" },
-	{ do_bad, SIGBUS, 0, "unknown 50" },
-	{ do_bad, SIGBUS, 0, "unknown 51" },
-	{ do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" },
-	{ do_bad, SIGBUS, 0, "implementation fault (unsupported exclusive)" },
-	{ do_bad, SIGBUS, 0, "unknown 54" },
-	{ do_bad, SIGBUS, 0, "unknown 55" },
-	{ do_bad, SIGBUS, 0, "unknown 56" },
-	{ do_bad, SIGBUS, 0, "unknown 57" },
-	{ do_bad, SIGBUS, 0, "unknown 58" },
-	{ do_bad, SIGBUS, 0, "unknown 59" },
-	{ do_bad, SIGBUS, 0, "unknown 60" },
-	{ do_bad, SIGBUS, 0, "section domain fault" },
-	{ do_bad, SIGBUS, 0, "page domain fault" },
-	{ do_bad, SIGBUS, 0, "unknown 63" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 34" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 35" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 36" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 37" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 38" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 39" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 40" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 41" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 42" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 43" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 44" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 45" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 46" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 47" },
+	{ do_bad, SIGBUS, BUS_FIXME, "TLB conflict abort" },
+	{ do_bad, SIGBUS, BUS_FIXME, "Unsupported atomic hardware update fault" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 50" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 51" },
+	{ do_bad, SIGBUS, BUS_FIXME, "implementation fault (lockdown abort)" },
+	{ do_bad, SIGBUS, BUS_FIXME, "implementation fault (unsupported exclusive)" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 54" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 55" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 56" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 57" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 58" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 59" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 60" },
+	{ do_bad, SIGBUS, BUS_FIXME, "section domain fault" },
+	{ do_bad, SIGBUS, BUS_FIXME, "page domain fault" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 63" },
 };
 
 int handle_guest_sea(phys_addr_t addr, unsigned int esr)
@@ -707,6 +707,23 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
 	arm64_notify_die("", regs, &info, esr);
 }
 
+asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
+						   unsigned int esr,
+						   struct pt_regs *regs)
+{
+	/*
+	 * We've taken an instruction abort from userspace and not yet
+	 * re-enabled IRQs. If the address is a kernel address, apply
+	 * BP hardening prior to enabling IRQs and pre-emption.
+	 */
+	if (addr > TASK_SIZE)
+		arm64_apply_bp_hardening();
+
+	local_irq_enable();
+	do_mem_abort(addr, esr, regs);
+}
+
+
 asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
 					   unsigned int esr,
 					   struct pt_regs *regs)
@@ -739,11 +756,11 @@ static struct fault_info __refdata debug_fault_info[] = {
 	{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware breakpoint" },
 	{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware single-step" },
 	{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware watchpoint" },
-	{ do_bad, SIGBUS, 0, "unknown 3" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 3" },
 	{ do_bad, SIGTRAP, TRAP_BRKPT, "aarch32 BKPT" },
-	{ do_bad, SIGTRAP, 0, "aarch32 vector catch" },
+	{ do_bad, SIGTRAP, TRAP_FIXME, "aarch32 vector catch" },
 	{ early_brk64, SIGTRAP, TRAP_BRKPT, "aarch64 BRK" },
-	{ do_bad, SIGBUS, 0, "unknown 7" },
+	{ do_bad, SIGBUS, BUS_FIXME, "unknown 7" },
 };
 
 void __init hook_debug_fault_code(int nr,
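Editorial note: for orientation, fault_info[] above is indexed by the fault status code carried in the low six bits of ESR_ELx, which is how do_mem_abort() picks a handler, signal, and si_code for each of the 64 entries. A hypothetical stand-alone sketch of that lookup (the table contents and helper below are editorial assumptions, not part of this hunk):

#include <stdio.h>

#define ESR_ELx_FSC	0x3fU	/* fault status code: ESR bits [5:0] */

/* A few entries standing in for the 64-entry fault_info[] table above. */
static const char *fault_name[64] = {
	[0x04] = "level 0 translation fault",
	[0x10] = "synchronous external abort",
	[0x21] = "alignment fault",
};

int main(void)
{
	unsigned int esr = 0x92000004;	/* example ESR whose FSC is 0x04 */
	unsigned int fsc = esr & ESR_ELx_FSC;

	printf("FSC 0x%02x -> %s\n", fsc, fault_name[fsc]);
	return 0;
}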
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 00e7b900ca41..9f3c47acf8ff 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -217,7 +217,7 @@ static void __init reserve_elfcorehdr(void)
 }
 #endif /* CONFIG_CRASH_DUMP */
 /*
- * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
  * currently assumes that for memory starting above 4G, 32-bit devices will
  * use a DMA offset.
  */
@@ -233,8 +233,8 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
 
-	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_dma_phys());
+	if (IS_ENABLED(CONFIG_ZONE_DMA32))
+		max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
 	max_zone_pfns[ZONE_NORMAL] = max;
 
 	free_area_init_nodes(max_zone_pfns);
@@ -251,9 +251,9 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 	memset(zone_size, 0, sizeof(zone_size));
 
 	/* 4GB maximum for 32-bit only capable devices */
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 	max_dma = PFN_DOWN(arm64_dma_phys_limit);
-	zone_size[ZONE_DMA] = max_dma - min;
+	zone_size[ZONE_DMA32] = max_dma - min;
 #endif
 	zone_size[ZONE_NORMAL] = max - max_dma;
 
@@ -266,10 +266,10 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 		if (start >= max)
 			continue;
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 		if (start < max_dma) {
 			unsigned long dma_end = min(end, max_dma);
-			zhole_size[ZONE_DMA] -= dma_end - start;
+			zhole_size[ZONE_DMA32] -= dma_end - start;
 		}
 #endif
 		if (end > max_dma) {
@@ -366,6 +366,9 @@ void __init arm64_memblock_init(void)
 	/* Handle linux,usable-memory-range property */
 	fdt_enforce_memory_region();
 
+	/* Remove memory above our supported physical address size */
+	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);
+
 	/*
 	 * Ensure that the linear region takes up exactly half of the kernel
 	 * virtual address space. This way, we can distinguish a linear address
@@ -467,7 +470,7 @@ void __init arm64_memblock_init(void)
 	early_init_fdt_scan_reserved_mem();
 
 	/* 4GB maximum for 32-bit only capable devices */
-	if (IS_ENABLED(CONFIG_ZONE_DMA))
+	if (IS_ENABLED(CONFIG_ZONE_DMA32))
 		arm64_dma_phys_limit = max_zone_dma_phys();
 	else
 		arm64_dma_phys_limit = PHYS_MASK + 1;
@@ -600,49 +603,6 @@ void __init mem_init(void)
 
 	mem_init_print_info(NULL);
 
-#define MLK(b, t) b, t, ((t) - (b)) >> 10
-#define MLM(b, t) b, t, ((t) - (b)) >> 20
-#define MLG(b, t) b, t, ((t) - (b)) >> 30
-#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
-
-	pr_notice("Virtual kernel memory layout:\n");
-#ifdef CONFIG_KASAN
-	pr_notice(" kasan : 0x%16lx - 0x%16lx (%6ld GB)\n",
-		MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
-#endif
-	pr_notice(" modules : 0x%16lx - 0x%16lx (%6ld MB)\n",
-		MLM(MODULES_VADDR, MODULES_END));
-	pr_notice(" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n",
-		MLG(VMALLOC_START, VMALLOC_END));
-	pr_notice(" .text : 0x%p" " - 0x%p" " (%6ld KB)\n",
-		MLK_ROUNDUP(_text, _etext));
-	pr_notice(" .rodata : 0x%p" " - 0x%p" " (%6ld KB)\n",
-		MLK_ROUNDUP(__start_rodata, __init_begin));
-	pr_notice(" .init : 0x%p" " - 0x%p" " (%6ld KB)\n",
-		MLK_ROUNDUP(__init_begin, __init_end));
-	pr_notice(" .data : 0x%p" " - 0x%p" " (%6ld KB)\n",
-		MLK_ROUNDUP(_sdata, _edata));
-	pr_notice(" .bss : 0x%p" " - 0x%p" " (%6ld KB)\n",
-		MLK_ROUNDUP(__bss_start, __bss_stop));
-	pr_notice(" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n",
-		MLK(FIXADDR_START, FIXADDR_TOP));
-	pr_notice(" PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n",
-		MLM(PCI_IO_START, PCI_IO_END));
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-	pr_notice(" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n",
-		MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE));
-	pr_notice(" 0x%16lx - 0x%16lx (%6ld MB actual)\n",
-		MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
-		    (unsigned long)virt_to_page(high_memory)));
-#endif
-	pr_notice(" memory : 0x%16lx - 0x%16lx (%6ld MB)\n",
-		MLM(__phys_to_virt(memblock_start_of_DRAM()),
-		    (unsigned long)high_memory));
-
-#undef MLK
-#undef MLM
-#undef MLK_ROUNDUP
-
 	/*
 	 * Check boundaries twice: Some fundamental inconsistencies can be
 	 * detected at build time already.
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 0b1f13e0b4b3..4e369dfb83b1 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -50,6 +50,7 @@
 #define NO_CONT_MAPPINGS	BIT(1)
 
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
+u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
 
 u64 kimage_voffset __ro_after_init;
 EXPORT_SYMBOL(kimage_voffset);
@@ -525,6 +526,35 @@ static int __init parse_rodata(char *arg)
 }
 early_param("rodata", parse_rodata);
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __init map_entry_trampoline(void)
+{
+	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
+	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
+
+	/* The trampoline is always mapped and can therefore be global */
+	pgprot_val(prot) &= ~PTE_NG;
+
+	/* Map only the text into the trampoline page table */
+	memset(tramp_pg_dir, 0, PGD_SIZE);
+	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
+			     prot, pgd_pgtable_alloc, 0);
+
+	/* Map both the text and data into the kernel page table */
+	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+		extern char __entry_tramp_data_start[];
+
+		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
+			     __pa_symbol(__entry_tramp_data_start),
+			     PAGE_KERNEL_RO);
+	}
+
+	return 0;
+}
+core_initcall(map_entry_trampoline);
+#endif
+
 /*
  * Create fine-grained mappings for the kernel.
  */
@@ -570,8 +600,8 @@ static void __init map_kernel(pgd_t *pgd)
 		 * entry instead.
 		 */
 		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
-		set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
-			__pud(__pa_symbol(bm_pmd) | PUD_TYPE_TABLE));
+		pud_populate(&init_mm, pud_set_fixmap_offset(pgd, FIXADDR_START),
+			     lm_alias(bm_pmd));
 		pud_clear_fixmap();
 	} else {
 		BUG();
@@ -612,7 +642,8 @@ void __init paging_init(void)
 	 * allocated with it.
 	 */
 	memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
-		      SWAPPER_DIR_SIZE - PAGE_SIZE);
+		      __pa_symbol(swapper_pg_end) - __pa_symbol(swapper_pg_dir)
+		      - PAGE_SIZE);
 }
 
 /*
@@ -688,7 +719,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 			if (!p)
 				return -ENOMEM;
 
-			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
+			pmd_set_huge(pmd, __pa(p), __pgprot(PROT_SECT_NORMAL));
 		} else
 			vmemmap_verify((pte_t *)pmd, node, addr, next);
 	} while (addr = next, addr != end);
@@ -882,15 +913,19 @@ int __init arch_ioremap_pmd_supported(void)
 
 int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
 {
+	pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
+					pgprot_val(mk_sect_prot(prot)));
 	BUG_ON(phys & ~PUD_MASK);
-	set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+	set_pud(pud, pfn_pud(__phys_to_pfn(phys), sect_prot));
 	return 1;
 }
 
 int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
 {
+	pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
+					pgprot_val(mk_sect_prot(prot)));
 	BUG_ON(phys & ~PMD_MASK);
-	set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+	set_pmd(pmd, pfn_pmd(__phys_to_pfn(phys), sect_prot));
 	return 1;
 }
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 051e71ec3335..289f9113a27a 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -49,6 +49,14 @@ void __init pgd_cache_init(void)
 	if (PGD_SIZE == PAGE_SIZE)
 		return;
 
+#ifdef CONFIG_ARM64_PA_BITS_52
+	/*
+	 * With 52-bit physical addresses, the architecture requires the
+	 * top-level table to be aligned to at least 64 bytes.
+	 */
+	BUILD_BUG_ON(PGD_SIZE < 64);
+#endif
+
 	/*
 	 * Naturally aligned pgds required by the architecture.
 	 */
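Editorial note: the pfn_pud()/pfn_pmd() conversions above, the phys_to_ttbr uses elsewhere in this series, and the 64-byte pgd alignment check are all preparation for 52-bit physical addresses, where PA bits [51:48] do not sit in the low part of a descriptor, so OR-ing a physical address straight into a page-table entry would leave them in bit positions the hardware does not treat as output-address bits. A hypothetical stand-alone illustration (editorial sketch assuming the 64KB-granule descriptor layout, not the kernel's actual helpers):

#include <stdint.h>
#include <stdio.h>

/*
 * Build a 64KB-granule block/page descriptor for a 52-bit physical address:
 * PA[47:16] stays in its usual place, PA[51:48] moves down to bits [15:12].
 */
static uint64_t pa_to_desc_52(uint64_t pa, uint64_t prot)
{
	uint64_t lo = pa & 0x0000ffffffff0000ULL;	/* PA[47:16]               */
	uint64_t hi = (pa >> 36) & 0xf000ULL;		/* PA[51:48] -> bits 15:12 */

	return lo | hi | prot;
}

int main(void)
{
	uint64_t pa = 0x000f123456780000ULL;		/* a PA above 48 bits */

	printf("naive OR : 0x%016llx\n", (unsigned long long)(pa | 0x1));
	printf("52-bit   : 0x%016llx\n", (unsigned long long)pa_to_desc_52(pa, 0x1));
	return 0;
}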
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 95233dfc4c39..9f177aac6390 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -70,7 +70,11 @@ ENTRY(cpu_do_suspend)
 	mrs	x8, mdscr_el1
 	mrs	x9, oslsr_el1
 	mrs	x10, sctlr_el1
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	mrs	x11, tpidr_el1
+alternative_else
+	mrs	x11, tpidr_el2
+alternative_endif
 	mrs	x12, sp_el0
 	stp	x2, x3, [x0]
 	stp	x4, xzr, [x0, #16]
@@ -116,7 +120,11 @@ ENTRY(cpu_do_resume)
 	msr	mdscr_el1, x10
 
 	msr	sctlr_el1, x12
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	msr	tpidr_el1, x13
+alternative_else
+	msr	tpidr_el2, x13
+alternative_endif
 	msr	sp_el0, x14
 	/*
 	 * Restore oslsr_el1 by writing oslar_el1
@@ -124,6 +132,11 @@ ENTRY(cpu_do_resume)
 	ubfx	x11, x11, #1, #1
 	msr	oslar_el1, x11
 	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
+
+alternative_if ARM64_HAS_RAS_EXTN
+	msr_s	SYS_DISR_EL1, xzr
+alternative_else_nop_endif
+
 	isb
 	ret
 ENDPROC(cpu_do_resume)
@@ -138,13 +151,18 @@ ENDPROC(cpu_do_resume)
 *	- pgd_phys - physical address of new TTB
 */
ENTRY(cpu_do_switch_mm)
-	pre_ttbr0_update_workaround x0, x2, x3
+	mrs	x2, ttbr1_el1
 	mmid	x1, x1				// get mm->context.id
-	bfi	x0, x1, #48, #16		// set the ASID
-	msr	ttbr0_el1, x0			// set TTBR0
+	phys_to_ttbr x0, x3
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	bfi	x3, x1, #48, #16		// set the ASID field in TTBR0
+#endif
+	bfi	x2, x1, #48, #16		// set the ASID
+	msr	ttbr1_el1, x2			// in TTBR1 (since TCR.A1 is set)
 	isb
-	post_ttbr0_update_workaround
-	ret
+	msr	ttbr0_el1, x3			// now update TTBR0
+	isb
+	b	post_ttbr_update_workaround	// Back to C code...
ENDPROC(cpu_do_switch_mm)
 
 	.pushsection ".idmap.text", "ax"
@@ -158,14 +176,16 @@ ENTRY(idmap_cpu_replace_ttbr1)
 	save_and_disable_daif flags=x2
 
 	adrp	x1, empty_zero_page
-	msr	ttbr1_el1, x1
+	phys_to_ttbr x1, x3
+	msr	ttbr1_el1, x3
 	isb
 
 	tlbi	vmalle1
 	dsb	nsh
 	isb
 
-	msr	ttbr1_el1, x0
+	phys_to_ttbr x0, x3
+	msr	ttbr1_el1, x3
 	isb
 
 	restore_daif x2
@@ -214,25 +234,19 @@ ENTRY(__cpu_setup)
 	/*
 	 * Prepare SCTLR
 	 */
-	adr	x5, crval
-	ldp	w5, w6, [x5]
-	mrs	x0, sctlr_el1
-	bic	x0, x0, x5			// clear bits
-	orr	x0, x0, x6			// set bits
+	mov_q	x0, SCTLR_EL1_SET
 	/*
 	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
 	 * both user and kernel.
 	 */
 	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
-			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
+			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1
 	tcr_set_idmap_t0sz	x10, x9
 
 	/*
-	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
-	 * TCR_EL1.
+	 * Set the IPS bits in TCR_EL1.
 	 */
-	mrs	x9, ID_AA64MMFR0_EL1
-	bfi	x10, x9, #32, #3
+	tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
 	/*
 	 * Hardware update of the Access and Dirty bits.
@@ -249,21 +263,3 @@ ENTRY(__cpu_setup)
 	msr	tcr_el1, x10
 	ret					// return to head.S
ENDPROC(__cpu_setup)
-
-	/*
-	 * We set the desired value explicitly, including those of the
-	 * reserved bits. The values of bits EE & E0E were set early in
-	 * el2_setup, which are left untouched below.
-	 *
-	 *                 n n            T
-	 *       U E      WT T UD     US IHBS
-	 *       CE0      XWHW CZ     ME TEEA S
-	 * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
-	 * 0011 0... 1101 ..0. ..0. 10.. .0.. ....	< hardware reserved
-	 * .... .1.. .... 01.1 11.1 ..01 0.01 1101	< software settings
-	 */
-	.type	crval, #object
-crval:
-	.word	0xfcffffff			// clear
-	.word	0x34d5d91d			// set
-	.popsection
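Editorial note: a rough C rendering of what the rewritten cpu_do_switch_mm above now does (an illustrative sketch only; the register accessors are stand-in variables, the 48-bit-PA case is assumed for phys_to_ttbr, and a 16-bit ASID is assumed). With TCR_EL1.A1 set, the live ASID is taken from TTBR1_EL1, so the ASID is installed there first and only then is TTBR0_EL1 pointed at the new user page tables, finishing with the post-update errata workaround that is now a C function.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the system registers, purely for illustration. */
static uint64_t ttbr0_el1, ttbr1_el1;

static uint64_t phys_to_ttbr(uint64_t phys)
{
	return phys;			/* 48-bit PA case: no bit shuffling needed */
}

static void post_ttbr_update_workaround(void)
{
	/* "ic iallu; dsb nsh; isb" on parts with Cavium erratum 27456 */
}

static void cpu_do_switch_mm_sketch(uint64_t pgd_phys, uint16_t asid)
{
	/* TCR_EL1.A1 selects TTBR1's ASID, so update the ASID there first. */
	ttbr1_el1 = (ttbr1_el1 & ~(0xffffULL << 48)) | ((uint64_t)asid << 48);
	/* isb */
	ttbr0_el1 = phys_to_ttbr(pgd_phys);	/* now switch the user tables */
	/* isb */
	post_ttbr_update_workaround();
}

int main(void)
{
	cpu_do_switch_mm_sketch(0x40001000ULL, 2);
	printf("ttbr0=%#llx ttbr1=%#llx\n",
	       (unsigned long long)ttbr0_el1, (unsigned long long)ttbr1_el1);
	return 0;
}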