Diffstat (limited to 'arch/arm64/mm')
 arch/arm64/mm/fault.c       | 33
 arch/arm64/mm/hugetlbpage.c |  5
 arch/arm64/mm/init.c        |  3
 arch/arm64/mm/mmu.c         | 23
 arch/arm64/mm/proc.S        |  6
 5 files changed, 46 insertions(+), 24 deletions(-)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 6c8ba25bf6bb..c23751b06120 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -130,7 +130,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
 	force_sig_info(sig, &si, tsk);
 }
 
-void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 {
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->active_mm;
@@ -199,13 +199,6 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
 	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
-	if (esr & ESR_LNX_EXEC) {
-		vm_flags = VM_EXEC;
-	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
-		vm_flags = VM_WRITE;
-		mm_flags |= FAULT_FLAG_WRITE;
-	}
-
 	tsk = current;
 	mm  = tsk->mm;
 
@@ -220,6 +213,16 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(regs))
+		mm_flags |= FAULT_FLAG_USER;
+
+	if (esr & ESR_LNX_EXEC) {
+		vm_flags = VM_EXEC;
+	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+		vm_flags = VM_WRITE;
+		mm_flags |= FAULT_FLAG_WRITE;
+	}
+
 	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,
@@ -288,6 +291,13 @@ retry:
 			      VM_FAULT_BADACCESS))))
 		return 0;
 
+	/*
+	 * If we are in kernel mode at this point, we have no context to
+	 * handle this fault with.
+	 */
+	if (!user_mode(regs))
+		goto no_context;
+
 	if (fault & VM_FAULT_OOM) {
 		/*
 		 * We ran out of memory, call the OOM killer, and return to
@@ -298,13 +308,6 @@ retry:
 		return 0;
 	}
 
-	/*
-	 * If we are in kernel mode at this point, we have no context to
-	 * handle this fault with.
-	 */
-	if (!user_mode(regs))
-		goto no_context;
-
 	if (fault & VM_FAULT_SIGBUS) {
 		/*
 		 * We had some memory, but were unable to successfully fix up
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 2fc8258bab2d..5e9aec358306 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -54,6 +54,11 @@ int pud_huge(pud_t pud)
 	return !(pud_val(pud) & PUD_TABLE_BIT);
 }
 
+int pmd_huge_support(void)
+{
+	return 1;
+}
+
 static __init int setup_hugepagesz(char *opt)
 {
 	unsigned long ps = memparse(opt, &opt);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 67e8d7ce3fe7..de2de5db628d 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -44,8 +44,7 @@ static unsigned long phys_initrd_size __initdata = 0;
 
 phys_addr_t memstart_addr __read_mostly = 0;
 
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-					    unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
 	phys_initrd_start = start;
 	phys_initrd_size = end - start;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a8d1059b91b2..f557ebbe7013 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -296,6 +296,7 @@ void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
 static void __init map_mem(void)
 {
 	struct memblock_region *reg;
+	phys_addr_t limit;
 
 	/*
 	 * Temporarily limit the memblock range. We need to do this as
@@ -303,9 +304,11 @@ static void __init map_mem(void)
 	 * memory addressable from the initial direct kernel mapping.
 	 *
 	 * The initial direct kernel mapping, located at swapper_pg_dir,
-	 * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (aligned).
+	 * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (which must be
+	 * aligned to 2MB as per Documentation/arm64/booting.txt).
 	 */
-	memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE);
+	limit = PHYS_OFFSET + PGDIR_SIZE;
+	memblock_set_current_limit(limit);
 
 	/* map all the memory banks */
 	for_each_memblock(memory, reg) {
@@ -315,6 +318,22 @@ static void __init map_mem(void)
 		if (start >= end)
 			break;
 
+#ifndef CONFIG_ARM64_64K_PAGES
+		/*
+		 * For the first memory bank align the start address and
+		 * current memblock limit to prevent create_mapping() from
+		 * allocating pte page tables from unmapped memory.
+		 * When 64K pages are enabled, the pte page table for the
+		 * first PGDIR_SIZE is already present in swapper_pg_dir.
+		 */
+		if (start < limit)
+			start = ALIGN(start, PMD_SIZE);
+		if (end < limit) {
+			limit = end & PMD_MASK;
+			memblock_set_current_limit(limit);
+		}
+#endif
+
 		create_mapping(start, __phys_to_virt(start), end - start);
 	}
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index a82ae8868077..b1b31bbc967b 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -95,10 +95,6 @@ ENTRY(cpu_do_switch_mm)
 	ret
 ENDPROC(cpu_do_switch_mm)
 
-cpu_name:
-	.ascii "AArch64 Processor"
-	.align
-
 	.section ".text.init", #alloc, #execinstr
 
 /*
@@ -151,7 +147,7 @@ ENTRY(__cpu_setup)
 	 * both user and kernel.
 	 */
 	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | TCR_IPS_40BIT | \
-		      TCR_ASID16 | (1 << 31)
+		      TCR_ASID16 | TCR_TBI0 | (1 << 31)
 #ifdef CONFIG_ARM64_64K_PAGES
 	orr	x10, x10, TCR_TG0_64K
 	orr	x10, x10, TCR_TG1_64K
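
A note on the do_page_fault() reordering above: its net effect is that the ESR-derived access flags are computed only after the in_atomic()/!mm bail-out, together with the new FAULT_FLAG_USER. The standalone sketch below shows just that flag derivation; the constant values and the helper are illustrative stand-ins for the private definitions in arch/arm64/mm/fault.c and the core mm headers, not the kernel's own code.

/*
 * Minimal sketch of the flag derivation that do_page_fault() now does
 * after the atomic-context check. All constants here are stand-in
 * values for illustration only.
 */
#include <stdbool.h>

#define ESR_LNX_EXEC		(1U << 24)	/* fault was an instruction fetch */
#define ESR_WRITE		(1U << 6)	/* fault was a write access */
#define ESR_CM			(1U << 8)	/* fault from a cache maintenance op */

#define VM_READ			0x1UL
#define VM_WRITE		0x2UL
#define VM_EXEC			0x4UL

#define FAULT_FLAG_WRITE	0x01U
#define FAULT_FLAG_USER		0x02U	/* the flag this patch starts setting */

void derive_fault_flags(unsigned int esr, bool from_user,
			unsigned long *vm_flags, unsigned int *mm_flags)
{
	/* Default: read, write or execute permission all satisfy the fault. */
	*vm_flags = VM_READ | VM_WRITE | VM_EXEC;

	/* Tell the core mm whether the fault came from userspace. */
	if (from_user)
		*mm_flags |= FAULT_FLAG_USER;

	if (esr & ESR_LNX_EXEC) {
		/* Instruction fetch: the VMA must be executable. */
		*vm_flags = VM_EXEC;
	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
		/* A write that is not cache maintenance: require VM_WRITE. */
		*vm_flags = VM_WRITE;
		*mm_flags |= FAULT_FLAG_WRITE;
	}
}

Placing this derivation after the early goto no_context paths keeps it off the atomic-context exit and groups it with the FAULT_FLAG_USER assignment, which depends on the saved pt_regs anyway.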