Diffstat (limited to 'arch/parisc/mm')
-rw-r--r--	arch/parisc/mm/fault.c        |  95
-rw-r--r--	arch/parisc/mm/fixmap.c       |   3
-rw-r--r--	arch/parisc/mm/hugetlbpage.c  |  40
-rw-r--r--	arch/parisc/mm/init.c         | 164
-rw-r--r--	arch/parisc/mm/ioremap.c      |  62
5 files changed, 201 insertions, 163 deletions
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 869204e97ec9..f1785640b049 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -150,11 +150,16 @@ int fixup_exception(struct pt_regs *regs)
		 * Fix up get_user() and put_user().
		 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
		 * bit in the relative address of the fixup routine to indicate
-		 * that gr[ASM_EXCEPTIONTABLE_REG] should be loaded with
-		 * -EFAULT to report a userspace access error.
+		 * that the register encoded in the "or %r0,%r0,register"
+		 * opcode should be loaded with -EFAULT to report a userspace
+		 * access error.
		 */
		if (fix->fixup & 1) {
-			regs->gr[ASM_EXCEPTIONTABLE_REG] = -EFAULT;
+			int fault_error_reg = fix->err_opcode & 0x1f;
+			if (!WARN_ON(!fault_error_reg))
+				regs->gr[fault_error_reg] = -EFAULT;
+			pr_debug("Unalignment fixup of register %d at %pS\n",
+				fault_error_reg, (void*)regs->iaoq[0]);

			/* zero target register for get_user() */
			if (parisc_acctyp(0, regs->iir) == VM_READ) {
@@ -192,31 +197,31 @@ int fixup_exception(struct pt_regs *regs)
 * For implementation see handle_interruption() in traps.c
 */
static const char * const trap_description[] = {
-	[1] "High-priority machine check (HPMC)",
-	[2] "Power failure interrupt",
-	[3] "Recovery counter trap",
-	[5] "Low-priority machine check",
-	[6] "Instruction TLB miss fault",
-	[7] "Instruction access rights / protection trap",
-	[8] "Illegal instruction trap",
-	[9] "Break instruction trap",
-	[10] "Privileged operation trap",
-	[11] "Privileged register trap",
-	[12] "Overflow trap",
-	[13] "Conditional trap",
-	[14] "FP Assist Exception trap",
-	[15] "Data TLB miss fault",
-	[16] "Non-access ITLB miss fault",
-	[17] "Non-access DTLB miss fault",
-	[18] "Data memory protection/unaligned access trap",
-	[19] "Data memory break trap",
-	[20] "TLB dirty bit trap",
-	[21] "Page reference trap",
-	[22] "Assist emulation trap",
-	[25] "Taken branch trap",
-	[26] "Data memory access rights trap",
-	[27] "Data memory protection ID trap",
-	[28] "Unaligned data reference trap",
+	[1] = "High-priority machine check (HPMC)",
+	[2] = "Power failure interrupt",
+	[3] = "Recovery counter trap",
+	[5] = "Low-priority machine check",
+	[6] = "Instruction TLB miss fault",
+	[7] = "Instruction access rights / protection trap",
+	[8] = "Illegal instruction trap",
+	[9] = "Break instruction trap",
+	[10] = "Privileged operation trap",
+	[11] = "Privileged register trap",
+	[12] = "Overflow trap",
+	[13] = "Conditional trap",
+	[14] = "FP Assist Exception trap",
+	[15] = "Data TLB miss fault",
+	[16] = "Non-access ITLB miss fault",
+	[17] = "Non-access DTLB miss fault",
+	[18] = "Data memory protection/unaligned access trap",
+	[19] = "Data memory break trap",
+	[20] = "TLB dirty bit trap",
+	[21] = "Page reference trap",
+	[22] = "Assist emulation trap",
+	[25] = "Taken branch trap",
+	[26] = "Data memory access rights trap",
+	[27] = "Data memory protection ID trap",
+	[28] = "Unaligned data reference trap",
};

const char *trap_name(unsigned long code)
@@ -288,15 +293,19 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
retry:
	mmap_read_lock(mm);
	vma = find_vma_prev(mm, address, &prev_vma);
-	if (!vma || address < vma->vm_start)
-		goto check_expansion;
+	if (!vma || address < vma->vm_start) {
+		if (!prev_vma || !(prev_vma->vm_flags & VM_GROWSUP))
+			goto bad_area;
+		vma = expand_stack(mm, address);
+		if (!vma)
+			goto bad_area_nosemaphore;
+	}
+
/*
 * Ok, we have a good vm_area for this memory access. We still need to
 * check the access permissions.
 */
-good_area:
-
	if ((vma->vm_flags & acc_type) != acc_type)
		goto bad_area;

@@ -308,8 +317,13 @@ good_area:

	fault = handle_mm_fault(vma, address, flags, regs);

-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs)) {
+			msg = "Page fault: fault signal on kernel memory";
+			goto no_context;
+		}
		return;
+	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
@@ -342,17 +356,17 @@ good_area:
		mmap_read_unlock(mm);
		return;

-check_expansion:
-	vma = prev_vma;
-	if (vma && (expand_stack(vma, address) == 0))
-		goto good_area;
-
/*
 * Something tried to access memory that isn't in our memory map..
 */
bad_area:
	mmap_read_unlock(mm);

+bad_area_nosemaphore:
+	if (!user_mode(regs) && fixup_exception(regs)) {
+		return;
+	}
+
	if (user_mode(regs)) {
		int signo, si_code;

@@ -444,7 +458,7 @@ handle_nadtlb_fault(struct pt_regs *regs)
{
	unsigned long insn = regs->iir;
	int breg, treg, xreg, val = 0;
-	struct vm_area_struct *vma, *prev_vma;
+	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct mm_struct *mm;
	unsigned long address;
@@ -480,7 +494,7 @@ handle_nadtlb_fault(struct pt_regs *regs)
		/* Search for VMA */
		address = regs->ior;
		mmap_read_lock(mm);
-		vma = find_vma_prev(mm, address, &prev_vma);
+		vma = vma_lookup(mm, address);
		mmap_read_unlock(mm);

		/*
@@ -489,7 +503,6 @@ handle_nadtlb_fault(struct pt_regs *regs)
		 */
		acc_type = (insn & 0x40) ? VM_WRITE : VM_READ;
		if (vma
-		    && address >= vma->vm_start
		    && (vma->vm_flags & acc_type) == acc_type)
			val = 1;
	}
diff --git a/arch/parisc/mm/fixmap.c b/arch/parisc/mm/fixmap.c
index cc15d737fda6..ae3493dae9dc 100644
--- a/arch/parisc/mm/fixmap.c
+++ b/arch/parisc/mm/fixmap.c
@@ -19,9 +19,6 @@ void notrace set_fixmap(enum fixed_addresses idx, phys_addr_t phys)
	pmd_t *pmd = pmd_offset(pud, vaddr);
	pte_t *pte;

-	if (pmd_none(*pmd))
-		pte = pte_alloc_kernel(pmd, vaddr);
-
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte_at(&init_mm, vaddr, pte, __mk_pte(phys, PAGE_KERNEL_RWX));
	flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index d1d3990b83f6..a94fe546d434 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -21,27 +21,6 @@
#include <asm/mmu_context.h>


-unsigned long
-hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
-		unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-	struct hstate *h = hstate_file(file);
-
-	if (len & ~huge_page_mask(h))
-		return -EINVAL;
-	if (len > TASK_SIZE)
-		return -ENOMEM;
-
-	if (flags & MAP_FIXED)
-		if (prepare_hugepage_range(file, addr, len))
-			return -EINVAL;
-
-	if (addr)
-		addr = ALIGN(addr, huge_page_size(h));
-
-	/* we need to make sure the colouring is OK */
-	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
-}


pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -66,7 +45,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
-			pte = pte_alloc_map(mm, pmd, addr);
+			pte = pte_alloc_huge(mm, pmd, addr);
	}
	return pte;
}
@@ -90,7 +69,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
-				pte = pte_offset_map(pmd, addr);
+				pte = pte_offset_huge(pmd, addr);
		}
	}
}
@@ -140,14 +119,14 @@ static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-		     pte_t *ptep, pte_t entry)
+		     pte_t *ptep, pte_t entry, unsigned long sz)
{
	__set_huge_pte_at(mm, addr, ptep, entry);
}


pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep)
+			      pte_t *ptep, unsigned long sz)
{
	pte_t entry;

@@ -180,14 +159,3 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
	}
	return changed;
}
-
-
-int pmd_huge(pmd_t pmd)
-{
-	return 0;
-}
-
-int pud_huge(pud_t pud)
-{
-	return 0;
-}
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index b0c43f3b0a5f..14270715d754 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -24,6 +24,7 @@
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages */
#include <linux/compat.h>
+#include <linux/execmem.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
@@ -32,6 +33,8 @@
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>
+#include <asm/asm-offsets.h>
+#include <asm/shmbuf.h>

extern int  data_start;
extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */
@@ -374,10 +377,8 @@ static void __ref map_pages(unsigned long start_vaddr,

#if CONFIG_PGTABLE_LEVELS == 3
		if (pud_none(*pud)) {
-			pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
+			pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER,
					     PAGE_SIZE << PMD_TABLE_ORDER);
-			if (!pmd)
-				panic("pmd allocation failed.\n");
			pud_populate(NULL, pud, pmd);
		}
#endif
@@ -385,9 +386,7 @@ static void __ref map_pages(unsigned long start_vaddr,
		pmd = pmd_offset(pud, vaddr);
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
			if (pmd_none(*pmd)) {
-				pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-				if (!pg_table)
-					panic("page table allocation failed\n");
+				pg_table = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
				pmd_populate_kernel(NULL, pmd, pg_table);
			}

@@ -456,7 +455,6 @@ void free_initmem(void)
	unsigned long kernel_end  = (unsigned long)&_end;

	/* Remap kernel text and data, but do not touch init section yet. */
-	kernel_set_to_readonly = true;
	map_pages(init_end, __pa(init_end), kernel_end - init_end,
		  PAGE_KERNEL, 0);

@@ -479,7 +477,7 @@ void free_initmem(void)
	/* finally dump all the instructions which were cached, since the
	 * pages are no-longer executable */
	flush_icache_range(init_begin, init_end);
-	
+
	free_initmem_default(POISON_FREE_INITMEM);

	/* set up a new led state on systems shipped LED State panel */
@@ -490,11 +488,18 @@ void free_initmem(void)
#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
-	/* rodata memory was already mapped with KERNEL_RO access rights by
-           pagetable_init() and map_pages(). No need to do additional stuff here */
-	unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;
+	unsigned long start = (unsigned long) &__start_rodata;
+	unsigned long end = (unsigned long) &__end_rodata;
+
+	pr_info("Write protecting the kernel read-only data: %luk\n",
+	       (end - start) >> 10);
+
+	kernel_set_to_readonly = true;
+	map_pages(start, __pa(start), end - start, PAGE_KERNEL, 0);

-	pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
+	/* force the kernel to see the new page table entries */
+	flush_cache_all();
+	flush_tlb_all();
}
#endif

@@ -523,10 +528,6 @@ void mark_rodata_ro(void)
void *parisc_vmalloc_start __ro_after_init;
EXPORT_SYMBOL(parisc_vmalloc_start);

-#ifdef CONFIG_PA11
-unsigned long pcxl_dma_start __ro_after_init;
-#endif
-
void __init mem_init(void)
{
	/* Do sanity checks on IPC (compat) structures */
@@ -561,10 +562,6 @@ void __init mem_init(void)
	BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000);
#endif

-	high_memory = __va((max_pfn << PAGE_SHIFT));
-	set_max_mapnr(max_low_pfn);
-	memblock_free_all();
-
#ifdef CONFIG_PA11
	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
@@ -626,12 +623,10 @@ static void __init pagetable_init(void)

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
-		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		size = pmem_ranges[range].pages << PAGE_SHIFT;
-		end_paddr = start_paddr + size;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			  size, PAGE_KERNEL, 0);
@@ -645,9 +640,7 @@ static void __init pagetable_init(void)
	}
#endif

-	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-	if (!empty_zero_page)
-		panic("zero page allocation failed.\n");
+	empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);

}

@@ -671,6 +664,35 @@ static void __init gateway_init(void)
		  PAGE_SIZE, PAGE_GATEWAY, 1);
}

+static void __init fixmap_init(void)
+{
+	unsigned long addr = FIXMAP_START;
+	unsigned long end = FIXMAP_START + FIXMAP_SIZE;
+	pgd_t *pgd = pgd_offset_k(addr);
+	p4d_t *p4d = p4d_offset(pgd, addr);
+	pud_t *pud = pud_offset(p4d, addr);
+	pmd_t *pmd;
+
+	BUILD_BUG_ON(FIXMAP_SIZE > PMD_SIZE);
+
+#if CONFIG_PGTABLE_LEVELS == 3
+	if (pud_none(*pud)) {
+		pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER,
+				     PAGE_SIZE << PMD_TABLE_ORDER);
+		pud_populate(NULL, pud, pmd);
+	}
+#endif
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		pte_t *pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
+
+		pmd_populate_kernel(&init_mm, pmd, pte);
+
+		addr += PAGE_SIZE;
+	} while (addr < end);
+}
+
static void __init parisc_bootmem_free(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
@@ -685,6 +707,7 @@ void __init paging_init(void)
	setup_bootmem();
	pagetable_init();
	gateway_init();
+	fixmap_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

@@ -692,6 +715,77 @@ void __init paging_init(void)
	parisc_bootmem_free();
}

+static void alloc_btlb(unsigned long start, unsigned long end, int *slot,
+			unsigned long entry_info)
+{
+	const int slot_max = btlb_info.fixed_range_info.num_comb;
+	int min_num_pages = btlb_info.min_size;
+	unsigned long size;
+
+	/* map at minimum 4 pages */
+	if (min_num_pages < 4)
+		min_num_pages = 4;
+
+	size = HUGEPAGE_SIZE;
+	while (start < end && *slot < slot_max && size >= PAGE_SIZE) {
+		/* starting address must have same alignment as size! */
+		/* if correctly aligned and fits in double size, increase */
+		if (((start & (2 * size - 1)) == 0) &&
+		    (end - start) >= (2 * size)) {
+			size <<= 1;
+			continue;
+		}
+		/* if current size alignment is too big, try smaller size */
+		if ((start & (size - 1)) != 0) {
+			size >>= 1;
+			continue;
+		}
+		if ((end - start) >= size) {
+			if ((size >> PAGE_SHIFT) >= min_num_pages)
+				pdc_btlb_insert(start >> PAGE_SHIFT, __pa(start) >> PAGE_SHIFT,
+					size >> PAGE_SHIFT, entry_info, *slot);
+			(*slot)++;
+			start += size;
+			continue;
+		}
+		size /= 2;
+		continue;
+	}
+}
+
+void btlb_init_per_cpu(void)
+{
+	unsigned long s, t, e;
+	int slot;
+
+	/* BTLBs are not available on 64-bit CPUs */
+	if (IS_ENABLED(CONFIG_PA20))
+		return;
+	else if (pdc_btlb_info(&btlb_info) < 0) {
+		memset(&btlb_info, 0, sizeof btlb_info);
+	}
+
+	/* insert BLTLBs for code and data segments */
+	s = (uintptr_t) dereference_function_descriptor(&_stext);
+	e = (uintptr_t) dereference_function_descriptor(&_etext);
+	t = (uintptr_t) dereference_function_descriptor(&_sdata);
+	BUG_ON(t != e);
+
+	/* code segments */
+	slot = 0;
+	alloc_btlb(s, e, &slot, 0x13800000);
+
+	/* sanity check */
+	t = (uintptr_t) dereference_function_descriptor(&_edata);
+	e = (uintptr_t) dereference_function_descriptor(&__bss_start);
+	BUG_ON(t != e);
+
+	/* data segments */
+	s = (uintptr_t) dereference_function_descriptor(&_sdata);
+	e = (uintptr_t) dereference_function_descriptor(&__bss_stop);
+	alloc_btlb(s, e, &slot, 0x11800000);
+}
+
#ifdef CONFIG_PA20

/*
@@ -891,3 +985,23 @@ static const pgprot_t protection_map[16] = {
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.start	= VMALLOC_START,
+				.end	= VMALLOC_END,
+				.pgprot	= PAGE_KERNEL_RWX,
+				.alignment = 1,
+			},
+		},
+	};
+
+	return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
index 345ff0b66499..0b65c4b3baee 100644
--- a/arch/parisc/mm/ioremap.c
+++ b/arch/parisc/mm/ioremap.c
@@ -13,25 +13,9 @@
#include <linux/io.h>
#include <linux/mm.h>

-/*
- * Generic mapping function (not visible outside):
- */
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+			   pgprot_t prot)
{
-	void __iomem *addr;
-	struct vm_struct *area;
-	unsigned long offset, last_addr;
-	pgprot_t pgprot;
-
#ifdef CONFIG_EISA
	unsigned long end = phys_addr + size - 1;
	/* Support EISA addresses */
@@ -40,11 +24,6 @@ void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
		phys_addr |= F_EXTEND(0xfc000000);
#endif

-	/* Don't allow wraparound or zero size */
-	last_addr = phys_addr + size - 1;
-	if (!size || last_addr < phys_addr)
-		return NULL;
-
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
@@ -62,39 +41,6 @@ void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
		}
	}

-	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
-			  _PAGE_ACCESSED | _PAGE_NO_CACHE);
-
-	/*
-	 * Mappings have to be page-aligned
-	 */
-	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
-	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
-
-	/*
-	 * Ok, go for it..
-	 */
-	area = get_vm_area(size, VM_IOREMAP);
-	if (!area)
-		return NULL;
-
-	addr = (void __iomem *) area->addr;
-	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
-			       phys_addr, pgprot)) {
-		vunmap(addr);
-		return NULL;
-	}
-
-	return (void __iomem *) (offset + (char __iomem *)addr);
-}
-EXPORT_SYMBOL(ioremap);
-
-void iounmap(const volatile void __iomem *io_addr)
-{
-	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
-
-	if (is_vmalloc_addr((void *)addr))
-		vunmap((void *)addr);
+	return generic_ioremap_prot(phys_addr, size, prot);
}
-EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(ioremap_prot);
