Diffstat (limited to 'arch/nios2/mm')
| -rw-r--r-- | arch/nios2/mm/cacheflush.c | 84 |
| -rw-r--r-- | arch/nios2/mm/fault.c      | 22 |
| -rw-r--r-- | arch/nios2/mm/init.c       | 49 |
| -rw-r--r-- | arch/nios2/mm/pgtable.c    |  3 |
| -rw-r--r-- | arch/nios2/mm/tlb.c        | 18 |
5 files changed, 95 insertions, 81 deletions
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
index 6aa9257c3ede..8321182eb927 100644
--- a/arch/nios2/mm/cacheflush.c
+++ b/arch/nios2/mm/cacheflush.c
@@ -71,28 +71,29 @@ static void __flush_icache(unsigned long start, unsigned long end)
         __asm__ __volatile(" flushp\n");
 }
 
-static void flush_aliases(struct address_space *mapping, struct page *page)
+static void flush_aliases(struct address_space *mapping, struct folio *folio)
 {
         struct mm_struct *mm = current->active_mm;
-        struct vm_area_struct *mpnt;
+        struct vm_area_struct *vma;
+        unsigned long flags;
         pgoff_t pgoff;
+        unsigned long nr = folio_nr_pages(folio);
 
-        pgoff = page->index;
+        pgoff = folio->index;
 
-        flush_dcache_mmap_lock(mapping);
-        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
-                unsigned long offset;
+        flush_dcache_mmap_lock_irqsave(mapping, flags);
+        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
+                unsigned long start;
 
-                if (mpnt->vm_mm != mm)
+                if (vma->vm_mm != mm)
                         continue;
-                if (!(mpnt->vm_flags & VM_MAYSHARE))
+                if (!(vma->vm_flags & VM_MAYSHARE))
                         continue;
 
-                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
-                flush_cache_page(mpnt, mpnt->vm_start + offset,
-                        page_to_pfn(page));
+                start = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+                flush_cache_range(vma, start, start + nr * PAGE_SIZE);
         }
-        flush_dcache_mmap_unlock(mapping);
+        flush_dcache_mmap_unlock_irqrestore(mapping, flags);
 }
 
 void flush_cache_all(void)
@@ -138,10 +139,11 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
         __flush_icache(start, end);
 }
 
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+                unsigned int nr)
 {
         unsigned long start = (unsigned long) page_address(page);
-        unsigned long end = start + PAGE_SIZE;
+        unsigned long end = start + nr * PAGE_SIZE;
 
         __flush_dcache(start, end);
         __flush_icache(start, end);
@@ -158,19 +160,19 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
         __flush_icache(start, end);
 }
 
-void __flush_dcache_page(struct address_space *mapping, struct page *page)
+static void __flush_dcache_folio(struct folio *folio)
 {
         /*
          * Writeback any data associated with the kernel mapping of this
          * page.  This ensures that data in the physical page is mutually
          * coherent with the kernels mapping.
          */
-        unsigned long start = (unsigned long)page_address(page);
+        unsigned long start = (unsigned long)folio_address(folio);
 
-        __flush_dcache(start, start + PAGE_SIZE);
+        __flush_dcache(start, start + folio_size(folio));
 }
 
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
 {
         struct address_space *mapping;
 
@@ -178,32 +180,38 @@ void flush_dcache_page(struct page *page)
          * The zero page is never written to, so never has any dirty
          * cache lines, and therefore never needs to be flushed.
          */
-        if (page == ZERO_PAGE(0))
+        if (is_zero_pfn(folio_pfn(folio)))
                 return;
 
-        mapping = page_mapping_file(page);
+        mapping = folio_flush_mapping(folio);
 
         /* Flush this page if there are aliases. */
         if (mapping && !mapping_mapped(mapping)) {
-                clear_bit(PG_dcache_clean, &page->flags);
+                clear_bit(PG_dcache_clean, &folio->flags.f);
         } else {
-                __flush_dcache_page(mapping, page);
+                __flush_dcache_folio(folio);
                 if (mapping) {
-                        unsigned long start = (unsigned long)page_address(page);
-                        flush_aliases(mapping, page);
-                        flush_icache_range(start, start + PAGE_SIZE);
+                        unsigned long start = (unsigned long)folio_address(folio);
+                        flush_aliases(mapping, folio);
+                        flush_icache_range(start, start + folio_size(folio));
                 }
-                set_bit(PG_dcache_clean, &page->flags);
+                set_bit(PG_dcache_clean, &folio->flags.f);
         }
 }
+EXPORT_SYMBOL(flush_dcache_folio);
+
+void flush_dcache_page(struct page *page)
+{
+        flush_dcache_folio(page_folio(page));
+}
 EXPORT_SYMBOL(flush_dcache_page);
 
-void update_mmu_cache(struct vm_area_struct *vma,
-                      unsigned long address, pte_t *ptep)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+                unsigned long address, pte_t *ptep, unsigned int nr)
 {
         pte_t pte = *ptep;
         unsigned long pfn = pte_pfn(pte);
-        struct page *page;
+        struct folio *folio;
         struct address_space *mapping;
 
         reload_tlb_page(vma, address, pte);
@@ -215,19 +223,19 @@ void update_mmu_cache(struct vm_area_struct *vma,
          * The zero page is never written to, so never has any dirty
          * cache lines, and therefore never needs to be flushed.
          */
-        page = pfn_to_page(pfn);
-        if (page == ZERO_PAGE(0))
+        if (is_zero_pfn(pfn))
                 return;
 
-        mapping = page_mapping_file(page);
-        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
-                __flush_dcache_page(mapping, page);
+        folio = page_folio(pfn_to_page(pfn));
+        if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f))
+                __flush_dcache_folio(folio);
 
-        if(mapping)
-        {
-                flush_aliases(mapping, page);
+        mapping = folio_flush_mapping(folio);
+        if (mapping) {
+                flush_aliases(mapping, folio);
                 if (vma->vm_flags & VM_EXEC)
-                        flush_icache_page(vma, page);
+                        flush_icache_pages(vma, &folio->page,
+                                        folio_nr_pages(folio));
         }
 }
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index edaca0a6c1c1..e3fa9c15181d 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -86,27 +86,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
 
         perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-        if (!mmap_read_trylock(mm)) {
-                if (!user_mode(regs) && !search_exception_tables(regs->ea))
-                        goto bad_area_nosemaphore;
 retry:
-                mmap_read_lock(mm);
-        }
-
-        vma = find_vma(mm, address);
+        vma = lock_mm_and_find_vma(mm, address, regs);
         if (!vma)
-                goto bad_area;
-        if (vma->vm_start <= address)
-                goto good_area;
-        if (!(vma->vm_flags & VM_GROWSDOWN))
-                goto bad_area;
-        if (expand_stack(vma, address))
-                goto bad_area;
+                goto bad_area_nosemaphore;
 /*
  * Ok, we have a good vm_area for this memory access, so
  * we can handle it..
  */
-good_area:
         code = SEGV_ACCERR;
 
         switch (cause) {
@@ -136,8 +123,11 @@ good_area:
          */
         fault = handle_mm_fault(vma, address, flags, regs);
 
-        if (fault_signal_pending(fault, regs))
+        if (fault_signal_pending(fault, regs)) {
+                if (!user_mode(regs))
+                        goto no_context;
                 return;
+        }
 
         /* The fault is fully completed (including releasing mmap lock) */
         if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
index 7bc82ee889c9..94efa3de3933 100644
--- a/arch/nios2/mm/init.c
+++ b/arch/nios2/mm/init.c
@@ -26,6 +26,7 @@
 #include <linux/memblock.h>
 #include <linux/slab.h>
 #include <linux/binfmts.h>
+#include <linux/execmem.h>
 
 #include <asm/setup.h>
 #include <asm/page.h>
@@ -50,7 +51,7 @@ void __init paging_init(void)
         pagetable_init();
         pgd_current = swapper_pg_dir;
 
-        max_zone_pfn[ZONE_NORMAL] = max_mapnr;
+        max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
 
         /* pass the memory from the bootmem allocator to the main allocator */
         free_area_init(max_zone_pfn);
@@ -59,20 +60,6 @@ void __init paging_init(void)
                         (unsigned long)empty_zero_page + PAGE_SIZE);
 }
 
-void __init mem_init(void)
-{
-        unsigned long end_mem = memory_end; /* this must not include
-                                               kernel stack at top */
-
-        pr_debug("mem_init: start=%lx, end=%lx\n", memory_start, memory_end);
-
-        end_mem &= PAGE_MASK;
-        high_memory = __va(end_mem);
-
-        /* this will put all memory onto the freelists */
-        memblock_free_all();
-}
-
 void __init mmu_init(void)
 {
         flush_tlb_all();
@@ -81,6 +68,10 @@ void __init mmu_init(void)
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
 pte_t invalid_pte_table[PTRS_PER_PTE] __aligned(PAGE_SIZE);
 static struct page *kuser_page[1];
+static struct vm_special_mapping vdso_mapping = {
+        .name = "[vdso]",
+        .pages = kuser_page,
+};
 
 static int alloc_kuser_page(void)
 {
@@ -105,18 +96,18 @@ arch_initcall(alloc_kuser_page);
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
         struct mm_struct *mm = current->mm;
-        int ret;
+        struct vm_area_struct *vma;
 
         mmap_write_lock(mm);
 
         /* Map kuser helpers to user space address */
-        ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
+        vma = _install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
                                       VM_READ | VM_EXEC | VM_MAYREAD |
-                                      VM_MAYEXEC, kuser_page);
+                                      VM_MAYEXEC, &vdso_mapping);
 
         mmap_write_unlock(mm);
 
-        return ret;
+        return IS_ERR(vma) ? PTR_ERR(vma) : 0;
 }
 
 const char *arch_vma_name(struct vm_area_struct *vma)
@@ -143,3 +134,23 @@ static const pgprot_t protection_map[16] = {
         [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = MKP(1, 1, 1)
 };
 DECLARE_VM_GET_PAGE_PROT
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+        execmem_info = (struct execmem_info){
+                .ranges = {
+                        [EXECMEM_DEFAULT] = {
+                                .start = MODULES_VADDR,
+                                .end = MODULES_END,
+                                .pgprot = PAGE_KERNEL_EXEC,
+                                .alignment = 1,
+                        },
+                },
+        };
+
+        return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
diff --git a/arch/nios2/mm/pgtable.c b/arch/nios2/mm/pgtable.c
index 7c76e8a7447a..6470ed378782 100644
--- a/arch/nios2/mm/pgtable.c
+++ b/arch/nios2/mm/pgtable.c
@@ -11,6 +11,7 @@
 #include <linux/sched.h>
 
 #include <asm/cpuinfo.h>
+#include <asm/pgalloc.h>
 
 /* pteaddr:
  *   ptbase | vpn* | zero
@@ -54,7 +55,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 {
         pgd_t *ret, *init;
 
-        ret = (pgd_t *) __get_free_page(GFP_KERNEL);
+        ret = __pgd_alloc(mm, 0);
         if (ret) {
                 init = pgd_offset(&init_mm, 0UL);
                 pgd_init(ret);
diff --git a/arch/nios2/mm/tlb.c b/arch/nios2/mm/tlb.c
index f90ac35f05f3..a9cbe20f9e79 100644
--- a/arch/nios2/mm/tlb.c
+++ b/arch/nios2/mm/tlb.c
@@ -144,10 +144,11 @@ static void flush_tlb_one(unsigned long addr)
                 if (((pteaddr >> 2) & 0xfffff) != (addr >> PAGE_SHIFT))
                         continue;
 
+                tlbmisc = RDCTL(CTL_TLBMISC);
                 pr_debug("Flush entry by writing way=%dl pid=%ld\n",
-                        way, (pid_misc >> TLBMISC_PID_SHIFT));
+                        way, ((tlbmisc >> TLBMISC_PID_SHIFT) & TLBMISC_PID_MASK));
 
-                tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT);
+                tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT) | (tlbmisc & TLBMISC_PID);
                 WRCTL(CTL_TLBMISC, tlbmisc);
                 WRCTL(CTL_PTEADDR, pteaddr_invalid(addr));
                 WRCTL(CTL_TLBACC, 0);
@@ -237,7 +238,8 @@ void flush_tlb_pid(unsigned long mmu_pid)
                 if (pid != mmu_pid)
                         continue;
 
-                tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT);
+                tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT) |
+                         (pid << TLBMISC_PID_SHIFT);
                 WRCTL(CTL_TLBMISC, tlbmisc);
                 WRCTL(CTL_TLBACC, 0);
         }
@@ -272,15 +274,17 @@ void flush_tlb_all(void)
         /* remember pid/way until we return */
         get_misc_and_pid(&org_misc, &pid_misc);
 
-        /* Start at way 0, way is auto-incremented after each TLBACC write */
-        WRCTL(CTL_TLBMISC, TLBMISC_WE);
-
         /* Map each TLB entry to physcal address 0 with no-access and a
            bad ptbase */
         for (line = 0; line < cpuinfo.tlb_num_lines; line++) {
                 WRCTL(CTL_PTEADDR, pteaddr_invalid(addr));
-                for (way = 0; way < cpuinfo.tlb_num_ways; way++)
+                for (way = 0; way < cpuinfo.tlb_num_ways; way++) {
+                        // Code such as replace_tlb_one_pid assumes that no duplicate entries exist
+                        // for a single address across ways, so also use way as a dummy PID
+                        WRCTL(CTL_TLBMISC, TLBMISC_WE | (way << TLBMISC_WAY_SHIFT) |
+                                           (way << TLBMISC_PID_SHIFT));
                         WRCTL(CTL_TLBACC, 0);
+                }
 
                 addr += PAGE_SIZE;
         }
