Diffstat (limited to 'arch/sh/mm')

 -rw-r--r--  arch/sh/mm/Kconfig        | 78
 -rw-r--r--  arch/sh/mm/alignment.c    |  4
 -rw-r--r--  arch/sh/mm/cache-j2.c     |  4
 -rw-r--r--  arch/sh/mm/cache-sh4.c    | 33
 -rw-r--r--  arch/sh/mm/cache-sh7705.c | 26
 -rw-r--r--  arch/sh/mm/cache-shx3.c   |  1
 -rw-r--r--  arch/sh/mm/cache.c        | 68
 -rw-r--r--  arch/sh/mm/fault.c        | 41
 -rw-r--r--  arch/sh/mm/hugetlbpage.c  | 14
 -rw-r--r--  arch/sh/mm/init.c         | 55
 -rw-r--r--  arch/sh/mm/ioremap.c      | 64
 -rw-r--r--  arch/sh/mm/kmap.c         |  3
 -rw-r--r--  arch/sh/mm/mmap.c         | 30
 -rw-r--r--  arch/sh/mm/nommu.c        |  6
 -rw-r--r--  arch/sh/mm/numa.c         |  3
 -rw-r--r--  arch/sh/mm/pgtable.c      |  4
 -rw-r--r--  arch/sh/mm/pmb.c          | 10
 -rw-r--r--  arch/sh/mm/tlbex_32.c     |  1

 18 files changed, 170 insertions(+), 275 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index ba569cfb4368..1862411665ab 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -4,6 +4,9 @@ menu "Memory management options"
 config MMU
     bool "Support for memory management hardware"
     depends on !CPU_SH2
+    select HAVE_PAGE_SIZE_4KB
+    select HAVE_PAGE_SIZE_8KB if X2TLB
+    select HAVE_PAGE_SIZE_64KB if CPU_SH4
     default y
     help
       Some SH processors (such as SH-2/SH-2A) lack an MMU. In order to
@@ -13,34 +16,39 @@ config MMU
       turning this off will boot the kernel on these machines with the
       MMU implicitly switched off.
 
+config NOMMU
+    def_bool !MMU
+    select HAVE_PAGE_SIZE_4KB
+    select HAVE_PAGE_SIZE_8KB
+    select HAVE_PAGE_SIZE_16KB
+    select HAVE_PAGE_SIZE_64KB
+    help
+      On MMU-less systems, any of these page sizes can be selected
+
 config PAGE_OFFSET
     hex
     default "0x80000000" if MMU
     default "0x00000000"
 
-config FORCE_MAX_ZONEORDER
-    int "Maximum zone order"
-    range 9 64 if PAGE_SIZE_16KB
-    default "9" if PAGE_SIZE_16KB
-    range 7 64 if PAGE_SIZE_64KB
-    default "7" if PAGE_SIZE_64KB
-    range 11 64
-    default "14" if !MMU
-    default "11"
+config ARCH_FORCE_MAX_ORDER
+    int "Order of maximal physically contiguous allocations"
+    default "8" if PAGE_SIZE_16KB
+    default "6" if PAGE_SIZE_64KB
+    default "13" if !MMU
+    default "10"
     help
-      The kernel memory allocator divides physically contiguous memory
-      blocks into "zones", where each zone is a power of two number of
-      pages.  This option selects the largest power of two that the kernel
-      keeps in the memory allocator.  If you need to allocate very large
-      blocks of physically contiguous memory, then you may need to
-      increase this value.
-
-      This config option is actually maximum order plus one. For example,
-      a value of 11 means that the largest free memory block is 2^10 pages.
+      The kernel page allocator limits the size of maximal physically
+      contiguous allocations. The limit is called MAX_PAGE_ORDER and it
+      defines the maximal power of two of number of pages that can be
+      allocated as a single contiguous block. This option allows
+      overriding the default setting when ability to allocate very
+      large blocks of physically contiguous memory is required.
 
       The page size is not necessarily 4KB. Keep this in mind when
       choosing a value for this option.
 
+      Don't change if unsure.
+
 config MEMORY_START
     hex "Physical memory start address"
     default "0x08000000"
@@ -136,10 +144,6 @@ config ARCH_SPARSEMEM_DEFAULT
 config ARCH_SELECT_MEMORY_MODEL
     def_bool y
 
-config ARCH_MEMORY_PROBE
-    def_bool y
-    depends on MEMORY_HOTPLUG
-
 config IOREMAP_FIXED
     def_bool y
     depends on X2TLB
@@ -152,36 +156,6 @@ config HAVE_SRAM_POOL
     select GENERIC_ALLOCATOR
 
 choice
-    prompt "Kernel page size"
-    default PAGE_SIZE_4KB
-
-config PAGE_SIZE_4KB
-    bool "4kB"
-    help
-      This is the default page size used by all SuperH CPUs.
-
-config PAGE_SIZE_8KB
-    bool "8kB"
-    depends on !MMU || X2TLB
-    help
-      This enables 8kB pages as supported by SH-X2 and later MMUs.
-
-config PAGE_SIZE_16KB
-    bool "16kB"
-    depends on !MMU
-    help
-      This enables 16kB pages on MMU-less SH systems.
-
-config PAGE_SIZE_64KB
-    bool "64kB"
-    depends on !MMU || CPU_SH4
-    help
-      This enables support for 64kB pages, possible on all SH-4
-      CPUs and later.
-
-endchoice
-
-choice
     prompt "HugeTLB page size"
     depends on HUGETLB_PAGE
     default HUGETLB_PAGE_SIZE_1MB if PAGE_SIZE_64KB
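A note on the ARCH_FORCE_MAX_ORDER conversion above: the old FORCE_MAX_ZONEORDER value was "maximum order plus one", while the new option is the maximum order itself, which is why every default drops by one (11 becomes 10, 14 becomes 13, and so on). As a minimal sketch of what the resulting MAX_PAGE_ORDER limit means for a caller of the buddy allocator (illustrative only, not part of this patch):

    #include <linux/gfp.h>
    #include <linux/mmzone.h>

    /*
     * The page allocator never satisfies a single request larger than
     * 2^MAX_PAGE_ORDER contiguous pages, so this is the largest order
     * that can possibly succeed.
     */
    static struct page *alloc_largest_block(void)
    {
        return alloc_pages(GFP_KERNEL, MAX_PAGE_ORDER);
    }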
diff --git a/arch/sh/mm/alignment.c b/arch/sh/mm/alignment.c
index fb517b82a87b..3a76a766f423 100644
--- a/arch/sh/mm/alignment.c
+++ b/arch/sh/mm/alignment.c
@@ -140,7 +140,7 @@ static int alignment_proc_open(struct inode *inode, struct file *file)
 static ssize_t alignment_proc_write(struct file *file,
         const char __user *buffer, size_t count, loff_t *pos)
 {
-    int *data = PDE_DATA(file_inode(file));
+    int *data = pde_data(file_inode(file));
     char mode;
 
     if (count > 0) {
@@ -161,7 +161,7 @@ static const struct proc_ops alignment_proc_ops = {
 };
 
 /*
- * This needs to be done after sysctl_init, otherwise sys/ will be
+ * This needs to be done after sysctl_init_bases(), otherwise sys/ will be
  * overwritten.  Actually, this shouldn't be in sys/ at all since
  * it isn't a sysctl, and it doesn't contain sysctl information.
  * We now locate it in /proc/cpu/alignment instead.
diff --git a/arch/sh/mm/cache-j2.c b/arch/sh/mm/cache-j2.c
index f277862a11f5..9ac960214380 100644
--- a/arch/sh/mm/cache-j2.c
+++ b/arch/sh/mm/cache-j2.c
@@ -55,9 +55,9 @@ void __init j2_cache_init(void)
     local_flush_cache_dup_mm = j2_flush_both;
     local_flush_cache_page = j2_flush_both;
     local_flush_cache_range = j2_flush_both;
-    local_flush_dcache_page = j2_flush_dcache;
+    local_flush_dcache_folio = j2_flush_dcache;
     local_flush_icache_range = j2_flush_icache;
-    local_flush_icache_page = j2_flush_icache;
+    local_flush_icache_folio = j2_flush_icache;
     local_flush_cache_sigtramp = j2_flush_icache;
 
     pr_info("Initial J2 CCR is %.8x\n", __raw_readl(j2_ccr_base));
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 72c2e1b46c08..83fb34b39ca7 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -107,19 +107,29 @@ static inline void flush_cache_one(unsigned long start, unsigned long phys)
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
  */
-static void sh4_flush_dcache_page(void *arg)
+static void sh4_flush_dcache_folio(void *arg)
 {
-    struct page *page = arg;
-    unsigned long addr = (unsigned long)page_address(page);
+    struct folio *folio = arg;
 #ifndef CONFIG_SMP
-    struct address_space *mapping = page_mapping_file(page);
+    struct address_space *mapping = folio_flush_mapping(folio);
 
     if (mapping && !mapping_mapped(mapping))
-        clear_bit(PG_dcache_clean, &page->flags);
+        clear_bit(PG_dcache_clean, &folio->flags.f);
     else
 #endif
-        flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
-                (addr & shm_align_mask), page_to_phys(page));
+    {
+        unsigned long pfn = folio_pfn(folio);
+        unsigned long addr = (unsigned long)folio_address(folio);
+        unsigned int i, nr = folio_nr_pages(folio);
+
+        for (i = 0; i < nr; i++) {
+            flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
+                        (addr & shm_align_mask),
+                    pfn * PAGE_SIZE);
+            addr += PAGE_SIZE;
+            pfn++;
+        }
+    }
 
     wmb();
 }
@@ -231,13 +241,14 @@ static void sh4_flush_cache_page(void *args)
     if ((vma->vm_mm == current->active_mm))
         vaddr = NULL;
     else {
+        struct folio *folio = page_folio(page);
         /*
          * Use kmap_coherent or kmap_atomic to do flushes for
          * another ASID than the current one.
          */
         map_coherent = (current_cpu_data.dcache.n_aliases &&
-            test_bit(PG_dcache_clean, &page->flags) &&
-            page_mapcount(page));
+            test_bit(PG_dcache_clean, folio_flags(folio, 0)) &&
+            page_mapped(page));
         if (map_coherent)
             vaddr = kmap_coherent(page, address);
         else
@@ -366,8 +377,6 @@ static void __flush_cache_one(unsigned long addr, unsigned long phys,
     } while (--way_count != 0);
 }
 
-extern void __weak sh4__flush_region_init(void);
-
 /*
  * SH-4 has virtually indexed and physically tagged cache.
  */
@@ -379,7 +388,7 @@ void __init sh4_cache_init(void)
         __raw_readl(CCN_PRR));
 
     local_flush_icache_range    = sh4_flush_icache_range;
-    local_flush_dcache_page     = sh4_flush_dcache_page;
+    local_flush_dcache_folio    = sh4_flush_dcache_folio;
     local_flush_cache_all       = sh4_flush_cache_all;
     local_flush_cache_mm        = sh4_flush_cache_mm;
     local_flush_cache_dup_mm    = sh4_flush_cache_mm;
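With this conversion the SH cache hooks take a folio and flush all of its pages in one call. Per-page callers are still served: the matching header change in this series (arch/sh/include/asm/cacheflush.h, outside this diffstat) boils down to something like the following wrapper:

    /* Per-page compatibility wrapper around the new folio hook. */
    static inline void flush_dcache_page(struct page *page)
    {
        flush_dcache_folio(page_folio(page));
    }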
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 9b63a53a5e46..71f8be9fc8e0 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -132,15 +132,20 @@ static void __flush_dcache_page(unsigned long phys)
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
  */
-static void sh7705_flush_dcache_page(void *arg)
+static void sh7705_flush_dcache_folio(void *arg)
 {
-    struct page *page = arg;
-    struct address_space *mapping = page_mapping_file(page);
+    struct folio *folio = arg;
+    struct address_space *mapping = folio_flush_mapping(folio);
 
     if (mapping && !mapping_mapped(mapping))
-        clear_bit(PG_dcache_clean, &page->flags);
-    else
-        __flush_dcache_page(__pa(page_address(page)));
+        clear_bit(PG_dcache_clean, &folio->flags.f);
+    else {
+        unsigned long pfn = folio_pfn(folio);
+        unsigned int i, nr = folio_nr_pages(folio);
+
+        for (i = 0; i < nr; i++)
+            __flush_dcache_page((pfn + i) * PAGE_SIZE);
+    }
 }
 
 static void sh7705_flush_cache_all(void *args)
@@ -176,19 +181,20 @@ static void sh7705_flush_cache_page(void *args)
  * Not entirely sure why this is necessary on SH3 with 32K cache but
  * without it we get occasional "Memory fault" when loading a program.
  */
-static void sh7705_flush_icache_page(void *page)
+static void sh7705_flush_icache_folio(void *arg)
 {
-    __flush_purge_region(page_address(page), PAGE_SIZE);
+    struct folio *folio = arg;
+    __flush_purge_region(folio_address(folio), folio_size(folio));
 }
 
 void __init sh7705_cache_init(void)
 {
     local_flush_icache_range    = sh7705_flush_icache_range;
-    local_flush_dcache_page     = sh7705_flush_dcache_page;
+    local_flush_dcache_folio    = sh7705_flush_dcache_folio;
     local_flush_cache_all       = sh7705_flush_cache_all;
     local_flush_cache_mm        = sh7705_flush_cache_all;
     local_flush_cache_dup_mm    = sh7705_flush_cache_all;
     local_flush_cache_range     = sh7705_flush_cache_all;
     local_flush_cache_page      = sh7705_flush_cache_page;
-    local_flush_icache_page     = sh7705_flush_icache_page;
+    local_flush_icache_folio    = sh7705_flush_icache_folio;
 }
diff --git a/arch/sh/mm/cache-shx3.c b/arch/sh/mm/cache-shx3.c
index 24c58b7dc022..dec039a75664 100644
--- a/arch/sh/mm/cache-shx3.c
+++ b/arch/sh/mm/cache-shx3.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <asm/cache.h>
+#include <asm/cacheflush.h>
 
 #define CCR_CACHE_SNM  0x40000     /* Hardware-assisted synonym avoidance */
 #define CCR_CACHE_IBE  0x1000000   /* ICBI broadcast */
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 3aef78ceb820..c3f028bed049 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -20,9 +20,9 @@ void (*local_flush_cache_mm)(void *args) = cache_noop;
 void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
 void (*local_flush_cache_page)(void *args) = cache_noop;
 void (*local_flush_cache_range)(void *args) = cache_noop;
-void (*local_flush_dcache_page)(void *args) = cache_noop;
+void (*local_flush_dcache_folio)(void *args) = cache_noop;
 void (*local_flush_icache_range)(void *args) = cache_noop;
-void (*local_flush_icache_page)(void *args) = cache_noop;
+void (*local_flush_icache_folio)(void *args) = cache_noop;
 void (*local_flush_cache_sigtramp)(void *args) = cache_noop;
 
 void (*__flush_wback_region)(void *start, int size);
@@ -61,15 +61,17 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                unsigned long vaddr, void *dst, const void *src,
                unsigned long len)
 {
-    if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
-        test_bit(PG_dcache_clean, &page->flags)) {
+    struct folio *folio = page_folio(page);
+
+    if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
+        test_bit(PG_dcache_clean, &folio->flags.f)) {
         void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
         memcpy(vto, src, len);
         kunmap_coherent(vto);
     } else {
         memcpy(dst, src, len);
         if (boot_cpu_data.dcache.n_aliases)
-            clear_bit(PG_dcache_clean, &page->flags);
+            clear_bit(PG_dcache_clean, &folio->flags.f);
     }
 
     if (vma->vm_flags & VM_EXEC)
@@ -80,27 +82,30 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
              unsigned long vaddr, void *dst, const void *src,
              unsigned long len)
 {
-    if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
-        test_bit(PG_dcache_clean, &page->flags)) {
+    struct folio *folio = page_folio(page);
+
+    if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
+        test_bit(PG_dcache_clean, &folio->flags.f)) {
         void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
         memcpy(dst, vfrom, len);
         kunmap_coherent(vfrom);
     } else {
         memcpy(dst, src, len);
         if (boot_cpu_data.dcache.n_aliases)
-            clear_bit(PG_dcache_clean, &page->flags);
+            clear_bit(PG_dcache_clean, &folio->flags.f);
     }
 }
 
 void copy_user_highpage(struct page *to, struct page *from,
             unsigned long vaddr, struct vm_area_struct *vma)
 {
+    struct folio *src = page_folio(from);
     void *vfrom, *vto;
 
     vto = kmap_atomic(to);
 
-    if (boot_cpu_data.dcache.n_aliases && page_mapcount(from) &&
-        test_bit(PG_dcache_clean, &from->flags)) {
+    if (boot_cpu_data.dcache.n_aliases && folio_mapped(src) &&
+        test_bit(PG_dcache_clean, &src->flags.f)) {
         vfrom = kmap_coherent(from, vaddr);
         copy_page(vto, vfrom);
         kunmap_coherent(vfrom);
@@ -136,27 +141,28 @@ EXPORT_SYMBOL(clear_user_highpage);
 void __update_cache(struct vm_area_struct *vma,
             unsigned long address, pte_t pte)
 {
-    struct page *page;
     unsigned long pfn = pte_pfn(pte);
 
     if (!boot_cpu_data.dcache.n_aliases)
         return;
 
-    page = pfn_to_page(pfn);
     if (pfn_valid(pfn)) {
-        int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
+        struct folio *folio = page_folio(pfn_to_page(pfn));
+        int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags.f);
         if (dirty)
-            __flush_purge_region(page_address(page), PAGE_SIZE);
+            __flush_purge_region(folio_address(folio),
+                        folio_size(folio));
     }
 }
 
 void __flush_anon_page(struct page *page, unsigned long vmaddr)
 {
+    struct folio *folio = page_folio(page);
     unsigned long addr = (unsigned long) page_address(page);
 
     if (pages_do_alias(addr, vmaddr)) {
-        if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
-            test_bit(PG_dcache_clean, &page->flags)) {
+        if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
+            test_bit(PG_dcache_clean, &folio->flags.f)) {
             void *kaddr;
 
             kaddr = kmap_coherent(page, vmaddr);
@@ -164,7 +170,8 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
             /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
             kunmap_coherent(kaddr);
         } else
-            __flush_purge_region((void *)addr, PAGE_SIZE);
+            __flush_purge_region(folio_address(folio),
+                        folio_size(folio));
     }
 }
 
@@ -215,11 +222,11 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 }
 EXPORT_SYMBOL(flush_cache_range);
 
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
 {
-    cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
+    cacheop_on_each_cpu(local_flush_dcache_folio, folio, 1);
 }
-EXPORT_SYMBOL(flush_dcache_page);
+EXPORT_SYMBOL(flush_dcache_folio);
 
 void flush_icache_range(unsigned long start, unsigned long end)
 {
@@ -233,10 +240,11 @@ void flush_icache_range(unsigned long start, unsigned long end)
 }
 EXPORT_SYMBOL(flush_icache_range);
 
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+        unsigned int nr)
 {
-    /* Nothing uses the VMA, so just pass the struct page along */
-    cacheop_on_each_cpu(local_flush_icache_page, page, 1);
+    /* Nothing uses the VMA, so just pass the folio along */
+    cacheop_on_each_cpu(local_flush_icache_folio, page_folio(page), 1);
 }
 
 void flush_cache_sigtramp(unsigned long address)
@@ -312,30 +320,20 @@ void __init cpu_cache_init(void)
         goto skip;
 
     if (boot_cpu_data.type == CPU_J2) {
-        extern void __weak j2_cache_init(void);
-
         j2_cache_init();
     } else if (boot_cpu_data.family == CPU_FAMILY_SH2) {
-        extern void __weak sh2_cache_init(void);
-
         sh2_cache_init();
     }
 
     if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
-        extern void __weak sh2a_cache_init(void);
-
         sh2a_cache_init();
     }
 
     if (boot_cpu_data.family == CPU_FAMILY_SH3) {
-        extern void __weak sh3_cache_init(void);
-
         sh3_cache_init();
 
         if ((boot_cpu_data.type == CPU_SH7705) &&
             (boot_cpu_data.dcache.sets == 512)) {
-            extern void __weak sh7705_cache_init(void);
-
             sh7705_cache_init();
         }
     }
@@ -343,14 +341,10 @@ void __init cpu_cache_init(void)
     if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
         (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
         (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
-        extern void __weak sh4_cache_init(void);
-
         sh4_cache_init();
 
         if ((boot_cpu_data.type == CPU_SH7786) ||
             (boot_cpu_data.type == CPU_SHX3)) {
-            extern void __weak shx3_cache_init(void);
-
             shx3_cache_init();
         }
     }
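cache.c now exports flush_dcache_folio() and flush_icache_pages() instead of the page-sized variants, matching the core MM's set_ptes() batching, where one call may cover several pages of a large folio. Assuming the asm-generic fallback, the old single-page icache entry point reduces to a one-page call, roughly:

    /* Sketch of the generic single-page wrapper (nr == 1). */
    static inline void flush_icache_page(struct vm_area_struct *vma,
                                         struct page *page)
    {
        flush_icache_pages(vma, page, 1);
    }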
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 88a1f453d73e..06e6b4952924 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -238,8 +238,6 @@ no_context(struct pt_regs *regs, unsigned long error_code,
     show_fault_oops(regs, address);
 
     die("Oops", regs, error_code);
-    bust_spinlocks(0);
-    do_exit(SIGKILL);
 }
 
 static void
@@ -441,21 +439,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
     }
 
 retry:
-    mmap_read_lock(mm);
-
-    vma = find_vma(mm, address);
+    vma = lock_mm_and_find_vma(mm, address, regs);
     if (unlikely(!vma)) {
-        bad_area(regs, error_code, address);
-        return;
-    }
-    if (likely(vma->vm_start <= address))
-        goto good_area;
-    if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-        bad_area(regs, error_code, address);
-        return;
-    }
-    if (unlikely(expand_stack(vma, address))) {
-        bad_area(regs, error_code, address);
+        bad_area_nosemaphore(regs, error_code, address);
         return;
     }
 
@@ -463,7 +449,6 @@ retry:
      * Ok, we have a good vm_area for this memory access, so
      * we can handle it..
      */
-good_area:
     if (unlikely(access_error(error_code, vma))) {
         bad_area_access_error(regs, error_code, address);
         return;
@@ -487,17 +472,19 @@ good_area:
     if (mm_fault_error(regs, error_code, address, fault))
         return;
 
-    if (flags & FAULT_FLAG_ALLOW_RETRY) {
-        if (fault & VM_FAULT_RETRY) {
-            flags |= FAULT_FLAG_TRIED;
+    /* The fault is fully completed (including releasing mmap lock) */
+    if (fault & VM_FAULT_COMPLETED)
+        return;
 
-            /*
-             * No need to mmap_read_unlock(mm) as we would
-             * have already released it in __lock_page_or_retry
-             * in mm/filemap.c.
-             */
-            goto retry;
-        }
+    if (fault & VM_FAULT_RETRY) {
+        flags |= FAULT_FLAG_TRIED;
+
+        /*
+         * No need to mmap_read_unlock(mm) as we would
+         * have already released it in __lock_page_or_retry
+         * in mm/filemap.c.
+         */
+        goto retry;
     }
 
     mmap_read_unlock(mm);
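The fault handler now relies on the generic lock_mm_and_find_vma() helper, which folds the open-coded mmap_read_lock()/find_vma()/expand_stack() sequence into core code. A simplified sketch of its contract (the real implementation in mm/memory.c also performs VM_GROWSDOWN stack expansion before giving up):

    /*
     * Returns a vma covering addr with the mmap read lock held, or
     * NULL with the lock already dropped, which is why the failure
     * path above calls bad_area_nosemaphore() rather than bad_area().
     */
    static struct vm_area_struct *
    sketch_lock_mm_and_find_vma(struct mm_struct *mm, unsigned long addr)
    {
        struct vm_area_struct *vma;

        mmap_read_lock(mm);
        vma = find_vma(mm, addr);
        if (vma && vma->vm_start <= addr)
            return vma;

        /* stack-expansion handling omitted in this sketch */
        mmap_read_unlock(mm);
        return NULL;
    }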
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 999ab5916e69..ff209b55285a 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -38,7 +38,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
         if (pud) {
             pmd = pmd_alloc(mm, pud, addr);
             if (pmd)
-                pte = pte_alloc_map(mm, pmd, addr);
+                pte = pte_alloc_huge(mm, pmd, addr);
         }
     }
 
@@ -63,20 +63,10 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
             if (pud) {
                 pmd = pmd_offset(pud, addr);
                 if (pmd)
-                    pte = pte_offset_map(pmd, addr);
+                    pte = pte_offset_huge(pmd, addr);
             }
         }
     }
 
     return pte;
 }
-
-int pmd_huge(pmd_t pmd)
-{
-    return 0;
-}
-
-int pud_huge(pud_t pud)
-{
-    return 0;
-}
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index ce26c7f8950a..99e302eeeec1 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -137,10 +137,7 @@ static pmd_t * __init one_md_table_init(pud_t *pud)
     if (pud_none(*pud)) {
         pmd_t *pmd;
 
-        pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-        if (!pmd)
-            panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-                  __func__, PAGE_SIZE, PAGE_SIZE);
+        pmd = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
         pud_populate(&init_mm, pud, pmd);
         BUG_ON(pmd != pmd_offset(pud, 0));
     }
@@ -153,10 +150,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
     if (pmd_none(*pmd)) {
         pte_t *pte;
 
-        pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-        if (!pte)
-            panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-                  __func__, PAGE_SIZE, PAGE_SIZE);
+        pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
         pmd_populate_kernel(&init_mm, pmd, pte);
         BUG_ON(pte != pte_offset_kernel(pmd, 0));
     }
@@ -212,12 +206,7 @@ void __init allocate_pgdat(unsigned int nid)
     get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 
 #ifdef CONFIG_NUMA
-    NODE_DATA(nid) = memblock_alloc_try_nid(
-                sizeof(struct pglist_data),
-                SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
-                MEMBLOCK_ALLOC_ACCESSIBLE, nid);
-    if (!NODE_DATA(nid))
-        panic("Can't allocate pgdat for node %d\n", nid);
+    alloc_node_data(nid);
 #endif
 
     NODE_DATA(nid)->node_start_pfn = start_pfn;
@@ -341,15 +330,6 @@ unsigned int mem_init_done = 0;
 
 void __init mem_init(void)
 {
-    pg_data_t *pgdat;
-
-    high_memory = NULL;
-    for_each_online_pgdat(pgdat)
-        high_memory = max_t(void *, high_memory,
-                    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));
-
-    memblock_free_all();
-
     /* Set this up early, so we can take care of the zero page */
     cpu_cache_init();
 
@@ -394,32 +374,3 @@ void __init mem_init(void)
 
     mem_init_done = 1;
 }
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size,
-            struct mhp_params *params)
-{
-    unsigned long start_pfn = PFN_DOWN(start);
-    unsigned long nr_pages = size >> PAGE_SHIFT;
-    int ret;
-
-    if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
-        return -EINVAL;
-
-    /* We only have ZONE_NORMAL, so this is easy.. */
-    ret = __add_pages(nid, start_pfn, nr_pages, params);
-    if (unlikely(ret))
-        printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
-
-    return ret;
-}
-
-void arch_remove_memory(int nid, u64 start, u64 size,
-            struct vmem_altmap *altmap)
-{
-    unsigned long start_pfn = PFN_DOWN(start);
-    unsigned long nr_pages = size >> PAGE_SHIFT;
-
-    __remove_pages(start_pfn, nr_pages, altmap);
-}
-#endif /* CONFIG_MEMORY_HOTPLUG */
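memblock_alloc_or_panic() absorbs the alloc-then-panic boilerplate that init.c used to open-code. Behaviourally it is approximately the following (the real helper lives in include/linux/memblock.h and also reports the calling function):

    /* Approximate behaviour of memblock_alloc_or_panic(). */
    static inline void *sketch_memblock_alloc_or_panic(phys_addr_t size,
                                                       phys_addr_t align)
    {
        void *ptr = memblock_alloc(size, align);

        if (!ptr)
            panic("Failed to allocate %pap bytes\n", &size);
        return ptr;
    }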
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 21342581144d..5bbde53fb32d 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -72,21 +72,9 @@ __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
 #define __ioremap_29bit(offset, size, prot)    NULL
 #endif /* CONFIG_29BIT */
 
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void __iomem * __ref
-__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
-         pgprot_t pgprot, void *caller)
+void __iomem __ref *ioremap_prot(phys_addr_t phys_addr, size_t size,
+                 pgprot_t pgprot)
 {
-    struct vm_struct *area;
-    unsigned long offset, last_addr, addr, orig_addr;
     void __iomem *mapped;
 
     mapped = __ioremap_trapped(phys_addr, size);
@@ -97,11 +85,6 @@ __ioremap_caller(phys_addr_t phys_addr, unsigned long size,
     if (mapped)
         return mapped;
 
-    /* Don't allow wraparound or zero size */
-    last_addr = phys_addr + size - 1;
-    if (!size || last_addr < phys_addr)
-        return NULL;
-
     /*
      * If we can't yet use the regular approach, go the fixmap route.
      */
@@ -112,34 +95,14 @@ __ioremap_caller(phys_addr_t phys_addr, unsigned long size,
      * First try to remap through the PMB.
      * PMB entries are all pre-faulted.
      */
-    mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
+    mapped = pmb_remap_caller(phys_addr, size, pgprot,
+            __builtin_return_address(0));
     if (mapped && !IS_ERR(mapped))
         return mapped;
 
-    /*
-     * Mappings have to be page-aligned
-     */
-    offset = phys_addr & ~PAGE_MASK;
-    phys_addr &= PAGE_MASK;
-    size = PAGE_ALIGN(last_addr+1) - phys_addr;
-
-    /*
-     * Ok, go for it..
-     */
-    area = get_vm_area_caller(size, VM_IOREMAP, caller);
-    if (!area)
-        return NULL;
-    area->phys_addr = phys_addr;
-    orig_addr = addr = (unsigned long)area->addr;
-
-    if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
-        vunmap((void *)orig_addr);
-        return NULL;
-    }
-
-    return (void __iomem *)(offset + (char *)orig_addr);
+    return generic_ioremap_prot(phys_addr, size, pgprot);
 }
-EXPORT_SYMBOL(__ioremap_caller);
+EXPORT_SYMBOL(ioremap_prot);
 
 /*
  * Simple checks for non-translatable mappings.
@@ -158,10 +121,9 @@ static inline int iomapping_nontranslatable(unsigned long offset)
     return 0;
 }
 
-void iounmap(void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
 {
     unsigned long vaddr = (unsigned long __force)addr;
-    struct vm_struct *p;
 
     /*
      * Nothing to do if there is no translatable mapping.
@@ -172,21 +134,15 @@ void iounmap(void __iomem *addr)
     /*
      * There's no VMA if it's from an early fixed mapping.
      */
-    if (iounmap_fixed(addr) == 0)
+    if (iounmap_fixed((void __iomem *)addr) == 0)
         return;
 
     /*
      * If the PMB handled it, there's nothing else to do.
     */
-    if (pmb_unmap(addr) == 0)
+    if (pmb_unmap((void __iomem *)addr) == 0)
         return;
 
-    p = remove_vm_area((void *)(vaddr & PAGE_MASK));
-    if (!p) {
-        printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
-        return;
-    }
-
-    kfree(p);
+    generic_iounmap(addr);
 }
 EXPORT_SYMBOL(iounmap);
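After the switch to the generic ioremap machinery, ioremap_prot() keeps only the SH-specific fast paths (29-bit, fixmap, PMB) and hands everything else to generic_ioremap_prot(), which performs the page-aligned vmap setup the removed code used to open-code. Driver usage is unchanged; a hypothetical example (the address is invented for illustration, not a real SH device):

    /* Map a made-up 4KiB register window and read one register. */
    void __iomem *regs = ioremap(0xfe200000, PAGE_SIZE);

    if (regs) {
        u32 id = readl(regs);   /* MMIO access through the mapping */

        pr_info("device id: %08x\n", id);
        iounmap(regs);
    }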
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index 73fd7cc99430..c9f32d5a54b8 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -27,10 +27,11 @@ void __init kmap_coherent_init(void)
 
 void *kmap_coherent(struct page *page, unsigned long addr)
 {
+    struct folio *folio = page_folio(page);
     enum fixed_addresses idx;
     unsigned long vaddr;
 
-    BUG_ON(!test_bit(PG_dcache_clean, &page->flags));
+    BUG_ON(!test_bit(PG_dcache_clean, &folio->flags.f));
 
     preempt_disable();
     pagefault_disable();
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 6a1a1297baae..c442734d9b0c 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -19,6 +19,26 @@ unsigned long shm_align_mask = PAGE_SIZE - 1;  /* Sane caches */
 EXPORT_SYMBOL(shm_align_mask);
 
 #ifdef CONFIG_MMU
+static const pgprot_t protection_map[16] = {
+    [VM_NONE]                                   = PAGE_NONE,
+    [VM_READ]                                   = PAGE_READONLY,
+    [VM_WRITE]                                  = PAGE_COPY,
+    [VM_WRITE | VM_READ]                        = PAGE_COPY,
+    [VM_EXEC]                                   = PAGE_EXECREAD,
+    [VM_EXEC | VM_READ]                         = PAGE_EXECREAD,
+    [VM_EXEC | VM_WRITE]                        = PAGE_COPY,
+    [VM_EXEC | VM_WRITE | VM_READ]              = PAGE_COPY,
+    [VM_SHARED]                                 = PAGE_NONE,
+    [VM_SHARED | VM_READ]                       = PAGE_READONLY,
+    [VM_SHARED | VM_WRITE]                      = PAGE_WRITEONLY,
+    [VM_SHARED | VM_WRITE | VM_READ]            = PAGE_SHARED,
+    [VM_SHARED | VM_EXEC]                       = PAGE_EXECREAD,
+    [VM_SHARED | VM_EXEC | VM_READ]             = PAGE_EXECREAD,
+    [VM_SHARED | VM_EXEC | VM_WRITE]            = PAGE_RWX,
+    [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]  = PAGE_RWX
+};
+DECLARE_VM_GET_PAGE_PROT
+
 /*
  * To avoid cache aliases, we map the shared page with same color.
  */
@@ -32,12 +52,13 @@ static inline unsigned long COLOUR_ALIGN(unsigned long addr,
 }
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
-    unsigned long len, unsigned long pgoff, unsigned long flags)
+    unsigned long len, unsigned long pgoff, unsigned long flags,
+    vm_flags_t vm_flags)
 {
     struct mm_struct *mm = current->mm;
     struct vm_area_struct *vma;
     int do_colour_align;
-    struct vm_unmapped_area_info info;
+    struct vm_unmapped_area_info info = {};
 
     if (flags & MAP_FIXED) {
         /* We do not accept a shared mapping if it would violate
@@ -68,7 +89,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
         return addr;
     }
 
-    info.flags = 0;
     info.length = len;
     info.low_limit = TASK_UNMAPPED_BASE;
     info.high_limit = TASK_SIZE;
@@ -80,13 +100,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
               const unsigned long len, const unsigned long pgoff,
-              const unsigned long flags)
+              const unsigned long flags, vm_flags_t vm_flags)
 {
     struct vm_area_struct *vma;
     struct mm_struct *mm = current->mm;
     unsigned long addr = addr0;
     int do_colour_align;
-    struct vm_unmapped_area_info info;
+    struct vm_unmapped_area_info info = {};
 
     if (flags & MAP_FIXED) {
         /* We do not accept a shared mapping if it would violate
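Moving protection_map into the arch is paired with DECLARE_VM_GET_PAGE_PROT, which generates the lookup function that used to live in generic code. Its expansion is roughly (paraphrasing include/linux/pgtable.h):

    /* Approximate expansion of DECLARE_VM_GET_PAGE_PROT. */
    pgprot_t vm_get_page_prot(unsigned long vm_flags)
    {
        return protection_map[vm_flags &
                (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
    }
    EXPORT_SYMBOL(vm_get_page_prot);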
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
index 8b4504413c5f..fa3dc9428a73 100644
--- a/arch/sh/mm/nommu.c
+++ b/arch/sh/mm/nommu.c
@@ -10,6 +10,8 @@
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/mm.h>
+
+#include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <linux/uaccess.h>
@@ -28,9 +30,9 @@ __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n)
     return 0;
 }
 
-__kernel_size_t __clear_user(void *to, __kernel_size_t n)
+__kernel_size_t __clear_user(void __user *to, __kernel_size_t n)
 {
-    memset(to, 0, n);
+    memset((__force void *)to, 0, n);
     return 0;
 }
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index 50f0dc1744d0..9bc212b5e762 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -14,9 +14,6 @@
 #include <linux/pfn.h>
 #include <asm/sections.h>
 
-struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL_GPL(node_data);
-
 /*
  * On SH machines the conventional approach is to stash system RAM
  * in node 0, and other memory blocks in to node 1 and up, ordered by
diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c
index cf7ce4b57359..3a4085ea0161 100644
--- a/arch/sh/mm/pgtable.c
+++ b/arch/sh/mm/pgtable.c
@@ -2,12 +2,14 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 
+#include <asm/pgalloc.h>
+
 static struct kmem_cache *pgd_cachep;
 #if PAGETABLE_LEVELS > 2
 static struct kmem_cache *pmd_cachep;
 #endif
 
-void pgd_ctor(void *x)
+static void pgd_ctor(void *x)
 {
     pgd_t *pgd = x;
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 68eb7cc6e564..482eec50f404 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -857,7 +857,7 @@ static int __init pmb_debugfs_init(void)
 subsys_initcall(pmb_debugfs_init);
 
 #ifdef CONFIG_PM
-static void pmb_syscore_resume(void)
+static void pmb_syscore_resume(void *data)
 {
     struct pmb_entry *pmbe;
     int i;
@@ -874,13 +874,17 @@ static void pmb_syscore_resume(void)
     read_unlock(&pmb_rwlock);
 }
 
-static struct syscore_ops pmb_syscore_ops = {
+static const struct syscore_ops pmb_syscore_ops = {
     .resume = pmb_syscore_resume,
 };
 
+static struct syscore pmb_syscore = {
+    .ops = &pmb_syscore_ops,
+};
+
 static int __init pmb_sysdev_init(void)
 {
-    register_syscore_ops(&pmb_syscore_ops);
+    register_syscore(&pmb_syscore);
     return 0;
 }
 
 subsys_initcall(pmb_sysdev_init);
diff --git a/arch/sh/mm/tlbex_32.c b/arch/sh/mm/tlbex_32.c
index 1c53868632ee..7d58578c15f4 100644
--- a/arch/sh/mm/tlbex_32.c
+++ b/arch/sh/mm/tlbex_32.c
@@ -14,6 +14,7 @@
 #include <linux/kdebug.h>
 #include <asm/mmu_context.h>
 #include <asm/thread_info.h>
+#include <asm/tlb.h>
 
 /*
  * Called with interrupts disabled.
