Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--  arch/sparc/mm/Makefile       |   5
-rw-r--r--  arch/sparc/mm/execmem.c      |  21
-rw-r--r--  arch/sparc/mm/fault_32.c     |  60
-rw-r--r--  arch/sparc/mm/fault_64.c     |  41
-rw-r--r--  arch/sparc/mm/hugetlbpage.c  | 271
-rw-r--r--  arch/sparc/mm/init_32.c      |  67
-rw-r--r--  arch/sparc/mm/init_64.c      | 187
-rw-r--r--  arch/sparc/mm/io-unit.c      |  43
-rw-r--r--  arch/sparc/mm/iommu.c        |  55
-rw-r--r--  arch/sparc/mm/leon_mm.c      |   8
-rw-r--r--  arch/sparc/mm/srmmu.c        |  90
-rw-r--r--  arch/sparc/mm/tlb.c          |  20
-rw-r--r--  arch/sparc/mm/tsb.c          |   8
13 files changed, 346 insertions(+), 530 deletions(-)
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 871354aa3c00..e9d232561c82 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -2,9 +2,6 @@
# Makefile for the linux Sparc-specific parts of the memory manager.
#
-asflags-y := -ansi
-ccflags-y := -Werror
-
obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
obj-y += fault_$(BITS).o
obj-y += init_$(BITS).o
@@ -15,3 +12,5 @@ obj-$(CONFIG_SPARC32) += leon_mm.o
# Only used by sparc64
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+
+obj-$(CONFIG_EXECMEM) += execmem.o
diff --git a/arch/sparc/mm/execmem.c b/arch/sparc/mm/execmem.c
new file mode 100644
index 000000000000..0fac97dd5728
--- /dev/null
+++ b/arch/sparc/mm/execmem.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/mm.h>
+#include <linux/execmem.h>
+
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = MODULES_VADDR,
+ .end = MODULES_END,
+ .pgprot = PAGE_KERNEL,
+ .alignment = 1,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
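
[Editor's note] The new execmem.c only describes the MODULES_VADDR..MODULES_END window; the generic allocator in mm/execmem.c then serves module and other executable-memory allocations from it. A minimal sketch of a consumer, not part of this patch (size and error handling are illustrative; execmem_alloc()/execmem_free() are the generic entry points):

	#include <linux/execmem.h>

	void *text = execmem_alloc(EXECMEM_DEFAULT, PAGE_SIZE);  /* size is illustrative */
	if (text) {
		/* copy/relocate code into 'text', then sync the instruction cache */
		flush_icache_range((unsigned long)text,
				   (unsigned long)text + PAGE_SIZE);
		execmem_free(text);
	}
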
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index de2031c2b2d7..86a831ebd8c8 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -83,7 +83,7 @@ static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
show_signal_msg(regs, sig, code,
addr, current);
- force_sig_fault(sig, code, (void __user *) addr, 0);
+ force_sig_fault(sig, code, (void __user *) addr);
}
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
@@ -143,28 +143,19 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
if (pagefault_disabled() || !mm)
goto no_context;
+ if (!from_user && address >= PAGE_OFFSET)
+ goto no_context;
+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
- mmap_read_lock(mm);
-
- if (!from_user && address >= PAGE_OFFSET)
- goto bad_area;
-
- vma = find_vma(mm, address);
+ vma = lock_mm_and_find_vma(mm, address, regs);
if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (expand_stack(vma, address))
- goto bad_area;
+ goto bad_area_nosemaphore;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
-good_area:
code = SEGV_ACCERR;
if (write) {
if (!(vma->vm_flags & VM_WRITE))
@@ -187,7 +178,14 @@ good_area:
*/
fault = handle_mm_fault(vma, address, flags, regs);
- if (fault_signal_pending(fault, regs))
+ if (fault_signal_pending(fault, regs)) {
+ if (!from_user)
+ goto no_context;
+ return;
+ }
+
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -200,17 +198,15 @@ good_area:
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
- /* No need to mmap_read_unlock(mm) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
+ /* No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
- goto retry;
- }
+ goto retry;
}
mmap_read_unlock(mm);
@@ -248,7 +244,6 @@ no_context:
}
unhandled_fault(address, tsk, regs);
- do_exit(SIGKILL);
/*
* We ran out of memory, or some other thing happened to us that made
@@ -317,17 +312,9 @@ static void force_user_fault(unsigned long address, int write)
code = SEGV_MAPERR;
- mmap_read_lock(mm);
- vma = find_vma(mm, address);
+ vma = lock_mm_and_find_vma(mm, address, NULL);
if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (expand_stack(vma, address))
- goto bad_area;
-good_area:
+ goto bad_area_nosemaphore;
code = SEGV_ACCERR;
if (write) {
if (!(vma->vm_flags & VM_WRITE))
@@ -346,6 +333,7 @@ good_area:
return;
bad_area:
mmap_read_unlock(mm);
+bad_area_nosemaphore:
__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
return;
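
[Editor's note] lock_mm_and_find_vma() (mm/memory.c) bundles what the removed lines did by hand: it takes the mmap read lock, looks up the VMA, and performs the VM_GROWSDOWN stack expansion itself; on failure it returns NULL with the lock already dropped, which is why the error path jumps to the new bad_area_nosemaphore label. Condensed sketch of the pattern, not literal code from this file:

	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		goto bad_area_nosemaphore;	/* mmap lock is NOT held here */
	/* lock held from here on: do the access checks, then handle_mm_fault() */
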
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 0a6bcc85fba7..e326caf708c6 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -99,6 +99,7 @@ static unsigned int get_user_insn(unsigned long tpc)
local_irq_disable();
pmdp = pmd_offset(pudp, tpc);
+again:
if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
goto out_irq_enable;
@@ -115,6 +116,8 @@ static unsigned int get_user_insn(unsigned long tpc)
#endif
{
ptep = pte_offset_map(pmdp, tpc);
+ if (!ptep)
+ goto again;
pte = *ptep;
if (pte_present(pte)) {
pa = (pte_pfn(pte) << PAGE_SHIFT);
@@ -176,7 +179,7 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
if (unlikely(show_unhandled_signals))
show_signal_msg(regs, sig, code, addr, current);
- force_sig_fault(sig, code, (void __user *) addr, 0);
+ force_sig_fault(sig, code, (void __user *) addr);
}
static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
@@ -383,8 +386,9 @@ continue_fault:
goto bad_area;
}
}
- if (expand_stack(vma, address))
- goto bad_area;
+ vma = expand_stack(mm, address);
+ if (!vma)
+ goto bad_area_nosemaphore;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
@@ -424,8 +428,17 @@ good_area:
fault = handle_mm_fault(vma, address, flags, regs);
- if (fault_signal_pending(fault, regs))
+ if (fault_signal_pending(fault, regs)) {
+ if (regs->tstate & TSTATE_PRIV) {
+ insn = get_fault_insn(regs, insn);
+ goto handle_kernel_fault;
+ }
goto exit_exception;
+ }
+
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ goto lock_released;
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
@@ -437,20 +450,19 @@ good_area:
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
- /* No need to mmap_read_unlock(mm) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
+ /* No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
- goto retry;
- }
+ goto retry;
}
mmap_read_unlock(mm);
+lock_released:
mm_rss = get_mm_rss(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
@@ -479,8 +491,9 @@ exit_exception:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- insn = get_fault_insn(regs, insn);
mmap_read_unlock(mm);
+bad_area_nosemaphore:
+ insn = get_fault_insn(regs, insn);
handle_kernel_fault:
do_kernel_fault(regs, si_code, fault_code, insn, address);
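
[Editor's note] Two generic-mm conventions appear in both fault handlers above: expand_stack() now takes the mm and returns the VMA (or NULL with the mmap lock already released), and handle_mm_fault() may return VM_FAULT_COMPLETED, meaning the fault was fully handled and the lock dropped inside core mm. A condensed sketch of the resulting post-fault logic (illustrative, mirroring the generic pattern rather than either file verbatim):

	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs))
		return;				/* core mm already dropped the lock */
	if (fault & VM_FAULT_COMPLETED)
		return;				/* fully handled, lock already dropped */
	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;			/* lock already dropped before retry */
	}
	mmap_read_unlock(mm);			/* only the normal path still holds it */
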
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 0f49fada2093..4652e868663b 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -19,120 +19,29 @@
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
-/* Slightly simplified from the non-hugepage variant because by
- * definition we don't have to worry about any page coloring stuff
- */
-static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
- unsigned long addr,
- unsigned long len,
- unsigned long pgoff,
- unsigned long flags)
-{
- struct hstate *h = hstate_file(filp);
- unsigned long task_size = TASK_SIZE;
- struct vm_unmapped_area_info info;
-
- if (test_thread_flag(TIF_32BIT))
- task_size = STACK_TOP32;
-
- info.flags = 0;
- info.length = len;
- info.low_limit = TASK_UNMAPPED_BASE;
- info.high_limit = min(task_size, VA_EXCLUDE_START);
- info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
- addr = vm_unmapped_area(&info);
-
- if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
- VM_BUG_ON(addr != -ENOMEM);
- info.low_limit = VA_EXCLUDE_END;
- info.high_limit = task_size;
- addr = vm_unmapped_area(&info);
- }
-
- return addr;
-}
-
-static unsigned long
-hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- const unsigned long len,
- const unsigned long pgoff,
- const unsigned long flags)
+static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
- struct hstate *h = hstate_file(filp);
- struct mm_struct *mm = current->mm;
- unsigned long addr = addr0;
- struct vm_unmapped_area_info info;
-
- /* This should only ever run for 32-bit processes. */
- BUG_ON(!test_thread_flag(TIF_32BIT));
-
- info.flags = VM_UNMAPPED_AREA_TOPDOWN;
- info.length = len;
- info.low_limit = PAGE_SIZE;
- info.high_limit = mm->mmap_base;
- info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
- addr = vm_unmapped_area(&info);
-
- /*
- * A failed mmap() very likely causes application failure,
- * so fall back to the bottom-up function here. This scenario
- * can happen with large stack limits and large mmap()
- * allocations.
- */
- if (addr & ~PAGE_MASK) {
- VM_BUG_ON(addr != -ENOMEM);
- info.flags = 0;
- info.low_limit = TASK_UNMAPPED_BASE;
- info.high_limit = STACK_TOP32;
- addr = vm_unmapped_area(&info);
- }
+ unsigned long hugepage_size = _PAGE_SZ4MB_4U;
- return addr;
-}
+ pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4U;
-unsigned long
-hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- struct hstate *h = hstate_file(file);
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long task_size = TASK_SIZE;
-
- if (test_thread_flag(TIF_32BIT))
- task_size = STACK_TOP32;
-
- if (len & ~huge_page_mask(h))
- return -EINVAL;
- if (len > task_size)
- return -ENOMEM;
-
- if (flags & MAP_FIXED) {
- if (prepare_hugepage_range(file, addr, len))
- return -EINVAL;
- return addr;
- }
-
- if (addr) {
- addr = ALIGN(addr, huge_page_size(h));
- vma = find_vma(mm, addr);
- if (task_size - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
- return addr;
+ switch (shift) {
+ case HPAGE_256MB_SHIFT:
+ hugepage_size = _PAGE_SZ256MB_4U;
+ pte_val(entry) |= _PAGE_PMD_HUGE;
+ break;
+ case HPAGE_SHIFT:
+ pte_val(entry) |= _PAGE_PMD_HUGE;
+ break;
+ case HPAGE_64K_SHIFT:
+ hugepage_size = _PAGE_SZ64K_4U;
+ break;
+ default:
+ WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
}
- if (mm->get_unmapped_area == arch_get_unmapped_area)
- return hugetlb_get_unmapped_area_bottomup(file, addr, len,
- pgoff, flags);
- else
- return hugetlb_get_unmapped_area_topdown(file, addr, len,
- pgoff, flags);
-}
-static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
-{
+ pte_val(entry) = pte_val(entry) | hugepage_size;
return entry;
}
@@ -181,6 +90,7 @@ pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
pte_t pte;
+ entry = pte_mkhuge(entry);
pte = hugepage_shift_to_tte(entry, shift);
#ifdef CONFIG_SPARC64
@@ -297,7 +207,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
return NULL;
if (sz >= PMD_SIZE)
return (pte_t *)pmd;
- return pte_alloc_map(mm, pmd, addr);
+ return pte_alloc_huge(mm, pmd, addr);
}
pte_t *huge_pte_offset(struct mm_struct *mm,
@@ -324,10 +234,10 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return NULL;
if (is_hugetlb_pmd(*pmd))
return (pte_t *)pmd;
- return pte_offset_map(pmd, addr);
+ return pte_offset_huge(pmd, addr);
}
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
unsigned int nptes, orig_shift, shift;
@@ -363,8 +273,14 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
orig_shift);
}
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t entry, unsigned long sz)
+{
+ __set_huge_pte_at(mm, addr, ptep, entry);
+}
+
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
+ pte_t *ptep, unsigned long sz)
{
unsigned int i, nptes, orig_shift, shift;
unsigned long size;
@@ -399,134 +315,3 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
return entry;
}
-
-int pmd_huge(pmd_t pmd)
-{
- return !pmd_none(pmd) &&
- (pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
-}
-
-int pud_huge(pud_t pud)
-{
- return !pud_none(pud) &&
- (pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID;
-}
-
-static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
- unsigned long addr)
-{
- pgtable_t token = pmd_pgtable(*pmd);
-
- pmd_clear(pmd);
- pte_free_tlb(tlb, token, addr);
- mm_dec_nr_ptes(tlb->mm);
-}
-
-static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
- unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling)
-{
- pmd_t *pmd;
- unsigned long next;
- unsigned long start;
-
- start = addr;
- pmd = pmd_offset(pud, addr);
- do {
- next = pmd_addr_end(addr, end);
- if (pmd_none(*pmd))
- continue;
- if (is_hugetlb_pmd(*pmd))
- pmd_clear(pmd);
- else
- hugetlb_free_pte_range(tlb, pmd, addr);
- } while (pmd++, addr = next, addr != end);
-
- start &= PUD_MASK;
- if (start < floor)
- return;
- if (ceiling) {
- ceiling &= PUD_MASK;
- if (!ceiling)
- return;
- }
- if (end - 1 > ceiling - 1)
- return;
-
- pmd = pmd_offset(pud, start);
- pud_clear(pud);
- pmd_free_tlb(tlb, pmd, start);
- mm_dec_nr_pmds(tlb->mm);
-}
-
-static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
- unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling)
-{
- pud_t *pud;
- unsigned long next;
- unsigned long start;
-
- start = addr;
- pud = pud_offset(p4d, addr);
- do {
- next = pud_addr_end(addr, end);
- if (pud_none_or_clear_bad(pud))
- continue;
- if (is_hugetlb_pud(*pud))
- pud_clear(pud);
- else
- hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
- ceiling);
- } while (pud++, addr = next, addr != end);
-
- start &= PGDIR_MASK;
- if (start < floor)
- return;
- if (ceiling) {
- ceiling &= PGDIR_MASK;
- if (!ceiling)
- return;
- }
- if (end - 1 > ceiling - 1)
- return;
-
- pud = pud_offset(p4d, start);
- p4d_clear(p4d);
- pud_free_tlb(tlb, pud, start);
- mm_dec_nr_puds(tlb->mm);
-}
-
-void hugetlb_free_pgd_range(struct mmu_gather *tlb,
- unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling)
-{
- pgd_t *pgd;
- p4d_t *p4d;
- unsigned long next;
-
- addr &= PMD_MASK;
- if (addr < floor) {
- addr += PMD_SIZE;
- if (!addr)
- return;
- }
- if (ceiling) {
- ceiling &= PMD_MASK;
- if (!ceiling)
- return;
- }
- if (end - 1 > ceiling - 1)
- end -= PMD_SIZE;
- if (addr > end - 1)
- return;
-
- pgd = pgd_offset(tlb->mm, addr);
- p4d = p4d_offset(pgd, addr);
- do {
- next = p4d_addr_end(addr, end);
- if (p4d_none_or_clear_bad(p4d))
- continue;
- hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
- } while (p4d++, addr = next, addr != end);
-}
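
[Editor's note] The extra sz argument on set_huge_pte_at() and huge_ptep_get_and_clear() comes from the generic hugetlb API, which now passes the page size explicitly instead of expecting the architecture to infer it from the PTE (impossible for e.g. migration entries); here the new set_huge_pte_at() simply forwards to the existing __set_huge_pte_at(). A sketch of how generic code calls these, assuming a struct hstate *h describing the mapping:

	/* install a huge mapping */
	set_huge_pte_at(mm, addr, ptep, entry, huge_page_size(h));
	/* ... later, tear it down again ... */
	old_pte = huge_ptep_get_and_clear(mm, addr, ptep, huge_page_size(h));
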
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 1e9f577f084d..fdc93dd12c3e 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -37,8 +37,7 @@
#include "mm_32.h"
-unsigned long *sparc_valid_addr_bitmap;
-EXPORT_SYMBOL(sparc_valid_addr_bitmap);
+static unsigned long *sparc_valid_addr_bitmap;
unsigned long phys_base;
EXPORT_SYMBOL(phys_base);
@@ -233,19 +232,7 @@ static void __init taint_real_pages(void)
}
}
-static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
-{
- unsigned long tmp;
-
-#ifdef CONFIG_DEBUG_HIGHMEM
- printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);
-#endif
-
- for (tmp = start_pfn; tmp < end_pfn; tmp++)
- free_highmem_page(pfn_to_page(tmp));
-}
-
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
int i;
@@ -275,30 +262,42 @@ void __init mem_init(void)
memset(sparc_valid_addr_bitmap, 0, i << 2);
taint_real_pages();
-
- max_mapnr = last_valid_pfn - pfn_base;
- high_memory = __va(max_low_pfn << PAGE_SHIFT);
- memblock_free_all();
-
- for (i = 0; sp_banks[i].num_bytes != 0; i++) {
- unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
- unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
-
- if (end_pfn <= highstart_pfn)
- continue;
-
- if (start_pfn < highstart_pfn)
- start_pfn = highstart_pfn;
-
- map_high_region(start_pfn, end_pfn);
- }
}
void sparc_flush_page_to_ram(struct page *page)
{
unsigned long vaddr = (unsigned long)page_address(page);
- if (vaddr)
- __flush_page_to_ram(vaddr);
+ __flush_page_to_ram(vaddr);
}
EXPORT_SYMBOL(sparc_flush_page_to_ram);
+
+void sparc_flush_folio_to_ram(struct folio *folio)
+{
+ unsigned long vaddr = (unsigned long)folio_address(folio);
+ unsigned int i, nr = folio_nr_pages(folio);
+
+ for (i = 0; i < nr; i++)
+ __flush_page_to_ram(vaddr + i * PAGE_SIZE);
+}
+EXPORT_SYMBOL(sparc_flush_folio_to_ram);
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_READONLY,
+ [VM_EXEC | VM_READ] = PAGE_READONLY,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED
+};
+DECLARE_VM_GET_PAGE_PROT
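
[Editor's note] The protection_map[] table plus DECLARE_VM_GET_PAGE_PROT replace the old per-arch __P000..__S111 protection macros. The macro (from <linux/pgtable.h>) expands to roughly the following, giving sparc32 the standard table-lookup vm_get_page_prot():

	pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
	{
		return protection_map[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	}
	EXPORT_SYMBOL(vm_get_page_prot);
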
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 1b23639e2fcd..df9f7c444c39 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -195,21 +195,26 @@ atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif
-inline void flush_dcache_page_impl(struct page *page)
+inline void flush_dcache_folio_impl(struct folio *folio)
{
+ unsigned int i, nr = folio_nr_pages(folio);
+
BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes);
#endif
#ifdef DCACHE_ALIASING_POSSIBLE
- __flush_dcache_page(page_address(page),
- ((tlb_type == spitfire) &&
- page_mapping_file(page) != NULL));
+ for (i = 0; i < nr; i++)
+ __flush_dcache_page(folio_address(folio) + i * PAGE_SIZE,
+ ((tlb_type == spitfire) &&
+ folio_flush_mapping(folio) != NULL));
#else
- if (page_mapping_file(page) != NULL &&
- tlb_type == spitfire)
- __flush_icache_page(__pa(page_address(page)));
+ if (folio_flush_mapping(folio) != NULL &&
+ tlb_type == spitfire) {
+ for (i = 0; i < nr; i++)
+ __flush_icache_page((pfn + i) * PAGE_SIZE);
+ }
#endif
}
@@ -218,10 +223,10 @@ inline void flush_dcache_page_impl(struct page *page)
#define PG_dcache_cpu_mask \
((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
-#define dcache_dirty_cpu(page) \
- (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
+#define dcache_dirty_cpu(folio) \
+ (((folio)->flags.f >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
-static inline void set_dcache_dirty(struct page *page, int this_cpu)
+static inline void set_dcache_dirty(struct folio *folio, int this_cpu)
{
unsigned long mask = this_cpu;
unsigned long non_cpu_bits;
@@ -238,11 +243,11 @@ static inline void set_dcache_dirty(struct page *page, int this_cpu)
"bne,pn %%xcc, 1b\n\t"
" nop"
: /* no outputs */
- : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
+ : "r" (mask), "r" (non_cpu_bits), "r" (&folio->flags.f)
: "g1", "g7");
}
-static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
+static inline void clear_dcache_dirty_cpu(struct folio *folio, unsigned long cpu)
{
unsigned long mask = (1UL << PG_dcache_dirty);
@@ -260,7 +265,7 @@ static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
" nop\n"
"2:"
: /* no outputs */
- : "r" (cpu), "r" (mask), "r" (&page->flags),
+ : "r" (cpu), "r" (mask), "r" (&folio->flags.f),
"i" (PG_dcache_cpu_mask),
"i" (PG_dcache_cpu_shift)
: "g1", "g7");
@@ -284,9 +289,10 @@ static void flush_dcache(unsigned long pfn)
page = pfn_to_page(pfn);
if (page) {
+ struct folio *folio = page_folio(page);
unsigned long pg_flags;
- pg_flags = page->flags;
+ pg_flags = folio->flags.f;
if (pg_flags & (1UL << PG_dcache_dirty)) {
int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
PG_dcache_cpu_mask);
@@ -296,11 +302,11 @@ static void flush_dcache(unsigned long pfn)
* in the SMP case.
*/
if (cpu == this_cpu)
- flush_dcache_page_impl(page);
+ flush_dcache_folio_impl(folio);
else
- smp_flush_dcache_page_impl(page, cpu);
+ smp_flush_dcache_folio_impl(folio, cpu);
- clear_dcache_dirty_cpu(page, cpu);
+ clear_dcache_dirty_cpu(folio, cpu);
put_cpu();
}
@@ -388,12 +394,14 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
}
#endif /* CONFIG_HUGETLB_PAGE */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep, unsigned int nr)
{
struct mm_struct *mm;
unsigned long flags;
bool is_huge_tsb;
pte_t pte = *ptep;
+ unsigned int i;
if (tlb_type != hypervisor) {
unsigned long pfn = pte_pfn(pte);
@@ -440,15 +448,21 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
}
}
#endif
- if (!is_huge_tsb)
- __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
- address, pte_val(pte));
+ if (!is_huge_tsb) {
+ for (i = 0; i < nr; i++) {
+ __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
+ address, pte_val(pte));
+ address += PAGE_SIZE;
+ pte_val(pte) += PAGE_SIZE;
+ }
+ }
spin_unlock_irqrestore(&mm->context.lock, flags);
}
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
{
+ unsigned long pfn = folio_pfn(folio);
struct address_space *mapping;
int this_cpu;
@@ -459,35 +473,35 @@ void flush_dcache_page(struct page *page)
* is merely the zero page. The 'bigcore' testcase in GDB
* causes this case to run millions of times.
*/
- if (page == ZERO_PAGE(0))
+ if (is_zero_pfn(pfn))
return;
this_cpu = get_cpu();
- mapping = page_mapping_file(page);
+ mapping = folio_flush_mapping(folio);
if (mapping && !mapping_mapped(mapping)) {
- int dirty = test_bit(PG_dcache_dirty, &page->flags);
+ bool dirty = test_bit(PG_dcache_dirty, &folio->flags.f);
if (dirty) {
- int dirty_cpu = dcache_dirty_cpu(page);
+ int dirty_cpu = dcache_dirty_cpu(folio);
if (dirty_cpu == this_cpu)
goto out;
- smp_flush_dcache_page_impl(page, dirty_cpu);
+ smp_flush_dcache_folio_impl(folio, dirty_cpu);
}
- set_dcache_dirty(page, this_cpu);
+ set_dcache_dirty(folio, this_cpu);
} else {
- /* We could delay the flush for the !page_mapping
+ /* We could delay the flush for the !folio_mapping
* case too. But that case is for exec env/arg
* pages and those are %99 certainly going to get
* faulted into the tlb (and thus flushed) anyways.
*/
- flush_dcache_page_impl(page);
+ flush_dcache_folio_impl(folio);
}
out:
put_cpu();
}
-EXPORT_SYMBOL(flush_dcache_page);
+EXPORT_SYMBOL(flush_dcache_folio);
void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
@@ -709,9 +723,10 @@ static void __init inherit_prom_mappings(void)
void prom_world(int enter)
{
- if (!enter)
- set_fs(get_fs());
-
+ /*
+ * No need to change the address space any more, just flush
+ * the register windows
+ */
__asm__ __volatile__("flushw");
}
@@ -1060,14 +1075,9 @@ static void __init allocate_node_data(int nid)
{
struct pglist_data *p;
unsigned long start_pfn, end_pfn;
-#ifdef CONFIG_NUMA
- NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
- SMP_CACHE_BYTES, nid);
- if (!NODE_DATA(nid)) {
- prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
- prom_halt();
- }
+#ifdef CONFIG_NUMA
+ alloc_node_data(nid);
NODE_DATA(nid)->node_id = nid;
#endif
@@ -1100,11 +1110,9 @@ static void init_node_masks_nonnuma(void)
}
#ifdef CONFIG_NUMA
-struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
-EXPORT_SYMBOL(node_data);
static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
u32 cfg_handle)
@@ -1650,14 +1658,14 @@ bool kern_addr_valid(unsigned long addr)
if (pud_none(*pud))
return false;
- if (pud_large(*pud))
+ if (pud_leaf(*pud))
return pfn_valid(pud_pfn(*pud));
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
return false;
- if (pmd_large(*pmd))
+ if (pmd_leaf(*pmd))
return pfn_valid(pmd_pfn(*pmd));
pte = pte_offset_kernel(pmd, addr);
@@ -1666,7 +1674,6 @@ bool kern_addr_valid(unsigned long addr)
return pfn_valid(pte_pfn(*pte));
}
-EXPORT_SYMBOL(kern_addr_valid);
static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
unsigned long vend,
@@ -2280,10 +2287,10 @@ void __init paging_init(void)
setup_page_offset();
/* These build time checkes make sure that the dcache_dirty_cpu()
- * page->flags usage will work.
+ * folio->flags usage will work.
*
* When a page gets marked as dcache-dirty, we store the
- * cpu number starting at bit 32 in the page->flags. Also,
+ * cpu number starting at bit 32 in the folio->flags. Also,
* functions like clear_dcache_dirty_cpu use the cpu mask
* in 13-bit signed-immediate instruction fields.
*/
@@ -2498,10 +2505,6 @@ static void __init register_page_bootmem_info(void)
}
void __init mem_init(void)
{
- high_memory = __va(last_valid_pfn << PAGE_SHIFT);
-
- memblock_free_all();
-
/*
* Must be done after boot memory is put on freelist, because here we
* might set fields in deferred struct pages that have not yet been
@@ -2626,13 +2629,11 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
return 0;
}
-
-void vmemmap_free(unsigned long start, unsigned long end,
- struct vmem_altmap *altmap)
-{
-}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
+static pgprot_t protection_map[16] __ro_after_init;
+
static void prot_init_common(unsigned long page_none,
unsigned long page_shared,
unsigned long page_copy,
@@ -2877,40 +2878,40 @@ void __flush_tlb_all(void)
: : "r" (pstate));
}
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+static pte_t *__pte_alloc_one(struct mm_struct *mm)
{
- struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- pte_t *pte = NULL;
-
- if (page)
- pte = (pte_t *) page_address(page);
+ struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);
- return pte;
-}
-
-pgtable_t pte_alloc_one(struct mm_struct *mm)
-{
- struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page)
+ if (!ptdesc)
return NULL;
- if (!pgtable_pte_page_ctor(page)) {
- __free_page(page);
+ if (!pagetable_pte_ctor(mm, ptdesc)) {
+ pagetable_free(ptdesc);
return NULL;
}
- return (pte_t *) page_address(page);
+ return ptdesc_address(ptdesc);
}
-void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
- free_page((unsigned long)pte);
+ return __pte_alloc_one(mm);
+}
+
+pgtable_t pte_alloc_one(struct mm_struct *mm)
+{
+ return __pte_alloc_one(mm);
}
static void __pte_free(pgtable_t pte)
{
- struct page *page = virt_to_page(pte);
+ struct ptdesc *ptdesc = virt_to_ptdesc(pte);
- pgtable_pte_page_dtor(page);
- __free_page(page);
+ pagetable_dtor(ptdesc);
+ pagetable_free(ptdesc);
+}
+
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ __pte_free(pte);
}
void pte_free(struct mm_struct *mm, pgtable_t pte)
@@ -2927,6 +2928,22 @@ void pgtable_free(void *table, bool is_page)
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static void pte_free_now(struct rcu_head *head)
+{
+ struct page *page;
+
+ page = container_of(head, struct page, rcu_head);
+ __pte_free((pgtable_t)page_address(page));
+}
+
+void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
+{
+ struct page *page;
+
+ page = virt_to_page(pgtable);
+ call_rcu(&page->rcu_head, pte_free_now);
+}
+
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd)
{
@@ -2934,7 +2951,7 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
struct mm_struct *mm;
pmd_t entry = *pmd;
- if (!pmd_large(entry) || !pmd_young(entry))
+ if (!pmd_leaf(entry) || !pmd_young(entry))
return;
pte = pmd_val(entry);
@@ -3183,3 +3200,15 @@ void copy_highpage(struct page *to, struct page *from)
}
}
EXPORT_SYMBOL(copy_highpage);
+
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
+{
+ unsigned long prot = pgprot_val(protection_map[vm_flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
+
+ if (vm_flags & VM_SPARC_ADI)
+ prot |= _PAGE_MCD_4V;
+
+ return __pgprot(prot);
+}
+EXPORT_SYMBOL(vm_get_page_prot);
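
[Editor's note] Unlike sparc32 above, sparc64 keeps its own vm_get_page_prot() because ADI-tagged mappings (VM_SPARC_ADI) need the extra _PAGE_MCD_4V bit on top of the table lookup; the table itself is filled at boot by sun4{u,v}_pgprot_init(), as the new comment notes. Sketch of how core mm consumes this (not code from this patch):

	/* e.g. when core mm (re)computes a VMA's protection bits */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	/* a VM_SPARC_ADI mapping thus picks up _PAGE_MCD_4V on sun4v */
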
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index bf3e6d2fe5d9..d409cb450de4 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -13,7 +13,8 @@
#include <linux/bitops.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/io-unit.h>
@@ -93,13 +94,14 @@ static int __init iounit_init(void)
subsys_initcall(iounit_init);
/* One has to hold iounit->lock to call this */
-static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
+static dma_addr_t iounit_get_area(struct iounit_struct *iounit,
+ phys_addr_t phys, int size)
{
int i, j, k, npages;
unsigned long rotor, scan, limit;
iopte_t iopte;
- npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+ npages = (offset_in_page(phys) + size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
/* A tiny bit of magic ingredience :) */
switch (npages) {
@@ -108,7 +110,7 @@ static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long
default: i = 0x0213; break;
}
- IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
+ IOD(("%s(%pa,%d[%d])=", __func__, &phys, size, npages));
next: j = (i & 15);
rotor = iounit->rotor[j - 1];
@@ -123,7 +125,8 @@ nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan);
}
i >>= 4;
if (!(i & 15))
- panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
+ panic("iounit_get_area: Couldn't find free iopte slots for (%pa,%d)\n",
+ &phys, size);
goto next;
}
for (k = 1, scan++; k < npages; k++)
@@ -131,30 +134,29 @@ nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan);
goto nexti;
iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
scan -= npages;
- iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
- vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
+ iopte = MKIOPTE(phys & PAGE_MASK);
+ phys = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + offset_in_page(phys);
for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
set_bit(scan, iounit->bmap);
sbus_writel(iopte_val(iopte), &iounit->page_table[scan]);
}
- IOD(("%08lx\n", vaddr));
- return vaddr;
+ IOD(("%pa\n", &phys));
+ return phys;
}
-static dma_addr_t iounit_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t len, enum dma_data_direction dir,
- unsigned long attrs)
+static dma_addr_t iounit_map_phys(struct device *dev, phys_addr_t phys,
+ size_t len, enum dma_data_direction dir, unsigned long attrs)
{
- void *vaddr = page_address(page) + offset;
struct iounit_struct *iounit = dev->archdata.iommu;
- unsigned long ret, flags;
+ unsigned long flags;
+ dma_addr_t ret;
/* XXX So what is maxphys for us and how do drivers know it? */
if (!len || len > 256 * 1024)
return DMA_MAPPING_ERROR;
spin_lock_irqsave(&iounit->lock, flags);
- ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
+ ret = iounit_get_area(iounit, phys, len);
spin_unlock_irqrestore(&iounit->lock, flags);
return ret;
}
@@ -170,14 +172,15 @@ static int iounit_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
spin_lock_irqsave(&iounit->lock, flags);
for_each_sg(sgl, sg, nents, i) {
- sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
+ sg->dma_address =
+ iounit_get_area(iounit, sg_phys(sg), sg->length);
sg->dma_length = sg->length;
}
spin_unlock_irqrestore(&iounit->lock, flags);
return nents;
}
-static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t len,
+static void iounit_unmap_phys(struct device *dev, dma_addr_t vaddr, size_t len,
enum dma_data_direction dir, unsigned long attrs)
{
struct iounit_struct *iounit = dev->archdata.iommu;
@@ -244,7 +247,7 @@ static void *iounit_alloc(struct device *dev, size_t len,
long i;
pmdp = pmd_off_k(addr);
- ptep = pte_offset_map(pmdp, addr);
+ ptep = pte_offset_kernel(pmdp, addr);
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
@@ -278,8 +281,8 @@ static const struct dma_map_ops iounit_dma_ops = {
.alloc = iounit_alloc,
.free = iounit_free,
#endif
- .map_page = iounit_map_page,
- .unmap_page = iounit_unmap_page,
+ .map_phys = iounit_map_phys,
+ .unmap_phys = iounit_unmap_phys,
.map_sg = iounit_map_sg,
.unmap_sg = iounit_unmap_sg,
};
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 0c0342e5b10d..f48adf62724a 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -7,14 +7,15 @@
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
-
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/mxcc.h>
@@ -180,18 +181,20 @@ static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
}
}
-static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t len, bool per_page_flush)
+static dma_addr_t __sbus_iommu_map_phys(struct device *dev, phys_addr_t paddr,
+ size_t len, bool per_page_flush, unsigned long attrs)
{
struct iommu_struct *iommu = dev->archdata.iommu;
- phys_addr_t paddr = page_to_phys(page) + offset;
- unsigned long off = paddr & ~PAGE_MASK;
+ unsigned long off = offset_in_page(paddr);
unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long pfn = __phys_to_pfn(paddr);
unsigned int busa, busa0;
iopte_t *iopte, *iopte0;
int ioptex, i;
+ if (unlikely(attrs & DMA_ATTR_MMIO))
+ return DMA_MAPPING_ERROR;
+
/* XXX So what is maxphys for us and how do drivers know it? */
if (!len || len > 256 * 1024)
return DMA_MAPPING_ERROR;
@@ -201,10 +204,10 @@ static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
* XXX Is this a good assumption?
* XXX What if someone else unmaps it here and races us?
*/
- if (per_page_flush && !PageHighMem(page)) {
+ if (per_page_flush && !PhysHighMem(paddr)) {
unsigned long vaddr, p;
- vaddr = (unsigned long)page_address(page) + offset;
+ vaddr = (unsigned long)phys_to_virt(paddr);
for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
flush_page_for_dma(p);
}
@@ -230,19 +233,19 @@ static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
return busa0 + off;
}
-static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
- struct page *page, unsigned long offset, size_t len,
- enum dma_data_direction dir, unsigned long attrs)
+static dma_addr_t sbus_iommu_map_phys_gflush(struct device *dev,
+ phys_addr_t phys, size_t len, enum dma_data_direction dir,
+ unsigned long attrs)
{
flush_page_for_dma(0);
- return __sbus_iommu_map_page(dev, page, offset, len, false);
+ return __sbus_iommu_map_phys(dev, phys, len, false, attrs);
}
-static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
- struct page *page, unsigned long offset, size_t len,
- enum dma_data_direction dir, unsigned long attrs)
+static dma_addr_t sbus_iommu_map_phys_pflush(struct device *dev,
+ phys_addr_t phys, size_t len, enum dma_data_direction dir,
+ unsigned long attrs)
{
- return __sbus_iommu_map_page(dev, page, offset, len, true);
+ return __sbus_iommu_map_phys(dev, phys, len, true, attrs);
}
static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
@@ -253,10 +256,10 @@ static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
int j;
for_each_sg(sgl, sg, nents, j) {
- sg->dma_address =__sbus_iommu_map_page(dev, sg_page(sg),
- sg->offset, sg->length, per_page_flush);
+ sg->dma_address = __sbus_iommu_map_phys(dev, sg_phys(sg),
+ sg->length, per_page_flush, attrs);
if (sg->dma_address == DMA_MAPPING_ERROR)
- return 0;
+ return -EIO;
sg->dma_length = sg->length;
}
@@ -276,7 +279,7 @@ static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true);
}
-static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
+static void sbus_iommu_unmap_phys(struct device *dev, dma_addr_t dma_addr,
size_t len, enum dma_data_direction dir, unsigned long attrs)
{
struct iommu_struct *iommu = dev->archdata.iommu;
@@ -302,7 +305,7 @@ static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
int i;
for_each_sg(sgl, sg, nents, i) {
- sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
+ sbus_iommu_unmap_phys(dev, sg->dma_address, sg->length, dir,
attrs);
sg->dma_address = 0x21212121;
}
@@ -358,7 +361,7 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len,
__flush_page_to_ram(page);
pmdp = pmd_off_k(addr);
- ptep = pte_offset_map(pmdp, addr);
+ ptep = pte_offset_kernel(pmdp, addr);
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
}
@@ -425,8 +428,8 @@ static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
.alloc = sbus_iommu_alloc,
.free = sbus_iommu_free,
#endif
- .map_page = sbus_iommu_map_page_gflush,
- .unmap_page = sbus_iommu_unmap_page,
+ .map_phys = sbus_iommu_map_phys_gflush,
+ .unmap_phys = sbus_iommu_unmap_phys,
.map_sg = sbus_iommu_map_sg_gflush,
.unmap_sg = sbus_iommu_unmap_sg,
};
@@ -436,8 +439,8 @@ static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
.alloc = sbus_iommu_alloc,
.free = sbus_iommu_free,
#endif
- .map_page = sbus_iommu_map_page_pflush,
- .unmap_page = sbus_iommu_unmap_page,
+ .map_phys = sbus_iommu_map_phys_pflush,
+ .unmap_phys = sbus_iommu_unmap_phys,
.map_sg = sbus_iommu_map_sg_pflush,
.unmap_sg = sbus_iommu_unmap_sg,
};
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index ec61ff1f96b7..1dc9b3d70eda 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -39,12 +39,10 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
unsigned int ctxtbl;
unsigned int pgd, pmd, ped;
unsigned int ptr;
- unsigned int lvl, pte, paddrbase;
+ unsigned int lvl, pte;
unsigned int ctx;
unsigned int paddr_calc;
- paddrbase = 0;
-
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: trace on\n");
@@ -73,7 +71,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: pgd is entry level 3\n");
lvl = 3;
pte = pgd;
- paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -96,7 +93,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: pmd is entry level 2\n");
lvl = 2;
pte = pmd;
- paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -124,7 +120,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: ped is entry level 1\n");
lvl = 1;
pte = ped;
- paddrbase = ped & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -147,7 +142,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: ptr is entry level 0\n");
lvl = 0;
pte = ptr;
- paddrbase = ptr & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (srmmu_swprobe_trace)
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index a9aa6a92c7fe..f8fb4911d360 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -277,19 +277,13 @@ static void __init srmmu_nocache_init(void)
bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
- srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size,
+ srmmu_nocache_pool = memblock_alloc_or_panic(srmmu_nocache_size,
SRMMU_NOCACHE_ALIGN_MAX);
- if (!srmmu_nocache_pool)
- panic("%s: Failed to allocate %lu bytes align=0x%x\n",
- __func__, srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX);
memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
srmmu_nocache_bitmap =
- memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+ memblock_alloc_or_panic(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
SMP_CACHE_BYTES);
- if (!srmmu_nocache_bitmap)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- BITS_TO_LONGS(bitmap_bits) * sizeof(long));
bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
@@ -355,7 +349,8 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
return NULL;
page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
spin_lock(&mm->page_table_lock);
- if (page_ref_inc_return(page) == 2 && !pgtable_pte_page_ctor(page)) {
+ if (page_ref_inc_return(page) == 2 &&
+ !pagetable_pte_ctor(mm, page_ptdesc(page))) {
page_ref_dec(page);
ptep = NULL;
}
@@ -371,7 +366,7 @@ void pte_free(struct mm_struct *mm, pgtable_t ptep)
page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
spin_lock(&mm->page_table_lock);
if (page_ref_dec_return(page) == 1)
- pgtable_pte_page_dtor(page);
+ pagetable_dtor(page_ptdesc(page));
spin_unlock(&mm->page_table_lock);
srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE);
@@ -451,9 +446,7 @@ static void __init sparc_context_init(int numctx)
unsigned long size;
size = numctx * sizeof(struct ctx_list);
- ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES);
- if (!ctx_list_pool)
- panic("%s: Failed to allocate %lu bytes\n", __func__, size);
+ ctx_list_pool = memblock_alloc_or_panic(size, SMP_CACHE_BYTES);
for (ctx = 0; ctx < numctx; ctx++) {
struct ctx_list *clist;
@@ -1512,7 +1505,7 @@ static void __init init_viking(void)
/*
* We need this to make sure old viking takes no hits
- * on it's cache for dma snoops to workaround the
+ * on its cache for dma snoops to workaround the
* "load from non-cacheable memory" interrupt bug.
* This is only necessary because of the new way in
* which we use the IOMMU.
@@ -1636,30 +1629,32 @@ static void __init get_srmmu_type(void)
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
- xc1((smpfunc_t) local_ops->page_for_dma, page);
+ xc1(local_ops->page_for_dma, page);
local_ops->page_for_dma(page);
}
static void smp_flush_cache_all(void)
{
- xc0((smpfunc_t) local_ops->cache_all);
+ xc0(local_ops->cache_all);
local_ops->cache_all();
}
static void smp_flush_tlb_all(void)
{
- xc0((smpfunc_t) local_ops->tlb_all);
+ xc0(local_ops->tlb_all);
local_ops->tlb_all();
}
+static bool any_other_mm_cpus(struct mm_struct *mm)
+{
+ return cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids;
+}
+
static void smp_flush_cache_mm(struct mm_struct *mm)
{
if (mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask;
- cpumask_copy(&cpu_mask, mm_cpumask(mm));
- cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
- if (!cpumask_empty(&cpu_mask))
- xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
+ if (any_other_mm_cpus(mm))
+ xc1(local_ops->cache_mm, (unsigned long)mm);
local_ops->cache_mm(mm);
}
}
@@ -1667,11 +1662,8 @@ static void smp_flush_cache_mm(struct mm_struct *mm)
static void smp_flush_tlb_mm(struct mm_struct *mm)
{
if (mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask;
- cpumask_copy(&cpu_mask, mm_cpumask(mm));
- cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
- if (!cpumask_empty(&cpu_mask)) {
- xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
+ if (any_other_mm_cpus(mm)) {
+ xc1(local_ops->tlb_mm, (unsigned long)mm);
if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
cpumask_copy(mm_cpumask(mm),
cpumask_of(smp_processor_id()));
@@ -1687,12 +1679,9 @@ static void smp_flush_cache_range(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
if (mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask;
- cpumask_copy(&cpu_mask, mm_cpumask(mm));
- cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
- if (!cpumask_empty(&cpu_mask))
- xc3((smpfunc_t) local_ops->cache_range,
- (unsigned long) vma, start, end);
+ if (any_other_mm_cpus(mm))
+ xc3(local_ops->cache_range, (unsigned long)vma, start,
+ end);
local_ops->cache_range(vma, start, end);
}
}
@@ -1704,12 +1693,9 @@ static void smp_flush_tlb_range(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
if (mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask;
- cpumask_copy(&cpu_mask, mm_cpumask(mm));
- cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
- if (!cpumask_empty(&cpu_mask))
- xc3((smpfunc_t) local_ops->tlb_range,
- (unsigned long) vma, start, end);
+ if (any_other_mm_cpus(mm))
+ xc3(local_ops->tlb_range, (unsigned long)vma, start,
+ end);
local_ops->tlb_range(vma, start, end);
}
}
@@ -1719,12 +1705,8 @@ static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
struct mm_struct *mm = vma->vm_mm;
if (mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask;
- cpumask_copy(&cpu_mask, mm_cpumask(mm));
- cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
- if (!cpumask_empty(&cpu_mask))
- xc2((smpfunc_t) local_ops->cache_page,
- (unsigned long) vma, page);
+ if (any_other_mm_cpus(mm))
+ xc2(local_ops->cache_page, (unsigned long)vma, page);
local_ops->cache_page(vma, page);
}
}
@@ -1734,12 +1716,8 @@ static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
struct mm_struct *mm = vma->vm_mm;
if (mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask;
- cpumask_copy(&cpu_mask, mm_cpumask(mm));
- cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
- if (!cpumask_empty(&cpu_mask))
- xc2((smpfunc_t) local_ops->tlb_page,
- (unsigned long) vma, page);
+ if (any_other_mm_cpus(mm))
+ xc2(local_ops->tlb_page, (unsigned long)vma, page);
local_ops->tlb_page(vma, page);
}
}
@@ -1753,19 +1731,15 @@ static void smp_flush_page_to_ram(unsigned long page)
* XXX This experiment failed, research further... -DaveM
*/
#if 1
- xc1((smpfunc_t) local_ops->page_to_ram, page);
+ xc1(local_ops->page_to_ram, page);
#endif
local_ops->page_to_ram(page);
}
static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
- cpumask_t cpu_mask;
- cpumask_copy(&cpu_mask, mm_cpumask(mm));
- cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
- if (!cpumask_empty(&cpu_mask))
- xc2((smpfunc_t) local_ops->sig_insns,
- (unsigned long) mm, insn_addr);
+ if (any_other_mm_cpus(mm))
+ xc2(local_ops->sig_insns, (unsigned long)mm, insn_addr);
local_ops->sig_insns(mm, insn_addr);
}
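
[Editor's note] The repeated cpumask_copy()/cpumask_clear_cpu()/cpumask_empty() sequence is folded into any_other_mm_cpus(): cpumask_any_but() returns the first CPU set in the mask other than the given one, or a value >= nr_cpu_ids if there is none, so the comparison asks "is this mm active on any other CPU?" without copying a cpumask_t onto the stack. Sketch of the new shape, mirroring the tlb_mm case above:

	/* "is this mm currently active on any CPU other than us?" */
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		xc1(local_ops->tlb_mm, (unsigned long)mm);	/* cross-call the others */
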
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 9a725547578e..a35ddcca5e76 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -52,8 +52,10 @@ out:
void arch_enter_lazy_mmu_mode(void)
{
- struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
+ struct tlb_batch *tb;
+ preempt_disable();
+ tb = this_cpu_ptr(&tlb_batch);
tb->active = 1;
}
@@ -64,6 +66,7 @@ void arch_leave_lazy_mmu_mode(void)
if (tb->tlb_nr)
flush_tlb_pending();
tb->active = 0;
+ preempt_enable();
}
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
@@ -118,6 +121,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
unsigned long paddr, pfn = pte_pfn(orig);
struct address_space *mapping;
struct page *page;
+ struct folio *folio;
if (!pfn_valid(pfn))
goto no_cache_flush;
@@ -127,13 +131,14 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
goto no_cache_flush;
/* A real file page? */
- mapping = page_mapping_file(page);
+ folio = page_folio(page);
+ mapping = folio_flush_mapping(folio);
if (!mapping)
goto no_cache_flush;
paddr = (unsigned long) page_address(page);
if ((paddr ^ vaddr) & (1 << 13))
- flush_dcache_page_all(mm, page);
+ flush_dcache_folio_all(mm, folio);
}
no_cache_flush:
@@ -149,6 +154,8 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
pte_t *pte;
pte = pte_offset_map(&pmd, vaddr);
+ if (!pte)
+ return;
end = vaddr + HPAGE_SIZE;
while (vaddr < end) {
if (pte_val(*pte) & _PAGE_VALID) {
@@ -179,12 +186,12 @@ static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
* hugetlb_pte_count.
*/
if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
- if (is_huge_zero_page(pmd_page(pmd)))
+ if (is_huge_zero_pmd(pmd))
mm->context.hugetlb_pte_count++;
else
mm->context.thp_pte_count++;
} else {
- if (is_huge_zero_page(pmd_page(orig)))
+ if (is_huge_zero_pmd(orig))
mm->context.hugetlb_pte_count--;
else
mm->context.thp_pte_count--;
@@ -245,6 +252,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
{
pmd_t old, entry;
+ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
old = pmdp_establish(vma, address, pmdp, entry);
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
@@ -255,7 +263,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
* Sanity check pmd before doing the actual decrement.
*/
if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
- !is_huge_zero_page(pmd_page(entry)))
+ !is_huge_zero_pmd(entry))
(vma->vm_mm)->context.thp_pte_count--;
return old;
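
[Editor's note] The preempt_disable()/preempt_enable() pair added to arch_enter/leave_lazy_mmu_mode() ensures the per-CPU tlb_batch grabbed at enter time is the same one flushed at leave time; without it, a preemption in between could move the task to another CPU and leave it updating the wrong batch. The window it protects looks like this (generic usage sketch, not code from this patch):

	arch_enter_lazy_mmu_mode();	/* now: preempt_disable(), tb->active = 1 */
	/* pte updates in here queue their TLB flushes into this CPU's tlb_batch */
	arch_leave_lazy_mmu_mode();	/* flush_tlb_pending() if needed, preempt_enable() */
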
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 0dce4b7ff73e..5fe52a64c7e7 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -266,7 +266,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
default:
printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
current->comm, current->pid, tsb_bytes);
- do_exit(SIGSEGV);
+ BUG();
}
tte |= pte_sz_bits(page_sz);
@@ -385,7 +385,7 @@ static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
* will not trigger any longer.
*
* The TSB can be anywhere from 8K to 1MB in size, in increasing powers
- * of two. The TSB must be aligned to it's size, so f.e. a 512K TSB
+ * of two. The TSB must be aligned to its size, so f.e. a 512K TSB
* must be 512K aligned. It also must be physically contiguous, so we
* cannot use vmalloc().
*
@@ -402,8 +402,8 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
unsigned long new_rss_limit;
gfp_t gfp_flags;
- if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
- max_tsb_size = (PAGE_SIZE << MAX_ORDER);
+ if (max_tsb_size > PAGE_SIZE << MAX_PAGE_ORDER)
+ max_tsb_size = PAGE_SIZE << MAX_PAGE_ORDER;
new_cache_index = 0;
for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {