Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--  arch/sparc/mm/Makefile         6
-rw-r--r--  arch/sparc/mm/extable.c      107
-rw-r--r--  arch/sparc/mm/fault_32.c     175
-rw-r--r--  arch/sparc/mm/fault_64.c      74
-rw-r--r--  arch/sparc/mm/highmem.c      137
-rw-r--r--  arch/sparc/mm/hugetlbpage.c   69
-rw-r--r--  arch/sparc/mm/hypersparc.S     3
-rw-r--r--  arch/sparc/mm/init_32.c       43
-rw-r--r--  arch/sparc/mm/init_64.c      256
-rw-r--r--  arch/sparc/mm/io-unit.c       29
-rw-r--r--  arch/sparc/mm/iommu.c         37
-rw-r--r--  arch/sparc/mm/mm_32.h          5
-rw-r--r--  arch/sparc/mm/srmmu.c        197
-rw-r--r--  arch/sparc/mm/tlb.c           11
-rw-r--r--  arch/sparc/mm/tsb.c           10
-rw-r--r--  arch/sparc/mm/ultra.S          2
-rw-r--r--  arch/sparc/mm/viking.S         5
17 files changed, 427 insertions, 739 deletions
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index b078205b70e0..809d993f6d88 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -3,18 +3,14 @@
#
asflags-y := -ansi
-ccflags-y := -Werror
obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
obj-y += fault_$(BITS).o
obj-y += init_$(BITS).o
-obj-$(CONFIG_SPARC32) += extable.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32) += srmmu.o iommu.o io-unit.o
obj-$(CONFIG_SPARC32) += srmmu_access.o
obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
obj-$(CONFIG_SPARC32) += leon_mm.o
# Only used by sparc64
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-
-# Only used by sparc32
-obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/sparc/mm/extable.c b/arch/sparc/mm/extable.c
deleted file mode 100644
index 241b40641873..000000000000
--- a/arch/sparc/mm/extable.c
+++ /dev/null
@@ -1,107 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/sparc/mm/extable.c
- */
-
-#include <linux/module.h>
-#include <linux/extable.h>
-#include <linux/uaccess.h>
-
-void sort_extable(struct exception_table_entry *start,
- struct exception_table_entry *finish)
-{
-}
-
-/* Caller knows they are in a range if ret->fixup == 0 */
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *base,
- const size_t num,
- unsigned long value)
-{
- int i;
-
- /* Single insn entries are encoded as:
- * word 1: insn address
- * word 2: fixup code address
- *
- * Range entries are encoded as:
- * word 1: first insn address
- * word 2: 0
- * word 3: last insn address + 4 bytes
- * word 4: fixup code address
- *
- * Deleted entries are encoded as:
- * word 1: unused
- * word 2: -1
- *
- * See asm/uaccess.h for more details.
- */
-
- /* 1. Try to find an exact match. */
- for (i = 0; i < num; i++) {
- if (base[i].fixup == 0) {
- /* A range entry, skip both parts. */
- i++;
- continue;
- }
-
- /* A deleted entry; see trim_init_extable */
- if (base[i].fixup == -1)
- continue;
-
- if (base[i].insn == value)
- return &base[i];
- }
-
- /* 2. Try to find a range match. */
- for (i = 0; i < (num - 1); i++) {
- if (base[i].fixup)
- continue;
-
- if (base[i].insn <= value && base[i + 1].insn > value)
- return &base[i];
-
- i++;
- }
-
- return NULL;
-}
-
-#ifdef CONFIG_MODULES
-/* We could memmove them around; easier to mark the trimmed ones. */
-void trim_init_extable(struct module *m)
-{
- unsigned int i;
- bool range;
-
- for (i = 0; i < m->num_exentries; i += range ? 2 : 1) {
- range = m->extable[i].fixup == 0;
-
- if (within_module_init(m->extable[i].insn, m)) {
- m->extable[i].fixup = -1;
- if (range)
- m->extable[i+1].fixup = -1;
- }
- if (range)
- i++;
- }
-}
-#endif /* CONFIG_MODULES */
-
-/* Special extable search, which handles ranges. Returns fixup */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
-{
- const struct exception_table_entry *entry;
-
- entry = search_exception_tables(addr);
- if (!entry)
- return 0;
-
- /* Inside range? Fix g2 and return correct fixup */
- if (!entry->fixup) {
- *g2 = (addr - entry->insn) / 4;
- return (entry + 1)->fixup;
- }
-
- return entry->fixup;
-}
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 89976c9b936c..86a831ebd8c8 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -23,9 +23,9 @@
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
+#include <linux/extable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
@@ -55,54 +55,6 @@ static void __noreturn unhandled_fault(unsigned long address,
die_if_kernel("Oops", regs);
}
-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
- unsigned long address)
-{
- struct pt_regs regs;
- unsigned long g2;
- unsigned int insn;
- int i;
-
- i = search_extables_range(ret_pc, &g2);
- switch (i) {
- case 3:
- /* load & store will be handled by fixup */
- return 3;
-
- case 1:
- /* store will be handled by fixup, load will bump out */
- /* for _to_ macros */
- insn = *((unsigned int *) pc);
- if ((insn >> 21) & 1)
- return 1;
- break;
-
- case 2:
- /* load will be handled by fixup, store will bump out */
- /* for _from_ macros */
- insn = *((unsigned int *) pc);
- if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
- return 2;
- break;
-
- default:
- break;
- }
-
- memset(&regs, 0, sizeof(regs));
- regs.pc = pc;
- regs.npc = pc + 4;
- __asm__ __volatile__(
- "rd %%psr, %0\n\t"
- "nop\n\t"
- "nop\n\t"
- "nop\n" : "=r" (regs.psr));
- unhandled_fault(address, current, &regs);
-
- /* Not reached */
- return 0;
-}
-
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
unsigned long address, struct task_struct *tsk)
@@ -131,7 +83,7 @@ static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
show_signal_msg(regs, sig, code,
addr, current);
- force_sig_fault(sig, code, (void __user *) addr, 0);
+ force_sig_fault(sig, code, (void __user *) addr);
}
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
@@ -163,12 +115,10 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
struct vm_area_struct *vma;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
- unsigned int fixup;
- unsigned long g2;
int from_user = !(regs->psr & PSR_PS);
int code;
vm_fault_t fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
if (text_fault)
address = regs->pc;
@@ -193,28 +143,19 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
if (pagefault_disabled() || !mm)
goto no_context;
+ if (!from_user && address >= PAGE_OFFSET)
+ goto no_context;
+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
- down_read(&mm->mmap_sem);
-
- if (!from_user && address >= PAGE_OFFSET)
- goto bad_area;
-
- vma = find_vma(mm, address);
+ vma = lock_mm_and_find_vma(mm, address, regs);
if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (expand_stack(vma, address))
- goto bad_area;
+ goto bad_area_nosemaphore;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
-good_area:
code = SEGV_ACCERR;
if (write) {
if (!(vma->vm_flags & VM_WRITE))
@@ -235,9 +176,16 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags, regs);
+
+ if (fault_signal_pending(fault, regs)) {
+ if (!from_user)
+ goto no_context;
+ return;
+ }
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -250,30 +198,18 @@ good_area:
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_MAJOR) {
- current->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
- 1, regs, address);
- } else {
- current->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
- 1, regs, address);
- }
- if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
+ /* No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
- goto retry;
- }
+ goto retry;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -281,7 +217,7 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
@@ -292,45 +228,29 @@ bad_area_nosemaphore:
/* Is this in ex_table? */
no_context:
- g2 = regs->u_regs[UREG_G2];
if (!from_user) {
- fixup = search_extables_range(regs->pc, &g2);
- /* Values below 10 are reserved for other things */
- if (fixup > 10) {
- extern const unsigned int __memset_start[];
- extern const unsigned int __memset_end[];
- extern const unsigned int __csum_partial_copy_start[];
- extern const unsigned int __csum_partial_copy_end[];
+ const struct exception_table_entry *entry;
+ entry = search_exception_tables(regs->pc);
#ifdef DEBUG_EXCEPTIONS
- printk("Exception: PC<%08lx> faddr<%08lx>\n",
- regs->pc, address);
- printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
- regs->pc, fixup, g2);
+ printk("Exception: PC<%08lx> faddr<%08lx>\n",
+ regs->pc, address);
+ printk("EX_TABLE: insn<%08lx> fixup<%08x>\n",
+ regs->pc, entry->fixup);
#endif
- if ((regs->pc >= (unsigned long)__memset_start &&
- regs->pc < (unsigned long)__memset_end) ||
- (regs->pc >= (unsigned long)__csum_partial_copy_start &&
- regs->pc < (unsigned long)__csum_partial_copy_end)) {
- regs->u_regs[UREG_I4] = address;
- regs->u_regs[UREG_I5] = regs->pc;
- }
- regs->u_regs[UREG_G2] = g2;
- regs->pc = fixup;
- regs->npc = regs->pc + 4;
- return;
- }
+ regs->pc = entry->fixup;
+ regs->npc = regs->pc + 4;
+ return;
}
unhandled_fault(address, tsk, regs);
- do_exit(SIGKILL);
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (from_user) {
pagefault_out_of_memory();
return;
@@ -338,7 +258,7 @@ out_of_memory:
goto no_context;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
if (!from_user)
goto no_context;
@@ -392,17 +312,9 @@ static void force_user_fault(unsigned long address, int write)
code = SEGV_MAPERR;
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, address);
+ vma = lock_mm_and_find_vma(mm, address, NULL);
if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (expand_stack(vma, address))
- goto bad_area;
-good_area:
+ goto bad_area_nosemaphore;
code = SEGV_ACCERR;
if (write) {
if (!(vma->vm_flags & VM_WRITE))
@@ -412,20 +324,21 @@ good_area:
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
- switch (handle_mm_fault(vma, address, flags)) {
+ switch (handle_mm_fault(vma, address, flags, NULL)) {
case VM_FAULT_SIGBUS:
case VM_FAULT_OOM:
goto do_sigbus;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
+bad_area_nosemaphore:
__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 2371fb6b97e4..e326caf708c6 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -27,7 +27,6 @@
#include <linux/uaccess.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/asi.h>
@@ -71,7 +70,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
}
/*
- * We now make sure that mmap_sem is held in all paths that call
+ * We now make sure that mmap_lock is held in all paths that call
* this. Additionally, to prevent kswapd from ripping ptes from
* under us, raise interrupts around the time that we look at the
* pte, kswapd will have to wait to get his smp ipi response from
@@ -80,6 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
static unsigned int get_user_insn(unsigned long tpc)
{
pgd_t *pgdp = pgd_offset(current->mm, tpc);
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep, pte;
@@ -88,7 +88,10 @@ static unsigned int get_user_insn(unsigned long tpc)
if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
goto out;
- pudp = pud_offset(pgdp, tpc);
+ p4dp = p4d_offset(pgdp, tpc);
+ if (p4d_none(*p4dp) || unlikely(p4d_bad(*p4dp)))
+ goto out;
+ pudp = pud_offset(p4dp, tpc);
if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
goto out;
@@ -96,6 +99,7 @@ static unsigned int get_user_insn(unsigned long tpc)
local_irq_disable();
pmdp = pmd_offset(pudp, tpc);
+again:
if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
goto out_irq_enable;
@@ -112,6 +116,8 @@ static unsigned int get_user_insn(unsigned long tpc)
#endif
{
ptep = pte_offset_map(pmdp, tpc);
+ if (!ptep)
+ goto again;
pte = *ptep;
if (pte_present(pte)) {
pa = (pte_pfn(pte) << PAGE_SHIFT);
@@ -173,7 +179,7 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
if (unlikely(show_unhandled_signals))
show_signal_msg(regs, sig, code, addr, current);
- force_sig_fault(sig, code, (void __user *) addr, 0);
+ force_sig_fault(sig, code, (void __user *) addr);
}
static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
@@ -267,7 +273,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
int si_code, fault_code;
vm_fault_t fault;
unsigned long address, mm_rss;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
fault_code = get_thread_fault_code();
@@ -315,7 +321,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mmap_read_trylock(mm)) {
if ((regs->tstate & TSTATE_PRIV) &&
!search_exception_tables(regs->tpc)) {
insn = get_fault_insn(regs, insn);
@@ -323,7 +329,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
}
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
}
if (fault_code & FAULT_CODE_BAD_RA)
@@ -380,8 +386,9 @@ continue_fault:
goto bad_area;
}
}
- if (expand_stack(vma, address))
- goto bad_area;
+ vma = expand_stack(mm, address);
+ if (!vma)
+ goto bad_area_nosemaphore;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
@@ -419,10 +426,19 @@ good_area:
goto bad_area;
}
- fault = handle_mm_fault(vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags, regs);
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if (fault_signal_pending(fault, regs)) {
+ if (regs->tstate & TSTATE_PRIV) {
+ insn = get_fault_insn(regs, insn);
+ goto handle_kernel_fault;
+ }
goto exit_exception;
+ }
+
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ goto lock_released;
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
@@ -434,30 +450,19 @@ good_area:
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_MAJOR) {
- current->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
- 1, regs, address);
- } else {
- current->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
- 1, regs, address);
- }
- if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
+ /* No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
- goto retry;
- }
+ goto retry;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
+lock_released:
mm_rss = get_mm_rss(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
@@ -486,8 +491,9 @@ exit_exception:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
+ mmap_read_unlock(mm);
+bad_area_nosemaphore:
insn = get_fault_insn(regs, insn);
- up_read(&mm->mmap_sem);
handle_kernel_fault:
do_kernel_fault(regs, si_code, fault_code, insn, address);
@@ -499,7 +505,7 @@ handle_kernel_fault:
*/
out_of_memory:
insn = get_fault_insn(regs, insn);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!(regs->tstate & TSTATE_PRIV)) {
pagefault_out_of_memory();
goto exit_exception;
@@ -512,7 +518,7 @@ intr_or_no_mm:
do_sigbus:
insn = get_fault_insn(regs, insn);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* Send a sigbus, regardless of whether we were in kernel
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
deleted file mode 100644
index d4a80adea7e5..000000000000
--- a/arch/sparc/mm/highmem.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * highmem.c: virtual kernel memory mappings for high memory
- *
- * Provides kernel-static versions of atomic kmap functions originally
- * found as inlines in include/asm-sparc/highmem.h. These became
- * needed as kmap_atomic() and kunmap_atomic() started getting
- * called from within modules.
- * -- Tomas Szepe <szepe@pinerecords.com>, September 2002
- *
- * But kmap_atomic() and kunmap_atomic() cannot be inlined in
- * modules because they are loaded with btfixup-ped functions.
- */
-
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need it.
- *
- * XXX This is an old text. Actually, it's good to use atomic kmaps,
- * provided you remember that they are atomic and not try to sleep
- * with a kmap taken, much like a spinlock. Non-atomic kmaps are
- * shared by CPUs, and so precious, and establishing them requires IPI.
- * Atomic kmaps are lightweight and we may have NCPUS more of them.
- */
-#include <linux/highmem.h>
-#include <linux/export.h>
-#include <linux/mm.h>
-
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
-#include <asm/vaddrs.h>
-
-pgprot_t kmap_prot;
-
-static pte_t *kmap_pte;
-
-void __init kmap_init(void)
-{
- unsigned long address;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *dir;
-
- address = __fix_to_virt(FIX_KMAP_BEGIN);
- p4d = p4d_offset(pgd_offset_k(address), address);
- pud = pud_offset(p4d, address);
- dir = pmd_offset(pud, address);
-
- /* cache the first kmap pte */
- kmap_pte = pte_offset_kernel(dir, address);
- kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
-}
-
-void *kmap_atomic(struct page *page)
-{
- unsigned long vaddr;
- long idx, type;
-
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-
-/* XXX Fix - Anton */
-#if 0
- __flush_cache_one(vaddr);
-#else
- flush_cache_all();
-#endif
-
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte-idx)));
-#endif
- set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
-/* XXX Fix - Anton */
-#if 0
- __flush_tlb_one(vaddr);
-#else
- flush_tlb_all();
-#endif
-
- return (void*) vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic);
-
-void __kunmap_atomic(void *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- int type;
-
- if (vaddr < FIXADDR_START) { // FIXME
- pagefault_enable();
- preempt_enable();
- return;
- }
-
- type = kmap_atomic_idx();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
- {
- unsigned long idx;
-
- idx = type + KM_TYPE_NR * smp_processor_id();
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
-
- /* XXX Fix - Anton */
-#if 0
- __flush_cache_one(vaddr);
-#else
- flush_cache_all();
-#endif
-
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- /* XXX Fix - Anton */
-#if 0
- __flush_tlb_one(vaddr);
-#else
- flush_tlb_all();
-#endif
- }
-#endif
-
- kmap_atomic_idx_pop();
- pagefault_enable();
- preempt_enable();
-}
-EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index f78793a06bbd..b432500c13a5 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -14,7 +14,6 @@
#include <asm/mman.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -178,18 +177,17 @@ static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
return sun4u_hugepage_shift_to_tte(entry, shift);
}
-pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writeable)
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
- unsigned int shift = huge_page_shift(hstate_vma(vma));
pte_t pte;
+ entry = pte_mkhuge(entry);
pte = hugepage_shift_to_tte(entry, shift);
#ifdef CONFIG_SPARC64
/* If this vma has ADI enabled on it, turn on TTE.mcd
*/
- if (vma->vm_flags & VM_SPARC_ADI)
+ if (flags & VM_SPARC_ADI)
return pte_mkmcd(pte);
else
return pte_mknotmcd(pte);
@@ -248,14 +246,17 @@ static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
return shift;
}
-static unsigned int huge_tte_to_shift(pte_t entry)
+static unsigned long tte_to_shift(pte_t entry)
{
- unsigned long shift;
-
if (tlb_type == hypervisor)
- shift = sun4v_huge_tte_to_shift(entry);
- else
- shift = sun4u_huge_tte_to_shift(entry);
+ return sun4v_huge_tte_to_shift(entry);
+
+ return sun4u_huge_tte_to_shift(entry);
+}
+
+static unsigned int huge_tte_to_shift(pte_t entry)
+{
+ unsigned long shift = tte_to_shift(entry);
if (shift == PAGE_SHIFT)
WARN_ONCE(1, "tto_to_shift: invalid hugepage tte=0x%lx\n",
@@ -273,15 +274,21 @@ static unsigned long huge_tte_to_size(pte_t pte)
return size;
}
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&pud); }
+unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
+unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }
+
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgd = pgd_offset(mm, addr);
- pud = pud_alloc(mm, pgd, addr);
+ p4d = p4d_offset(pgd, addr);
+ pud = pud_alloc(mm, p4d, addr);
if (!pud)
return NULL;
if (sz >= PUD_SIZE)
@@ -291,20 +298,24 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
return NULL;
if (sz >= PMD_SIZE)
return (pte_t *)pmd;
- return pte_alloc_map(mm, pmd, addr);
+ return pte_alloc_huge(mm, pmd, addr);
}
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgd = pgd_offset(mm, addr);
if (pgd_none(*pgd))
return NULL;
- pud = pud_offset(pgd, addr);
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none(*p4d))
+ return NULL;
+ pud = pud_offset(p4d, addr);
if (pud_none(*pud))
return NULL;
if (is_hugetlb_pud(*pud))
@@ -314,10 +325,10 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return NULL;
if (is_hugetlb_pmd(*pmd))
return (pte_t *)pmd;
- return pte_offset_map(pmd, addr);
+ return pte_offset_huge(pmd, addr);
}
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
unsigned int nptes, orig_shift, shift;
@@ -353,6 +364,12 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
orig_shift);
}
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t entry, unsigned long sz)
+{
+ __set_huge_pte_at(mm, addr, ptep, entry);
+}
+
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
@@ -449,7 +466,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
mm_dec_nr_pmds(tlb->mm);
}
-static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
@@ -458,7 +475,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
unsigned long start;
start = addr;
- pud = pud_offset(pgd, addr);
+ pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
@@ -481,8 +498,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
if (end - 1 > ceiling - 1)
return;
- pud = pud_offset(pgd, start);
- pgd_clear(pgd);
+ pud = pud_offset(p4d, start);
+ p4d_clear(p4d);
pud_free_tlb(tlb, pud, start);
mm_dec_nr_puds(tlb->mm);
}
@@ -492,6 +509,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long floor, unsigned long ceiling)
{
pgd_t *pgd;
+ p4d_t *p4d;
unsigned long next;
addr &= PMD_MASK;
@@ -511,10 +529,11 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
return;
pgd = pgd_offset(tlb->mm, addr);
+ p4d = p4d_offset(pgd, addr);
do {
- next = pgd_addr_end(addr, end);
- if (pgd_none_or_clear_bad(pgd))
+ next = p4d_addr_end(addr, end);
+ if (p4d_none_or_clear_bad(p4d))
continue;
- hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
- } while (pgd++, addr = next, addr != end);
+ hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
+ } while (p4d++, addr = next, addr != end);
}
diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S
index 66885a8dc50a..6c2521e85a42 100644
--- a/arch/sparc/mm/hypersparc.S
+++ b/arch/sparc/mm/hypersparc.S
@@ -10,6 +10,7 @@
#include <asm/asm-offsets.h>
#include <asm/asi.h>
#include <asm/page.h>
+#include <asm/pgtable.h>
#include <asm/pgtsrmmu.h>
#include <linux/init.h>
@@ -293,7 +294,7 @@ hypersparc_flush_tlb_range:
cmp %o3, -1
be hypersparc_flush_tlb_range_out
#endif
- sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+ sethi %hi(~((1 << PGDIR_SHIFT) - 1)), %o4
sta %o3, [%g1] ASI_M_MMUREGS
and %o1, %o4, %o1
add %o1, 0x200, %o1
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 906eda1158b4..d96a14ffceeb 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -29,7 +29,6 @@
#include <asm/sections.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/vaddrs.h>
#include <asm/setup.h>
#include <asm/tlb.h>
@@ -38,8 +37,7 @@
#include "mm_32.h"
-unsigned long *sparc_valid_addr_bitmap;
-EXPORT_SYMBOL(sparc_valid_addr_bitmap);
+static unsigned long *sparc_valid_addr_bitmap;
unsigned long phys_base;
EXPORT_SYMBOL(phys_base);
@@ -193,10 +191,14 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
/* Reserve the kernel text/data/bss. */
size = (start_pfn << PAGE_SHIFT) - phys_base;
memblock_reserve(phys_base, size);
+ memblock_add(phys_base, size);
size = memblock_phys_mem_size() - memblock_reserved_size();
*pages_avail = (size >> PAGE_SHIFT) - high_pages;
+ /* Only allow low memory to be allocated via memblock allocation */
+ memblock_set_current_limit(max_low_pfn << PAGE_SHIFT);
+
return max_pfn;
}
@@ -289,15 +291,42 @@ void __init mem_init(void)
map_high_region(start_pfn, end_pfn);
}
-
- mem_init_print_info(NULL);
}
void sparc_flush_page_to_ram(struct page *page)
{
unsigned long vaddr = (unsigned long)page_address(page);
- if (vaddr)
- __flush_page_to_ram(vaddr);
+ __flush_page_to_ram(vaddr);
}
EXPORT_SYMBOL(sparc_flush_page_to_ram);
+
+void sparc_flush_folio_to_ram(struct folio *folio)
+{
+ unsigned long vaddr = (unsigned long)folio_address(folio);
+ unsigned int i, nr = folio_nr_pages(folio);
+
+ for (i = 0; i < nr; i++)
+ __flush_page_to_ram(vaddr + i * PAGE_SIZE);
+}
+EXPORT_SYMBOL(sparc_flush_folio_to_ram);
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_READONLY,
+ [VM_EXEC | VM_READ] = PAGE_READONLY,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index e6d91819da92..1ca9054d9b97 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -27,11 +27,11 @@
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>
+#include <linux/bootmem_info.h>
#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
@@ -195,21 +195,26 @@ atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif
-inline void flush_dcache_page_impl(struct page *page)
+inline void flush_dcache_folio_impl(struct folio *folio)
{
+ unsigned int i, nr = folio_nr_pages(folio);
+
BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes);
#endif
#ifdef DCACHE_ALIASING_POSSIBLE
- __flush_dcache_page(page_address(page),
- ((tlb_type == spitfire) &&
- page_mapping_file(page) != NULL));
+ for (i = 0; i < nr; i++)
+ __flush_dcache_page(folio_address(folio) + i * PAGE_SIZE,
+ ((tlb_type == spitfire) &&
+ folio_flush_mapping(folio) != NULL));
#else
- if (page_mapping_file(page) != NULL &&
- tlb_type == spitfire)
- __flush_icache_page(__pa(page_address(page)));
+ if (folio_flush_mapping(folio) != NULL &&
+ tlb_type == spitfire) {
+ for (i = 0; i < nr; i++)
+ __flush_icache_page((pfn + i) * PAGE_SIZE);
+ }
#endif
}
@@ -218,10 +223,10 @@ inline void flush_dcache_page_impl(struct page *page)
#define PG_dcache_cpu_mask \
((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
-#define dcache_dirty_cpu(page) \
- (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
+#define dcache_dirty_cpu(folio) \
+ (((folio)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
-static inline void set_dcache_dirty(struct page *page, int this_cpu)
+static inline void set_dcache_dirty(struct folio *folio, int this_cpu)
{
unsigned long mask = this_cpu;
unsigned long non_cpu_bits;
@@ -238,11 +243,11 @@ static inline void set_dcache_dirty(struct page *page, int this_cpu)
"bne,pn %%xcc, 1b\n\t"
" nop"
: /* no outputs */
- : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
+ : "r" (mask), "r" (non_cpu_bits), "r" (&folio->flags)
: "g1", "g7");
}
-static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
+static inline void clear_dcache_dirty_cpu(struct folio *folio, unsigned long cpu)
{
unsigned long mask = (1UL << PG_dcache_dirty);
@@ -260,7 +265,7 @@ static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
" nop\n"
"2:"
: /* no outputs */
- : "r" (cpu), "r" (mask), "r" (&page->flags),
+ : "r" (cpu), "r" (mask), "r" (&folio->flags),
"i" (PG_dcache_cpu_mask),
"i" (PG_dcache_cpu_shift)
: "g1", "g7");
@@ -284,9 +289,10 @@ static void flush_dcache(unsigned long pfn)
page = pfn_to_page(pfn);
if (page) {
+ struct folio *folio = page_folio(page);
unsigned long pg_flags;
- pg_flags = page->flags;
+ pg_flags = folio->flags;
if (pg_flags & (1UL << PG_dcache_dirty)) {
int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
PG_dcache_cpu_mask);
@@ -296,11 +302,11 @@ static void flush_dcache(unsigned long pfn)
* in the SMP case.
*/
if (cpu == this_cpu)
- flush_dcache_page_impl(page);
+ flush_dcache_folio_impl(folio);
else
- smp_flush_dcache_page_impl(page, cpu);
+ smp_flush_dcache_folio_impl(folio, cpu);
- clear_dcache_dirty_cpu(page, cpu);
+ clear_dcache_dirty_cpu(folio, cpu);
put_cpu();
}
@@ -325,23 +331,12 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
}
#ifdef CONFIG_HUGETLB_PAGE
-static void __init add_huge_page_size(unsigned long size)
-{
- unsigned int order;
-
- if (size_to_hstate(size))
- return;
-
- order = ilog2(size) - PAGE_SHIFT;
- hugetlb_add_hstate(order);
-}
-
static int __init hugetlbpage_init(void)
{
- add_huge_page_size(1UL << HPAGE_64K_SHIFT);
- add_huge_page_size(1UL << HPAGE_SHIFT);
- add_huge_page_size(1UL << HPAGE_256MB_SHIFT);
- add_huge_page_size(1UL << HPAGE_2GB_SHIFT);
+ hugetlb_add_hstate(HPAGE_64K_SHIFT - PAGE_SHIFT);
+ hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
+ hugetlb_add_hstate(HPAGE_256MB_SHIFT - PAGE_SHIFT);
+ hugetlb_add_hstate(HPAGE_2GB_SHIFT - PAGE_SHIFT);
return 0;
}
@@ -360,16 +355,11 @@ static void __init pud_huge_patch(void)
__asm__ __volatile__("flush %0" : : "r" (addr));
}
-static int __init setup_hugepagesz(char *string)
+bool __init arch_hugetlb_valid_size(unsigned long size)
{
- unsigned long long hugepage_size;
- unsigned int hugepage_shift;
+ unsigned int hugepage_shift = ilog2(size);
unsigned short hv_pgsz_idx;
unsigned int hv_pgsz_mask;
- int rc = 0;
-
- hugepage_size = memparse(string, &string);
- hugepage_shift = ilog2(hugepage_size);
switch (hugepage_shift) {
case HPAGE_16GB_SHIFT:
@@ -397,28 +387,21 @@ static int __init setup_hugepagesz(char *string)
hv_pgsz_mask = 0;
}
- if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
- hugetlb_bad_size();
- pr_err("hugepagesz=%llu not supported by MMU.\n",
- hugepage_size);
- goto out;
- }
+ if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U)
+ return false;
- add_huge_page_size(hugepage_size);
- rc = 1;
-
-out:
- return rc;
+ return true;
}
-__setup("hugepagesz=", setup_hugepagesz);
#endif /* CONFIG_HUGETLB_PAGE */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep, unsigned int nr)
{
struct mm_struct *mm;
unsigned long flags;
bool is_huge_tsb;
pte_t pte = *ptep;
+ unsigned int i;
if (tlb_type != hypervisor) {
unsigned long pfn = pte_pfn(pte);
@@ -465,15 +448,21 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
}
}
#endif
- if (!is_huge_tsb)
- __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
- address, pte_val(pte));
+ if (!is_huge_tsb) {
+ for (i = 0; i < nr; i++) {
+ __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
+ address, pte_val(pte));
+ address += PAGE_SIZE;
+ pte_val(pte) += PAGE_SIZE;
+ }
+ }
spin_unlock_irqrestore(&mm->context.lock, flags);
}
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
{
+ unsigned long pfn = folio_pfn(folio);
struct address_space *mapping;
int this_cpu;
@@ -484,35 +473,35 @@ void flush_dcache_page(struct page *page)
* is merely the zero page. The 'bigcore' testcase in GDB
* causes this case to run millions of times.
*/
- if (page == ZERO_PAGE(0))
+ if (is_zero_pfn(pfn))
return;
this_cpu = get_cpu();
- mapping = page_mapping_file(page);
+ mapping = folio_flush_mapping(folio);
if (mapping && !mapping_mapped(mapping)) {
- int dirty = test_bit(PG_dcache_dirty, &page->flags);
+ bool dirty = test_bit(PG_dcache_dirty, &folio->flags);
if (dirty) {
- int dirty_cpu = dcache_dirty_cpu(page);
+ int dirty_cpu = dcache_dirty_cpu(folio);
if (dirty_cpu == this_cpu)
goto out;
- smp_flush_dcache_page_impl(page, dirty_cpu);
+ smp_flush_dcache_folio_impl(folio, dirty_cpu);
}
- set_dcache_dirty(page, this_cpu);
+ set_dcache_dirty(folio, this_cpu);
} else {
/* We could delay the flush for the !page_mapping
* case too. But that case is for exec env/arg
* pages and those are %99 certainly going to get
* faulted into the tlb (and thus flushed) anyways.
*/
- flush_dcache_page_impl(page);
+ flush_dcache_folio_impl(folio);
}
out:
put_cpu();
}
-EXPORT_SYMBOL(flush_dcache_page);
+EXPORT_SYMBOL(flush_dcache_folio);
void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
@@ -529,10 +518,7 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end)
if (kaddr >= PAGE_OFFSET)
paddr = kaddr & mask;
else {
- pgd_t *pgdp = pgd_offset_k(kaddr);
- pud_t *pudp = pud_offset(pgdp, kaddr);
- pmd_t *pmdp = pmd_offset(pudp, kaddr);
- pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
+ pte_t *ptep = virt_to_kpte(kaddr);
paddr = pte_val(*ptep) & mask;
}
@@ -737,9 +723,10 @@ static void __init inherit_prom_mappings(void)
void prom_world(int enter)
{
- if (!enter)
- set_fs(get_fs());
-
+ /*
+ * No need to change the address space any more, just flush
+ * the register windows
+ */
__asm__ __volatile__("flushw");
}
@@ -932,7 +919,7 @@ struct node_mem_mask {
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
struct mdesc_mlgroup {
u64 node;
@@ -1088,7 +1075,7 @@ static void __init allocate_node_data(int nid)
{
struct pglist_data *p;
unsigned long start_pfn, end_pfn;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
SMP_CACHE_BYTES, nid);
@@ -1109,7 +1096,7 @@ static void __init allocate_node_data(int nid)
static void init_node_masks_nonnuma(void)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int i;
#endif
@@ -1119,7 +1106,7 @@ static void init_node_masks_nonnuma(void)
node_masks[0].match = 0;
num_node_masks = 1;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
for (i = 0; i < NR_CPUS; i++)
numa_cpu_lookup_table[i] = 0;
@@ -1127,7 +1114,7 @@ static void init_node_masks_nonnuma(void)
#endif
}
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(numa_cpu_lookup_table);
@@ -1221,18 +1208,14 @@ int of_node_to_nid(struct device_node *dp)
static void __init add_node_ranges(void)
{
- struct memblock_region *reg;
+ phys_addr_t start, end;
unsigned long prev_max;
+ u64 i;
memblock_resized:
prev_max = memblock.memory.max;
- for_each_memblock(memory, reg) {
- unsigned long size = reg->size;
- unsigned long start, end;
-
- start = reg->base;
- end = start + size;
+ for_each_mem_range(i, &start, &end) {
while (start < end) {
unsigned long this_end;
int nid;
@@ -1240,7 +1223,7 @@ memblock_resized:
this_end = memblock_nid_range(start, end, &nid);
numadbg("Setting memblock NUMA node nid[%d] "
- "start[%lx] end[%lx]\n",
+ "start[%llx] end[%lx]\n",
nid, start, this_end);
memblock_set_node(start, this_end - start,
@@ -1639,7 +1622,6 @@ static unsigned long __init bootmem_init(unsigned long phys_base)
/* XXX cpu notifier XXX */
- sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
return end_pfn;
@@ -1653,6 +1635,7 @@ static unsigned long max_phys_bits = 40;
bool kern_addr_valid(unsigned long addr)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -1672,29 +1655,32 @@ bool kern_addr_valid(unsigned long addr)
pgd = pgd_offset_k(addr);
if (pgd_none(*pgd))
- return 0;
+ return false;
- pud = pud_offset(pgd, addr);
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none(*p4d))
+ return false;
+
+ pud = pud_offset(p4d, addr);
if (pud_none(*pud))
- return 0;
+ return false;
- if (pud_large(*pud))
+ if (pud_leaf(*pud))
return pfn_valid(pud_pfn(*pud));
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
- return 0;
+ return false;
- if (pmd_large(*pmd))
+ if (pmd_leaf(*pmd))
return pfn_valid(pmd_pfn(*pmd));
pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte))
- return 0;
+ return false;
return pfn_valid(pte_pfn(*pte));
}
-EXPORT_SYMBOL(kern_addr_valid);
static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
unsigned long vend,
@@ -1800,6 +1786,7 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
while (vstart < vend) {
unsigned long this_end, paddr = __pa(vstart);
pgd_t *pgd = pgd_offset_k(vstart);
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -1814,7 +1801,20 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
alloc_bytes += PAGE_SIZE;
pgd_populate(&init_mm, pgd, new);
}
- pud = pud_offset(pgd, vstart);
+
+ p4d = p4d_offset(pgd, vstart);
+ if (p4d_none(*p4d)) {
+ pud_t *new;
+
+ new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
+ PAGE_SIZE);
+ if (!new)
+ goto err_alloc;
+ alloc_bytes += PAGE_SIZE;
+ p4d_populate(&init_mm, p4d, new);
+ }
+
+ pud = pud_offset(p4d, vstart);
if (pud_none(*pud)) {
pmd_t *new;
@@ -2294,10 +2294,10 @@ void __init paging_init(void)
setup_page_offset();
/* These build time checkes make sure that the dcache_dirty_cpu()
- * page->flags usage will work.
+ * folio->flags usage will work.
*
* When a page gets marked as dcache-dirty, we store the
- * cpu number starting at bit 32 in the page->flags. Also,
+ * cpu number starting at bit 32 in the folio->flags. Also,
* functions like clear_dcache_dirty_cpu use the cpu mask
* in 13-bit signed-immediate instruction fields.
*/
@@ -2468,7 +2468,7 @@ void __init paging_init(void)
max_zone_pfns[ZONE_NORMAL] = end_pfn;
- free_area_init_nodes(max_zone_pfns);
+ free_area_init(max_zone_pfns);
}
printk("Booting Linux...\n");
@@ -2502,7 +2502,7 @@ int page_in_phys_avail(unsigned long paddr)
static void __init register_page_bootmem_info(void)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int i;
for_each_online_node(i)
@@ -2535,7 +2535,6 @@ void __init mem_init(void)
}
mark_page_reserved(mem_map_zero);
- mem_init_print_info(NULL);
if (tlb_type == cheetah || tlb_type == cheetah_plus)
cheetah_ecache_flush_init();
@@ -2612,13 +2611,18 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
for (; vstart < vend; vstart += PMD_SIZE) {
pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
unsigned long pte;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
if (!pgd)
return -ENOMEM;
- pud = vmemmap_pud_populate(pgd, vstart, node);
+ p4d = vmemmap_p4d_populate(pgd, vstart, node);
+ if (!p4d)
+ return -ENOMEM;
+
+ pud = vmemmap_pud_populate(p4d, vstart, node);
if (!pud)
return -ENOMEM;
@@ -2643,6 +2647,9 @@ void vmemmap_free(unsigned long start, unsigned long end,
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
+static pgprot_t protection_map[16] __ro_after_init;
+
static void prot_init_common(unsigned long page_none,
unsigned long page_shared,
unsigned long page_copy,
@@ -2900,14 +2907,15 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
- struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page)
+ struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);
+
+ if (!ptdesc)
return NULL;
- if (!pgtable_pte_page_ctor(page)) {
- free_unref_page(page);
+ if (!pagetable_pte_ctor(ptdesc)) {
+ pagetable_free(ptdesc);
return NULL;
}
- return (pte_t *) page_address(page);
+ return ptdesc_address(ptdesc);
}
void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
@@ -2917,10 +2925,10 @@ void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
static void __pte_free(pgtable_t pte)
{
- struct page *page = virt_to_page(pte);
+ struct ptdesc *ptdesc = virt_to_ptdesc(pte);
- pgtable_pte_page_dtor(page);
- __free_page(page);
+ pagetable_pte_dtor(ptdesc);
+ pagetable_free(ptdesc);
}
void pte_free(struct mm_struct *mm, pgtable_t pte)
@@ -2937,6 +2945,22 @@ void pgtable_free(void *table, bool is_page)
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static void pte_free_now(struct rcu_head *head)
+{
+ struct page *page;
+
+ page = container_of(head, struct page, rcu_head);
+ __pte_free((pgtable_t)page_address(page));
+}
+
+void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
+{
+ struct page *page;
+
+ page = virt_to_page(pgtable);
+ call_rcu(&page->rcu_head, pte_free_now);
+}
+
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd)
{
@@ -2944,7 +2968,7 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
struct mm_struct *mm;
pmd_t entry = *pmd;
- if (!pmd_large(entry) || !pmd_young(entry))
+ if (!pmd_leaf(entry) || !pmd_young(entry))
return;
pte = pmd_val(entry);
@@ -3193,3 +3217,15 @@ void copy_highpage(struct page *to, struct page *from)
}
}
EXPORT_SYMBOL(copy_highpage);
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ unsigned long prot = pgprot_val(protection_map[vm_flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
+
+ if (vm_flags & VM_SPARC_ADI)
+ prot |= _PAGE_MCD_4V;
+
+ return __pgprot(prot);
+}
+EXPORT_SYMBOL(vm_get_page_prot);
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index 33a0facd9eb5..d8376f61b4d0 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -10,14 +10,12 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
-#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
#include <linux/bitops.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
@@ -38,6 +36,8 @@
#define IOPERM (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
+static const struct dma_map_ops iounit_dma_ops;
+
static void __init iounit_iommu_init(struct platform_device *op)
{
struct iounit_struct *iounit;
@@ -70,6 +70,8 @@ static void __init iounit_iommu_init(struct platform_device *op)
xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
for (; xpt < xptend; xpt++)
sbus_writel(0, xpt);
+
+ op->dev.dma_ops = &iounit_dma_ops;
}
static int __init iounit_init(void)
@@ -238,21 +240,15 @@ static void *iounit_alloc(struct device *dev, size_t len,
while(addr < end) {
page = va;
{
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
long i;
- pgdp = pgd_offset(&init_mm, addr);
- p4dp = p4d_offset(pgdp, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset_map(pmdp, addr);
+ pmdp = pmd_off_k(addr);
+ ptep = pte_offset_kernel(pmdp, addr);
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
-
+
i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
iopte = iounit->page_table + i;
@@ -288,8 +284,3 @@ static const struct dma_map_ops iounit_dma_ops = {
.map_sg = iounit_map_sg,
.unmap_sg = iounit_unmap_sg,
};
-
-void __init ld_mmu_iounit(void)
-{
- dma_ops = &iounit_dma_ops;
-}
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 4d3c6991f0ae..5a5080db800f 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -7,18 +7,16 @@
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
-
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
@@ -54,6 +52,9 @@ static pgprot_t dvma_prot; /* Consistent mapping pte flags */
#define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
+static const struct dma_map_ops sbus_iommu_dma_gflush_ops;
+static const struct dma_map_ops sbus_iommu_dma_pflush_ops;
+
static void __init sbus_iommu_init(struct platform_device *op)
{
struct iommu_struct *iommu;
@@ -129,6 +130,11 @@ static void __init sbus_iommu_init(struct platform_device *op)
(int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);
op->dev.archdata.iommu = iommu;
+
+ if (flush_page_for_dma_global)
+ op->dev.dma_ops = &sbus_iommu_dma_gflush_ops;
+ else
+ op->dev.dma_ops = &sbus_iommu_dma_pflush_ops;
}
static int __init iommu_init(void)
@@ -251,7 +257,7 @@ static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
sg->dma_address =__sbus_iommu_map_page(dev, sg_page(sg),
sg->offset, sg->length, per_page_flush);
if (sg->dma_address == DMA_MAPPING_ERROR)
- return 0;
+ return -EIO;
sg->dma_length = sg->length;
}
@@ -342,9 +348,6 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len,
while(addr < end) {
page = va;
{
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -355,11 +358,8 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len,
else
__flush_page_to_ram(page);
- pgdp = pgd_offset(&init_mm, addr);
- p4dp = p4d_offset(pgdp, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset_map(pmdp, addr);
+ pmdp = pmd_off_k(addr);
+ ptep = pte_offset_kernel(pmdp, addr);
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
}
@@ -445,13 +445,6 @@ static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
void __init ld_mmu_iommu(void)
{
- if (flush_page_for_dma_global) {
- /* flush_page_for_dma flushes everything, no matter of what page is it */
- dma_ops = &sbus_iommu_dma_gflush_ops;
- } else {
- dma_ops = &sbus_iommu_dma_pflush_ops;
- }
-
if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
diff --git a/arch/sparc/mm/mm_32.h b/arch/sparc/mm/mm_32.h
index 0d0b06e952a5..ee55f1080634 100644
--- a/arch/sparc/mm/mm_32.h
+++ b/arch/sparc/mm/mm_32.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* fault_32.c - visible as they are called from assembler */
-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
- unsigned long address);
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
unsigned long address);
@@ -20,6 +18,3 @@ void __init srmmu_paging_init(void);
/* iommu.c */
void ld_mmu_iommu(void);
-
-/* io-unit.c */
-void ld_mmu_iounit(void);
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index f56c3c9a9793..852085ada368 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -136,36 +136,8 @@ static void msi_set_sync(void)
void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
- unsigned long ptp; /* Physical address, shifted right by 4 */
- int i;
-
- ptp = __nocache_pa(ptep) >> 4;
- for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
- set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
- ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
- }
-}
-
-void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
-{
- unsigned long ptp; /* Physical address, shifted right by 4 */
- int i;
-
- ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */
- for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
- set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
- ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
- }
-}
-
-/* Find an entry in the third-level page table.. */
-pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
-{
- void *pte;
-
- pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
- return (pte_t *) pte +
- ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
+ unsigned long ptp = __nocache_pa(ptep) >> 4;
+ set_pte((pte_t *)&pmd_val(*pmdp), __pte(SRMMU_ET_PTD | ptp));
}
/*
@@ -175,18 +147,18 @@ pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
*/
static void *__srmmu_get_nocache(int size, int align)
{
- int offset;
+ int offset, minsz = 1 << SRMMU_NOCACHE_BITMAP_SHIFT;
unsigned long addr;
- if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
+ if (size < minsz) {
printk(KERN_ERR "Size 0x%x too small for nocache request\n",
size);
- size = SRMMU_NOCACHE_BITMAP_SHIFT;
+ size = minsz;
}
- if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {
- printk(KERN_ERR "Size 0x%x unaligned int nocache request\n",
+ if (size & (minsz - 1)) {
+ printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
size);
- size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;
+ size += minsz - 1;
}
BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);
@@ -331,9 +303,9 @@ static void __init srmmu_nocache_init(void)
while (vaddr < srmmu_nocache_end) {
pgd = pgd_offset_k(vaddr);
- p4d = p4d_offset(__nocache_fix(pgd), vaddr);
- pud = pud_offset(__nocache_fix(p4d), vaddr);
- pmd = pmd_offset(__nocache_fix(pgd), vaddr);
+ p4d = p4d_offset(pgd, vaddr);
+ pud = pud_offset(p4d, vaddr);
+ pmd = pmd_offset(__nocache_fix(pud), vaddr);
pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);
@@ -376,31 +348,34 @@ pgd_t *get_pgd_fast(void)
*/
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
- unsigned long pte;
+ pte_t *ptep;
struct page *page;
- if ((pte = (unsigned long)pte_alloc_one_kernel(mm)) == 0)
- return NULL;
- page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
- if (!pgtable_pte_page_ctor(page)) {
- __free_page(page);
+ if (!(ptep = pte_alloc_one_kernel(mm)))
return NULL;
+ page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
+ spin_lock(&mm->page_table_lock);
+ if (page_ref_inc_return(page) == 2 &&
+ !pagetable_pte_ctor(page_ptdesc(page))) {
+ page_ref_dec(page);
+ ptep = NULL;
}
- return page;
+ spin_unlock(&mm->page_table_lock);
+
+ return ptep;
}
-void pte_free(struct mm_struct *mm, pgtable_t pte)
+void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
- unsigned long p;
+ struct page *page;
- pgtable_pte_page_dtor(pte);
- p = (unsigned long)page_address(pte); /* Cached address (for test) */
- if (p == 0)
- BUG();
- p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */
+ page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
+ spin_lock(&mm->page_table_lock);
+ if (page_ref_dec_return(page) == 1)
+ pagetable_pte_dtor(page_ptdesc(page));
+ spin_unlock(&mm->page_table_lock);
- /* free non cached virtual address*/
- srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
+ srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE);
}
/* context handling - a dynamically sized pool is used */
@@ -715,7 +690,7 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
pgdp = pgd_offset_k(start);
p4dp = p4d_offset(pgdp, start);
pudp = pud_offset(p4dp, start);
- if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
+ if (pud_none(*__nocache_fix(pudp))) {
pmdp = __srmmu_get_nocache(
SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL)
@@ -724,7 +699,7 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
pud_set(__nocache_fix(pudp), pmdp);
}
pmdp = pmd_offset(__nocache_fix(pudp), start);
- if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+ if (srmmu_pmd_none(*__nocache_fix(pmdp))) {
ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
if (ptep == NULL)
early_pgtable_allocfail("pte");
@@ -822,13 +797,13 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
what = 0;
addr = start - PAGE_SIZE;
- if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
- if (srmmu_probe(addr + SRMMU_REAL_PMD_SIZE) == probed)
+ if (!(start & ~(PMD_MASK))) {
+ if (srmmu_probe(addr + PMD_SIZE) == probed)
what = 1;
}
- if (!(start & ~(SRMMU_PGDIR_MASK))) {
- if (srmmu_probe(addr + SRMMU_PGDIR_SIZE) == probed)
+ if (!(start & ~(PGDIR_MASK))) {
+ if (srmmu_probe(addr + PGDIR_SIZE) == probed)
what = 2;
}
@@ -836,11 +811,11 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
p4dp = p4d_offset(pgdp, start);
pudp = pud_offset(p4dp, start);
if (what == 2) {
- *(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
- start += SRMMU_PGDIR_SIZE;
+ *__nocache_fix(pgdp) = __pgd(probed);
+ start += PGDIR_SIZE;
continue;
}
- if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
+ if (pud_none(*__nocache_fix(pudp))) {
pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL)
@@ -848,29 +823,21 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
pud_set(__nocache_fix(pudp), pmdp);
}
- pmdp = pmd_offset(__nocache_fix(pgdp), start);
- if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+ pmdp = pmd_offset(__nocache_fix(pudp), start);
+ if (what == 1) {
+ *(pmd_t *)__nocache_fix(pmdp) = __pmd(probed);
+ start += PMD_SIZE;
+ continue;
+ }
+ if (srmmu_pmd_none(*__nocache_fix(pmdp))) {
ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
if (ptep == NULL)
early_pgtable_allocfail("pte");
memset(__nocache_fix(ptep), 0, PTE_SIZE);
pmd_set(__nocache_fix(pmdp), ptep);
}
- if (what == 1) {
- /* We bend the rule where all 16 PTPs in a pmd_t point
- * inside the same PTE page, and we leak a perfectly
- * good hardware PTE piece. Alternatives seem worse.
- */
- unsigned int x; /* Index of HW PMD in soft cluster */
- unsigned long *val;
- x = (start >> PMD_SHIFT) & 15;
- val = &pmdp->pmdv[x];
- *(unsigned long *)__nocache_fix(val) = probed;
- start += SRMMU_REAL_PMD_SIZE;
- continue;
- }
ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
- *(pte_t *)__nocache_fix(ptep) = __pte(probed);
+ *__nocache_fix(ptep) = __pte(probed);
start += PAGE_SIZE;
}
}
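A minimal sketch of the granularity selection at the top of srmmu_inherit_prom_mappings() above, assuming probed is the descriptor returned for start and addr is start - PAGE_SIZE (example_pick_mapping_level() is an invented helper): a PROM mapping is copied at the largest level whose alignment start satisfies and whose last page probes to the same descriptor.

static int example_pick_mapping_level(unsigned long start, unsigned long addr,
				      unsigned long probed)
{
	int what = 0;					/* default: single page */

	if (!(start & ~PMD_MASK) &&			/* PMD-aligned ...     */
	    srmmu_probe(addr + PMD_SIZE) == probed)	/* ... and PMD-sized   */
		what = 1;
	if (!(start & ~PGDIR_MASK) &&			/* PGDIR-aligned ...   */
	    srmmu_probe(addr + PGDIR_SIZE) == probed)	/* ... and PGDIR-sized */
		what = 2;
	return what;	/* 2: write the pgd, 1: write the pmd, 0: fill a pte */
}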
@@ -884,15 +851,15 @@ static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base
unsigned long big_pte;
big_pte = KERNEL_PTE(phys_base >> 4);
- *(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
+ *__nocache_fix(pgdp) = __pgd(big_pte);
}
/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
- unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
- unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
- unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
+ unsigned long pstart = (sp_banks[sp_entry].base_addr & PGDIR_MASK);
+ unsigned long vstart = (vbase & PGDIR_MASK);
+ unsigned long vend = PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
/* Map "low" memory only */
const unsigned long min_vaddr = PAGE_OFFSET;
const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;
@@ -905,7 +872,7 @@ static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
while (vstart < vend) {
do_large_mapping(vstart, pstart);
- vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
+ vstart += PGDIR_SIZE; pstart += PGDIR_SIZE;
}
return vstart;
}
@@ -974,7 +941,7 @@ void __init srmmu_paging_init(void)
srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa(srmmu_context_table);
for (i = 0; i < num_contexts; i++)
- srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
+ srmmu_ctxd_set(__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
flush_cache_all();
srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
@@ -1005,27 +972,14 @@ void __init srmmu_paging_init(void)
sparc_context_init(num_contexts);
- kmap_init();
-
{
- unsigned long zones_size[MAX_NR_ZONES];
- unsigned long zholes_size[MAX_NR_ZONES];
- unsigned long npages;
- int znum;
-
- for (znum = 0; znum < MAX_NR_ZONES; znum++)
- zones_size[znum] = zholes_size[znum] = 0;
+ unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
- npages = max_low_pfn - pfn_base;
+ max_zone_pfn[ZONE_DMA] = max_low_pfn;
+ max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
+ max_zone_pfn[ZONE_HIGHMEM] = highend_pfn;
- zones_size[ZONE_DMA] = npages;
- zholes_size[ZONE_DMA] = npages - pages_avail;
-
- npages = highend_pfn - max_low_pfn;
- zones_size[ZONE_HIGHMEM] = npages;
- zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
-
- free_area_init_node(0, zones_size, pfn_base, zholes_size);
+ free_area_init(max_zone_pfn);
}
}
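A minimal sketch of the simplified zone setup above (example_init_zones() is an invented wrapper): the per-zone size and hole arrays are gone because free_area_init() derives present pages from memblock itself, so the architecture only reports the highest PFN each zone may reach.

static void __init example_init_zones(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	max_zone_pfn[ZONE_DMA]     = max_low_pfn;	/* all low memory */
	max_zone_pfn[ZONE_NORMAL]  = max_low_pfn;
	max_zone_pfn[ZONE_HIGHMEM] = highend_pfn;
	free_area_init(max_zone_pfn);		/* holes come from memblock */
}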
@@ -1559,7 +1513,7 @@ static void __init init_viking(void)
/*
* We need this to make sure old viking takes no hits
- * on it's cache for dma snoops to workaround the
+ * on its cache for dma snoops to workaround the
* "load from non-cacheable memory" interrupt bug.
* This is only necessary because of the new way in
* which we use the IOMMU.
@@ -1683,19 +1637,19 @@ static void __init get_srmmu_type(void)
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
- xc1((smpfunc_t) local_ops->page_for_dma, page);
+ xc1(local_ops->page_for_dma, page);
local_ops->page_for_dma(page);
}
static void smp_flush_cache_all(void)
{
- xc0((smpfunc_t) local_ops->cache_all);
+ xc0(local_ops->cache_all);
local_ops->cache_all();
}
static void smp_flush_tlb_all(void)
{
- xc0((smpfunc_t) local_ops->tlb_all);
+ xc0(local_ops->tlb_all);
local_ops->tlb_all();
}
@@ -1706,7 +1660,7 @@ static void smp_flush_cache_mm(struct mm_struct *mm)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
+ xc1(local_ops->cache_mm, (unsigned long)mm);
local_ops->cache_mm(mm);
}
}
@@ -1718,7 +1672,7 @@ static void smp_flush_tlb_mm(struct mm_struct *mm)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask)) {
- xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
+ xc1(local_ops->tlb_mm, (unsigned long)mm);
if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
cpumask_copy(mm_cpumask(mm),
cpumask_of(smp_processor_id()));
@@ -1738,8 +1692,8 @@ static void smp_flush_cache_range(struct vm_area_struct *vma,
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc3((smpfunc_t) local_ops->cache_range,
- (unsigned long) vma, start, end);
+ xc3(local_ops->cache_range, (unsigned long)vma, start,
+ end);
local_ops->cache_range(vma, start, end);
}
}
@@ -1755,8 +1709,8 @@ static void smp_flush_tlb_range(struct vm_area_struct *vma,
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc3((smpfunc_t) local_ops->tlb_range,
- (unsigned long) vma, start, end);
+ xc3(local_ops->tlb_range, (unsigned long)vma, start,
+ end);
local_ops->tlb_range(vma, start, end);
}
}
@@ -1770,8 +1724,7 @@ static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc2((smpfunc_t) local_ops->cache_page,
- (unsigned long) vma, page);
+ xc2(local_ops->cache_page, (unsigned long)vma, page);
local_ops->cache_page(vma, page);
}
}
@@ -1785,8 +1738,7 @@ static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc2((smpfunc_t) local_ops->tlb_page,
- (unsigned long) vma, page);
+ xc2(local_ops->tlb_page, (unsigned long)vma, page);
local_ops->tlb_page(vma, page);
}
}
@@ -1800,7 +1752,7 @@ static void smp_flush_page_to_ram(unsigned long page)
* XXX This experiment failed, research further... -DaveM
*/
#if 1
- xc1((smpfunc_t) local_ops->page_to_ram, page);
+ xc1(local_ops->page_to_ram, page);
#endif
local_ops->page_to_ram(page);
}
@@ -1811,8 +1763,7 @@ static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc2((smpfunc_t) local_ops->sig_insns,
- (unsigned long) mm, insn_addr);
+ xc2(local_ops->sig_insns, (unsigned long)mm, insn_addr);
local_ops->sig_insns(mm, insn_addr);
}
@@ -1865,9 +1816,7 @@ void __init load_mmu(void)
&smp_cachetlb_ops;
#endif
- if (sparc_cpu_model == sun4d)
- ld_mmu_iounit();
- else
+ if (sparc_cpu_model != sun4d)
ld_mmu_iommu();
#ifdef CONFIG_SMP
if (sparc_cpu_model == sun4d)
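A minimal sketch of the cross-call pattern shared by the smp_flush_*() wrappers above, now without the (smpfunc_t) casts (example_cross_call_flush() is an invented name; the mm->context and mm_users details of the real wrappers are omitted):

static void example_cross_call_flush(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);	/* not ourselves */
	if (!cpumask_empty(&cpu_mask))
		xc1(local_ops->tlb_mm, (unsigned long)mm);	/* remote CPUs */
	local_ops->tlb_mm(mm);					/* this CPU */
}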
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 3d72d2deb13b..b44d79d778c7 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -9,9 +9,8 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>
+#include <linux/pagemap.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
@@ -119,6 +118,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
unsigned long paddr, pfn = pte_pfn(orig);
struct address_space *mapping;
struct page *page;
+ struct folio *folio;
if (!pfn_valid(pfn))
goto no_cache_flush;
@@ -128,13 +128,14 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
goto no_cache_flush;
/* A real file page? */
- mapping = page_mapping_file(page);
+ folio = page_folio(page);
+ mapping = folio_flush_mapping(folio);
if (!mapping)
goto no_cache_flush;
paddr = (unsigned long) page_address(page);
if ((paddr ^ vaddr) & (1 << 13))
- flush_dcache_page_all(mm, page);
+ flush_dcache_folio_all(mm, folio);
}
no_cache_flush:
@@ -150,6 +151,8 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
pte_t *pte;
pte = pte_offset_map(&pmd, vaddr);
+ if (!pte)
+ return;
end = vaddr + HPAGE_SIZE;
while (vaddr < end) {
if (pte_val(*pte) & _PAGE_VALID) {
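A minimal sketch of the new early return above (example_scan_huge_range() is an invented name): pte_offset_map() may now return NULL, for instance when the PMD changes under the walker, so a PTE-level scan has to bail out instead of assuming the table is there.

static void example_scan_huge_range(pmd_t pmd, unsigned long vaddr)
{
	pte_t *pte = pte_offset_map(&pmd, vaddr);

	if (!pte)	/* no PTE table to walk: nothing to flush */
		return;
	/* ... walk HPAGE_SIZE worth of PTEs, as tlb_batch_pmd_scan() does ... */
	pte_unmap(pte);
}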
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index f5edc28aa3a5..5fe52a64c7e7 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -8,9 +8,9 @@
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/tsb.h>
@@ -266,7 +266,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
default:
printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
current->comm, current->pid, tsb_bytes);
- do_exit(SIGSEGV);
+ BUG();
}
tte |= pte_sz_bits(page_sz);
@@ -385,7 +385,7 @@ static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
* will not trigger any longer.
*
* The TSB can be anywhere from 8K to 1MB in size, in increasing powers
- * of two. The TSB must be aligned to it's size, so f.e. a 512K TSB
+ * of two. The TSB must be aligned to its size, so f.e. a 512K TSB
* must be 512K aligned. It also must be physically contiguous, so we
* cannot use vmalloc().
*
@@ -402,8 +402,8 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
unsigned long new_rss_limit;
gfp_t gfp_flags;
- if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
- max_tsb_size = (PAGE_SIZE << MAX_ORDER);
+ if (max_tsb_size > PAGE_SIZE << MAX_PAGE_ORDER)
+ max_tsb_size = PAGE_SIZE << MAX_PAGE_ORDER;
new_cache_index = 0;
for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
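A minimal sketch of the size clamp above (example_clamp_tsb_size() is an invented name): the TSB must be physically contiguous and power-of-two sized, so the candidate size is capped at the largest buddy allocation, now spelled PAGE_SIZE << MAX_PAGE_ORDER after the rename from MAX_ORDER, before the doubling loop starting at 8K selects the new size.

static unsigned long example_clamp_tsb_size(unsigned long max_tsb_size)
{
	/* largest physically contiguous block the buddy allocator hands out */
	if (max_tsb_size > PAGE_SIZE << MAX_PAGE_ORDER)
		max_tsb_size = PAGE_SIZE << MAX_PAGE_ORDER;
	return max_tsb_size;
}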
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index d220b6848746..70e658d107e0 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -5,8 +5,8 @@
* Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
*/
+#include <linux/pgtable.h>
#include <asm/asi.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
index adaef6e7b8cf..48f062de7a7f 100644
--- a/arch/sparc/mm/viking.S
+++ b/arch/sparc/mm/viking.S
@@ -13,6 +13,7 @@
#include <asm/asi.h>
#include <asm/mxcc.h>
#include <asm/page.h>
+#include <asm/pgtable.h>
#include <asm/pgtsrmmu.h>
#include <asm/viking.h>
@@ -157,7 +158,7 @@ viking_flush_tlb_range:
cmp %o3, -1
be 2f
#endif
- sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+ sethi %hi(~((1 << PGDIR_SHIFT) - 1)), %o4
sta %o3, [%g1] ASI_M_MMUREGS
and %o1, %o4, %o1
add %o1, 0x200, %o1
@@ -243,7 +244,7 @@ sun4dsmp_flush_tlb_range:
ld [%o0 + VMA_VM_MM], %o0
ld [%o0 + AOFF_mm_context], %o3
lda [%g1] ASI_M_MMUREGS, %g5
- sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+ sethi %hi(~((1 << PGDIR_SHIFT) - 1)), %o4
sta %o3, [%g1] ASI_M_MMUREGS
and %o1, %o4, %o1
add %o1, 0x200, %o1
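A minimal C illustration of the mask loaded by the sethi instructions above (example_pgdir_round_down() is an invented name): the flush loops round the start address down to the pgd-entry-sized region containing it, now using the generic PGDIR_SHIFT spelling instead of SRMMU_PGDIR_SHIFT.

static inline unsigned long example_pgdir_round_down(unsigned long addr)
{
	/* clear everything below PGDIR_SHIFT, as the masked and above does */
	return addr & ~((1UL << PGDIR_SHIFT) - 1);
}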