path: root/arch/openrisc/mm
Diffstat (limited to 'arch/openrisc/mm')
-rw-r--r--  arch/openrisc/mm/Makefile  |   3
-rw-r--r--  arch/openrisc/mm/cache.c   |  99
-rw-r--r--  arch/openrisc/mm/fault.c   | 124
-rw-r--r--  arch/openrisc/mm/init.c    | 127
-rw-r--r--  arch/openrisc/mm/ioremap.c | 102
-rw-r--r--  arch/openrisc/mm/tlb.c     |  48
6 files changed, 263 insertions, 240 deletions
diff --git a/arch/openrisc/mm/Makefile b/arch/openrisc/mm/Makefile
index 324ba2634529..8a0e580e2213 100644
--- a/arch/openrisc/mm/Makefile
+++ b/arch/openrisc/mm/Makefile
@@ -1,5 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
#
# Makefile for the linux openrisc-specific parts of the memory manager.
#
-obj-y := fault.o tlb.o init.o ioremap.o
+obj-y := fault.o cache.o tlb.o init.o ioremap.o
diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c
new file mode 100644
index 000000000000..f33df46dae4e
--- /dev/null
+++ b/arch/openrisc/mm/cache.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OpenRISC cache.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2015 Jan Henrik Weinstock <jan.weinstock@rwth-aachen.de>
+ */
+
+#include <asm/spr.h>
+#include <asm/spr_defs.h>
+#include <asm/cache.h>
+#include <asm/cacheflush.h>
+#include <asm/cpuinfo.h>
+#include <asm/tlbflush.h>
+
+/*
+ * Check if the cache component exists.
+ */
+bool cpu_cache_is_present(const unsigned int cache_type)
+{
+ unsigned long upr = mfspr(SPR_UPR);
+ unsigned long mask = SPR_UPR_UP | cache_type;
+
+ return !((upr & mask) ^ mask);
+}
+
+static __always_inline void cache_loop(unsigned long paddr, unsigned long end,
+ const unsigned short reg, const unsigned int cache_type)
+{
+ if (!cpu_cache_is_present(cache_type))
+ return;
+
+ while (paddr < end) {
+ mtspr(reg, paddr);
+ paddr += L1_CACHE_BYTES;
+ }
+}
+
+static __always_inline void cache_loop_page(struct page *page, const unsigned short reg,
+ const unsigned int cache_type)
+{
+ unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
+ unsigned long end = paddr + PAGE_SIZE;
+
+ paddr &= ~(L1_CACHE_BYTES - 1);
+
+ cache_loop(paddr, end, reg, cache_type);
+}
+
+void local_dcache_page_flush(struct page *page)
+{
+ cache_loop_page(page, SPR_DCBFR, SPR_UPR_DCP);
+}
+EXPORT_SYMBOL(local_dcache_page_flush);
+
+void local_icache_page_inv(struct page *page)
+{
+ cache_loop_page(page, SPR_ICBIR, SPR_UPR_ICP);
+}
+EXPORT_SYMBOL(local_icache_page_inv);
+
+void local_dcache_range_flush(unsigned long start, unsigned long end)
+{
+ cache_loop(start, end, SPR_DCBFR, SPR_UPR_DCP);
+}
+
+void local_dcache_range_inv(unsigned long start, unsigned long end)
+{
+ cache_loop(start, end, SPR_DCBIR, SPR_UPR_DCP);
+}
+
+void local_icache_range_inv(unsigned long start, unsigned long end)
+{
+ cache_loop(start, end, SPR_ICBIR, SPR_UPR_ICP);
+}
+
+void update_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t *pte)
+{
+ unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT;
+ struct folio *folio = page_folio(pfn_to_page(pfn));
+ int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags.f);
+
+ /*
+ * Since icaches do not snoop for updated data on OpenRISC, we
+ * must write back and invalidate any dirty pages manually. We
+ * can skip data pages, since they will not end up in icaches.
+ */
+ if ((vma->vm_flags & VM_EXEC) && dirty) {
+ unsigned int nr = folio_nr_pages(folio);
+
+ while (nr--)
+ sync_icache_dcache(folio_page(folio, nr));
+ }
+}
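
[Note on the check above: cpu_cache_is_present() folds two tests into one expression -- !((upr & mask) ^ mask) is true exactly when (upr & mask) == mask, i.e. when both the "UPR present" bit and the requested cache-present bit are set in the unit-present register. A minimal stand-alone sketch of the same test; the bit values here are illustrative, not the real spr_defs.h encodings.]

    /* Mocked UPR presence test; bit positions are illustrative. */
    #include <stdbool.h>
    #include <stdio.h>

    #define SPR_UPR_UP  0x001UL  /* "unit present register valid" (illustrative) */
    #define SPR_UPR_DCP 0x002UL  /* "data cache present" (illustrative) */

    static bool cache_is_present(unsigned long upr, unsigned long cache_type)
    {
        unsigned long mask = SPR_UPR_UP | cache_type;

        /* equivalent to (upr & mask) == mask */
        return !((upr & mask) ^ mask);
    }

    int main(void)
    {
        printf("%d\n", cache_is_present(SPR_UPR_UP | SPR_UPR_DCP, SPR_UPR_DCP)); /* 1 */
        printf("%d\n", cache_is_present(SPR_UPR_DCP, SPR_UPR_DCP));              /* 0: UPR bit clear */
        return 0;
    }
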
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 4a41f8493ab0..29e232d78d82 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC fault.c
*
@@ -8,34 +9,30 @@
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/mm.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/sched.h>
+#include <linux/extable.h>
+#include <linux/sched/signal.h>
+#include <linux/perf_event.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
+#include <asm/bug.h>
+#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/signal.h>
#define NUM_TLB_ENTRIES 64
#define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1))
-unsigned long pte_misses; /* updated by do_page_fault() */
-unsigned long pte_errors; /* updated by do_page_fault() */
-
/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
- * - also look into include/asm-or32/mmu_context.h
+ * - also look into include/asm/mmu_context.h
*/
-volatile pgd_t *current_pgd;
+volatile pgd_t *current_pgd[NR_CPUS];
-extern void die(char *, struct pt_regs *, long);
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long vector, int write_acc);
/*
* This routine handles page faults. It determines the address,
@@ -52,9 +49,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
- siginfo_t info;
- int fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ int si_code;
+ vm_fault_t fault;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
tsk = current;
@@ -86,6 +83,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
if (user_mode(regs)) {
/* Exception was in userspace: reenable interrupts */
local_irq_enable();
+ flags |= FAULT_FLAG_USER;
} else {
/* If exception was in a syscall, then IRQ's may have
* been enabled or disabled. If they were enabled,
@@ -96,7 +94,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
}
mm = tsk->mm;
- info.si_code = SEGV_MAPERR;
+ si_code = SEGV_MAPERR;
/*
* If we're in an interrupt or have no user
@@ -106,8 +104,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
if (in_interrupt() || !mm)
goto no_context;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
@@ -129,8 +129,9 @@ retry:
if (address + PAGE_SIZE < regs->sp)
goto bad_area;
}
- if (expand_stack(vma, address))
- goto bad_area;
+ vma = expand_stack(mm, address);
+ if (!vma)
+ goto bad_area_nosemaphore;
/*
* Ok, we have a good vm_area for this memory access, so
@@ -138,7 +139,7 @@ retry:
*/
good_area:
- info.si_code = SEGV_ACCERR;
+ si_code = SEGV_ACCERR;
/* first do some preliminary protection checks */
@@ -162,39 +163,41 @@ good_area:
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags, regs);
+
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ goto no_context;
+ return;
+ }
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- /*RGD modeled on Cris */
- if (fault & VM_FAULT_MAJOR)
- tsk->maj_flt++;
- else
- tsk->min_flt++;
- if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
-
- /* No need to up_read(&mm->mmap_sem) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
-
- goto retry;
- }
+ /*RGD modeled on Cris */
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
+
+ /* No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -203,18 +206,14 @@ good_area:
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- /* info.si_code has been set above */
- info.si_addr = (void *)address;
- force_sig_info(SIGSEGV, &info, tsk);
+ force_sig_fault(SIGSEGV, si_code, (void __user *)address);
return;
}
@@ -232,8 +231,6 @@ no_context:
{
const struct exception_table_entry *entry;
- __asm__ __volatile__("l.nop 42");
-
if ((entry = search_exception_tables(regs->pc)) != NULL) {
/* Adjust the instruction pointer in the stackframe */
regs->pc = entry->fixup;
@@ -255,35 +252,26 @@ no_context:
die("Oops", regs, write_acc);
- do_exit(SIGKILL);
-
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- __asm__ __volatile__("l.nop 42");
- __asm__ __volatile__("l.nop 1");
-
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRERR;
- info.si_addr = (void *)address;
- force_sig_info(SIGBUS, &info, tsk);
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
@@ -305,6 +293,7 @@ vmalloc_fault:
int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
@@ -316,7 +305,7 @@ vmalloc_fault:
phx_mmu("vmalloc_fault");
*/
- pgd = (pgd_t *)current_pgd + offset;
+ pgd = (pgd_t *)current_pgd[smp_processor_id()] + offset;
pgd_k = init_mm.pgd + offset;
/* Since we're two-level, we don't need to do both
@@ -331,8 +320,13 @@ vmalloc_fault:
* it exists.
*/
- pud = pud_offset(pgd, address);
- pud_k = pud_offset(pgd_k, address);
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d_k))
+ goto no_context;
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud_k))
goto no_context;
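
[Note on the vmalloc_fault path above: it walks the faulting context's top-level table (current_pgd[smp_processor_id()]) in parallel with the kernel reference table (init_mm.pgd). Only a miss in init_mm.pgd is a real fault; otherwise the usual fix-up on two-level ports is to copy the kernel entry across and retry the access. A toy sketch of that lazy-synchronization idea -- plain arrays stand in for page tables, and this is not the actual OpenRISC fix-up code.]

    /* Toy model of lazy vmalloc-fault synchronization. */
    #include <stdio.h>

    #define PTRS_PER_PGD 8  /* illustrative size */

    static unsigned long init_mm_pgd[PTRS_PER_PGD]; /* kernel reference table */
    static unsigned long cpu_pgd[PTRS_PER_PGD];     /* current_pgd[cpu] stand-in */

    static int vmalloc_fault(unsigned int offset)
    {
        if (!init_mm_pgd[offset])
            return -1;                         /* unmapped everywhere: real fault */
        cpu_pgd[offset] = init_mm_pgd[offset]; /* sync the entry, then retry */
        return 0;
    }

    int main(void)
    {
        init_mm_pgd[3] = 0x1000; /* pretend vmalloc populated this entry */
        printf("offset 3: %d\n", vmalloc_fault(3)); /* 0: fixed up */
        printf("offset 5: %d\n", vmalloc_fault(5)); /* -1: genuine fault */
        return 0;
    }
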
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index 7f94652311d7..9382d9a0ec78 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC init.c
*
@@ -8,11 +9,6 @@
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/signal.h>
@@ -26,42 +22,33 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/blkdev.h> /* for initrd_* */
#include <linux/pagemap.h>
-#include <linux/memblock.h>
-#include <asm/segment.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
-#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
+#include <asm/cacheflush.h>
int mem_init_done;
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
static void __init zone_sizes_init(void)
{
- unsigned long zones_size[MAX_NR_ZONES];
-
- /* Clear the zone sizes */
- memset(zones_size, 0, sizeof(zones_size));
+ unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
/*
* We use only ZONE_NORMAL
*/
- zones_size[ZONE_NORMAL] = max_low_pfn;
+ max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
- free_area_init(zones_size);
+ free_area_init(max_zone_pfn);
}
extern const char _s_kernel_ro[], _e_kernel_ro[];
@@ -74,29 +61,32 @@ extern const char _s_kernel_ro[], _e_kernel_ro[];
*/
static void __init map_ram(void)
{
+ phys_addr_t start, end;
unsigned long v, p, e;
pgprot_t prot;
pgd_t *pge;
+ p4d_t *p4e;
pud_t *pue;
pmd_t *pme;
pte_t *pte;
+ u64 i;
/* These mark extents of read-only kernel pages...
* ...from vmlinux.lds.S
*/
- struct memblock_region *region;
v = PAGE_OFFSET;
- for_each_memblock(memory, region) {
- p = (u32) region->base & PAGE_MASK;
- e = p + (u32) region->size;
+ for_each_mem_range(i, &start, &end) {
+ p = (u32) start & PAGE_MASK;
+ e = (u32) end;
v = (u32) __va(p);
pge = pgd_offset_k(v);
while (p < e) {
int j;
- pue = pud_offset(pge, v);
+ p4e = p4d_offset(pge, v);
+ pue = pud_offset(p4e, v);
pme = pmd_offset(pue, v);
if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
@@ -106,11 +96,14 @@ static void __init map_ram(void)
}
/* Alloc one page for holding PTE's... */
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate page for PTEs\n",
+ __func__);
set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));
/* Fill the newly allocated page with PTE'S */
- for (j = 0; p < e && j < PTRS_PER_PGD;
+ for (j = 0; p < e && j < PTRS_PER_PTE;
v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
if (v >= (u32) _e_kernel_ro ||
v < (u32) _s_kernel_ro)
@@ -125,15 +118,12 @@ static void __init map_ram(void)
}
printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
- region->base, region->base + region->size);
+ start, end);
}
}
void __init paging_init(void)
{
- extern void tlb_init(void);
-
- unsigned long end;
int i;
printk(KERN_INFO "Setting up paging and PTEs.\n");
@@ -147,9 +137,7 @@ void __init paging_init(void)
* (even if it is most probably not used until the next
* switch_mm)
*/
- current_pgd = init_mm.pgd;
-
- end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
+ current_pgd[smp_processor_id()] = init_mm.pgd;
map_ram();
@@ -189,8 +177,8 @@ void __init paging_init(void)
barrier();
/* Invalidate instruction caches after code modification */
- mtspr(SPR_ICBIR, 0x900);
- mtspr(SPR_ICBIR, 0xa00);
+ local_icache_block_inv(0x900);
+ local_icache_block_inv(0xa00);
/* New TLB miss handlers and kernel page tables are now in place.
* Make sure that page flags get updated for all pages in TLB by
@@ -206,30 +194,71 @@ void __init mem_init(void)
{
BUG_ON(!mem_map);
- max_mapnr = max_low_pfn;
- high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
-
/* clear the zero-page */
memset((void *)empty_zero_page, 0, PAGE_SIZE);
- /* this will put all low memory onto the freelists */
- free_all_bootmem();
-
- mem_init_print_info(NULL);
-
printk("mem_init_done ...........................................\n");
mem_init_done = 1;
return;
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
+static int __init map_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ p4d = p4d_offset(pgd_offset_k(va), va);
+ pud = pud_offset(p4d, va);
+ pmd = pmd_offset(pud, va);
+ pte = pte_alloc_kernel(pmd, va);
+
+ if (pte == NULL)
+ return -ENOMEM;
+
+ if (pgprot_val(prot))
+ set_pte_at(&init_mm, va, pte, pfn_pte(pa >> PAGE_SHIFT, prot));
+ else
+ pte_clear(&init_mm, va, pte);
+
+ local_flush_tlb_page(NULL, va);
+ return 0;
}
-#endif
-void free_initmem(void)
+/*
+ * __set_fixmap must now support both EARLYCON and TEXT_POKE mappings,
+ * which are used at different stages of kernel execution.
+ */
+void __set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t prot)
{
- free_initmem_default(-1);
+ unsigned long address = __fix_to_virt(idx);
+
+ if (idx >= __end_of_fixed_addresses) {
+ BUG();
+ return;
+ }
+
+ map_page(address, phys, prot);
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY_X,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY_X,
+ [VM_EXEC] = PAGE_READONLY,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY_X,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_X,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
+};
+DECLARE_VM_GET_PAGE_PROT
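
[Note on the table above: DECLARE_VM_GET_PAGE_PROT emits the generic vm_get_page_prot(), which indexes protection_map with the low four vm_flags bits (read/write/exec/shared). That indexing is how private writable mappings land on the copy-on-write PAGE_COPY* entries while shared writable ones get PAGE_SHARED*. A compressed sketch of the lookup, with illustrative flag values and strings in place of real pgprot_t entries.]

    /* Compressed model of vm_get_page_prot(); values are illustrative. */
    #include <stdio.h>

    #define VM_READ   0x1UL
    #define VM_WRITE  0x2UL
    #define VM_EXEC   0x4UL
    #define VM_SHARED 0x8UL

    static const char *protection_map[16] = {
        [0]                              = "PAGE_NONE",
        [VM_READ]                        = "PAGE_READONLY_X",
        [VM_WRITE | VM_READ]             = "PAGE_COPY_X",   /* private RW: COW */
        [VM_SHARED | VM_WRITE | VM_READ] = "PAGE_SHARED_X", /* shared RW */
        /* ...remaining twelve entries as in the table above... */
    };

    static const char *vm_get_page_prot(unsigned long vm_flags)
    {
        return protection_map[vm_flags & 0xf];
    }

    int main(void)
    {
        printf("%s\n", vm_get_page_prot(VM_READ | VM_WRITE));             /* PAGE_COPY_X */
        printf("%s\n", vm_get_page_prot(VM_SHARED | VM_READ | VM_WRITE)); /* PAGE_SHARED_X */
        return 0;
    }
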
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index 62b08ef392be..3b352f97fecb 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC ioremap.c
*
@@ -8,106 +9,20 @@
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <linux/pgtable.h>
#include <asm/pgalloc.h>
-#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/bug.h>
-#include <asm/pgtable.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
extern int mem_init_done;
-static unsigned int fixmaps_used __initdata;
-
/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void __iomem *__init_refok
-__ioremap(phys_addr_t addr, unsigned long size, pgprot_t prot)
-{
- phys_addr_t p;
- unsigned long v;
- unsigned long offset, last_addr;
- struct vm_struct *area = NULL;
-
- /* Don't allow wraparound or zero size */
- last_addr = addr + size - 1;
- if (!size || last_addr < addr)
- return NULL;
-
- /*
- * Mappings have to be page-aligned
- */
- offset = addr & ~PAGE_MASK;
- p = addr & PAGE_MASK;
- size = PAGE_ALIGN(last_addr + 1) - p;
-
- if (likely(mem_init_done)) {
- area = get_vm_area(size, VM_IOREMAP);
- if (!area)
- return NULL;
- v = (unsigned long)area->addr;
- } else {
- if ((fixmaps_used + (size >> PAGE_SHIFT)) > FIX_N_IOREMAPS)
- return NULL;
- v = fix_to_virt(FIX_IOREMAP_BEGIN + fixmaps_used);
- fixmaps_used += (size >> PAGE_SHIFT);
- }
-
- if (ioremap_page_range(v, v + size, p, prot)) {
- if (likely(mem_init_done))
- vfree(area->addr);
- else
- fixmaps_used -= (size >> PAGE_SHIFT);
- return NULL;
- }
-
- return (void __iomem *)(offset + (char *)v);
-}
-
-void iounmap(void *addr)
-{
- /* If the page is from the fixmap pool then we just clear out
- * the fixmap mapping.
- */
- if (unlikely((unsigned long)addr > FIXADDR_START)) {
- /* This is a bit broken... we don't really know
- * how big the area is so it's difficult to know
- * how many fixed pages to invalidate...
- * just flush tlb and hope for the best...
- * consider this a FIXME
- *
- * Really we should be clearing out one or more page
- * table entries for these virtual addresses so that
- * future references cause a page fault... for now, we
- * rely on two things:
- * i) this code never gets called on known boards
- * ii) invalid accesses to the freed areas aren't made
- */
- flush_tlb_all();
- return;
- }
-
- return vfree((void *)(PAGE_MASK & (unsigned long)addr));
-}
-
-/**
* OK, this one's a bit tricky... ioremap can get called before memory is
* initialized (early serial console does this) and will want to alloc a page
* for its mapping. No userspace pages will ever get allocated before memory
@@ -116,22 +31,15 @@ void iounmap(void *addr)
* the memblock infrastructure.
*/
-pte_t __init_refok *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
+pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
if (likely(mem_init_done)) {
- pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT);
+ pte = __pte_alloc_one_kernel(mm);
} else {
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-#if 0
- /* FIXME: use memblock... */
- pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
-#endif
+ pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
}
- if (pte)
- clear_page(pte);
return pte;
}
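
[Note on pte_alloc_one_kernel() above: it keeps the long-standing two-phase pattern. Before mem_init_done the page allocator is not up yet (the early serial console's ioremap hits exactly this window), so PTE pages come from memblock; afterwards the normal __pte_alloc_one_kernel() path is used. A user-space sketch of the same pattern, with malloc()/calloc() and a static pool standing in for the two allocators.]

    /* Two-phase allocation sketch; sizes and names are illustrative. */
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    static int mem_init_done;                 /* set once the real allocator is up */
    static char bootmem_pool[4 * PAGE_SIZE];  /* stands in for memblock */
    static size_t bootmem_used;

    static void *alloc_pte_page(void)
    {
        if (mem_init_done)
            return calloc(1, PAGE_SIZE);      /* runtime path, returns zeroed page */
        if (bootmem_used + PAGE_SIZE > sizeof(bootmem_pool))
            return NULL;                      /* memblock_alloc_or_panic() would panic */
        void *p = bootmem_pool + bootmem_used;
        bootmem_used += PAGE_SIZE;
        memset(p, 0, PAGE_SIZE);              /* PTE pages must start zeroed */
        return p;
    }

    int main(void)
    {
        void *early = alloc_pte_page();       /* pre-mm: early console ioremap */
        mem_init_done = 1;
        void *late = alloc_pte_page();        /* normal runtime allocation */
        free(late);
        (void)early;                          /* boot-pool memory is never freed */
        return 0;
    }
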
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c
index 683bd4d31c7c..3115f2e4f864 100644
--- a/arch/openrisc/mm/tlb.c
+++ b/arch/openrisc/mm/tlb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC tlb.c
*
@@ -9,11 +10,6 @@
* Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
* Copyright (C) 2010-2011 Julius Baxter <julius.baxter@orsoc.se>
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/sched.h>
@@ -26,9 +22,7 @@
#include <linux/mm.h>
#include <linux/init.h>
-#include <asm/segment.h>
#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/spr_defs.h>
@@ -49,7 +43,7 @@
*
*/
-void flush_tlb_all(void)
+void local_flush_tlb_all(void)
{
int i;
unsigned long num_tlb_sets;
@@ -86,7 +80,7 @@ void flush_tlb_all(void)
#define flush_itlb_page_no_eir(addr) \
mtspr_off(SPR_ITLBMR_BASE(0), ITLB_OFFSET(addr), 0);
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
if (have_dtlbeir)
flush_dtlb_page_eir(addr);
@@ -99,8 +93,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
flush_itlb_page_no_eir(addr);
}
-void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
{
int addr;
bool dtlbeir;
@@ -129,13 +123,13 @@ void flush_tlb_range(struct vm_area_struct *vma,
* This should be changed to loop over mm and call flush_tlb_range.
*/
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
{
/* Was seeing bugs with the mm struct passed to us. Scrapped most of
this function. */
- /* Several architctures do this */
- flush_tlb_all();
+ /* Several architectures do this */
+ local_flush_tlb_all();
}
/* called in schedule() just before actually doing the switch_to */
@@ -143,21 +137,28 @@ void flush_tlb_mm(struct mm_struct *mm)
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *next_tsk)
{
+ unsigned int cpu;
+
+ if (unlikely(prev == next))
+ return;
+
+ cpu = smp_processor_id();
+
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
/* remember the pgd for the fault handlers
* this is similar to the pgd register in some other CPU's.
* we need our own copy of it because current and active_mm
* might be invalid at points where we still need to derefer
* the pgd.
*/
- current_pgd = next->pgd;
+ current_pgd[cpu] = next->pgd;
/* We don't have context support implemented, so flush all
* entries belonging to previous map
*/
-
- if (prev != next)
- flush_tlb_mm(prev);
-
+ local_flush_tlb_mm(prev);
}
/*
@@ -181,12 +182,3 @@ void destroy_context(struct mm_struct *mm)
flush_tlb_mm(mm);
}
-
-/* called once during VM initialization, from init.c */
-
-void __init tlb_init(void)
-{
- /* Do nothing... */
- /* invalidate the entire TLB */
- /* flush_tlb_all(); */
-}
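
[Note on the switch_mm() change above: beyond renaming the flush primitives to local_*, it adds mm_cpumask bookkeeping -- each mm now records which CPUs are currently running it, so SMP TLB shootdowns can be limited to those CPUs -- and it skips all work when prev == next. A toy sketch of that bookkeeping; a plain bitmask stands in for cpumask_t.]

    /* mm_cpumask bookkeeping sketch; not the kernel's cpumask API. */
    #include <stdio.h>

    struct mm { unsigned long cpu_mask; };

    static void switch_mm(struct mm *prev, struct mm *next, int cpu)
    {
        if (prev == next)
            return;                      /* same address space: nothing to do */
        prev->cpu_mask &= ~(1UL << cpu); /* cpumask_clear_cpu() stand-in */
        next->cpu_mask |=  (1UL << cpu); /* cpumask_set_cpu() stand-in */
        /* ...switch page tables and flush the local TLB here... */
    }

    int main(void)
    {
        struct mm a = {0}, b = {0};
        switch_mm(&a, &b, 2);
        printf("a=%lx b=%lx\n", a.cpu_mask, b.cpu_mask); /* a=0 b=4 */
        return 0;
    }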