Diffstat (limited to 'arch/openrisc/mm')
-rw-r--r--  arch/openrisc/mm/cache.c    68
-rw-r--r--  arch/openrisc/mm/fault.c     4
-rw-r--r--  arch/openrisc/mm/init.c     54
-rw-r--r--  arch/openrisc/mm/ioremap.c  89
-rw-r--r--  arch/openrisc/mm/tlb.c       9
5 files changed, 104 insertions(+), 120 deletions(-)
diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c
index 534a52ec5e66..f33df46dae4e 100644
--- a/arch/openrisc/mm/cache.c
+++ b/arch/openrisc/mm/cache.c
@@ -14,44 +14,86 @@
#include <asm/spr_defs.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
+#include <asm/cpuinfo.h>
#include <asm/tlbflush.h>
-static __always_inline void cache_loop(struct page *page, const unsigned int reg)
+/*
+ * Check if the cache component exists.
+ */
+bool cpu_cache_is_present(const unsigned int cache_type)
{
- unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
- unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
+ unsigned long upr = mfspr(SPR_UPR);
+ unsigned long mask = SPR_UPR_UP | cache_type;
+
+ return !((upr & mask) ^ mask);
+}
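+
+/*
+ * Reading the check above: mask has both SPR_UPR_UP (the Unit
+ * Present Register itself holds valid contents) and the requested
+ * cache-present bit set.  (upr & mask) equals mask only when both
+ * bits are set in UPR, making the XOR zero and the result true; if
+ * either bit is clear, the XOR is non-zero and the result is false.
+ */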
+
+static __always_inline void cache_loop(unsigned long paddr, unsigned long end,
+ const unsigned short reg, const unsigned int cache_type)
+{
+ if (!cpu_cache_is_present(cache_type))
+ return;
- while (line < paddr + PAGE_SIZE) {
- mtspr(reg, line);
- line += L1_CACHE_BYTES;
+ while (paddr < end) {
+ mtspr(reg, paddr);
+ paddr += L1_CACHE_BYTES;
}
}
+static __always_inline void cache_loop_page(struct page *page, const unsigned short reg,
+ const unsigned int cache_type)
+{
+ unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
+ unsigned long end = paddr + PAGE_SIZE;
+
+ paddr &= ~(L1_CACHE_BYTES - 1);
+
+ cache_loop(paddr, end, reg, cache_type);
+}
+
void local_dcache_page_flush(struct page *page)
{
- cache_loop(page, SPR_DCBFR);
+ cache_loop_page(page, SPR_DCBFR, SPR_UPR_DCP);
}
EXPORT_SYMBOL(local_dcache_page_flush);
void local_icache_page_inv(struct page *page)
{
- cache_loop(page, SPR_ICBIR);
+ cache_loop_page(page, SPR_ICBIR, SPR_UPR_ICP);
}
EXPORT_SYMBOL(local_icache_page_inv);
+void local_dcache_range_flush(unsigned long start, unsigned long end)
+{
+ cache_loop(start, end, SPR_DCBFR, SPR_UPR_DCP);
+}
+
+void local_dcache_range_inv(unsigned long start, unsigned long end)
+{
+ cache_loop(start, end, SPR_DCBIR, SPR_UPR_DCP);
+}
+
+void local_icache_range_inv(unsigned long start, unsigned long end)
+{
+ cache_loop(start, end, SPR_ICBIR, SPR_UPR_ICP);
+}
+
void update_cache(struct vm_area_struct *vma, unsigned long address,
pte_t *pte)
{
unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT;
- struct page *page = pfn_to_page(pfn);
- int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
+ struct folio *folio = page_folio(pfn_to_page(pfn));
+ int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags.f);
/*
* Since icaches do not snoop for updated data on OpenRISC, we
* must write back and invalidate any dirty pages manually. We
* can skip data pages, since they will not end up in icaches.
*/
- if ((vma->vm_flags & VM_EXEC) && dirty)
- sync_icache_dcache(page);
-}
+ if ((vma->vm_flags & VM_EXEC) && dirty) {
+ unsigned int nr = folio_nr_pages(folio);
+ while (nr--)
+ sync_icache_dcache(folio_page(folio, nr));
+ }
+}
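Taken together, the new helpers let callers flush or invalidate an arbitrary
address range instead of a whole page, and update_cache() now syncs every page
of a large folio. A minimal sketch of a caller, not part of this patch,
assuming the declarations this series adds to asm/cacheflush.h and a
hypothetical helper name:

#include <asm/cacheflush.h>

/*
 * Hypothetical helper: make a freshly written range of instructions
 * visible to the instruction fetch unit.  OpenRISC icaches do not
 * snoop dcache writes, so dirty dcache lines must be written back
 * and stale icache lines dropped before the new code may execute.
 * The local_* variants act on the executing CPU only.
 */
static void sync_code_range(unsigned long start, unsigned long end)
{
	local_dcache_range_flush(start, end);	/* write back dirty lines */
	local_icache_range_inv(start, end);	/* force icache refetch */
}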
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index a9dcd4381d1a..29e232d78d82 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -18,6 +18,7 @@
#include <linux/perf_event.h>
#include <linux/uaccess.h>
+#include <asm/bug.h>
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/signal.h>
@@ -30,7 +31,8 @@
*/
volatile pgd_t *current_pgd[NR_CPUS];
-extern void __noreturn die(char *, struct pt_regs *, long);
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long vector, int write_acc);
/*
* This routine handles page faults. It determines the address,
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index d531ab82be12..9382d9a0ec78 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -35,6 +35,7 @@
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
+#include <asm/cacheflush.h>
int mem_init_done;
@@ -123,8 +124,6 @@ static void __init map_ram(void)
void __init paging_init(void)
{
- extern void tlb_init(void);
-
int i;
printk(KERN_INFO "Setting up paging and PTEs.\n");
@@ -178,8 +177,8 @@ void __init paging_init(void)
barrier();
/* Invalidate instruction caches after code modification */
- mtspr(SPR_ICBIR, 0x900);
- mtspr(SPR_ICBIR, 0xa00);
+ local_icache_block_inv(0x900);
+ local_icache_block_inv(0xa00);
/* New TLB miss handlers and kernel page tables are now in place.
* Make sure that page flags get updated for all pages in TLB by
@@ -195,20 +194,55 @@ void __init mem_init(void)
{
BUG_ON(!mem_map);
- max_mapnr = max_low_pfn;
- high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
-
/* clear the zero-page */
memset((void *)empty_zero_page, 0, PAGE_SIZE);
- /* this will put all low memory onto the freelists */
- memblock_free_all();
-
printk("mem_init_done ...........................................\n");
mem_init_done = 1;
return;
}
+static int __init map_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
+{
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ p4d = p4d_offset(pgd_offset_k(va), va);
+ pud = pud_offset(p4d, va);
+ pmd = pmd_offset(pud, va);
+ pte = pte_alloc_kernel(pmd, va);
+
+ if (pte == NULL)
+ return -ENOMEM;
+
+ if (pgprot_val(prot))
+ set_pte_at(&init_mm, va, pte, pfn_pte(pa >> PAGE_SHIFT, prot));
+ else
+ pte_clear(&init_mm, va, pte);
+
+ local_flush_tlb_page(NULL, va);
+ return 0;
+}
+
+/*
+ * __set_fixmap must now support both EARLYCON and TEXT_POKE mappings,
+ * which are used at different stages of kernel execution.
+ */
+void __set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t prot)
+{
+ unsigned long address = __fix_to_virt(idx);
+
+ if (idx >= __end_of_fixed_addresses) {
+ BUG();
+ return;
+ }
+
+ map_page(address, phys, prot);
+}
+
static const pgprot_t protection_map[16] = {
[VM_NONE] = PAGE_NONE,
[VM_READ] = PAGE_READONLY_X,
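For illustration, this is roughly how an early-console path could consume the
__set_fixmap() added above. A sketch only, under the assumption that
FIX_EARLYCON_MEM_BASE is one of this architecture's fixed_addresses slots and
that FIXMAP_PAGE_IO supplies an uncached protection value:

#include <asm/fixmap.h>

/*
 * Sketch: map an early console's registers through a fixmap slot
 * before the page allocator is up.  FIX_EARLYCON_MEM_BASE and
 * FIXMAP_PAGE_IO are assumed definitions; __set_fixmap() installs
 * the PTE via map_page(), which also drops any stale TLB entry.
 */
static void __iomem * __init early_console_map(phys_addr_t paddr)
{
	__set_fixmap(FIX_EARLYCON_MEM_BASE, paddr & PAGE_MASK, FIXMAP_PAGE_IO);
	return (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE)
	       + (paddr & ~PAGE_MASK);
}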
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index 8ec0dafecf25..3b352f97fecb 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -22,89 +22,7 @@
extern int mem_init_done;
-static unsigned int fixmaps_used __initdata;
-
/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
-{
- phys_addr_t p;
- unsigned long v;
- unsigned long offset, last_addr;
- struct vm_struct *area = NULL;
-
- /* Don't allow wraparound or zero size */
- last_addr = addr + size - 1;
- if (!size || last_addr < addr)
- return NULL;
-
- /*
- * Mappings have to be page-aligned
- */
- offset = addr & ~PAGE_MASK;
- p = addr & PAGE_MASK;
- size = PAGE_ALIGN(last_addr + 1) - p;
-
- if (likely(mem_init_done)) {
- area = get_vm_area(size, VM_IOREMAP);
- if (!area)
- return NULL;
- v = (unsigned long)area->addr;
- } else {
- if ((fixmaps_used + (size >> PAGE_SHIFT)) > FIX_N_IOREMAPS)
- return NULL;
- v = fix_to_virt(FIX_IOREMAP_BEGIN + fixmaps_used);
- fixmaps_used += (size >> PAGE_SHIFT);
- }
-
- if (ioremap_page_range(v, v + size, p,
- __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI))) {
- if (likely(mem_init_done))
- vfree(area->addr);
- else
- fixmaps_used -= (size >> PAGE_SHIFT);
- return NULL;
- }
-
- return (void __iomem *)(offset + (char *)v);
-}
-EXPORT_SYMBOL(ioremap);
-
-void iounmap(volatile void __iomem *addr)
-{
- /* If the page is from the fixmap pool then we just clear out
- * the fixmap mapping.
- */
- if (unlikely((unsigned long)addr > FIXADDR_START)) {
- /* This is a bit broken... we don't really know
- * how big the area is so it's difficult to know
- * how many fixed pages to invalidate...
- * just flush tlb and hope for the best...
- * consider this a FIXME
- *
- * Really we should be clearing out one or more page
- * table entries for these virtual addresses so that
- * future references cause a page fault... for now, we
- * rely on two things:
- * i) this code never gets called on known boards
- * ii) invalid accesses to the freed areas aren't made
- */
- flush_tlb_all();
- return;
- }
-
- return vfree((void *)(PAGE_MASK & (unsigned long)addr));
-}
-EXPORT_SYMBOL(iounmap);
-
-/**
* OK, this one's a bit tricky... ioremap can get called before memory is
* initialized (early serial console does this) and will want to alloc a page
* for its mapping. No userspace pages will ever get allocated before memory
@@ -118,12 +36,9 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
pte_t *pte;
if (likely(mem_init_done)) {
- pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
+ pte = __pte_alloc_one_kernel(mm);
} else {
- pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pte)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
+ pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
}
return pte;
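With the custom implementation removed, ioremap()/iounmap() now come from the
generic code in mm/ioremap.c, while pte_alloc_one_kernel() above keeps early
callers working by falling back to memblock until mem_init_done is set. A
hypothetical driver-style use of the resulting API (the device address and
register offset below are made up for illustration):

#include <linux/errno.h>
#include <linux/io.h>

#define EXAMPLE_DEV_BASE 0x90000000	/* hypothetical MMIO base */
#define EXAMPLE_DEV_CTRL 0x04		/* hypothetical register offset */

static int example_dev_reset(void)
{
	void __iomem *regs = ioremap(EXAMPLE_DEV_BASE, 0x100);

	if (!regs)
		return -ENOMEM;

	writel(1, regs + EXAMPLE_DEV_CTRL);	/* uncached MMIO write */
	iounmap(regs);
	return 0;
}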
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c
index e2f2a3c3bb22..3115f2e4f864 100644
--- a/arch/openrisc/mm/tlb.c
+++ b/arch/openrisc/mm/tlb.c
@@ -182,12 +182,3 @@ void destroy_context(struct mm_struct *mm)
flush_tlb_mm(mm);
}
-
-/* called once during VM initialization, from init.c */
-
-void __init tlb_init(void)
-{
- /* Do nothing... */
- /* invalidate the entire TLB */
- /* flush_tlb_all(); */
-}