Diffstat (limited to 'arch/openrisc/mm')
-rw-r--r--  arch/openrisc/mm/Makefile |  2
-rw-r--r--  arch/openrisc/mm/cache.c  | 61
-rw-r--r--  arch/openrisc/mm/fault.c  |  4
-rw-r--r--  arch/openrisc/mm/init.c   |  2
-rw-r--r--  arch/openrisc/mm/tlb.c    | 16
5 files changed, 73 insertions(+), 12 deletions(-)
diff --git a/arch/openrisc/mm/Makefile b/arch/openrisc/mm/Makefile
index 324ba2634529..a31b2a42e966 100644
--- a/arch/openrisc/mm/Makefile
+++ b/arch/openrisc/mm/Makefile
@@ -2,4 +2,4 @@
# Makefile for the linux openrisc-specific parts of the memory manager.
#
-obj-y := fault.o tlb.o init.o ioremap.o
+obj-y := fault.o cache.o tlb.o init.o ioremap.o
diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c
new file mode 100644
index 000000000000..b747bf1fc1b6
--- /dev/null
+++ b/arch/openrisc/mm/cache.c
@@ -0,0 +1,61 @@
+/*
+ * OpenRISC cache.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2015 Jan Henrik Weinstock <jan.weinstock@rwth-aachen.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/spr.h>
+#include <asm/spr_defs.h>
+#include <asm/cache.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+static void cache_loop(struct page *page, const unsigned int reg)
+{
+ unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
+ unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
+
+ while (line < paddr + PAGE_SIZE) {
+ mtspr(reg, line);
+ line += L1_CACHE_BYTES;
+ }
+}
+
+void local_dcache_page_flush(struct page *page)
+{
+ cache_loop(page, SPR_DCBFR);
+}
+EXPORT_SYMBOL(local_dcache_page_flush);
+
+void local_icache_page_inv(struct page *page)
+{
+ cache_loop(page, SPR_ICBIR);
+}
+EXPORT_SYMBOL(local_icache_page_inv);
+
+void update_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t *pte)
+{
+ unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT;
+ struct page *page = pfn_to_page(pfn);
+ int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
+
+ /*
+ * Since icaches do not snoop for updated data on OpenRISC, we
+ * must write back and invalidate any dirty pages manually. We
+ * can skip data pages, since they will not end up in icaches.
+ */
+ if ((vma->vm_flags & VM_EXEC) && dirty)
+ sync_icache_dcache(page);
+}
+
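update_cache() above relies on sync_icache_dcache() to bring the instruction cache back in step with memory after a dirty executable page is mapped. That helper is not part of this diff (it sits on the header side of the port); the following is a minimal sketch of how it can be composed from the two exported helpers, purely as an illustration and not necessarily matching the actual OpenRISC header:

#include <linux/mm_types.h>
#include <asm/cacheflush.h>

/* Hypothetical composition: write dirty data back from the dcache,
 * then drop the now-stale lines from the icache. */
static inline void sync_icache_dcache(struct page *page)
{
	local_dcache_page_flush(page);	/* write back dirty dcache lines */
	local_icache_page_inv(page);	/* invalidate stale icache lines */
}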
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index e310ab499385..d0021dfae20a 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -33,7 +33,7 @@ unsigned long pte_errors; /* updated by do_page_fault() */
/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
* - also look into include/asm-or32/mmu_context.h
*/
-volatile pgd_t *current_pgd;
+volatile pgd_t *current_pgd[NR_CPUS];
extern void die(char *, struct pt_regs *, long);
@@ -319,7 +319,7 @@ vmalloc_fault:
phx_mmu("vmalloc_fault");
*/
- pgd = (pgd_t *)current_pgd + offset;
+ pgd = (pgd_t *)current_pgd[smp_processor_id()] + offset;
pgd_k = init_mm.pgd + offset;
/* Since we're two-level, we don't need to do both
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index f67d82b9d22f..6972d5d6f23f 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -147,7 +147,7 @@ void __init paging_init(void)
* (even if it is most probably not used until the next
* switch_mm)
*/
- current_pgd = init_mm.pgd;
+ current_pgd[smp_processor_id()] = init_mm.pgd;
end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
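Both fault.c and init.c now index current_pgd by smp_processor_id(), turning it into a per-CPU array. The matching extern declaration lives outside arch/openrisc/mm and is therefore not shown in this diff; a sketch of what the header side needs (the header path is an assumption):

/* e.g. in arch/openrisc/include/asm/pgtable.h (assumed location) */
extern volatile pgd_t *current_pgd[NR_CPUS];	/* defined in arch/openrisc/mm/fault.c */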
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c
index 683bd4d31c7c..6c253a2e86bc 100644
--- a/arch/openrisc/mm/tlb.c
+++ b/arch/openrisc/mm/tlb.c
@@ -49,7 +49,7 @@
*
*/
-void flush_tlb_all(void)
+void local_flush_tlb_all(void)
{
int i;
unsigned long num_tlb_sets;
@@ -86,7 +86,7 @@ void flush_tlb_all(void)
#define flush_itlb_page_no_eir(addr) \
mtspr_off(SPR_ITLBMR_BASE(0), ITLB_OFFSET(addr), 0);
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
if (have_dtlbeir)
flush_dtlb_page_eir(addr);
@@ -99,8 +99,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
flush_itlb_page_no_eir(addr);
}
-void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
{
int addr;
bool dtlbeir;
@@ -129,13 +129,13 @@ void flush_tlb_range(struct vm_area_struct *vma,
* This should be changed to loop over mm and call flush_tlb_range.
*/
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
{
/* Was seeing bugs with the mm struct passed to us. Scrapped most of
this function. */
/* Several architectures do this */
- flush_tlb_all();
+ local_flush_tlb_all();
}
/* called in schedule() just before actually doing the switch_to */
@@ -149,14 +149,14 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* might be invalid at points where we still need to dereference
* the pgd.
*/
- current_pgd = next->pgd;
+ current_pgd[smp_processor_id()] = next->pgd;
/* We don't have context support implemented, so flush all
* entries belonging to previous map
*/
if (prev != next)
- flush_tlb_mm(prev);
+ local_flush_tlb_mm(prev);
}
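Renaming the TLB flush entry points to local_flush_tlb_* frees the generic names for SMP wrappers that broadcast the flush to every CPU. This commit only provides the per-CPU halves; a hedged sketch of how the global variants are commonly built on on_each_cpu(), not part of this diff:

#include <linux/smp.h>
#include <asm/tlbflush.h>

/* IPI callback: runs on each CPU with interrupts disabled. */
static void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	/* Run the local flush on every online CPU and wait for completion. */
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}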