Diffstat (limited to 'arch/openrisc/mm')
 arch/openrisc/mm/cache.c   | 58 ++++++++++++++++++++++++++++++++++++++++++++++++----------
 arch/openrisc/mm/fault.c   |  4 +++-
 arch/openrisc/mm/init.c    | 54 ++++++++++++++++++++++++++++++++++++++++++++----------
 arch/openrisc/mm/ioremap.c |  9 +++------
 arch/openrisc/mm/tlb.c     |  9 ---------
 5 files changed, 98 insertions(+), 36 deletions(-)
diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c
index eb43b73f3855..f33df46dae4e 100644
--- a/arch/openrisc/mm/cache.c
+++ b/arch/openrisc/mm/cache.c
@@ -14,37 +14,76 @@
 #include <asm/spr_defs.h>
 #include <asm/cache.h>
 #include <asm/cacheflush.h>
+#include <asm/cpuinfo.h>
 #include <asm/tlbflush.h>
 
-static __always_inline void cache_loop(struct page *page, const unsigned int reg)
+/*
+ * Check if the cache component exists.
+ */
+bool cpu_cache_is_present(const unsigned int cache_type)
+{
+	unsigned long upr = mfspr(SPR_UPR);
+	unsigned long mask = SPR_UPR_UP | cache_type;
+
+	return !((upr & mask) ^ mask);
+}
+
+static __always_inline void cache_loop(unsigned long paddr, unsigned long end,
+				       const unsigned short reg, const unsigned int cache_type)
 {
-	unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
-	unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
+	if (!cpu_cache_is_present(cache_type))
+		return;
 
-	while (line < paddr + PAGE_SIZE) {
-		mtspr(reg, line);
-		line += L1_CACHE_BYTES;
+	while (paddr < end) {
+		mtspr(reg, paddr);
+		paddr += L1_CACHE_BYTES;
 	}
 }
 
+static __always_inline void cache_loop_page(struct page *page, const unsigned short reg,
+					    const unsigned int cache_type)
+{
+	unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
+	unsigned long end = paddr + PAGE_SIZE;
+
+	paddr &= ~(L1_CACHE_BYTES - 1);
+
+	cache_loop(paddr, end, reg, cache_type);
+}
+
 void local_dcache_page_flush(struct page *page)
 {
-	cache_loop(page, SPR_DCBFR);
+	cache_loop_page(page, SPR_DCBFR, SPR_UPR_DCP);
 }
 EXPORT_SYMBOL(local_dcache_page_flush);
 
 void local_icache_page_inv(struct page *page)
 {
-	cache_loop(page, SPR_ICBIR);
+	cache_loop_page(page, SPR_ICBIR, SPR_UPR_ICP);
 }
 EXPORT_SYMBOL(local_icache_page_inv);
 
+void local_dcache_range_flush(unsigned long start, unsigned long end)
+{
+	cache_loop(start, end, SPR_DCBFR, SPR_UPR_DCP);
+}
+
+void local_dcache_range_inv(unsigned long start, unsigned long end)
+{
+	cache_loop(start, end, SPR_DCBIR, SPR_UPR_DCP);
+}
+
+void local_icache_range_inv(unsigned long start, unsigned long end)
+{
+	cache_loop(start, end, SPR_ICBIR, SPR_UPR_ICP);
+}
+
 void update_cache(struct vm_area_struct *vma, unsigned long address,
 	pte_t *pte)
 {
 	unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT;
 	struct folio *folio = page_folio(pfn_to_page(pfn));
-	int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
+	int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags.f);
 
 	/*
 	 * Since icaches do not snoop for updated data on OpenRISC, we
@@ -58,4 +97,3 @@ void update_cache(struct vm_area_struct *vma, unsigned long address,
 			sync_icache_dcache(folio_page(folio, nr));
 	}
 }
-
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index a9dcd4381d1a..29e232d78d82 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -18,6 +18,7 @@
 #include <linux/perf_event.h>
 #include <linux/uaccess.h>
 
+#include <asm/bug.h>
 #include <asm/mmu_context.h>
 #include <asm/siginfo.h>
 #include <asm/signal.h>
@@ -30,7 +31,8 @@
  */
 volatile pgd_t *current_pgd[NR_CPUS];
 
-extern void __noreturn die(char *, struct pt_regs *, long);
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
+			      unsigned long vector, int write_acc);
 
 /*
  * This routine handles page faults.  It determines the address,
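
The interesting bit in cpu_cache_is_present() above is the mask test: "!((upr & mask) ^ mask)" is a branch-free way of writing "(upr & mask) == mask", i.e. the unit-present register must report both the UP bit (the register itself is implemented) and the bit for the cache in question. A minimal stand-alone sketch of the same test follows; the constants are illustrative stand-ins for SPR_UPR_UP/SPR_UPR_DCP/SPR_UPR_ICP, since this compiles outside the kernel:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the real SPR_UPR_* bit definitions. */
    #define UPR_UP  0x001  /* UPR is itself present */
    #define UPR_DCP 0x002  /* data cache present */
    #define UPR_ICP 0x004  /* instruction cache present */

    static bool cache_is_present(unsigned long upr, unsigned int cache_type)
    {
            unsigned long mask = UPR_UP | cache_type;

            return !((upr & mask) ^ mask);  /* same as (upr & mask) == mask */
    }

    int main(void)
    {
            unsigned long upr = UPR_UP | UPR_DCP;  /* dcache fitted, icache not */

            printf("dcache present: %d\n", cache_is_present(upr, UPR_DCP)); /* 1 */
            printf("icache present: %d\n", cache_is_present(upr, UPR_ICP)); /* 0 */
            return 0;
    }

The new local_*_range_* helpers then let callers operate on an arbitrary physical range rather than a whole page: cache_loop() walks the range one L1_CACHE_BYTES line at a time and becomes a no-op on cores that lack the cache unit. A hypothetical caller, not part of this diff (the function name and the virt_to_phys() conversion are assumptions for illustration, mirroring the page-based helpers above, which also pass physical addresses):

    /* Hypothetical example: write a buffer back to memory before a
     * device reads it.  The cache-block registers are written with
     * physical addresses, hence the virt_to_phys() conversion. */
    static void example_sync_buf_for_device(void *vaddr, size_t size)
    {
            unsigned long paddr = virt_to_phys(vaddr);

            local_dcache_range_flush(paddr, paddr + size);
    }
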
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index d531ab82be12..9382d9a0ec78 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -35,6 +35,7 @@
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
+#include <asm/cacheflush.h>
 
 int mem_init_done;
 
@@ -123,8 +124,6 @@ static void __init map_ram(void)
 
 void __init paging_init(void)
 {
-	extern void tlb_init(void);
-
 	int i;
 
 	printk(KERN_INFO "Setting up paging and PTEs.\n");
@@ -178,8 +177,8 @@ void __init paging_init(void)
 	barrier();
 
 	/* Invalidate instruction caches after code modification */
-	mtspr(SPR_ICBIR, 0x900);
-	mtspr(SPR_ICBIR, 0xa00);
+	local_icache_block_inv(0x900);
+	local_icache_block_inv(0xa00);
 
 	/* New TLB miss handlers and kernel page tables are now in place.
 	 * Make sure that page flags get updated for all pages in TLB by
@@ -195,20 +194,55 @@ void __init mem_init(void)
 {
 	BUG_ON(!mem_map);
 
-	max_mapnr = max_low_pfn;
-	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
-
 	/* clear the zero-page */
 	memset((void *)empty_zero_page, 0, PAGE_SIZE);
 
-	/* this will put all low memory onto the freelists */
-	memblock_free_all();
-
 	printk("mem_init_done ...........................................\n");
 	mem_init_done = 1;
 	return;
 }
 
+static int __init map_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
+{
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	p4d = p4d_offset(pgd_offset_k(va), va);
+	pud = pud_offset(p4d, va);
+	pmd = pmd_offset(pud, va);
+	pte = pte_alloc_kernel(pmd, va);
+
+	if (pte == NULL)
+		return -ENOMEM;
+
+	if (pgprot_val(prot))
+		set_pte_at(&init_mm, va, pte, pfn_pte(pa >> PAGE_SHIFT, prot));
+	else
+		pte_clear(&init_mm, va, pte);
+
+	local_flush_tlb_page(NULL, va);
+	return 0;
+}
+
+/*
+ * __set_fixmap must now support both EARLYCON and TEXT_POKE mappings,
+ * which are used at different stages of kernel execution.
+ */
+void __set_fixmap(enum fixed_addresses idx,
+		  phys_addr_t phys, pgprot_t prot)
+{
+	unsigned long address = __fix_to_virt(idx);
+
+	if (idx >= __end_of_fixed_addresses) {
+		BUG();
+		return;
+	}
+
+	map_page(address, phys, prot);
+}
+
 static const pgprot_t protection_map[16] = {
 	[VM_NONE] = PAGE_NONE,
 	[VM_READ] = PAGE_READONLY_X,
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index 91c8259d4b7e..3b352f97fecb 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -22,7 +22,7 @@
 
 extern int mem_init_done;
 
-/**
+/*
  * OK, this one's a bit tricky... ioremap can get called before memory is
  * initialized (early serial console does this) and will want to alloc a page
  * for its mapping.  No userspace pages will ever get allocated before memory
@@ -36,12 +36,9 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
 	pte_t *pte;
 
 	if (likely(mem_init_done)) {
-		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
+		pte = __pte_alloc_one_kernel(mm);
 	} else {
-		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-		if (!pte)
-			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-			      __func__, PAGE_SIZE, PAGE_SIZE);
+		pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
 	}
 
 	return pte;
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c
index e2f2a3c3bb22..3115f2e4f864 100644
--- a/arch/openrisc/mm/tlb.c
+++ b/arch/openrisc/mm/tlb.c
@@ -182,12 +182,3 @@ void destroy_context(struct mm_struct *mm)
 
 	flush_tlb_mm(mm);
 }
-
-/* called once during VM initialization, from init.c */
-
-void __init tlb_init(void)
-{
-	/* Do nothing... */
-	/* invalidate the entire TLB */
-	/* flush_tlb_all(); */
-}
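
The map_page()/__set_fixmap() pair added to init.c is what backs the generic fixmap API: a fixmap index is a compile-time slot in a reserved virtual region, __fix_to_virt() turns the index into its fixed virtual address, and map_page() walks the kernel page tables (p4d -> pud -> pmd -> pte) to install the mapping. A hedged sketch of a user of this interface mapping early MMIO through a fixmap slot; set_fixmap_io() and __fix_to_virt() come from asm-generic/fixmap.h and resolve to the new __set_fixmap(), while the slot name FIX_EARLYCON_MEM_BASE and the helper function are assumptions for illustration:

    #include <asm/fixmap.h>

    /* Hypothetical example: map a device register page at its fixed
     * virtual address before the normal allocators are available,
     * as an early console needs to. */
    static void __iomem *example_early_map(phys_addr_t phys)
    {
            set_fixmap_io(FIX_EARLYCON_MEM_BASE, phys & PAGE_MASK);

            return (void __iomem *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
                                    (phys & ~PAGE_MASK));
    }

Because every slot's virtual address is known at compile time, such a mapping works even before mem_init_done is set, which is the same early-boot constraint the ioremap.c hunk handles by falling back to memblock_alloc_or_panic().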
