author		Linus Torvalds <torvalds@linux-foundation.org>	2018-10-29 08:25:00 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-29 08:25:00 -0700
commit		ac435075892e3e651c667b4a9f2267cf3ef1d5a2 (patch)
tree		ff45694dd545392cc6553af1219b2bd840974307 /arch/csky/mm
parent		9f51ae62c84a23ade0ba86457d30a30c9db0c50f (diff)
parent		2347e7e1aea410865e3c3f92014b07ff7d4c5b02 (diff)
Merge tag 'csky-for-linus-4.20' of https://github.com/c-sky/csky-linux
Pull C-SKY architecture port from Guo Ren:
"This contains the Linux port for C-SKY(csky) based on linux-4.19
Release, which has been through 10 rounds of review on mailing list.
More information:
http://en.c-sky.com
The development repo:
https://github.com/c-sky/csky-linux
ABI Documentation:
https://github.com/c-sky/csky-doc
Here is the pre-built cross compiler for fast test from our CI:
https://gitlab.com/c-sky/buildroot/-/jobs/101608095/artifacts/file/output/images/csky_toolchain_qemu_csky_ck807f_4.18_glibc_defconfig_482b221e52908be1c9b2ccb444255e1562bb7025.tar.xz
We use buildroot as our CI-test environment; "LTP, Lmbench ..." are
run for every commit. See here for more details:
https://gitlab.com/c-sky/buildroot/pipelines
We'll continuously improve the csky subsystem in the future"
Arnd acks, and adds the following notes:
"I did a thorough review of the ABI, which as usual mainly consists of
spotting any files that don't use the asm-generic ABI itself, and
having it changed to it matches exactly what we do on other new
architectures.
I also looked at every other patch and commented on maybe half of them
where I saw something that did not quite seem right. Others have
reviewed specific patches in greater depth. I'm sure that one could
find more of the minor details, but as long as they are not ABI
relevant, they can be fixed later.
The only patch that is part of the ABI and that nobody reviewed is the
signal handling. This is one of the areas I never worked on in much
detail. I did not see anything wrong with it, but I also don't know
what the problems with the other architectures are here, and we seem
to be hitting issues occasionally, and we never managed to generalize
this enough for new architectures to have a trivial implementation.
I was originally hoping that we could have the 64-bit time_t
interfaces ready in time to completely drop the 32-bit ones, but that
did not happen. We might still remove them in the next merge window
depending on whether the libc upstream people prefer to keep them or
not.
One more general comment: I think this may well be the last new CPU
architecture we ever add to the kernel. Both nds32 and c-sky are made
by companies that also work on risc-v, and generally speaking risc-v
seems to be killing off any of the minor licensable instruction set
projects, just like ARM has mostly killed off the custom
vendor-specific instruction sets already.
If we add another architecture in the future, it may instead be
something like the LLVM bitcode or WebAssembly, who knows?"
To which Geert Uytterhoeven pipes in about another architecture still in
the pipeline: Kalray MPPA.
* tag 'csky-for-linus-4.20' of https://github.com/c-sky/csky-linux: (24 commits)
dt-bindings: interrupt-controller: C-SKY APB intc
irqchip: add C-SKY APB bus interrupt controller
dt-bindings: interrupt-controller: C-SKY SMP intc
irqchip: add C-SKY SMP interrupt controller
MAINTAINERS: Add csky
dt-bindings: Add vendor prefix for csky
dt-bindings: csky CPU Bindings
csky: Misc headers
csky: SMP support
csky: Debug and Ptrace GDB
csky: User access
csky: Library functions
csky: ELF and module probe
csky: Atomic operations
csky: IRQ handling
csky: VDSO and rt_sigreturn
csky: Process management and Signal
csky: MMU and page table management
csky: Cache and TLB routines
csky: System Call
...
Diffstat (limited to 'arch/csky/mm')
-rw-r--r--	arch/csky/mm/Makefile		 13
-rw-r--r--	arch/csky/mm/cachev1.c		126
-rw-r--r--	arch/csky/mm/cachev2.c		 79
-rw-r--r--	arch/csky/mm/dma-mapping.c	254
-rw-r--r--	arch/csky/mm/fault.c		212
-rw-r--r--	arch/csky/mm/highmem.c		198
-rw-r--r--	arch/csky/mm/init.c		122
-rw-r--r--	arch/csky/mm/ioremap.c		 48
-rw-r--r--	arch/csky/mm/syscache.c		 32
-rw-r--r--	arch/csky/mm/tlb.c		219
10 files changed, 1303 insertions(+), 0 deletions(-)
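
As context for the dma-mapping.c hunk below: on a non-coherent port like
csky, the streaming DMA API is what ends up calling the
arch_sync_dma_for_device()/arch_sync_dma_for_cpu() hooks this patch adds.
A hypothetical driver-side sketch, not part of the patch (the device,
buffer, and function names are invented for illustration; the
dma_map_single()/dma_unmap_single() calls themselves are the standard
DMA API):

	#include <linux/dma-mapping.h>

	/*
	 * Map a buffer for a device read-out. With DMA_TO_DEVICE this
	 * should reach arch_sync_dma_for_device() below, i.e. a D-cache
	 * writeback (dma_wb_range) over the buffer on csky.
	 */
	static int example_start_tx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* ... point the device at "handle", wait for completion ... */

		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}
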
diff --git a/arch/csky/mm/Makefile b/arch/csky/mm/Makefile
new file mode 100644
index 000000000000..c870eb36efbc
--- /dev/null
+++ b/arch/csky/mm/Makefile
@@ -0,0 +1,13 @@
+ifeq ($(CONFIG_CPU_HAS_CACHEV2),y)
+obj-y += cachev2.o
+else
+obj-y += cachev1.o
+endif
+
+obj-y += dma-mapping.o
+obj-y += fault.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
+obj-y += init.o
+obj-y += ioremap.o
+obj-y += syscache.o
+obj-y += tlb.o
diff --git a/arch/csky/mm/cachev1.c b/arch/csky/mm/cachev1.c
new file mode 100644
index 000000000000..b8a75cce0b8c
--- /dev/null
+++ b/arch/csky/mm/cachev1.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/spinlock.h>
+#include <asm/cache.h>
+#include <abi/reg_ops.h>
+
+/* for L1-cache */
+#define INS_CACHE		(1 << 0)
+#define DATA_CACHE		(1 << 1)
+#define CACHE_INV		(1 << 4)
+#define CACHE_CLR		(1 << 5)
+#define CACHE_OMS		(1 << 6)
+#define CACHE_ITS		(1 << 7)
+#define CACHE_LICF		(1 << 31)
+
+/* for L2-cache */
+#define CR22_LEVEL_SHIFT	(1)
+#define CR22_SET_SHIFT		(7)
+#define CR22_WAY_SHIFT		(30)
+#define CR22_WAY_SHIFT_L2	(29)
+
+static DEFINE_SPINLOCK(cache_lock);
+
+static inline void cache_op_line(unsigned long i, unsigned int val)
+{
+	mtcr("cr22", i);
+	mtcr("cr17", val);
+}
+
+#define CCR2_L2E (1 << 3)
+static void cache_op_all(unsigned int value, unsigned int l2)
+{
+	mtcr("cr17", value | CACHE_CLR);
+	mb();
+
+	if (l2 && (mfcr_ccr2() & CCR2_L2E)) {
+		mtcr("cr24", value | CACHE_CLR);
+		mb();
+	}
+}
+
+static void cache_op_range(
+	unsigned int start,
+	unsigned int end,
+	unsigned int value,
+	unsigned int l2)
+{
+	unsigned long i, flags;
+	unsigned int val = value | CACHE_CLR | CACHE_OMS;
+	bool l2_sync;
+
+	if (unlikely((end - start) >= PAGE_SIZE) ||
+	    unlikely(start < PAGE_OFFSET) ||
+	    unlikely(start >= PAGE_OFFSET + LOWMEM_LIMIT)) {
+		cache_op_all(value, l2);
+		return;
+	}
+
+	if ((mfcr_ccr2() & CCR2_L2E) && l2)
+		l2_sync = 1;
+	else
+		l2_sync = 0;
+
+	spin_lock_irqsave(&cache_lock, flags);
+
+	i = start & ~(L1_CACHE_BYTES - 1);
+	for (; i < end; i += L1_CACHE_BYTES) {
+		cache_op_line(i, val);
+		if (l2_sync) {
+			mb();
+			mtcr("cr24", val);
+		}
+	}
+	spin_unlock_irqrestore(&cache_lock, flags);
+
+	mb();
+}
+
+void dcache_wb_line(unsigned long start)
+{
+	asm volatile("idly4\n":::"memory");
+	cache_op_line(start, DATA_CACHE|CACHE_CLR);
+	mb();
+}
+
+void icache_inv_range(unsigned long start, unsigned long end)
+{
+	cache_op_range(start, end, INS_CACHE|CACHE_INV, 0);
+}
+
+void icache_inv_all(void)
+{
+	cache_op_all(INS_CACHE|CACHE_INV, 0);
+}
+
+void dcache_wb_range(unsigned long start, unsigned long end)
+{
+	cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0);
+}
+
+void dcache_wbinv_all(void)
+{
+	cache_op_all(DATA_CACHE|CACHE_CLR|CACHE_INV, 0);
+}
+
+void cache_wbinv_range(unsigned long start, unsigned long end)
+{
+	cache_op_range(start, end, INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0);
+}
+EXPORT_SYMBOL(cache_wbinv_range);
+
+void cache_wbinv_all(void)
+{
+	cache_op_all(INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0);
+}
+
+void dma_wbinv_range(unsigned long start, unsigned long end)
+{
+	cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
+}
+
+void dma_wb_range(unsigned long start, unsigned long end)
+{
+	cache_op_range(start, end, DATA_CACHE|CACHE_INV, 1);
+}
diff --git a/arch/csky/mm/cachev2.c b/arch/csky/mm/cachev2.c
new file mode 100644
index 000000000000..baaf05d69f44
--- /dev/null
+++ b/arch/csky/mm/cachev2.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <asm/cache.h>
+#include <asm/barrier.h>
+
+inline void dcache_wb_line(unsigned long start)
+{
+	asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
+	sync_is();
+}
+
+void icache_inv_range(unsigned long start, unsigned long end)
+{
+	unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+	for (; i < end; i += L1_CACHE_BYTES)
+		asm volatile("icache.iva %0\n"::"r"(i):"memory");
+	sync_is();
+}
+
+void icache_inv_all(void)
+{
+	asm volatile("icache.ialls\n":::"memory");
+	sync_is();
+}
+
+void dcache_wb_range(unsigned long start, unsigned long end)
+{
+	unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+	for (; i < end; i += L1_CACHE_BYTES)
+		asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
+	sync_is();
+}
+
+void dcache_inv_range(unsigned long start, unsigned long end)
+{
+	unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+	for (; i < end; i += L1_CACHE_BYTES)
+		asm volatile("dcache.civa %0\n"::"r"(i):"memory");
+	sync_is();
+}
+
+void cache_wbinv_range(unsigned long start, unsigned long end)
+{
+	unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+	for (; i < end; i += L1_CACHE_BYTES)
+		asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
+	sync_is();
+
+	i = start & ~(L1_CACHE_BYTES - 1);
+	for (; i < end; i += L1_CACHE_BYTES)
+		asm volatile("icache.iva %0\n"::"r"(i):"memory");
+	sync_is();
+}
+EXPORT_SYMBOL(cache_wbinv_range);
+
+void dma_wbinv_range(unsigned long start, unsigned long end)
+{
+	unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+	for (; i < end; i += L1_CACHE_BYTES)
+		asm volatile("dcache.civa %0\n"::"r"(i):"memory");
+	sync_is();
+}
+
+void dma_wb_range(unsigned long start, unsigned long end)
+{
+	unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+	for (; i < end; i += L1_CACHE_BYTES)
+		asm volatile("dcache.civa %0\n"::"r"(i):"memory");
+	sync_is();
+}
diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c
new file mode 100644
index 000000000000..85437b21e045
--- /dev/null
+++ b/arch/csky/mm/dma-mapping.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/genalloc.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <asm/cache.h>
+
+static struct gen_pool *atomic_pool;
+static size_t atomic_pool_size __initdata = SZ_256K;
+
+static int __init early_coherent_pool(char *p)
+{
+	atomic_pool_size = memparse(p, &p);
+	return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
+static int __init atomic_pool_init(void)
+{
+	struct page *page;
+	size_t size = atomic_pool_size;
+	void *ptr;
+	int ret;
+
+	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
+	if (!atomic_pool)
+		BUG();
+
+	page = alloc_pages(GFP_KERNEL | GFP_DMA, get_order(size));
+	if (!page)
+		BUG();
+
+	ptr = dma_common_contiguous_remap(page, size, VM_ALLOC,
+					  pgprot_noncached(PAGE_KERNEL),
+					  __builtin_return_address(0));
+	if (!ptr)
+		BUG();
+
+	ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
+				page_to_phys(page), atomic_pool_size, -1);
+	if (ret)
+		BUG();
+
+	gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);
+
+	pr_info("DMA: preallocated %zu KiB pool for atomic coherent pool\n",
+		atomic_pool_size / 1024);
+
+	pr_info("DMA: vaddr: 0x%x phy: 0x%lx,\n", (unsigned int)ptr,
+		page_to_phys(page));
+
+	return 0;
+}
+postcore_initcall(atomic_pool_init);
+
+static void *csky_dma_alloc_atomic(struct device *dev, size_t size,
+				   dma_addr_t *dma_handle)
+{
+	unsigned long addr;
+
+	addr = gen_pool_alloc(atomic_pool, size);
+	if (addr)
+		*dma_handle = gen_pool_virt_to_phys(atomic_pool, addr);
+
+	return (void *)addr;
+}
+
+static void csky_dma_free_atomic(struct device *dev, size_t size, void *vaddr,
+				 dma_addr_t dma_handle, unsigned long attrs)
+{
+	gen_pool_free(atomic_pool, (unsigned long)vaddr, size);
+}
+
+static void __dma_clear_buffer(struct page *page, size_t size)
+{
+	if (PageHighMem(page)) {
+		unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+		do {
+			void *ptr = kmap_atomic(page);
+			size_t _size = (size < PAGE_SIZE) ? size : PAGE_SIZE;
+
+			memset(ptr, 0, _size);
+			dma_wbinv_range((unsigned long)ptr,
+					(unsigned long)ptr + _size);
+
+			kunmap_atomic(ptr);
+
+			page++;
+			size -= PAGE_SIZE;
+			count--;
+		} while (count);
+	} else {
+		void *ptr = page_address(page);
+
+		memset(ptr, 0, size);
+		dma_wbinv_range((unsigned long)ptr, (unsigned long)ptr + size);
+	}
+}
+
+static void *csky_dma_alloc_nonatomic(struct device *dev, size_t size,
+				      dma_addr_t *dma_handle, gfp_t gfp,
+				      unsigned long attrs)
+{
+	void *vaddr;
+	struct page *page;
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	if (DMA_ATTR_NON_CONSISTENT & attrs) {
+		pr_err("csky %s can't support DMA_ATTR_NON_CONSISTENT.\n", __func__);
+		return NULL;
+	}
+
+	if (IS_ENABLED(CONFIG_DMA_CMA))
+		page = dma_alloc_from_contiguous(dev, count, get_order(size),
+						 gfp);
+	else
+		page = alloc_pages(gfp, get_order(size));
+
+	if (!page) {
+		pr_err("csky %s no more free pages.\n", __func__);
+		return NULL;
+	}
+
+	*dma_handle = page_to_phys(page);
+
+	__dma_clear_buffer(page, size);
+
+	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
+		return page;
+
+	vaddr = dma_common_contiguous_remap(page, PAGE_ALIGN(size), VM_USERMAP,
+		pgprot_noncached(PAGE_KERNEL), __builtin_return_address(0));
+	if (!vaddr)
+		BUG();
+
+	return vaddr;
+}
+
+static void csky_dma_free_nonatomic(
+	struct device *dev,
+	size_t size,
+	void *vaddr,
+	dma_addr_t dma_handle,
+	unsigned long attrs
+	)
+{
+	struct page *page = phys_to_page(dma_handle);
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	if ((unsigned int)vaddr >= VMALLOC_START)
+		dma_common_free_remap(vaddr, size, VM_USERMAP);
+
+	if (IS_ENABLED(CONFIG_DMA_CMA))
+		dma_release_from_contiguous(dev, page, count);
+	else
+		__free_pages(page, get_order(size));
+}
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		     gfp_t gfp, unsigned long attrs)
+{
+	if (gfpflags_allow_blocking(gfp))
+		return csky_dma_alloc_nonatomic(dev, size, dma_handle, gfp,
+						attrs);
+	else
+		return csky_dma_alloc_atomic(dev, size, dma_handle);
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+		   dma_addr_t dma_handle, unsigned long attrs)
+{
+	if (!addr_in_gen_pool(atomic_pool, (unsigned int) vaddr, size))
+		csky_dma_free_nonatomic(dev, size, vaddr, dma_handle, attrs);
+	else
+		csky_dma_free_atomic(dev, size, vaddr, dma_handle, attrs);
+}
+
+static inline void cache_op(phys_addr_t paddr, size_t size,
+			    void (*fn)(unsigned long start, unsigned long end))
+{
+	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
+	unsigned int offset = paddr & ~PAGE_MASK;
+	size_t left = size;
+	unsigned long start;
+
+	do {
+		size_t len = left;
+
+		if (PageHighMem(page)) {
+			void *addr;
+
+			if (offset + len > PAGE_SIZE) {
+				if (offset >= PAGE_SIZE) {
+					page += offset >> PAGE_SHIFT;
+					offset &= ~PAGE_MASK;
+				}
+				len = PAGE_SIZE - offset;
+			}
+
+			addr = kmap_atomic(page);
+			start = (unsigned long)(addr + offset);
+			fn(start, start + len);
+			kunmap_atomic(addr);
+		} else {
+			start = (unsigned long)phys_to_virt(paddr);
+			fn(start, start + size);
+		}
+		offset = 0;
+		page++;
+		left -= len;
+	} while (left);
+}
+
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+			      size_t size, enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		cache_op(paddr, size, dma_wb_range);
+		break;
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		cache_op(paddr, size, dma_wbinv_range);
+		break;
+	default:
+		BUG();
+	}
+}
+
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+			   size_t size, enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		cache_op(paddr, size, dma_wb_range);
+		break;
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		cache_op(paddr, size, dma_wbinv_range);
+		break;
+	default:
+		BUG();
+	}
+}
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
new file mode 100644
index 000000000000..7df57f90b52c
--- /dev/null
+++ b/arch/csky/mm/fault.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/signal.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/version.h>
+#include <linux/vt_kern.h>
+#include <linux/kernel.h>
+#include <linux/extable.h>
+#include <linux/uaccess.h>
+
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/traps.h>
+#include <asm/page.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+	const struct exception_table_entry *fixup;
+
+	fixup = search_exception_tables(instruction_pointer(regs));
+	if (fixup) {
+		regs->pc = fixup->nextinsn;
+
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
+			      unsigned long mmu_meh)
+{
+	struct vm_area_struct *vma = NULL;
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->mm;
+	int si_code;
+	int fault;
+	unsigned long address = mmu_meh & PAGE_MASK;
+
+	si_code = SEGV_MAPERR;
+
+#ifndef CONFIG_CPU_HAS_TLBI
+	/*
+	 * We fault-in kernel-space virtual memory on-demand. The
+	 * 'reference' page table is init_mm.pgd.
+	 *
+	 * NOTE! We MUST NOT take any locks for this case. We may
+	 * be in an interrupt or a critical region, and should
+	 * only copy the information from the master page table,
+	 * nothing more.
+	 */
+	if (unlikely(address >= VMALLOC_START) &&
+	    unlikely(address <= VMALLOC_END)) {
+		/*
+		 * Synchronize this task's top level page-table
+		 * with the 'reference' page table.
+		 *
+		 * Do _not_ use "tsk" here. We might be inside
+		 * an interrupt in the middle of a task switch..
+		 */
+		int offset = __pgd_offset(address);
+		pgd_t *pgd, *pgd_k;
+		pud_t *pud, *pud_k;
+		pmd_t *pmd, *pmd_k;
+		pte_t *pte_k;
+
+		unsigned long pgd_base;
+
+		pgd_base = tlb_get_pgd();
+		pgd = (pgd_t *)pgd_base + offset;
+		pgd_k = init_mm.pgd + offset;
+
+		if (!pgd_present(*pgd_k))
+			goto no_context;
+		set_pgd(pgd, *pgd_k);
+
+		pud = (pud_t *)pgd;
+		pud_k = (pud_t *)pgd_k;
+		if (!pud_present(*pud_k))
+			goto no_context;
+
+		pmd = pmd_offset(pud, address);
+		pmd_k = pmd_offset(pud_k, address);
+		if (!pmd_present(*pmd_k))
+			goto no_context;
+		set_pmd(pmd, *pmd_k);
+
+		pte_k = pte_offset_kernel(pmd_k, address);
+		if (!pte_present(*pte_k))
+			goto no_context;
+		return;
+	}
+#endif
+	/*
+	 * If we're in an interrupt or have no user
+	 * context, we must not take the fault..
+	 */
+	if (in_atomic() || !mm)
+		goto bad_area_nosemaphore;
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, address);
+	if (!vma)
+		goto bad_area;
+	if (vma->vm_start <= address)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+	if (expand_stack(vma, address))
+		goto bad_area;
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it..
+	 */
+good_area:
+	si_code = SEGV_ACCERR;
+
+	if (write) {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+	} else {
+		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+			goto bad_area;
+	}
+
+	/*
+	 * If for any reason at all we couldn't handle the fault,
+	 * make sure we exit gracefully rather than endlessly redo
+	 * the fault.
+	 */
+	fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
+		BUG();
+	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
+
+	up_read(&mm->mmap_sem);
+	return;
+
+	/*
+	 * Something tried to access memory that isn't in our memory map..
+	 * Fix it, but check if it's kernel or user first..
+	 */
+bad_area:
+	up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+	/* User mode accesses just cause a SIGSEGV */
+	if (user_mode(regs)) {
+		tsk->thread.address = address;
+		tsk->thread.error_code = write;
+		force_sig_fault(SIGSEGV, si_code, (void __user *)address, current);
+		return;
+	}
+
+no_context:
+	/* Are we prepared to handle this kernel fault? */
+	if (fixup_exception(regs))
+		return;
+
+	/*
+	 * Oops. The kernel tried to access some bad page. We'll have to
+	 * terminate things with extreme prejudice.
+	 */
+	bust_spinlocks(1);
+	pr_alert("Unable to %s at vaddr: %08lx, epc: %08lx\n",
+		 __func__, address, regs->pc);
+	die_if_kernel("Oops", regs, write);
+
+out_of_memory:
+	/*
+	 * We ran out of memory, call the OOM killer, and return the userspace
+	 * (which will retry the fault, or kill us if we got oom-killed).
+	 */
+	pagefault_out_of_memory();
+	return;
+
+do_sigbus:
+	up_read(&mm->mmap_sem);
+
+	/* Kernel mode? Handle exceptions or die */
+	if (!user_mode(regs))
+		goto no_context;
+
+	tsk->thread.address = address;
+	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, current);
+}
diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c
new file mode 100644
index 000000000000..e168ac087ccb
--- /dev/null
+++ b/arch/csky/mm/highmem.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/smp.h>
+#include <linux/bootmem.h>
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+static pte_t *kmap_pte;
+
+unsigned long highstart_pfn, highend_pfn;
+
+void *kmap(struct page *page)
+{
+	void *addr;
+
+	might_sleep();
+	if (!PageHighMem(page))
+		return page_address(page);
+	addr = kmap_high(page);
+	flush_tlb_one((unsigned long)addr);
+
+	return addr;
+}
+EXPORT_SYMBOL(kmap);
+
+void kunmap(struct page *page)
+{
+	BUG_ON(in_interrupt());
+	if (!PageHighMem(page))
+		return;
+	kunmap_high(page);
+}
+EXPORT_SYMBOL(kunmap);
+
+void *kmap_atomic(struct page *page)
+{
+	unsigned long vaddr;
+	int idx, type;
+
+	preempt_disable();
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+
+	type = kmap_atomic_idx_push();
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	BUG_ON(!pte_none(*(kmap_pte - idx)));
+#endif
+	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
+	flush_tlb_one((unsigned long)vaddr);
+
+	return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+	int idx;
+
+	if (vaddr < FIXADDR_START)
+		goto out;
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+	idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx();
+
+	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+	pte_clear(&init_mm, vaddr, kmap_pte - idx);
+	flush_tlb_one(vaddr);
+#else
+	(void) idx; /* to kill a warning */
+#endif
+	kmap_atomic_idx_pop();
+out:
+	pagefault_enable();
+	preempt_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
+
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn)
+{
+	unsigned long vaddr;
+	int idx, type;
+
+	pagefault_disable();
+
+	type = kmap_atomic_idx_push();
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
+	flush_tlb_one(vaddr);
+
+	return (void *) vaddr;
+}
+
+struct page *kmap_atomic_to_page(void *ptr)
+{
+	unsigned long idx, vaddr = (unsigned long)ptr;
+	pte_t *pte;
+
+	if (vaddr < FIXADDR_START)
+		return virt_to_page(ptr);
+
+	idx = virt_to_fix(vaddr);
+	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
+	return pte_page(*pte);
+}
+
+static void __init fixrange_init(unsigned long start, unsigned long end,
+				 pgd_t *pgd_base)
+{
+#ifdef CONFIG_HIGHMEM
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	int i, j, k;
+	unsigned long vaddr;
+
+	vaddr = start;
+	i = __pgd_offset(vaddr);
+	j = __pud_offset(vaddr);
+	k = __pmd_offset(vaddr);
+	pgd = pgd_base + i;
+
+	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+		pud = (pud_t *)pgd;
+		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+			pmd = (pmd_t *)pud;
+			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+				if (pmd_none(*pmd)) {
+					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+					set_pmd(pmd, __pmd(__pa(pte)));
+					BUG_ON(pte != pte_offset_kernel(pmd, 0));
+				}
+				vaddr += PMD_SIZE;
+			}
+			k = 0;
+		}
+		j = 0;
+	}
+#endif
+}
+
+void __init fixaddr_kmap_pages_init(void)
+{
+	unsigned long vaddr;
+	pgd_t *pgd_base;
+#ifdef CONFIG_HIGHMEM
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pud_t *pud;
+	pte_t *pte;
+#endif
+	pgd_base = swapper_pg_dir;
+
+	/*
+	 * Fixed mappings:
+	 */
+	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+	fixrange_init(vaddr, 0, pgd_base);
+
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * Permanent kmaps:
+	 */
+	vaddr = PKMAP_BASE;
+	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+
+	pgd = swapper_pg_dir + __pgd_offset(vaddr);
+	pud = (pud_t *)pgd;
+	pmd = pmd_offset(pud, vaddr);
+	pte = pte_offset_kernel(pmd, vaddr);
+	pkmap_page_table = pte;
+#endif
+}
+
+void __init kmap_init(void)
+{
+	unsigned long vaddr;
+
+	fixaddr_kmap_pages_init();
+
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
+
+	kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
+}
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
new file mode 100644
index 000000000000..ce2711e050ad
--- /dev/null
+++ b/arch/csky/mm/init.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/bug.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/pagemap.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/swap.h>
+#include <linux/proc_fs.h>
+#include <linux/pfn.h>
+
+#include <asm/setup.h>
+#include <asm/cachectl.h>
+#include <asm/dma.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/tlb.h>
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+						__page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
+
+void __init mem_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+	unsigned long tmp;
+
+	max_mapnr = highend_pfn;
+#else
+	max_mapnr = max_low_pfn;
+#endif
+	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+
+	free_all_bootmem();
+
+#ifdef CONFIG_HIGHMEM
+	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
+		struct page *page = pfn_to_page(tmp);
+
+		/* FIXME not sure about */
+		if (!memblock_is_reserved(tmp << PAGE_SHIFT))
+			free_highmem_page(page);
+	}
+#endif
+	mem_init_print_info(NULL);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+	if (start < end)
+		pr_info("Freeing initrd memory: %ldk freed\n",
+			(end - start) >> 10);
+
+	for (; start < end; start += PAGE_SIZE) {
+		ClearPageReserved(virt_to_page(start));
+		init_page_count(virt_to_page(start));
+		free_page(start);
+		totalram_pages++;
+	}
+}
+#endif
+
+extern char __init_begin[], __init_end[];
+
+void free_initmem(void)
+{
+	unsigned long addr;
+
+	addr = (unsigned long) &__init_begin;
+
+	while (addr < (unsigned long) &__init_end) {
+		ClearPageReserved(virt_to_page(addr));
+		init_page_count(virt_to_page(addr));
+		free_page(addr);
+		totalram_pages++;
+		addr += PAGE_SIZE;
+	}
+
+	pr_info("Freeing unused kernel memory: %dk freed\n",
+		((unsigned int)&__init_end - (unsigned int)&__init_begin) >> 10);
+}
+
+void pgd_init(unsigned long *p)
+{
+	int i;
+
+	for (i = 0; i < PTRS_PER_PGD; i++)
+		p[i] = __pa(invalid_pte_table);
+}
+
+void __init pre_mmu_init(void)
+{
+	/*
+	 * Setup page-table and enable TLB-hardrefill
+	 */
+	flush_tlb_all();
+	pgd_init((unsigned long *)swapper_pg_dir);
+	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
+	TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);
+
+	asid_cache(smp_processor_id()) = ASID_FIRST_VERSION;
+
+	/* Setup page mask to 4k */
+	write_mmu_pagemask(0);
+}
diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c
new file mode 100644
index 000000000000..7ad3ff103f4a
--- /dev/null
+++ b/arch/csky/mm/ioremap.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+
+#include <asm/pgtable.h>
+
+void __iomem *ioremap(phys_addr_t addr, size_t size)
+{
+	phys_addr_t last_addr;
+	unsigned long offset, vaddr;
+	struct vm_struct *area;
+	pgprot_t prot;
+
+	last_addr = addr + size - 1;
+	if (!size || last_addr < addr)
+		return NULL;
+
+	offset = addr & (~PAGE_MASK);
+	addr &= PAGE_MASK;
+	size = PAGE_ALIGN(size + offset);
+
+	area = get_vm_area_caller(size, VM_ALLOC, __builtin_return_address(0));
+	if (!area)
+		return NULL;
+
+	vaddr = (unsigned long)area->addr;
+
+	prot = __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE |
+			_PAGE_GLOBAL | _CACHE_UNCACHED);
+
+	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
+		free_vm_area(area);
+		return NULL;
+	}
+
+	return (void __iomem *)(vaddr + offset);
+}
+EXPORT_SYMBOL(ioremap);
+
+void iounmap(void __iomem *addr)
+{
+	vunmap((void *)((unsigned long)addr & PAGE_MASK));
+}
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/csky/mm/syscache.c b/arch/csky/mm/syscache.c
new file mode 100644
index 000000000000..c4645e4e97f4
--- /dev/null
+++ b/arch/csky/mm/syscache.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/syscalls.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/cachectl.h>
+
+SYSCALL_DEFINE3(cacheflush,
+		void __user *, addr,
+		unsigned long, bytes,
+		int, cache)
+{
+	switch (cache) {
+	case ICACHE:
+		icache_inv_range((unsigned long)addr,
+				 (unsigned long)addr + bytes);
+		break;
+	case DCACHE:
+		dcache_wb_range((unsigned long)addr,
+				(unsigned long)addr + bytes);
+		break;
+	case BCACHE:
+		cache_wbinv_range((unsigned long)addr,
+				  (unsigned long)addr + bytes);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/arch/csky/mm/tlb.c b/arch/csky/mm/tlb.c
new file mode 100644
index 000000000000..08b8394e5b8f
--- /dev/null
+++ b/arch/csky/mm/tlb.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/setup.h>
+
+#define CSKY_TLB_SIZE CONFIG_CPU_TLB_SIZE
+
+void flush_tlb_all(void)
+{
+	tlb_invalid_all();
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	int cpu = smp_processor_id();
+
+	if (cpu_context(cpu, mm) != 0)
+		drop_mmu_context(mm, cpu);
+
+	tlb_invalid_all();
+}
+
+#define restore_asid_inv_utlb(oldpid, newpid) \
+do { \
+	if ((oldpid & ASID_MASK) == newpid) \
+		write_mmu_entryhi(oldpid + 1); \
+	write_mmu_entryhi(oldpid); \
+} while (0)
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int cpu = smp_processor_id();
+
+	if (cpu_context(cpu, mm) != 0) {
+		unsigned long size, flags;
+		int newpid = cpu_asid(cpu, mm);
+
+		local_irq_save(flags);
+		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+		size = (size + 1) >> 1;
+		if (size <= CSKY_TLB_SIZE/2) {
+			start &= (PAGE_MASK << 1);
+			end += ((PAGE_SIZE << 1) - 1);
+			end &= (PAGE_MASK << 1);
+#ifdef CONFIG_CPU_HAS_TLBI
+			while (start < end) {
+				asm volatile("tlbi.vaas %0"
+					     ::"r"(start | newpid));
+				start += (PAGE_SIZE << 1);
+			}
+			sync_is();
+#else
+			{
+			int oldpid = read_mmu_entryhi();
+
+			while (start < end) {
+				int idx;
+
+				write_mmu_entryhi(start | newpid);
+				start += (PAGE_SIZE << 1);
+				tlb_probe();
+				idx = read_mmu_index();
+				if (idx >= 0)
+					tlb_invalid_indexed();
+			}
+			restore_asid_inv_utlb(oldpid, newpid);
+			}
+#endif
+		} else {
+			drop_mmu_context(mm, cpu);
+		}
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned long size, flags;
+
+	local_irq_save(flags);
+	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	if (size <= CSKY_TLB_SIZE) {
+		start &= (PAGE_MASK << 1);
+		end += ((PAGE_SIZE << 1) - 1);
+		end &= (PAGE_MASK << 1);
+#ifdef CONFIG_CPU_HAS_TLBI
+		while (start < end) {
+			asm volatile("tlbi.vaas %0"::"r"(start));
+			start += (PAGE_SIZE << 1);
+		}
+		sync_is();
+#else
+		{
+		int oldpid = read_mmu_entryhi();
+
+		while (start < end) {
+			int idx;
+
+			write_mmu_entryhi(start);
+			start += (PAGE_SIZE << 1);
+			tlb_probe();
+			idx = read_mmu_index();
+			if (idx >= 0)
+				tlb_invalid_indexed();
+		}
+		restore_asid_inv_utlb(oldpid, 0);
+		}
+#endif
+	} else {
+		flush_tlb_all();
+	}
+
+	local_irq_restore(flags);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+	int cpu = smp_processor_id();
+	int newpid = cpu_asid(cpu, vma->vm_mm);
+
+	if (!vma || cpu_context(cpu, vma->vm_mm) != 0) {
+		page &= (PAGE_MASK << 1);
+
+#ifdef CONFIG_CPU_HAS_TLBI
+		asm volatile("tlbi.vaas %0"::"r"(page | newpid));
+		sync_is();
+#else
+		{
+		int oldpid, idx;
+		unsigned long flags;
+
+		local_irq_save(flags);
+		oldpid = read_mmu_entryhi();
+		write_mmu_entryhi(page | newpid);
+		tlb_probe();
+		idx = read_mmu_index();
+		if (idx >= 0)
+			tlb_invalid_indexed();
+
+		restore_asid_inv_utlb(oldpid, newpid);
+		local_irq_restore(flags);
+		}
+#endif
+	}
+}
+
+/*
+ * Remove one kernel space TLB entry. This entry is assumed to be marked
+ * global so we don't do the ASID thing.
+ */
+void flush_tlb_one(unsigned long page)
+{
+	int oldpid;
+
+	oldpid = read_mmu_entryhi();
+	page &= (PAGE_MASK << 1);
+
+#ifdef CONFIG_CPU_HAS_TLBI
+	page = page | (oldpid & 0xfff);
+	asm volatile("tlbi.vaas %0"::"r"(page));
+	sync_is();
+#else
+	{
+	int idx;
+	unsigned long flags;
+
+	page = page | (oldpid & 0xff);
+
+	local_irq_save(flags);
+	write_mmu_entryhi(page);
+	tlb_probe();
+	idx = read_mmu_index();
+	if (idx >= 0)
+		tlb_invalid_indexed();
+	restore_asid_inv_utlb(oldpid, oldpid);
+	local_irq_restore(flags);
+	}
+#endif
+}
+EXPORT_SYMBOL(flush_tlb_one);
+
+/* show current 32 jtlbs */
+void show_jtlb_table(void)
+{
+	unsigned long flags;
+	int entryhi, entrylo0, entrylo1;
+	int entry;
+	int oldpid;
+
+	local_irq_save(flags);
+	entry = 0;
+	pr_info("\n\n\n");
+
+	oldpid = read_mmu_entryhi();
+	while (entry < CSKY_TLB_SIZE) {
+		write_mmu_index(entry);
+		tlb_read();
+		entryhi  = read_mmu_entryhi();
+		entrylo0 = read_mmu_entrylo0();
+		entrylo0 = entrylo0;
+		entrylo1 = read_mmu_entrylo1();
+		entrylo1 = entrylo1;
+		pr_info("jtlb[%d]: entryhi - 0x%x; entrylo0 - 0x%x;"
+			" entrylo1 - 0x%x\n",
+			entry, entryhi, entrylo0, entrylo1);
+		entry++;
+	}
+	write_mmu_entryhi(oldpid);
+	local_irq_restore(flags);
+}
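
For completeness, here is how user space might exercise the cacheflush
system call wired up in syscache.c above, e.g. after generating code at
runtime. This is a hypothetical sketch, not part of the patch: it assumes
the port exposes __NR_cacheflush through the usual syscall(2) path and
that ICACHE/DCACHE/BCACHE come from the uapi asm/cachectl.h added
elsewhere in this series.

	#include <stddef.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/cachectl.h>	/* ICACHE, DCACHE, BCACHE */

	/*
	 * Write back the D-cache and invalidate the I-cache over a buffer
	 * of freshly generated instructions before jumping to it. BCACHE
	 * maps to cache_wbinv_range() in syscache.c above.
	 */
	static int flush_code_range(void *addr, size_t len)
	{
		return syscall(__NR_cacheflush, addr, len, BCACHE);
	}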