Diffstat (limited to 'arch/csky/include/asm')
34 files changed, 472 insertions, 434 deletions
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
index 904a18a818be..3a5c7f6e5aac 100644
--- a/arch/csky/include/asm/Kbuild
+++ b/arch/csky/include/asm/Kbuild
@@ -1,8 +1,14 @@
 # SPDX-License-Identifier: GPL-2.0
+syscall-y := syscall_table_32.h
+
 generic-y += asm-offsets.h
 generic-y += extable.h
-generic-y += gpio.h
 generic-y += kvm_para.h
+generic-y += mcs_spinlock.h
 generic-y += qrwlock.h
+generic-y += qrwlock_types.h
+generic-y += qspinlock.h
+generic-y += parport.h
 generic-y += user.h
 generic-y += vmlinux.lds.h
+generic-y += text-patching.h
diff --git a/arch/csky/include/asm/atomic.h b/arch/csky/include/asm/atomic.h
new file mode 100644
index 000000000000..4dab44f6143a
--- /dev/null
+++ b/arch/csky/include/asm/atomic.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_ATOMIC_H
+#define __ASM_CSKY_ATOMIC_H
+
+#ifdef CONFIG_SMP
+#include <asm-generic/atomic64.h>
+
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+#define __atomic_acquire_fence()	__bar_brarw()
+
+#define __atomic_release_fence()	__bar_brwaw()
+
+static __always_inline int arch_atomic_read(const atomic_t *v)
+{
+	return READ_ONCE(v->counter);
+}
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
+{
+	WRITE_ONCE(v->counter, i);
+}
+
+#define ATOMIC_OP(op)						\
+static __always_inline						\
+void arch_atomic_##op(int i, atomic_t *v)			\
+{								\
+	unsigned long tmp;					\
+	__asm__ __volatile__ (					\
+	"1:	ldex.w		%0, (%2)	\n"		\
+	"	" #op "		%0, %1		\n"		\
+	"	stex.w		%0, (%2)	\n"		\
+	"	bez		%0, 1b		\n"		\
+	: "=&r" (tmp)						\
+	: "r" (i), "r" (&v->counter)				\
+	: "memory");						\
+}
+
+ATOMIC_OP(add)
+ATOMIC_OP(sub)
+ATOMIC_OP(and)
+ATOMIC_OP( or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
+#define ATOMIC_FETCH_OP(op)					\
+static __always_inline						\
+int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
+{								\
+	register int ret, tmp;					\
+	__asm__ __volatile__ (					\
+	"1:	ldex.w		%0, (%3)	\n"		\
+	"	mov		%1, %0		\n"		\
+	"	" #op "		%0, %2		\n"		\
+	"	stex.w		%0, (%3)	\n"		\
+	"	bez		%0, 1b		\n"		\
+	: "=&r" (tmp), "=&r" (ret)				\
+	: "r" (i), "r"(&v->counter)				\
+	: "memory");						\
+	return ret;						\
+}
+
+#define ATOMIC_OP_RETURN(op, c_op)				\
+static __always_inline						\
+int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
+{								\
+	return arch_atomic_fetch_##op##_relaxed(i, v) c_op i;	\
+}
+
+#define ATOMIC_OPS(op, c_op)					\
+	ATOMIC_FETCH_OP(op)					\
+	ATOMIC_OP_RETURN(op, c_op)
+
+ATOMIC_OPS(add, +)
+ATOMIC_OPS(sub, -)
+
+#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
+
+#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+
+#define ATOMIC_OPS(op)						\
+	ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS( or)
+ATOMIC_OPS(xor)
+
+#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed
+
+#undef ATOMIC_OPS
+
+#undef ATOMIC_FETCH_OP
+
+static __always_inline int
+arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+	int prev, tmp;
+
+	__asm__ __volatile__ (
+		RELEASE_FENCE
+		"1:	ldex.w		%0, (%3)	\n"
+		"	cmpne		%0, %4		\n"
+		"	bf		2f		\n"
+		"	mov		%1, %0		\n"
+		"	add		%1, %2		\n"
+		"	stex.w		%1, (%3)	\n"
+		"	bez		%1, 1b		\n"
+		FULL_FENCE
+		"2:\n"
+		: "=&r" (prev), "=&r" (tmp)
+		: "r" (a), "r" (&v->counter), "r" (u)
+		: "memory");
+
+	return prev;
+}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
+
+static __always_inline bool
+arch_atomic_inc_unless_negative(atomic_t *v)
+{
+	int rc, tmp;
+
+	__asm__ __volatile__ (
+		RELEASE_FENCE
+		"1:	ldex.w		%0, (%2)	\n"
+		"	movi		%1, 0		\n"
+		"	blz		%0, 2f		\n"
+		"	movi		%1, 1		\n"
+		"	addi		%0, 1		\n"
+		"	stex.w		%0, (%2)	\n"
+		"	bez		%0, 1b		\n"
+		FULL_FENCE
+		"2:\n"
+		: "=&r" (tmp), "=&r" (rc)
+		: "r" (&v->counter)
+		: "memory");
+
+	return tmp ? true : false;
+
+}
+#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+
+static __always_inline bool
+arch_atomic_dec_unless_positive(atomic_t *v)
+{
+	int rc, tmp;
+
+	__asm__ __volatile__ (
+		RELEASE_FENCE
+		"1:	ldex.w		%0, (%2)	\n"
+		"	movi		%1, 0		\n"
+		"	bhz		%0, 2f		\n"
+		"	movi		%1, 1		\n"
+		"	subi		%0, 1		\n"
+		"	stex.w		%0, (%2)	\n"
+		"	bez		%0, 1b		\n"
+		FULL_FENCE
+		"2:\n"
+		: "=&r" (tmp), "=&r" (rc)
+		: "r" (&v->counter)
+		: "memory");
+
+	return tmp ? true : false;
+}
+#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+
+static __always_inline int
+arch_atomic_dec_if_positive(atomic_t *v)
+{
+	int dec, tmp;
+
+	__asm__ __volatile__ (
+		RELEASE_FENCE
+		"1:	ldex.w		%0, (%2)	\n"
+		"	subi		%1, %0, 1	\n"
+		"	blz		%1, 2f		\n"
+		"	stex.w		%1, (%2)	\n"
+		"	bez		%1, 1b		\n"
+		FULL_FENCE
+		"2:\n"
+		: "=&r" (dec), "=&r" (tmp)
+		: "r" (&v->counter)
+		: "memory");
+
+	return dec - 1;
+}
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
+
+#else
+#include <asm-generic/atomic.h>
+#endif
+
+#endif /* __ASM_CSKY_ATOMIC_H */
diff --git a/arch/csky/include/asm/barrier.h b/arch/csky/include/asm/barrier.h
index f4045dd53e17..15de58b10aec 100644
--- a/arch/csky/include/asm/barrier.h
+++ b/arch/csky/include/asm/barrier.h
@@ -37,17 +37,21 @@
  * bar.brar
  * bar.bwaw
  */
+#define FULL_FENCE		".long 0x842fc000\n"
+#define ACQUIRE_FENCE		".long 0x8427c000\n"
+#define RELEASE_FENCE		".long 0x842ec000\n"
+
 #define __bar_brw()	asm volatile (".long 0x842cc000\n":::"memory")
 #define __bar_br()	asm volatile (".long 0x8424c000\n":::"memory")
 #define __bar_bw()	asm volatile (".long 0x8428c000\n":::"memory")
 #define __bar_arw()	asm volatile (".long 0x8423c000\n":::"memory")
 #define __bar_ar()	asm volatile (".long 0x8421c000\n":::"memory")
 #define __bar_aw()	asm volatile (".long 0x8422c000\n":::"memory")
-#define __bar_brwarw()	asm volatile (".long 0x842fc000\n":::"memory")
-#define __bar_brarw()	asm volatile (".long 0x8427c000\n":::"memory")
+#define __bar_brwarw()	asm volatile (FULL_FENCE:::"memory")
+#define __bar_brarw()	asm volatile (ACQUIRE_FENCE:::"memory")
 #define __bar_bwarw()	asm volatile (".long 0x842bc000\n":::"memory")
 #define __bar_brwar()	asm volatile (".long 0x842dc000\n":::"memory")
-#define __bar_brwaw()	asm volatile (".long 0x842ec000\n":::"memory")
+#define __bar_brwaw()	asm volatile (RELEASE_FENCE:::"memory")
 #define __bar_brar()	asm volatile (".long 0x8425c000\n":::"memory")
 #define __bar_brar()	asm volatile (".long 0x8425c000\n":::"memory")
 #define __bar_bwaw()	asm volatile (".long 0x842ac000\n":::"memory")
@@ -56,7 +60,6 @@
 #define __smp_rmb()	__bar_brar()
 #define __smp_wmb()	__bar_bwaw()
 
-#define ACQUIRE_FENCE		".long 0x8427c000\n"
 #define __smp_acquire_fence()	__bar_brarw()
 #define __smp_release_fence()	__bar_brwaw()
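Note: the relaxed atomics above only become acquire/release/fully-ordered variants through the generic atomic fallback layer, which brackets them with the fence hooks defined here. A rough sketch of that composition, for illustration only (the "my_" helper name is made up; the real wrappers are generated under include/linux/atomic/):

	/* Sketch: an acquire-ordered fetch_add built from the relaxed primitive
	 * plus __atomic_acquire_fence() (bar.brarw). Not the generated kernel code. */
	static __always_inline int my_atomic_fetch_add_acquire(int i, atomic_t *v)
	{
		int ret = arch_atomic_fetch_add_relaxed(i, v);	/* ldex/stex loop above */

		__atomic_acquire_fence();	/* orders the load before later accesses */
		return ret;
	}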
diff --git a/arch/csky/include/asm/bitops.h b/arch/csky/include/asm/bitops.h
index 91818787d860..80d67eee6e86 100644
--- a/arch/csky/include/asm/bitops.h
+++ b/arch/csky/include/asm/bitops.h
@@ -9,7 +9,7 @@
 /*
  * asm-generic/bitops/ffs.h
  */
-static inline int ffs(int x)
+static inline __attribute_const__ int ffs(int x)
 {
 	if (!x)
 		return 0;
@@ -26,7 +26,7 @@ static inline int ffs(int x)
 /*
  * asm-generic/bitops/__ffs.h
  */
-static __always_inline unsigned long __ffs(unsigned long x)
+static __always_inline __attribute_const__ unsigned long __ffs(unsigned long x)
 {
 	asm volatile (
 		"brev %0\n"
@@ -39,7 +39,7 @@ static __always_inline unsigned long __ffs(unsigned long x)
 /*
  * asm-generic/bitops/fls.h
  */
-static __always_inline int fls(unsigned int x)
+static __always_inline __attribute_const__ int fls(unsigned int x)
 {
 	asm volatile(
 		"ff1 %0\n"
@@ -52,14 +52,13 @@ static __always_inline int fls(unsigned int x)
 /*
  * asm-generic/bitops/__fls.h
  */
-static __always_inline unsigned long __fls(unsigned long x)
+static __always_inline __attribute_const__ unsigned long __fls(unsigned long x)
 {
 	return fls(x) - 1;
 }
 
 #include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/find.h>
 
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
@@ -74,7 +73,6 @@ static __always_inline unsigned long __fls(unsigned long x)
  * bug fix, why only could use atomic!!!!
  */
 #include <asm-generic/bitops/non-atomic.h>
-#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
 
 #include <asm-generic/bitops/le.h>
 #include <asm-generic/bitops/ext2-atomic.h>
diff --git a/arch/csky/include/asm/cachetype.h b/arch/csky/include/asm/cachetype.h
new file mode 100644
index 000000000000..98cbe3af662f
--- /dev/null
+++ b/arch/csky/include/asm/cachetype.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_CSKY_CACHETYPE_H
+#define __ASM_CSKY_CACHETYPE_H
+
+#include <linux/types.h>
+
+#define cpu_dcache_is_aliasing()	true
+
+#endif
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
index d1bef11f8dc9..db6dda47184e 100644
--- a/arch/csky/include/asm/cmpxchg.h
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -4,9 +4,9 @@
 #define __ASM_CSKY_CMPXCHG_H
 
 #ifdef CONFIG_SMP
+#include <linux/bug.h>
 #include <asm/barrier.h>
-
-extern void __bad_xchg(void);
+#include <linux/cmpxchg-emu.h>
 
 #define __xchg_relaxed(new, ptr, size)			\
 ({							\
@@ -15,6 +15,26 @@ extern void __bad_xchg(void);
 	__typeof__(*(ptr)) __ret;			\
 	unsigned long tmp;				\
 	switch (size) {					\
+	case 2: {					\
+		u32 ret;				\
+		u32 shif = ((ulong)__ptr & 2) ? 16 : 0;	\
+		u32 mask = 0xffff << shif;		\
+		__ptr = (__typeof__(ptr))((ulong)__ptr & ~2); \
+		__asm__ __volatile__ (			\
+			"1:	ldex.w %0, (%4)\n"	\
+			"	and %1, %0, %2\n"	\
+			"	or %1, %1, %3\n"	\
+			"	stex.w %1, (%4)\n"	\
+			"	bez %1, 1b\n"		\
+			: "=&r" (ret), "=&r" (tmp)	\
+			: "r" (~mask),			\
+			  "r" ((u32)__new << shif),	\
+			  "r" (__ptr)			\
+			: "memory");			\
+		__ret = (__typeof__(*(ptr)))		\
+			((ret & mask) >> shif);		\
+		break;					\
+	}						\
 	case 4:						\
 		asm volatile (				\
 		"1:	ldex.w		%0, (%3) \n"	\
@@ -26,7 +46,7 @@ extern void __bad_xchg(void);
 		:);					\
 		break;					\
 	default:					\
-		__bad_xchg();				\
+		BUILD_BUG();				\
 	}						\
 	__ret;						\
 })
@@ -42,6 +62,9 @@ extern void __bad_xchg(void);
 	__typeof__(old) __old = (old);			\
 	__typeof__(*(ptr)) __ret;			\
 	switch (size) {					\
+	case 1:						\
+		__ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+		break;					\
 	case 4:						\
 		asm volatile (				\
 		"1:	ldex.w		%0, (%3) \n"	\
@@ -56,7 +79,7 @@ extern void __bad_xchg(void);
 		:);					\
 		break;					\
 	default:					\
-		__bad_xchg();				\
+		BUILD_BUG();				\
 	}						\
 	__ret;						\
 })
@@ -64,15 +87,77 @@ extern void __bad_xchg(void);
 #define arch_cmpxchg_relaxed(ptr, o, n)			\
 	(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
 
-#define arch_cmpxchg(ptr, o, n)				\
+#define __cmpxchg_acquire(ptr, old, new, size)		\
+({							\
+	__typeof__(ptr) __ptr = (ptr);			\
+	__typeof__(new) __new = (new);			\
+	__typeof__(new) __tmp;				\
+	__typeof__(old) __old = (old);			\
+	__typeof__(*(ptr)) __ret;			\
+	switch (size) {					\
+	case 1:						\
+		__ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+		break;					\
+	case 4:						\
+		asm volatile (				\
+		"1:	ldex.w		%0, (%3) \n"	\
+		"	cmpne		%0, %4   \n"	\
+		"	bt		2f       \n"	\
+		"	mov		%1, %2   \n"	\
+		"	stex.w		%1, (%3) \n"	\
+		"	bez		%1, 1b   \n"	\
+		ACQUIRE_FENCE				\
+		"2:				 \n"	\
+		: "=&r" (__ret), "=&r" (__tmp)		\
+		: "r" (__new), "r"(__ptr), "r"(__old)	\
+		:);					\
+		break;					\
+	default:					\
+		BUILD_BUG();				\
+	}						\
+	__ret;						\
+})
+
+#define arch_cmpxchg_acquire(ptr, o, n)			\
+	(__cmpxchg_acquire((ptr), (o), (n), sizeof(*(ptr))))
+
+#define __cmpxchg(ptr, old, new, size)			\
 ({							\
+	__typeof__(ptr) __ptr = (ptr);			\
+	__typeof__(new) __new = (new);			\
+	__typeof__(new) __tmp;				\
+	__typeof__(old) __old = (old);			\
 	__typeof__(*(ptr)) __ret;			\
-	__smp_release_fence();				\
-	__ret = arch_cmpxchg_relaxed(ptr, o, n);	\
-	__smp_acquire_fence();				\
+	switch (size) {					\
+	case 1:						\
+		__ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+		break;					\
+	case 4:						\
+		asm volatile (				\
+		RELEASE_FENCE				\
+		"1:	ldex.w		%0, (%3) \n"	\
+		"	cmpne		%0, %4   \n"	\
+		"	bt		2f       \n"	\
+		"	mov		%1, %2   \n"	\
+		"	stex.w		%1, (%3) \n"	\
+		"	bez		%1, 1b   \n"	\
+		FULL_FENCE				\
+		"2:				 \n"	\
+		: "=&r" (__ret), "=&r" (__tmp)		\
+		: "r" (__new), "r"(__ptr), "r"(__old)	\
+		:);					\
+		break;					\
+	default:					\
+		BUILD_BUG();				\
+	}						\
 	__ret;						\
 })
 
+#define arch_cmpxchg(ptr, o, n)				\
+	(__cmpxchg((ptr), (o), (n), sizeof(*(ptr))))
+
+#define arch_cmpxchg_local(ptr, o, n)			\
+	(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
 #else
 #include <asm-generic/cmpxchg.h>
 #endif
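Note: the new 2-byte case of __xchg_relaxed() operates on the aligned 32-bit word that contains the halfword. A plain-C sketch of the masking it performs, for illustration only (the real code keeps the read-modify-write inside a single ldex.w/stex.w retry loop, which this sketch does not reproduce, so it is not atomic by itself):

	static inline u16 xchg_u16_sketch(volatile u16 *p, u16 newval)
	{
		u32 shift = ((unsigned long)p & 2) ? 16 : 0;	/* which half of the word */
		u32 mask  = 0xffff << shift;
		volatile u32 *wp = (volatile u32 *)((unsigned long)p & ~2UL);
		u32 old = *wp;					/* ldex.w in the real code */

		*wp = (old & ~mask) | ((u32)newval << shift);	/* stex.w + bez retry */
		return (old & mask) >> shift;			/* previous halfword */
	}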
diff --git a/arch/csky/include/asm/ftrace.h b/arch/csky/include/asm/ftrace.h
index 9b86341731b6..00f9f7647e3f 100644
--- a/arch/csky/include/asm/ftrace.h
+++ b/arch/csky/include/asm/ftrace.h
@@ -7,8 +7,6 @@
 
 #define HAVE_FUNCTION_GRAPH_FP_TEST
 
-#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
-
 #define ARCH_SUPPORTS_FTRACE_OPS 1
 
 #define MCOUNT_ADDR	((unsigned long)_mcount)
@@ -26,5 +24,9 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 
 struct dyn_arch_ftrace {
 };
+
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+			   unsigned long frame_pointer);
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_CSKY_FTRACE_H */
diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h
index f82654053dc0..536d3bf32ff1 100644
--- a/arch/csky/include/asm/io.h
+++ b/arch/csky/include/asm/io.h
@@ -5,7 +5,6 @@
 
 #include <linux/pgtable.h>
 #include <linux/types.h>
-#include <linux/version.h>
 
 /*
  * I/O memory access primitives. Reads are ordered relative to any
@@ -37,7 +36,7 @@
  */
 #define ioremap_wc(addr, size) \
 	ioremap_prot((addr), (size), \
-		(_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED)
+		__pgprot((_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED))
 
 #include <asm-generic/io.h>
diff --git a/arch/csky/include/asm/irq_work.h b/arch/csky/include/asm/irq_work.h
index 33aaf39d6f94..d39fcc1f5395 100644
--- a/arch/csky/include/asm/irq_work.h
+++ b/arch/csky/include/asm/irq_work.h
@@ -7,5 +7,5 @@ static inline bool arch_irq_work_has_interrupt(void)
 {
 	return true;
 }
-extern void arch_irq_work_raise(void);
+
 #endif /* __ASM_CSKY_IRQ_WORK_H */
diff --git a/arch/csky/include/asm/jump_label.h b/arch/csky/include/asm/jump_label.h
new file mode 100644
index 000000000000..ef2e37a10a0f
--- /dev/null
+++ b/arch/csky/include/asm/jump_label.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_CSKY_JUMP_LABEL_H
+#define __ASM_CSKY_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct static_key *key,
+					       bool branch)
+{
+	asm goto(
+		"1:	nop32					\n"
+		"	.pushsection	__jump_table, \"aw\"	\n"
+		"	.align		2			\n"
+		"	.long		1b - ., %l[label] - .	\n"
+		"	.long		%0 - .			\n"
+		"	.popsection				\n"
+		: : "i"(&((char *)key)[branch]) : : label);
+
+	return false;
+label:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+						    bool branch)
+{
+	asm goto(
+		"1:	bsr32		%l[label]		\n"
+		"	.pushsection	__jump_table, \"aw\"	\n"
+		"	.align		2			\n"
+		"	.long		1b - ., %l[label] - .	\n"
+		"	.long		%0 - .			\n"
+		"	.popsection				\n"
+		: : "i"(&((char *)key)[branch]) : : label);
+
+	return false;
+label:
+	return true;
+}
+
+enum jump_label_type;
+void arch_jump_label_transform_static(struct jump_entry *entry,
+				      enum jump_label_type type);
+#define arch_jump_label_transform_static arch_jump_label_transform_static
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_CSKY_JUMP_LABEL_H */
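For context, the new jump_label.h backs the generic static-key interface from <linux/jump_label.h>. A hedged usage sketch (the key name and function are made up, not part of this commit):

	#include <linux/jump_label.h>
	#include <linux/printk.h>

	DEFINE_STATIC_KEY_FALSE(csky_demo_key);		/* hypothetical key */

	void demo_path(void)
	{
		/* compiles to the nop32 above; becomes a bsr32 once the key is enabled */
		if (static_branch_unlikely(&csky_demo_key))
			pr_info("patched-in slow path\n");
	}

	/* elsewhere: static_branch_enable(&csky_demo_key); rewrites the site at run time */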
diff --git a/arch/csky/include/asm/kprobes.h b/arch/csky/include/asm/kprobes.h
index b647bbde4d6d..55267cbf5204 100644
--- a/arch/csky/include/asm/kprobes.h
+++ b/arch/csky/include/asm/kprobes.h
@@ -41,7 +41,7 @@ void arch_remove_kprobe(struct kprobe *p);
 int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr);
 int kprobe_breakpoint_handler(struct pt_regs *regs);
 int kprobe_single_step_handler(struct pt_regs *regs);
-void kretprobe_trampoline(void);
+void __kretprobe_trampoline(void);
 void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
 
 #endif /* CONFIG_KPROBES */
diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h
index ed7451478b1b..4911d0892b71 100644
--- a/arch/csky/include/asm/page.h
+++ b/arch/csky/include/asm/page.h
@@ -7,12 +7,8 @@
 #include <asm/cache.h>
 #include <linux/const.h>
 
-/*
- * PAGE_SHIFT determines the page size: 4KB
- */
-#define PAGE_SHIFT	12
-#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
-#define PAGE_MASK	(~(PAGE_SIZE - 1))
+#include <vdso/page.h>
+
 #define THREAD_SIZE	(PAGE_SIZE * 2)
 #define THREAD_MASK	(~(THREAD_SIZE - 1))
 #define THREAD_SHIFT	(PAGE_SHIFT + 1)
@@ -34,12 +30,8 @@
 
 #include <linux/pfn.h>
 
-#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
-
 #define virt_addr_valid(kaddr)	((void *)(kaddr) >= (void *)PAGE_OFFSET && \
 			(void *)(kaddr) < high_memory)
-#define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
 
 extern void *memset(void *dest, int c, size_t l);
 extern void *memcpy(void *to, const void *from, size_t l);
@@ -47,9 +39,6 @@ extern void *memcpy(void *to, const void *from, size_t l);
 #define clear_page(page)	memset((page), 0, PAGE_SIZE)
 #define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)
 
-#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
-#define phys_to_page(paddr)	(pfn_to_page(PFN_DOWN(paddr)))
-
 struct page;
 
 #include <abi/page.h>
@@ -81,6 +70,11 @@ extern unsigned long va_pa_offset;
 
 #define __pa_symbol(x)	__pa(RELOC_HIDE((unsigned long)(x), 0))
 
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+	return __pa(kaddr) >> PAGE_SHIFT;
+}
+
 #define MAP_NR(x)	PFN_DOWN((unsigned long)(x) - PAGE_OFFSET - \
 				 PHYS_OFFSET_OFFSET)
 #define virt_to_page(x)	(mem_map + MAP_NR(x))
diff --git a/arch/csky/include/asm/pci.h b/arch/csky/include/asm/pci.h
index ebc765b1f78b..42724c630d30 100644
--- a/arch/csky/include/asm/pci.h
+++ b/arch/csky/include/asm/pci.h
@@ -9,26 +9,7 @@
 
 #include <asm/io.h>
 
-#define PCIBIOS_MIN_IO		0
-#define PCIBIOS_MIN_MEM		0
-
-/* C-SKY shim does not initialize PCI bus */
-#define pcibios_assign_all_busses() 1
-
-extern int isa_dma_bridge_buggy;
-
-#ifdef CONFIG_PCI
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
-	/* no legacy IRQ on csky */
-	return -ENODEV;
-}
-
-static inline int pci_proc_domain(struct pci_bus *bus)
-{
-	/* always show the domain in /proc */
-	return 1;
-}
-#endif /* CONFIG_PCI */
+/* Generic PCI */
+#include <asm-generic/pci.h>
 
 #endif /* __ASM_CSKY_PCI_H */
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index bbbd0698b397..9ed2b15ffd94 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -29,7 +29,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 	pte_t *pte;
 	unsigned long i;
 
-	pte = (pte_t *) __get_free_page(GFP_KERNEL);
+	pte = __pte_alloc_one_kernel(mm);
 	if (!pte)
 		return NULL;
 
@@ -44,7 +44,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 	pgd_t *ret;
 	pgd_t *init;
 
-	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+	ret = __pgd_alloc(mm, 0);
 	if (ret) {
 		init = pgd_offset(&init_mm, 0UL);
 		pgd_init((unsigned long *)ret);
@@ -61,11 +61,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 	return ret;
 }
 
-#define __pte_free_tlb(tlb, pte, address)		\
-do {							\
-	pgtable_pte_page_dtor(pte);			\
-	tlb_remove_page(tlb, pte);			\
-} while (0)
+#define __pte_free_tlb(tlb, pte, address)		\
+	tlb_remove_ptdesc((tlb), page_ptdesc(pte))
 
 extern void pagetable_init(void);
 extern void mmu_init(unsigned long min_pfn, unsigned long max_pfn);
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index 151607ed5158..d606afbabce1 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -18,18 +18,18 @@
 /*
  * C-SKY is two-level paging structure:
  */
-#define PGD_ORDER	0
-#define PTE_ORDER	0
-#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
 #define PTRS_PER_PMD	1
-#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))
 
 #define pte_ERROR(e) \
 	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
 #define pgd_ERROR(e) \
 	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
+#define PFN_PTE_SHIFT	PAGE_SHIFT
+#define pmd_pfn(pmd)	(pmd_phys(pmd) >> PAGE_SHIFT)
 #define pmd_page(pmd)	(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
 #define pte_clear(mm, addr, ptep)	set_pte((ptep), \
 	(((unsigned int) addr >= PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
@@ -76,24 +76,6 @@
 #define MAX_SWAPFILES_CHECK() \
 	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5)
 
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READ
-#define __P010	PAGE_READ
-#define __P011	PAGE_READ
-#define __P100	PAGE_READ
-#define __P101	PAGE_READ
-#define __P110	PAGE_READ
-#define __P111	PAGE_READ
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READ
-#define __S010	PAGE_WRITE
-#define __S011	PAGE_WRITE
-#define __S100	PAGE_READ
-#define __S101	PAGE_READ
-#define __S110	PAGE_WRITE
-#define __S111	PAGE_WRITE
-
 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
 
@@ -109,7 +91,6 @@ static inline void set_pte(pte_t *p, pte_t pte)
 	/* prevent out of order excution */
 	smp_mb();
 }
-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
 
 static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 {
@@ -195,7 +176,7 @@ static inline pte_t pte_mkold(pte_t pte)
 	return pte;
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
 	pte_val(pte) |= _PAGE_WRITE;
 	if (pte_val(pte) & _PAGE_MODIFIED)
@@ -219,6 +200,23 @@ static inline pte_t pte_mkyoung(pte_t pte)
 	return pte;
 }
 
+static inline bool pte_swp_exclusive(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
+	return pte;
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
+	return pte;
+}
+
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 struct file;
 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
@@ -251,11 +249,6 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
 	return __pgprot(prot);
 }
 
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
@@ -265,13 +258,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern void paging_init(void);
 
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t *pte);
-
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-#define kern_addr_valid(addr)	(1)
-
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+		unsigned long address, pte_t *pte, unsigned int nr);
+#define update_mmu_cache(vma, addr, ptep) \
+	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
 
 #endif /* __ASM_CSKY_PGTABLE_H */
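Note on the pte_mkwrite() rename above: generic code now calls a VMA-taking pte_mkwrite(), and on architectures without a per-VMA write policy it falls back to the _novma variant. Roughly, as a sketch of the generic fallback (this lives in <linux/pgtable.h>, not in this csky header):

	static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
	{
		return pte_mkwrite_novma(pte);	/* csky's arch helper above */
	}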
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h
index 9e933021fe8e..e487a46d1c37 100644
--- a/arch/csky/include/asm/processor.h
+++ b/arch/csky/include/asm/processor.h
@@ -4,10 +4,9 @@
 #define __ASM_CSKY_PROCESSOR_H
 
 #include <linux/bitops.h>
-#include <asm/segment.h>
+#include <linux/cache.h>
 #include <asm/ptrace.h>
 #include <asm/current.h>
-#include <asm/cache.h>
 #include <abi/reg_ops.h>
 #include <abi/regdef.h>
 #include <abi/switch_context.h>
@@ -59,7 +58,6 @@ struct thread_struct {
  */
 #define start_thread(_regs, _pc, _usp)					\
 do {									\
-	set_fs(USER_DS); /* reads from user space */			\
 	(_regs)->pc = (_pc);						\
 	(_regs)->regs[1] = 0; /* ABIV1 is R7, uClibc_main rtdl arg */	\
 	(_regs)->regs[2] = 0;						\
@@ -71,17 +69,10 @@ do {									\
 /* Forward declaration, a strange C thing */
 struct task_struct;
 
-/* Free all resources held by a thread. */
-static inline void release_thread(struct task_struct *dead_task)
-{
-}
-
 /* Prepare to copy thread state - unlazy all lazy status */
 #define prepare_to_copy(tsk)    do { } while (0)
 
-extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
 
 #define KSTK_EIP(tsk)		(task_pt_regs(tsk)->pc)
 #define KSTK_ESP(tsk)		(task_pt_regs(tsk)->usp)
@@ -91,4 +82,6 @@ unsigned long get_wchan(struct task_struct *p);
 
 #define cpu_relax() barrier()
 
+register unsigned long current_stack_pointer __asm__("sp");
+
 #endif /* __ASM_CSKY_PROCESSOR_H */
diff --git a/arch/csky/include/asm/ptrace.h b/arch/csky/include/asm/ptrace.h
index 4202aab6df42..0634b7895d81 100644
--- a/arch/csky/include/asm/ptrace.h
+++ b/arch/csky/include/asm/ptrace.h
@@ -96,5 +96,7 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
 	return *(unsigned long *)((unsigned long)regs + offset);
 }
 
+asmlinkage int syscall_trace_enter(struct pt_regs *regs);
+asmlinkage void syscall_trace_exit(struct pt_regs *regs);
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_CSKY_PTRACE_H */
diff --git a/arch/csky/include/asm/sections.h b/arch/csky/include/asm/sections.h
new file mode 100644
index 000000000000..83e82b7c0f6c
--- /dev/null
+++ b/arch/csky/include/asm/sections.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_SECTIONS_H
+#define __ASM_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _start[];
+
+asmlinkage void csky_start(unsigned int unused, void *dtb_start);
+
+#endif /* __ASM_SECTIONS_H */
diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h
deleted file mode 100644
index 5bc1cc62b87f..000000000000
--- a/arch/csky/include/asm/segment.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef __ASM_CSKY_SEGMENT_H
-#define __ASM_CSKY_SEGMENT_H
-
-typedef struct {
-	unsigned long seg;
-} mm_segment_t;
-
-#endif /* __ASM_CSKY_SEGMENT_H */
diff --git a/arch/csky/include/asm/smp.h b/arch/csky/include/asm/smp.h
index 668b79ce29ea..d3db334f3196 100644
--- a/arch/csky/include/asm/smp.h
+++ b/arch/csky/include/asm/smp.h
@@ -23,7 +23,7 @@ void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq);
 
 int __cpu_disable(void);
 
-void __cpu_die(unsigned int cpu);
+static inline void __cpu_die(unsigned int cpu) { }
 
 #endif /* CONFIG_SMP */
diff --git a/arch/csky/include/asm/spinlock.h b/arch/csky/include/asm/spinlock.h
index 69f5aa249c5f..83a2005341f5 100644
--- a/arch/csky/include/asm/spinlock.h
+++ b/arch/csky/include/asm/spinlock.h
@@ -3,87 +3,10 @@
 #ifndef __ASM_CSKY_SPINLOCK_H
 #define __ASM_CSKY_SPINLOCK_H
 
-#include <linux/spinlock_types.h>
-#include <asm/barrier.h>
-
-/*
- * Ticket-based spin-locking.
- */
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	arch_spinlock_t lockval;
-	u32 ticket_next = 1 << TICKET_NEXT;
-	u32 *p = &lock->lock;
-	u32 tmp;
-
-	asm volatile (
-		"1:	ldex.w		%0, (%2)	\n"
-		"	mov		%1, %0		\n"
-		"	add		%0, %3		\n"
-		"	stex.w		%0, (%2)	\n"
-		"	bez		%0, 1b		\n"
-		: "=&r" (tmp), "=&r" (lockval)
-		: "r"(p), "r"(ticket_next)
-		: "cc");
-
-	while (lockval.tickets.next != lockval.tickets.owner)
-		lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
-
-	smp_mb();
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	u32 tmp, contended, res;
-	u32 ticket_next = 1 << TICKET_NEXT;
-	u32 *p = &lock->lock;
-
-	do {
-		asm volatile (
-		"	ldex.w		%0, (%3)	\n"
-		"	movi		%2, 1		\n"
-		"	rotli		%1, %0, 16	\n"
-		"	cmpne		%1, %0		\n"
-		"	bt		1f		\n"
-		"	movi		%2, 0		\n"
-		"	add		%0, %0, %4	\n"
-		"	stex.w		%0, (%3)	\n"
-		"1:					\n"
-		: "=&r" (res), "=&r" (tmp), "=&r" (contended)
-		: "r"(p), "r"(ticket_next)
-		: "cc");
-	} while (!res);
-
-	if (!contended)
-		smp_mb();
-
-	return !contended;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	smp_mb();
-	WRITE_ONCE(lock->tickets.owner, lock->tickets.owner + 1);
-}
-
-static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
-{
-	return lock.tickets.owner == lock.tickets.next;
-}
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
-	return !arch_spin_value_unlocked(READ_ONCE(*lock));
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
-	struct __raw_tickets tickets = READ_ONCE(lock->tickets);
-
-	return (tickets.next - tickets.owner) > 1;
-}
-#define arch_spin_is_contended	arch_spin_is_contended
-
+#include <asm/qspinlock.h>
 #include <asm/qrwlock.h>
 
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock()	smp_mb()
+
 #endif /* __ASM_CSKY_SPINLOCK_H */
diff --git a/arch/csky/include/asm/spinlock_types.h b/arch/csky/include/asm/spinlock_types.h
index 8ff0f6ff3a00..75bdf3af80ba 100644
--- a/arch/csky/include/asm/spinlock_types.h
+++ b/arch/csky/include/asm/spinlock_types.h
@@ -3,25 +3,7 @@
 #ifndef __ASM_CSKY_SPINLOCK_TYPES_H
 #define __ASM_CSKY_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
-#define TICKET_NEXT	16
-
-typedef struct {
-	union {
-		u32 lock;
-		struct __raw_tickets {
-			/* little endian */
-			u16 owner;
-			u16 next;
-		} tickets;
-	};
-} arch_spinlock_t;
-
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
-
+#include <asm-generic/qspinlock_types.h>
 #include <asm-generic/qrwlock_types.h>
 
 #endif /* __ASM_CSKY_SPINLOCK_TYPES_H */
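The smp_mb__after_spinlock() definition added above lets generic code upgrade a lock acquisition to a full barrier only where one is needed. A hedged usage sketch (hypothetical function, mirroring the pattern used by callers such as the scheduler; not part of this commit):

	static void publish_under_lock(raw_spinlock_t *lock, int *flag)
	{
		raw_spin_lock(lock);
		smp_mb__after_spinlock();	/* full ordering against accesses before the lock */
		WRITE_ONCE(*flag, 1);
		raw_spin_unlock(lock);
	}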
diff --git a/arch/csky/include/asm/stackprotector.h b/arch/csky/include/asm/stackprotector.h
index d7cd4e51edd9..d23747447166 100644
--- a/arch/csky/include/asm/stackprotector.h
+++ b/arch/csky/include/asm/stackprotector.h
@@ -2,9 +2,6 @@
 #ifndef _ASM_STACKPROTECTOR_H
 #define _ASM_STACKPROTECTOR_H 1
 
-#include <linux/random.h>
-#include <linux/version.h>
-
 extern unsigned long __stack_chk_guard;
 
 /*
@@ -15,12 +12,7 @@ extern unsigned long __stack_chk_guard;
  */
 static __always_inline void boot_init_stack_canary(void)
 {
-	unsigned long canary;
-
-	/* Try to get a semi random initial value. */
-	get_random_bytes(&canary, sizeof(canary));
-	canary ^= LINUX_VERSION_CODE;
-	canary &= CANARY_MASK;
+	unsigned long canary = get_random_canary();
 
 	current->stack_canary = canary;
 	__stack_chk_guard = current->stack_canary;
diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h
index f624fa3bbc22..717f44b4d26f 100644
--- a/arch/csky/include/asm/syscall.h
+++ b/arch/csky/include/asm/syscall.h
@@ -63,9 +63,13 @@ static inline void
 syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
 		      const unsigned long *args)
 {
-	regs->orig_a0 = args[0];
-	args++;
-	memcpy(&regs->a1, args, 5 * sizeof(regs->a1));
+	memcpy(&regs->a0, args, 6 * sizeof(regs->a0));
+	/*
+	 * Also copy the first argument into orig_a0
+	 * so that syscall_get_arguments() would return it
+	 * instead of the previous value.
+	 */
+	regs->orig_a0 = regs->a0;
 }
 
 static inline int
diff --git a/arch/csky/include/asm/thread_info.h b/arch/csky/include/asm/thread_info.h
index 8c349a8f904d..b5ed788f0c68 100644
--- a/arch/csky/include/asm/thread_info.h
+++ b/arch/csky/include/asm/thread_info.h
@@ -16,7 +16,6 @@ struct thread_info {
 	unsigned long		flags;
 	int			preempt_count;
 	unsigned long		tp_value;
-	mm_segment_t		addr_limit;
 	struct restart_block	restart_block;
 	struct pt_regs		*regs;
 	unsigned int		cpu;
@@ -26,7 +25,6 @@ struct thread_info {
 {						\
 	.task		= &tsk,			\
 	.preempt_count	= INIT_PREEMPT_COUNT,	\
-	.addr_limit	= KERNEL_DS,		\
 	.cpu		= 0,			\
 	.restart_block = {			\
 		.fn = do_no_restart_syscall,	\
diff --git a/arch/csky/include/asm/tlb.h b/arch/csky/include/asm/tlb.h
index 3498e65f59f8..702861c68874 100644
--- a/arch/csky/include/asm/tlb.h
+++ b/arch/csky/include/asm/tlb.h
@@ -4,21 +4,6 @@
 #define __ASM_CSKY_TLB_H
 
 #include <asm/cacheflush.h>
-
-#define tlb_start_vma(tlb, vma) \
-	do { \
-		if (!(tlb)->fullmm) \
-			flush_cache_range(vma, (vma)->vm_start, (vma)->vm_end); \
-	}  while (0)
-
-#define tlb_end_vma(tlb, vma) \
-	do { \
-		if (!(tlb)->fullmm) \
-			flush_tlb_range(vma, (vma)->vm_start, (vma)->vm_end); \
-	}  while (0)
-
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
 #include <asm-generic/tlb.h>
 
 #endif /* __ASM_CSKY_TLB_H */
diff --git a/arch/csky/include/asm/traps.h b/arch/csky/include/asm/traps.h
index 421a4195e2fe..6bbbbe43165f 100644
--- a/arch/csky/include/asm/traps.h
+++ b/arch/csky/include/asm/traps.h
@@ -3,6 +3,8 @@
 #ifndef __ASM_CSKY_TRAPS_H
 #define __ASM_CSKY_TRAPS_H
 
+#include <linux/linkage.h>
+
 #define VEC_RESET	0
 #define VEC_ALIGN	1
 #define VEC_ACCESS	2
@@ -40,4 +42,19 @@ do {					\
 
 void csky_alignment(struct pt_regs *regs);
 
+asmlinkage void do_trap_unknown(struct pt_regs *regs);
+asmlinkage void do_trap_zdiv(struct pt_regs *regs);
+asmlinkage void do_trap_buserr(struct pt_regs *regs);
+asmlinkage void do_trap_misaligned(struct pt_regs *regs);
+asmlinkage void do_trap_bkpt(struct pt_regs *regs);
+asmlinkage void do_trap_illinsn(struct pt_regs *regs);
+asmlinkage void do_trap_fpe(struct pt_regs *regs);
+asmlinkage void do_trap_priv(struct pt_regs *regs);
+asmlinkage void trap_c(struct pt_regs *regs);
+
+asmlinkage void do_notify_resume(struct pt_regs *regs,
+			unsigned long thread_info_flags);
+
+asmlinkage void do_page_fault(struct pt_regs *regs);
+
 #endif /* __ASM_CSKY_TRAPS_H */
diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h
index ac83823fc437..2e927c21d8a1 100644
--- a/arch/csky/include/asm/uaccess.h
+++ b/arch/csky/include/asm/uaccess.h
@@ -3,17 +3,6 @@
 #ifndef __ASM_CSKY_UACCESS_H
 #define __ASM_CSKY_UACCESS_H
 
-#define user_addr_max() \
-	(uaccess_kernel() ? KERNEL_DS.seg : get_fs().seg)
-
-static inline int __access_ok(unsigned long addr, unsigned long size)
-{
-	unsigned long limit = current_thread_info()->addr_limit.seg;
-
-	return ((addr < limit) && ((addr + size) < limit));
-}
-#define __access_ok __access_ok
-
 /*
  * __put_user_fn
  */
@@ -209,13 +198,6 @@ unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n);
 unsigned long __clear_user(void __user *to, unsigned long n);
 #define __clear_user __clear_user
 
-long __strncpy_from_user(char *dst, const char *src, long count);
-#define __strncpy_from_user __strncpy_from_user
-
-long __strnlen_user(const char *s, long n);
-#define __strnlen_user __strnlen_user
-
-#include <asm/segment.h>
 #include <asm-generic/uaccess.h>
 
 #endif /* __ASM_CSKY_UACCESS_H */
diff --git a/arch/csky/include/asm/unistd.h b/arch/csky/include/asm/unistd.h
index 9cf97de9a26d..2c2c24de95d8 100644
--- a/arch/csky/include/asm/unistd.h
+++ b/arch/csky/include/asm/unistd.h
@@ -2,4 +2,7 @@
 
 #include <uapi/asm/unistd.h>
 
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_CLONE
+
 #define NR_syscalls (__NR_syscalls)
diff --git a/arch/csky/include/asm/vdso.h b/arch/csky/include/asm/vdso.h
index bdce581b5fcb..181a15edafe8 100644
--- a/arch/csky/include/asm/vdso.h
+++ b/arch/csky/include/asm/vdso.h
@@ -5,11 +5,6 @@
 
 #include <linux/types.h>
 
-#ifndef GENERIC_TIME_VSYSCALL
-struct vdso_data {
-};
-#endif
-
 /*
  * The VDSO symbols are mapped into Linux so we can just use regular symbol
  * addressing to get their offsets in userspace. The symbols are mapped at an
diff --git a/arch/csky/include/asm/vdso/clocksource.h b/arch/csky/include/asm/vdso/clocksource.h
deleted file mode 100644
index dfca7b4724b7..000000000000
--- a/arch/csky/include/asm/vdso/clocksource.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef __ASM_VDSO_CSKY_CLOCKSOURCE_H
-#define __ASM_VDSO_CSKY_CLOCKSOURCE_H
-
-#define VDSO_ARCH_CLOCKMODES	\
-	VDSO_CLOCKMODE_ARCHTIMER
-
-#endif /* __ASM_VDSO_CSKY_CLOCKSOURCE_H */
diff --git a/arch/csky/include/asm/vdso/gettimeofday.h b/arch/csky/include/asm/vdso/gettimeofday.h
deleted file mode 100644
index 6c4f1446944f..000000000000
--- a/arch/csky/include/asm/vdso/gettimeofday.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef __ASM_VDSO_CSKY_GETTIMEOFDAY_H
-#define __ASM_VDSO_CSKY_GETTIMEOFDAY_H
-
-#ifndef __ASSEMBLY__
-
-#include <asm/barrier.h>
-#include <asm/unistd.h>
-#include <abi/regdef.h>
-#include <uapi/linux/time.h>
-
-#define VDSO_HAS_CLOCK_GETRES	1
-
-static __always_inline
-int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
-			  struct timezone *_tz)
-{
-	register struct __kernel_old_timeval *tv asm("a0") = _tv;
-	register struct timezone *tz asm("a1") = _tz;
-	register long ret asm("a0");
-	register long nr asm(syscallid) = __NR_gettimeofday;
-
-	asm volatile ("trap 0\n"
-		      : "=r" (ret)
-		      : "r"(tv), "r"(tz), "r"(nr)
-		      : "memory");
-
-	return ret;
-}
-
-static __always_inline
-long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
-{
-	register clockid_t clkid asm("a0") = _clkid;
-	register struct __kernel_timespec *ts asm("a1") = _ts;
-	register long ret asm("a0");
-	register long nr asm(syscallid) = __NR_clock_gettime64;
-
-	asm volatile ("trap 0\n"
-		      : "=r" (ret)
-		      : "r"(clkid), "r"(ts), "r"(nr)
-		      : "memory");
-
-	return ret;
-}
-
-static __always_inline
-long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
-{
-	register clockid_t clkid asm("a0") = _clkid;
-	register struct old_timespec32 *ts asm("a1") = _ts;
-	register long ret asm("a0");
-	register long nr asm(syscallid) = __NR_clock_gettime;
-
-	asm volatile ("trap 0\n"
-		      : "=r" (ret)
-		      : "r"(clkid), "r"(ts), "r"(nr)
-		      : "memory");
-
-	return ret;
-}
-
-static __always_inline
-int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
-{
-	register clockid_t clkid asm("a0") = _clkid;
-	register struct __kernel_timespec *ts asm("a1") = _ts;
-	register long ret asm("a0");
-	register long nr asm(syscallid) = __NR_clock_getres_time64;
-
-	asm volatile ("trap 0\n"
-		      : "=r" (ret)
-		      : "r"(clkid), "r"(ts), "r"(nr)
-		      : "memory");
-
-	return ret;
-}
-
-static __always_inline
-int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
-{
-	register clockid_t clkid asm("a0") = _clkid;
-	register struct old_timespec32 *ts asm("a1") = _ts;
-	register long ret asm("a0");
-	register long nr asm(syscallid) = __NR_clock_getres;
-
-	asm volatile ("trap 0\n"
-		      : "=r" (ret)
-		      : "r"(clkid), "r"(ts), "r"(nr)
-		      : "memory");
-
-	return ret;
-}
-
-uint64_t csky_pmu_read_cc(void);
-static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
-						 const struct vdso_data *vd)
-{
-#ifdef CONFIG_CSKY_PMU_V1
-	return csky_pmu_read_cc();
-#else
-	return 0;
-#endif
-}
-
-static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
-{
-	return _vdso_data;
-}
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ASM_VDSO_CSKY_GETTIMEOFDAY_H */
diff --git a/arch/csky/include/asm/vdso/processor.h b/arch/csky/include/asm/vdso/processor.h
deleted file mode 100644
index 39a6b561d0cc..000000000000
--- a/arch/csky/include/asm/vdso/processor.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#ifndef __ASM_VDSO_CSKY_PROCESSOR_H
-#define __ASM_VDSO_CSKY_PROCESSOR_H
-
-#ifndef __ASSEMBLY__
-
-#define cpu_relax()	barrier()
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __ASM_VDSO_CSKY_PROCESSOR_H */
diff --git a/arch/csky/include/asm/vdso/vsyscall.h b/arch/csky/include/asm/vdso/vsyscall.h
deleted file mode 100644
index c276211a7c4d..000000000000
--- a/arch/csky/include/asm/vdso/vsyscall.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef __ASM_VDSO_CSKY_VSYSCALL_H
-#define __ASM_VDSO_CSKY_VSYSCALL_H
-
-#ifndef __ASSEMBLY__
-
-#include <vdso/datapage.h>
-
-extern struct vdso_data *vdso_data;
-
-static __always_inline struct vdso_data *__csky_get_k_vdso_data(void)
-{
-	return vdso_data;
-}
-#define __arch_get_k_vdso_data __csky_get_k_vdso_data
-
-#include <asm-generic/vdso/vsyscall.h>
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ASM_VDSO_CSKY_VSYSCALL_H */
