Diffstat (limited to 'arch/hexagon/include/asm')
25 files changed, 222 insertions(+), 447 deletions(-)
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 84bb1ed1b931..3ece3c93fe08 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -1,39 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-generic-y += barrier.h
-generic-y += bug.h
-generic-y += bugs.h
-generic-y += compat.h
-generic-y += current.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
 generic-y += extable.h
-generic-y += fb.h
-generic-y += ftrace.h
-generic-y += hardirq.h
-generic-y += hw_irq.h
 generic-y += iomap.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
-generic-y += kmap_types.h
-generic-y += kprobes.h
 generic-y += kvm_para.h
-generic-y += local.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
-generic-y += pci.h
-generic-y += percpu.h
-generic-y += preempt.h
-generic-y += sections.h
-generic-y += serial.h
-generic-y += shmparam.h
-generic-y += topology.h
-generic-y += trace_clock.h
-generic-y += unaligned.h
-generic-y += vga.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 12cd9231c4b8..2447d083c432 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -12,11 +12,9 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
-#define ATOMIC_INIT(i)	{ (i) }
-
 /* Normal writes in our arch don't clear lock reservations */
 
-static inline void atomic_set(atomic_t *v, int new)
+static inline void arch_atomic_set(atomic_t *v, int new)
 {
 	asm volatile(
 		"1:	r6 = memw_locked(%0);\n"
@@ -28,62 +26,12 @@ static inline void atomic_set(atomic_t *v, int new)
 	);
 }
 
-#define atomic_set_release(v, i) atomic_set((v), (i))
-
-/**
- * atomic_read - reads a word, atomically
- * @v: pointer to atomic value
- *
- * Assumes all word reads on our architecture are atomic.
- */
-#define atomic_read(v)	READ_ONCE((v)->counter)
-
-/**
- * atomic_xchg - atomic
- * @v: pointer to memory to change
- * @new: new value (technically passed in a register -- see xchg)
- */
-#define atomic_xchg(v, new)	(xchg(&((v)->counter), (new)))
-
+#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))
 
-/**
- * atomic_cmpxchg - atomic compare-and-exchange values
- * @v: pointer to value to change
- * @old: desired old value to match
- * @new: new value to put in
- *
- * Parameters are then pointer, value-in-register, value-in-register,
- * and the output is the old value.
- *
- * Apparently this is complicated for archs that don't support
- * the memw_locked like we do (or it's broken or whatever).
- *
- * Kind of the lynchpin of the rest of the generically defined routines.
- * Remember V2 had that bug with dotnew predicate set by memw_locked.
- *
- * "old" is "expected" old val, __oldval is actual old value
- */
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-	int __oldval;
-
-	asm volatile(
-		"1:	%0 = memw_locked(%1);\n"
-		"	{ P0 = cmp.eq(%0,%2);\n"
-		"	  if (!P0.new) jump:nt 2f; }\n"
-		"	memw_locked(%1,P0) = %3;\n"
-		"	if (!P0) jump 1b;\n"
-		"2:\n"
-		: "=&r" (__oldval)
-		: "r" (&v->counter), "r" (old), "r" (new)
-		: "memory", "p0"
-	);
-
-	return __oldval;
-}
+#define arch_atomic_read(v)	READ_ONCE((v)->counter)
 
 #define ATOMIC_OP(op)							\
-static inline void atomic_##op(int i, atomic_t *v)			\
+static inline void arch_atomic_##op(int i, atomic_t *v)		\
 {									\
 	int output;							\
 									\
@@ -91,7 +39,7 @@ static inline void atomic_##op(int i, atomic_t *v)	\
 	"1:	%0 = memw_locked(%1);\n"				\
 	"	%0 = "#op "(%0,%2);\n"					\
 	"	memw_locked(%1,P3)=%0;\n"				\
-	"	if !P3 jump 1b;\n"					\
+	"	if (!P3) jump 1b;\n"					\
 	: "=&r" (output)						\
 	: "r" (&v->counter), "r" (i)					\
 	: "memory", "p3"						\
@@ -99,7 +47,7 @@ static inline void atomic_##op(int i, atomic_t *v)	\
 }									\
 
 #define ATOMIC_OP_RETURN(op)						\
-static inline int atomic_##op##_return(int i, atomic_t *v)		\
+static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
 {									\
 	int output;							\
 									\
@@ -107,7 +55,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	"1:	%0 = memw_locked(%1);\n"				\
 	"	%0 = "#op "(%0,%2);\n"					\
 	"	memw_locked(%1,P3)=%0;\n"				\
-	"	if !P3 jump 1b;\n"					\
+	"	if (!P3) jump 1b;\n"					\
 	: "=&r" (output)						\
 	: "r" (&v->counter), "r" (i)					\
 	: "memory", "p3"						\
@@ -116,7 +64,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 }
 
 #define ATOMIC_FETCH_OP(op)						\
-static inline int atomic_fetch_##op(int i, atomic_t *v)		\
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
 {									\
 	int output, val;						\
 									\
@@ -124,7 +72,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
 	"1:	%0 = memw_locked(%2);\n"				\
 	"	%1 = "#op "(%0,%3);\n"					\
 	"	memw_locked(%2,P3)=%1;\n"				\
-	"	if !P3 jump 1b;\n"					\
+	"	if (!P3) jump 1b;\n"					\
 	: "=&r" (output), "=&r" (val)					\
 	: "r" (&v->counter), "r" (i)					\
 	: "memory", "p3"						\
@@ -137,6 +85,11 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define arch_atomic_add_return			arch_atomic_add_return
+#define arch_atomic_sub_return			arch_atomic_sub_return
+#define arch_atomic_fetch_add			arch_atomic_fetch_add
+#define arch_atomic_fetch_sub			arch_atomic_fetch_sub
+
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
@@ -144,22 +97,16 @@ ATOMIC_OPS(and)
 ATOMIC_OPS(or)
 ATOMIC_OPS(xor)
 
+#define arch_atomic_fetch_and			arch_atomic_fetch_and
+#define arch_atomic_fetch_or			arch_atomic_fetch_or
+#define arch_atomic_fetch_xor			arch_atomic_fetch_xor
+
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-/**
- * atomic_fetch_add_unless - add unless the number is a given value
- * @v: pointer to value
- * @a: amount to add
- * @u: unless value is equal to u
- *
- * Returns old value.
- *
- */
-
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int __oldval;
 	register int tmp;
@@ -173,7 +120,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 	"	}"
 	"	memw_locked(%2, p3) = %1;"
 	"	{"
-	"		if !p3 jump 1b;"
+	"		if (!p3) jump 1b;"
 	"	}"
 	"2:"
 	: "=&r" (__oldval), "=&r" (tmp)
@@ -182,6 +129,6 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 	);
 	return __oldval;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
 
 #endif
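All of the Hexagon atomics above share the same memw_locked load-locked/store-conditional retry loop; the rename to arch_atomic_* just hands the raw ops to the generic instrumentation wrappers. As a rough illustration of the contract only (not the kernel code), here is what arch_atomic_fetch_add_unless() computes, expressed with GCC/Clang __atomic builtins; the CAS-retry structure mirrors the "if (!P3) jump 1b" loops:

/*
 * Illustration only: the semantics of arch_atomic_fetch_add_unless,
 * using a compare-and-swap loop instead of Hexagon's memw_locked pair.
 * Returns the value observed before any add took place.
 */
#include <stdio.h>

static int fetch_add_unless(int *v, int a, int u)
{
	int old = __atomic_load_n(v, __ATOMIC_RELAXED);

	do {
		if (old == u)		/* matched the "unless" value: don't add */
			break;
	} while (!__atomic_compare_exchange_n(v, &old, old + a, 1,
					      __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
	return old;
}

int main(void)
{
	int counter = 5;

	printf("%d\n", fetch_add_unless(&counter, 1, 0));	/* 5; counter -> 6 */
	printf("%d\n", fetch_add_unless(&counter, 1, 6));	/* 6; counter unchanged */
	return 0;
}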
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 47384b094b94..160d8f37fa1a 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -38,7 +38,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
 	"1:	R12 = memw_locked(R10);\n"
 	"	{ P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
 	"	memw_locked(R10,P1) = R12;\n"
-	"	{if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
 	: "=&r" (oldval)
 	: "r" (addr), "r" (nr)
 	: "r10", "r11", "r12", "p0", "p1", "memory"
@@ -62,7 +62,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
 	"1:	R12 = memw_locked(R10);\n"
 	"	{ P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
 	"	memw_locked(R10,P1) = R12;\n"
-	"	{if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
 	: "=&r" (oldval)
 	: "r" (addr), "r" (nr)
 	: "r10", "r11", "r12", "p0", "p1", "memory"
@@ -88,7 +88,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
 	"1:	R12 = memw_locked(R10);\n"
 	"	{ P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
 	"	memw_locked(R10,P1) = R12;\n"
-	"	{if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
 	: "=&r" (oldval)
 	: "r" (addr), "r" (nr)
 	: "r10", "r11", "r12", "p0", "p1", "memory"
@@ -127,38 +127,45 @@ static inline void change_bit(int nr, volatile void *addr)
  * be atomic, particularly for things like slab_lock and slab_unlock.
  *
 */
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
	test_and_clear_bit(nr, addr);
 }
 
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
	test_and_set_bit(nr, addr);
 }
 
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
	test_and_change_bit(nr, addr);
 }
 
 /* Apparently, at least some of these are allowed to be non-atomic */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
	return test_and_clear_bit(nr, addr);
 }
 
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
	return test_and_set_bit(nr, addr);
 }
 
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
	return test_and_change_bit(nr, addr);
 }
 
-static inline int __test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
	int retval;
 
@@ -172,7 +179,20 @@ static inline int __test_bit(int nr, const volatile unsigned long *addr)
	return retval;
 }
 
-#define test_bit(nr, addr) __test_bit(nr, addr)
+static __always_inline bool
+arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+	int retval;
+
+	asm volatile(
+	"{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
+	: "=&r" (retval)
+	: "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
+	: "p0", "memory"
+	);
+
+	return retval;
+}
 
 /*
  * ffz - find first zero in word.
@@ -223,7 +243,7 @@ static inline int ffs(int x)
	int r;
 
	asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
-	    "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
+	    "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
	    : "=&r" (r)
	    : "r" (x)
	    : "p0");
@@ -271,7 +291,7 @@ static inline unsigned long __fls(unsigned long word)
 }
 
 #include <asm-generic/bitops/lock.h>
-#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
 
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
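On Hexagon even the nominally non-atomic arch___*_bit() helpers fall through to the atomic memw_locked versions. For reference, this is what test_and_set_bit() computes, sketched in portable C with an atomic fetch-or; the BIT_WORD indexing is the same one arch_test_bit_acquire() uses above:

/*
 * Illustration only: test_and_set_bit() semantics in portable C.
 * The kernel version does this in one memw_locked retry loop and
 * derives the old bit value with mux(P0,#1,#0).
 */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))

static int test_and_set_bit(unsigned long nr, unsigned long *addr)
{
	unsigned long old = __atomic_fetch_or(&addr[BIT_WORD(nr)],
					      BIT_MASK(nr), __ATOMIC_SEQ_CST);
	return (old & BIT_MASK(nr)) != 0;	/* previous value of the bit */
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };

	printf("%d\n", test_and_set_bit(40, bitmap));	/* 0: bit was clear */
	printf("%d\n", test_and_set_bit(40, bitmap));	/* 1: bit already set */
	return 0;
}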
diff --git a/arch/hexagon/include/asm/cacheflush.h b/arch/hexagon/include/asm/cacheflush.h
index fb447de45d54..bfff514a81c8 100644
--- a/arch/hexagon/include/asm/cacheflush.h
+++ b/arch/hexagon/include/asm/cacheflush.h
@@ -18,36 +18,24 @@
  *  - flush_cache_range(vma, start, end) flushes a range of pages
  *  - flush_icache_range(start, end) flush a range of instructions
  *  - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
- *  - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
+ *  - flush_icache_pages(vma, pg, nr) flushes(invalidates) nr pages for icache
  *
  *  Need to doublecheck which one is really needed for ptrace stuff to work.
 */
 #define LINESIZE	32
 #define LINEBITS	5
 
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-#define flush_icache_page(vma, pg)		do { } while (0)
-#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)
-#define flush_cache_vmap(start, end)		do { } while (0)
-#define flush_cache_vunmap(start, end)		do { } while (0)
-
 /*
  * Flush Dcache range through current map.
  */
 extern void flush_dcache_range(unsigned long start, unsigned long end);
+#define flush_dcache_range flush_dcache_range
 
 /*
  * Flush Icache range through current map.
  */
 extern void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_range flush_icache_range
 
 /*
  * Memory-management related flushes are there to ensure in non-physically
@@ -70,14 +58,19 @@ extern void flush_cache_all_hexagon(void);
  * clean the cache when the PTE is set.
  *
  */
-static inline void update_mmu_cache(struct vm_area_struct *vma,
-					unsigned long address, pte_t *ptep)
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+		struct vm_area_struct *vma, unsigned long address,
+		pte_t *ptep, unsigned int nr)
 {
	/* generic_ptrace_pokedata doesn't wind up here, does it? */
 }
 
+#define update_mmu_cache(vma, addr, ptep) \
+	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, void *src, int len);
+#define copy_to_user_page copy_to_user_page
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
@@ -85,4 +78,6 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
 extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);
 
+#include <asm-generic/cacheflush.h>
+
 #endif
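The flush_icache_range() override survives because Hexagon's I-cache does not snoop D-side writes: after code is written through the data side, the instruction side must be resynchronized before it executes. A user-space analogue of that contract, using the GCC/Clang cache-sync builtin (illustration only, and the destination buffer would need to be executable for the patched code to actually run):

/*
 * Illustration only: write instructions through the D-side, then make
 * the I-side coherent, which is what flush_icache_range() does in the
 * kernel. __builtin___clear_cache is the portable compiler hook.
 */
#include <string.h>

void patch_and_sync(void *code, const void *insns, size_t len)
{
	memcpy(code, insns, len);			/* writes land in the D-cache */
	__builtin___clear_cache((char *)code,
				(char *)code + len);	/* resync the I-side */
}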
diff --git a/arch/hexagon/include/asm/checksum.h b/arch/hexagon/include/asm/checksum.h
index a5c42f4614c1..4bc6ad96c4c5 100644
--- a/arch/hexagon/include/asm/checksum.h
+++ b/arch/hexagon/include/asm/checksum.h
@@ -10,17 +10,6 @@
 unsigned int do_csum(const void *voidptr, int len);
 
 /*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-#define csum_partial_copy_nocheck csum_partial_copy_nocheck
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
-					int len, __wsum sum);
-
-/*
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index 6091322c3af9..bf6cf5579cf4 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -9,7 +9,7 @@
 #define _ASM_CMPXCHG_H
 
 /*
- * __xchg - atomically exchange a register and a memory location
+ * __arch_xchg - atomically exchange a register and a memory location
  * @x: value to swap
  * @ptr: pointer to memory
  * @size: size of the value
@@ -19,8 +19,8 @@
  * Note:  there was an errata for V2 about .new's and memw_locked.
  *
 */
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
-				   int size)
+static inline unsigned long
+__arch_xchg(unsigned long x, volatile void *ptr, int size)
 {
	unsigned long retval;
 
@@ -30,7 +30,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
	__asm__ __volatile__ (
	"1:	%0 = memw_locked(%1);\n"    /* load into retval */
	"	memw_locked(%1,P0) = %2;\n" /* store into memory */
-	"	if !P0 jump 1b;\n"
+	"	if (!P0) jump 1b;\n"
	: "=&r" (retval)
	: "r" (ptr), "r" (x)
	: "memory", "p0"
@@ -42,8 +42,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 * Atomically swap the contents of a register with memory.  Should be atomic
 * between multiple CPU's and within interrupts on the same CPU.
 */
-#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
-	sizeof(*(ptr))))
+#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__arch_xchg((unsigned long)(v), (ptr), \
+						sizeof(*(ptr))))
 
 /*
 * see rt-mutex-design.txt; cmpxchg supposedly checks if *ptr == A and swaps.
@@ -51,7 +51,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 * variable casting.
 */
 
-#define cmpxchg(ptr, old, new)					\
+#define arch_cmpxchg(ptr, old, new)				\
 ({								\
	__typeof__(ptr) __ptr = (ptr);				\
	__typeof__(*(ptr)) __old = (old);			\
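arch_cmpxchg() keeps the classic value-returning contract: it returns the old value, and the store happens only when that old value matched. A hedged sketch of the same contract in portable C (not the kernel macro, which works on arbitrary-typed lvalues):

/*
 * Illustration only: value-returning compare-and-exchange. The caller
 * knows the swap succeeded when the returned value equals 'old'.
 */
#include <stdio.h>

static unsigned long cmpxchg_demo(unsigned long *ptr, unsigned long old,
				  unsigned long new)
{
	unsigned long expected = old;

	__atomic_compare_exchange_n(ptr, &expected, new, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return expected;	/* the value that was actually in memory */
}

int main(void)
{
	unsigned long v = 1;

	printf("%lu\n", cmpxchg_demo(&v, 1, 2));	/* 1: swap happened, v == 2 */
	printf("%lu\n", cmpxchg_demo(&v, 1, 3));	/* 2: no swap, v still 2 */
	return 0;
}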
diff --git a/arch/hexagon/include/asm/elf.h b/arch/hexagon/include/asm/elf.h
index 9efa203e1164..5bfdd9b147fd 100644
--- a/arch/hexagon/include/asm/elf.h
+++ b/arch/hexagon/include/asm/elf.h
@@ -181,7 +181,6 @@ do {					\
 */
 #define ELF_PLAT_INIT(regs, load_addr)	do { } while (0)
 
-#define USE_ELF_CORE_DUMP
 #define CORE_DUMP_USE_REGSET
 
 /* Hrm is this going to cause problems for changing PAGE_SIZE?  */
diff --git a/arch/hexagon/include/asm/fixmap.h b/arch/hexagon/include/asm/fixmap.h
index 933dac167504..920660a04aa4 100644
--- a/arch/hexagon/include/asm/fixmap.h
+++ b/arch/hexagon/include/asm/fixmap.h
@@ -15,8 +15,4 @@
 
 #include <asm-generic/fixmap.h>
 
-#define kmap_get_fixmap_pte(vaddr) \
-	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), \
-				(vaddr)), (vaddr)), (vaddr))
-
 #endif
diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
index cb635216a732..9fb00a0ae89f 100644
--- a/arch/hexagon/include/asm/futex.h
+++ b/arch/hexagon/include/asm/futex.h
@@ -16,12 +16,12 @@
	/* For example: %1 = %4 */ \
	insn \
	"2: memw_locked(%3,p2) = %1;\n" \
-	"   if !p2 jump 1b;\n" \
+	"   if (!p2) jump 1b;\n" \
	"   %1 = #0;\n" \
	"3:\n" \
	".section .fixup,\"ax\"\n" \
	"4: %1 = #%5;\n" \
-	"   jump 3b\n" \
+	"   jump ##3b\n" \
	".previous\n" \
	".section __ex_table,\"a\"\n" \
	".long 1b,4b,2b,4b\n" \
@@ -36,7 +36,8 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
	int oldval = 0, ret;
 
-	pagefault_disable();
+	if (!access_ok(uaddr, sizeof(u32)))
+		return -EFAULT;
 
	switch (op) {
	case FUTEX_OP_SET:
@@ -62,8 +63,6 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
		ret = -ENOSYS;
	}
 
-	pagefault_enable();
-
	if (!ret)
		*oval = oldval;
 
@@ -84,14 +83,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
	"1: %1 = memw_locked(%3)\n"
	"   {\n"
	"      p2 = cmp.eq(%1,%4)\n"
-	"      if !p2.new jump:NT 3f\n"
+	"      if (!p2.new) jump:NT 3f\n"
	"   }\n"
	"2: memw_locked(%3,p2) = %5\n"
-	"   if !p2 jump 1b\n"
+	"   if (!p2) jump 1b\n"
	"3:\n"
	".section .fixup,\"ax\"\n"
	"4: %0 = #%6\n"
-	"   jump 3b\n"
+	"   jump ##3b\n"
	".previous\n"
	".section __ex_table,\"a\"\n"
	".long 1b,4b,2b,4b\n"
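The futex op handler is one more LL/SC loop: load the user word, apply the requested operation, store conditionally, and report the value seen beforehand through *oval. A sketch of that dispatch in portable C, minus the user-access and exception-fixup machinery (the enum values here are illustrative ordinals, not the real FUTEX_OP_* encodings):

/*
 * Illustration only: the read-modify-write contract behind
 * arch_futex_atomic_op_inuser(). Each op atomically applies its
 * operation and returns the prior value via *oval.
 */
#include <stdio.h>

enum { FUTEX_OP_SET, FUTEX_OP_ADD, FUTEX_OP_OR, FUTEX_OP_ANDN, FUTEX_OP_XOR };

static int futex_op_demo(int op, int oparg, int *oval, int *uaddr)
{
	switch (op) {
	case FUTEX_OP_SET:
		*oval = __atomic_exchange_n(uaddr, oparg, __ATOMIC_SEQ_CST);
		return 0;
	case FUTEX_OP_ADD:
		*oval = __atomic_fetch_add(uaddr, oparg, __ATOMIC_SEQ_CST);
		return 0;
	case FUTEX_OP_OR:
		*oval = __atomic_fetch_or(uaddr, oparg, __ATOMIC_SEQ_CST);
		return 0;
	case FUTEX_OP_ANDN:
		*oval = __atomic_fetch_and(uaddr, ~oparg, __ATOMIC_SEQ_CST);
		return 0;
	case FUTEX_OP_XOR:
		*oval = __atomic_fetch_xor(uaddr, oparg, __ATOMIC_SEQ_CST);
		return 0;
	}
	return -1;	/* stands in for -ENOSYS */
}

int main(void)
{
	int word = 4, old;

	futex_op_demo(FUTEX_OP_ADD, 3, &old, &word);
	printf("old=%d new=%d\n", old, word);	/* old=4 new=7 */
	return 0;
}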
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index 539e3efcf39c..522d321ea85a 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -27,8 +27,6 @@
 extern int remap_area_pages(unsigned long start, unsigned long phys_addr,
				unsigned long end, unsigned long flags);
 
-extern void iounmap(const volatile void __iomem *addr);
-
 /* Defined in lib/io.c, needed for smc91x driver. */
 extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
 extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
@@ -61,13 +59,6 @@ static inline void *phys_to_virt(unsigned long address)
 }
 
 /*
- * convert a physical pointer to a virtual kernel pointer for
- * /dev/mem access.
- */
-#define xlate_dev_kmem_ptr(p)    __va(p)
-#define xlate_dev_mem_ptr(p)    __va(p)
-
-/*
 * IO port access primitives.  Hexagon doesn't have special IO access
 * instructions; all I/O is memory mapped.
 *
@@ -171,9 +162,11 @@ static inline void writel(u32 data, volatile void __iomem *addr)
 #define writew_relaxed __raw_writew
 #define writel_relaxed __raw_writel
 
-void __iomem *ioremap(unsigned long phys_addr, unsigned long size);
-#define ioremap_nocache ioremap
-
+/*
+ * I/O memory mapping functions.
+ */
+#define _PAGE_IOREMAP (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+		       (__HEXAGON_C_DEV << 6))
 
 #define __raw_writel writel
 
@@ -309,6 +302,31 @@ static inline void outsl(unsigned long port, const void *buffer, int count)
	}
 }
 
+/*
+ * These defines are necessary to use the generic io.h for filling in
+ * the missing parts of the API contract. This is because the platform
+ * uses (inline) functions rather than defines and the generic helper
+ * fills in the undefined.
+ */
+#define virt_to_phys virt_to_phys
+#define phys_to_virt phys_to_virt
+#define memset_io memset_io
+#define memcpy_fromio memcpy_fromio
+#define memcpy_toio memcpy_toio
+#define readb readb
+#define readw readw
+#define readl readl
+#define writeb writeb
+#define writew writew
+#define writel writel
+#define insb insb
+#define insw insw
+#define insl insl
+#define outsb outsb
+#define outsw outsw
+#define outsl outsl
+#include <asm-generic/io.h>
+
 #endif /* __KERNEL__ */
 
 #endif
diff --git a/arch/hexagon/include/asm/irq.h b/arch/hexagon/include/asm/irq.h
index 1f7f1292f701..a60d26754caa 100644
--- a/arch/hexagon/include/asm/irq.h
+++ b/arch/hexagon/include/asm/irq.h
@@ -20,4 +20,7 @@
 
 #include <asm-generic/irq.h>
 
+struct pt_regs;
+void arch_do_IRQ(struct pt_regs *);
+
 #endif
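Since Hexagon has no port I/O instructions, readl()/writel() ultimately reduce to volatile 32-bit memory accesses that the compiler can neither cache nor elide. A minimal sketch of that core, with a made-up device address (DEVICE_BASE is hypothetical; executing this against a real bus requires an actual mapping):

/*
 * Illustration only: the volatile-access core of memory-mapped I/O.
 */
#include <stdint.h>

#define DEVICE_BASE	((volatile uint32_t *)0x10000000u)	/* hypothetical */

static inline uint32_t mmio_read32(volatile uint32_t *addr)
{
	return *addr;		/* volatile load: always hits the bus */
}

static inline void mmio_write32(volatile uint32_t *addr, uint32_t val)
{
	*addr = val;		/* volatile store: never elided or reordered away */
}

void ack_device_irq(void)
{
	uint32_t status = mmio_read32(DEVICE_BASE);	/* read status register */

	mmio_write32(DEVICE_BASE + 1, status);		/* e.g. write-1-to-clear */
}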
diff --git a/arch/hexagon/include/asm/mmu_context.h b/arch/hexagon/include/asm/mmu_context.h
index cdc4adc0300a..81947764c47d 100644
--- a/arch/hexagon/include/asm/mmu_context.h
+++ b/arch/hexagon/include/asm/mmu_context.h
@@ -15,39 +15,13 @@
 #include <asm/pgalloc.h>
 #include <asm/mem-layout.h>
 
-static inline void destroy_context(struct mm_struct *mm)
-{
-}
-
 /*
 * VM port hides all TLB management, so "lazy TLB" isn't very
 * meaningful.  Even for ports to architectures with visble TLBs,
 * this is almost invariably a null function.
+ *
+ * mm->context is set up by pgd_alloc, so no init_new_context required.
 */
-static inline void enter_lazy_tlb(struct mm_struct *mm,
-	struct task_struct *tsk)
-{
-}
-
-/*
- * Architecture-specific actions, if any, for memory map deactivation.
- */
-static inline void deactivate_mm(struct task_struct *tsk,
-	struct mm_struct *mm)
-{
-}
-
-/**
- * init_new_context - initialize context related info for new mm_struct instance
- * @tsk: pointer to a task struct
- * @mm: pointer to a new mm struct
- */
-static inline int init_new_context(struct task_struct *tsk,
-					struct mm_struct *mm)
-{
-	/* mm->context is set up by pgd_alloc */
-	return 0;
-}
 
 /*
 * Switch active mm context
@@ -74,6 +48,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 /*
 * Activate new memory map for task
 */
+#define activate_mm activate_mm
 static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 {
	unsigned long flags;
@@ -86,4 +61,6 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 /*  Generic hooks for arch_dup_mmap and arch_exit_mmap  */
 #include <asm-generic/mm_hooks.h>
 
+#include <asm-generic/mmu_context.h>
+
 #endif
diff --git a/arch/hexagon/include/asm/page.h b/arch/hexagon/include/asm/page.h
index ee31f36f48f3..8a6af57274c2 100644
--- a/arch/hexagon/include/asm/page.h
+++ b/arch/hexagon/include/asm/page.h
@@ -13,27 +13,22 @@
 /*  This is probably not the most graceful way to handle this.  */
 
 #ifdef CONFIG_PAGE_SIZE_4KB
-#define PAGE_SHIFT 12
 #define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_4KB
 #endif
 
 #ifdef CONFIG_PAGE_SIZE_16KB
-#define PAGE_SHIFT 14
 #define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_16KB
 #endif
 
 #ifdef CONFIG_PAGE_SIZE_64KB
-#define PAGE_SHIFT 16
 #define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_64KB
 #endif
 
 #ifdef CONFIG_PAGE_SIZE_256KB
-#define PAGE_SHIFT 18
 #define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_256KB
 #endif
 
 #ifdef CONFIG_PAGE_SIZE_1MB
-#define PAGE_SHIFT 20
 #define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_1MB
 #endif
 
@@ -50,6 +45,7 @@
 #define HVM_HUGEPAGE_SIZE 0x5
 #endif
 
+#define PAGE_SHIFT CONFIG_PAGE_SHIFT
 #define PAGE_SIZE  (1UL << PAGE_SHIFT)
 #define PAGE_MASK  (~((1 << PAGE_SHIFT) - 1))
 
@@ -78,6 +74,9 @@ typedef struct page *pgtable_t;
 #define __pgd(x)     ((pgd_t) { (x) })
 #define __pgprot(x)  ((pgprot_t) { (x) })
 
+/* Needed for PAGE_OFFSET used in the macro right below */
+#include <asm/mem-layout.h>
+
 /*
 * We need a __pa and a __va routine for kernel space.
 * MIPS says they're only used during mem_init.
@@ -93,10 +92,8 @@ struct page;
 #define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr)))
 
 /* Default vm area behavior is non-executable.  */
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
-			       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC
 
-#define pfn_valid(pfn) ((pfn) < max_mapnr)
 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
 /*  Need to not use a define for linesize; may move this to another file.  */
@@ -127,18 +124,13 @@ static inline void clear_page(void *page)
 */
 #define page_to_phys(page)      (page_to_pfn(page) << PAGE_SHIFT)
 
-#define virt_to_pfn(kaddr)      (__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn)        __va((pfn) << PAGE_SHIFT)
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+	return __pa(kaddr) >> PAGE_SHIFT;
+}
 
 #define page_to_virt(page)	__va(page_to_phys(page))
 
-/*
- * For port to Hexagon Virtual Machine, MAYBE we check for attempts
- * to reference reserved HVM space, but in any case, the VM will be
- * protected.
- */
-#define kern_addr_valid(addr)   (1)
-
 #include <asm/mem-layout.h>
 #include <asm-generic/memory_model.h>
 /* XXX Todo: implement assembly-optimized version of getorder. */
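virt_to_pfn() is just shift arithmetic on a linearly mapped kernel address: subtract the direct-map base to get a physical address, then drop the in-page offset bits. A small self-checking sketch, with stand-in constants (Hexagon takes PAGE_SHIFT from CONFIG_PAGE_SHIFT, 12 through 20, and its real __pa also accounts for PHYS_OFFSET):

/*
 * Illustration only: the arithmetic behind virt_to_pfn(). PAGE_SHIFT
 * and PAGE_OFFSET here are assumed example values.
 */
#include <stdio.h>

#define PAGE_SHIFT	14			/* e.g. 16KB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_OFFSET	0xc0000000UL		/* assumed direct-map base */

#define __pa(vaddr)		((unsigned long)(vaddr) - PAGE_OFFSET)
#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)

int main(void)
{
	unsigned long vaddr = PAGE_OFFSET + 5 * PAGE_SIZE + 0x123;

	/* The in-page offset is shifted out; the pfn names the frame. */
	printf("pfn=%lu\n", virt_to_pfn(vaddr));	/* prints pfn=5 */
	return 0;
}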
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h
index cc9be514a676..55988625e6fb 100644
--- a/arch/hexagon/include/asm/pgalloc.h
+++ b/arch/hexagon/include/asm/pgalloc.h
@@ -11,7 +11,7 @@
 #include <asm/mem-layout.h>
 #include <asm/atomic.h>
 
-#include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */
+#include <asm-generic/pgalloc.h>
 
 extern unsigned long long kmap_generation;
 
@@ -41,11 +41,6 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
	return pgd;
 }
 
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-	free_page((unsigned long) pgd);
-}
-
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte)
 {
@@ -92,10 +87,10 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
		max_kernel_seg = pmdindex;
 }
 
-#define __pte_free_tlb(tlb, pte, addr)		\
-do {						\
-	pgtable_pte_page_dtor((pte));		\
-	tlb_remove_page((tlb), (pte));		\
+#define __pte_free_tlb(tlb, pte, addr)				\
+do {								\
+	pagetable_pte_dtor((page_ptdesc(pte)));			\
+	tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte)));	\
 } while (0)
 
 #endif
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index 2fec20ad939e..8c5b7a1c3d90 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -12,7 +12,6 @@
 * Page table definitions for Qualcomm Hexagon processor.
 */
 #include <asm/page.h>
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 /* A handy thing to have if one has the RAM. Declared in head.S */
@@ -62,6 +61,9 @@ extern unsigned long empty_zero_page;
 * So we'll put up with a bit of inefficiency for now...
 */
 
+/* We borrow bit 6 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE	(1<<6)
+
 /*
 * Top "FOURTH" level (pgd), which for the Hexagon VM is really
 * only the second from the bottom, pgd and pud both being collapsed.
@@ -127,40 +129,8 @@ extern unsigned long _dflt_cache_att;
 */
 #define CACHEDEF	(CACHE_DEFAULT << 6)
 
-/* Private (copy-on-write) page protections. */
-#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF)
-#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF)
-#define __P010 __P000	/* Write-only copy-on-write */
-#define __P011 __P001	/* Read/Write copy-on-write */
-#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-			_PAGE_EXECUTE | CACHEDEF)
-#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \
-			_PAGE_READ | CACHEDEF)
-#define __P110 __P100	/* Write/execute copy-on-write */
-#define __P111 __P101	/* Read/Write/Execute, copy-on-write */
-
-/* Shared page protections. */
-#define __S000 __P000
-#define __S001 __P001
-#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-			_PAGE_WRITE | CACHEDEF)
-#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
-			_PAGE_WRITE | CACHEDEF)
-#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-			_PAGE_EXECUTE | CACHEDEF)
-#define __S101 __P101
-#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-			_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
-#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
-			_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
-
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];  /* located in head.S */
 
-/* Seems to be zero even in architectures where the zero page is firewalled? */
-#define FIRST_USER_ADDRESS 0UL
-#define pte_special(pte)	0
-#define pte_mkspecial(pte)	(pte)
-
 /*  HUGETLB not working currently  */
 #ifdef CONFIG_HUGETLB_PAGE
 #define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
@@ -209,33 +179,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
	pte_val(*ptep) = _NULL_PTE;
 }
 
-#ifdef NEED_PMD_INDEX_DESPITE_BEING_2_LEVEL
-/**
- * pmd_index - returns the index of the entry in the PMD page
- * which would control the given virtual address
- */
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-#endif
-
-/**
- * pgd_index - returns the index of the entry in the PGD page
- * which would control the given virtual address
- *
- * This returns the *index* for the address in the pgd_t
- */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-
-/*
- * pgd_offset - find an offset in a page-table-directory
- */
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-
-/*
- * pgd_offset_k - get kernel (init_mm) pgd entry pointer for addr
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
 /**
 * pmd_none - check if pmd_entry is mapped
 * @pmd_entry:  pmd entry
@@ -269,10 +212,14 @@ static inline int pmd_bad(pmd_t pmd)
 }
 
 /*
+ * pmd_pfn - converts a PMD entry to a page frame number
+ */
+#define pmd_pfn(pmd)  (pmd_val(pmd) >> PAGE_SHIFT)
+
+/*
 * pmd_page - converts a PMD entry to a page pointer
 */
 #define pmd_page(pmd)  (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-#define pmd_pgtable(pmd) pmd_page(pmd)
 
 /**
 * pte_none - check if pte is mapped
@@ -353,7 +300,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
 }
 
 /* pte_mkwrite - mark page as writable */
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
	pte_val(pte) |= _PAGE_WRITE;
	return pte;
@@ -391,6 +338,7 @@ static inline int pte_exec(pte_t pte)
 /* __swp_entry_to_pte - extract PTE from swap entry */
 #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
 
+#define PFN_PTE_SHIFT	PAGE_SHIFT
 /* pfn_pte - convert page number and protection value to page table entry */
 #define pfn_pte(pfn, pgprot) __pte((pfn << PAGE_SHIFT) | pgprot_val(pgprot))
 
@@ -398,43 +346,21 @@ static inline int pte_exec(pte_t pte)
 #define pte_pfn(pte)  (pte_val(pte) >> PAGE_SHIFT)
 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
 
-/*
- * set_pte_at - update page table and do whatever magic may be
- * necessary to make the underlying hardware/firmware take note.
- *
- * VM may require a virtual instruction to alert the MMU.
- */
-#define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte)
-
-/*
- * May need to invoke the virtual machine as well...
- */
-#define pte_unmap(pte)		do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)
-
-/*
- * pte_offset_map - returns the linear address of the page table entry
- * corresponding to an address
- */
-#define pte_offset_map(dir, address)                                    \
-	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-
-#define pte_offset_map_nested(pmd, addr) pte_offset_map(pmd, addr)
-
-/* pte_offset_kernel - kernel version of pte_offset */
-#define pte_offset_kernel(dir, address) \
-	((pte_t *) (unsigned long) __va(pmd_val(*dir) & PAGE_MASK) \
-				+  __pte_offset(address))
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+	return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK);
+}
 
 /* ZERO_PAGE - returns the globally shared zero page */
 #define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
 
-#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
 /*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
 * Swap/file PTE definitions.  If _PAGE_PRESENT is zero, the rest of the PTE is
 * interpreted as swap information.  The remaining free bits are interpreted as
- * swap type/offset tuple.  Rather than have the TLB fill handler test
+ * listed below.  Rather than have the TLB fill handler test
 * _PAGE_PRESENT, we're going to reserve the permissions bits and set them to
 * all zeros for swap entries, which speeds up the miss handler at the cost of
 * 3 bits of offset.  That trade-off can be revisited if necessary, but Hexagon
@@ -444,9 +370,10 @@ static inline int pte_exec(pte_t pte)
 * Format of swap PTE:
 *	bit	0:	Present (zero)
 *	bits	1-5:	swap type (arch independent layer uses 5 bits max)
- *	bits	6-9:	bits 3:0 of offset
+ *	bit	6:	exclusive marker
+ *	bits	7-9:	bits 2:0 of offset
 *	bits	10-12:	effectively _PAGE_PROTNONE (all zero)
- *	bits	13-31:	bits 22:4 of swap offset
+ *	bits	13-31:	bits 21:3 of swap offset
 *
 * The split offset makes some of the following macros a little gnarly,
 * but there's plenty of precedent for this sort of thing.
@@ -456,14 +383,28 @@ static inline int pte_exec(pte_t pte)
 #define __swp_type(swp_pte)		(((swp_pte).val >> 1) & 0x1f)
 
 #define __swp_offset(swp_pte) \
-	((((swp_pte).val >> 6) & 0xf) | (((swp_pte).val >> 9) & 0x7ffff0))
+	((((swp_pte).val >> 7) & 0x7) | (((swp_pte).val >> 10) & 0x3ffff8))
 
 #define __swp_entry(type, offset) \
	((swp_entry_t)	{ \
-		((type << 1) | \
-		 ((offset & 0x7ffff0) << 9) | ((offset & 0xf) << 6)) })
+		(((type & 0x1f) << 1) | \
+		 ((offset & 0x3ffff8) << 10) | ((offset & 0x7) << 7)) })
+
+static inline int pte_swp_exclusive(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
 
-/*  Oh boy.  There are a lot of possible arch overrides found in this file.  */
-#include <asm-generic/pgtable.h>
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
+	return pte;
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
+	return pte;
+}
 
 #endif
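The new swap-PTE layout is easy to sanity-check: type in bits 1-5, the exclusive marker in the borrowed bit 6, and a 22-bit offset split around the zeroed permission bits 10-12 (offset bits 2:0 land in PTE bits 7-9, offset bits 21:3 in PTE bits 13-31). A self-checking round-trip of exactly that packing, in plain C:

/*
 * Illustration only: the swap-PTE encode/decode documented above.
 */
#include <assert.h>
#include <stdio.h>

#define _PAGE_SWP_EXCLUSIVE	(1u << 6)

static unsigned int swp_entry(unsigned int type, unsigned int offset)
{
	return ((type & 0x1f) << 1) |
	       ((offset & 0x7) << 7) |		/* offset bits 2:0 -> PTE 7-9 */
	       ((offset & 0x3ffff8) << 10);	/* offset bits 21:3 -> PTE 13-31 */
}

static unsigned int swp_type(unsigned int pte)
{
	return (pte >> 1) & 0x1f;
}

static unsigned int swp_offset(unsigned int pte)
{
	return ((pte >> 7) & 0x7) | ((pte >> 10) & 0x3ffff8);
}

int main(void)
{
	unsigned int pte = swp_entry(9, 0x12345) | _PAGE_SWP_EXCLUSIVE;

	assert(swp_type(pte) == 9);
	assert(swp_offset(pte) == 0x12345);
	assert(pte & _PAGE_SWP_EXCLUSIVE);	/* marker doesn't collide */
	assert(!(pte & 1));			/* present bit stays zero */
	printf("round-trip ok\n");
	return 0;
}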
diff --git a/arch/hexagon/include/asm/processor.h b/arch/hexagon/include/asm/processor.h
index 9f0cc99420be..0cd39c2cdf8f 100644
--- a/arch/hexagon/include/asm/processor.h
+++ b/arch/hexagon/include/asm/processor.h
@@ -60,11 +60,7 @@ struct thread_struct {
 #define KSTK_EIP(tsk) (pt_elr(task_pt_regs(tsk)))
 #define KSTK_ESP(tsk) (pt_psp(task_pt_regs(tsk)))
 
-/*  Free all resources held by a thread; defined in process.c  */
-extern void release_thread(struct task_struct *dead_task);
-
-/* Get wait channel for task P.  */
-extern unsigned long get_wchan(struct task_struct *p);
+extern unsigned long __get_wchan(struct task_struct *p);
 
 /*  The following stuff is pretty HEXAGON specific.  */
diff --git a/arch/hexagon/include/asm/ptrace.h b/arch/hexagon/include/asm/ptrace.h
new file mode 100644
index 000000000000..ed35da1ee685
--- /dev/null
+++ b/arch/hexagon/include/asm/ptrace.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Ptrace definitions for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ASM_HEXAGON_PTRACE_H
+#define _ASM_HEXAGON_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+
+/* kprobe-based event tracer support */
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+
+#define current_pt_regs() \
+	((struct pt_regs *) \
+	 ((unsigned long)current_thread_info() + THREAD_SIZE) - 1)
+
+#if CONFIG_HEXAGON_ARCH_VERSION >= 4
+#define arch_has_single_step()	(1)
+#endif
+
+#endif
diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h
index bfe07d842ff3..ef103b73bec8 100644
--- a/arch/hexagon/include/asm/spinlock.h
+++ b/arch/hexagon/include/asm/spinlock.h
@@ -30,9 +30,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	{ P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-		"	{ if !P3 jump 1b; }\n"
+		"	{ if (!P3) jump 1b; }\n"
		"	memw_locked(%0,P3) = R6;\n"
-		"	{ if !P3 jump 1b; }\n"
+		"	{ if (!P3) jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
@@ -46,7 +46,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
		"1:	R6 = memw_locked(%0);\n"
		"	R6 = add(R6,#-1);\n"
		"	memw_locked(%0,P3) = R6\n"
-		"	if !P3 jump 1b;\n"
+		"	if (!P3) jump 1b;\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
@@ -61,7 +61,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	{ %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-		"	{ if !P3 jump 1f; }\n"
+		"	{ if (!P3) jump 1f; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	{ %0 = P3 }\n"
		"1:\n"
@@ -78,9 +78,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0)\n"
		"	{ P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
-		"	{ if !P3 jump 1b; }\n"
+		"	{ if (!P3) jump 1b; }\n"
		"	memw_locked(%0,P3) = R6;\n"
-		"	{ if !P3 jump 1b; }\n"
+		"	{ if (!P3) jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
@@ -94,7 +94,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
	__asm__ __volatile__(
		"	R6 = memw_locked(%1)\n"
		"	{ %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
-		"	{ if !P3 jump 1f; }\n"
+		"	{ if (!P3) jump 1f; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	%0 = P3;\n"
		"1:\n"
@@ -117,9 +117,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	P3 = cmp.eq(R6,#0);\n"
-		"	{ if !P3 jump 1b; R6 = #1; }\n"
+		"	{ if (!P3) jump 1b; R6 = #1; }\n"
		"	memw_locked(%0,P3) = R6;\n"
-		"	{ if !P3 jump 1b; }\n"
+		"	{ if (!P3) jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
@@ -139,7 +139,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	P3 = cmp.eq(R6,#0);\n"
-		"	{ if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
+		"	{ if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	%0 = P3;\n"
		"1:\n"
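arch_spin_lock() above is a classic test-and-set spinlock built from the memw_locked retry loop: spin read-only until the word looks free, then try to claim it, and restart if the conditional store fails. The same protocol in portable C, using an atomic exchange as the "set" step (illustration only):

/*
 * Illustration only: test-and-test-and-set spinlock protocol.
 */
static void spin_lock_demo(int *lock)
{
	for (;;) {
		/* Spin read-only while held: mirrors cmp.eq(R6,#0) + jump 1b. */
		while (__atomic_load_n(lock, __ATOMIC_RELAXED))
			;
		/* Try to claim it; retry the outer loop if someone beat us. */
		if (__atomic_exchange_n(lock, 1, __ATOMIC_ACQUIRE) == 0)
			return;
	}
}

static void spin_unlock_demo(int *lock)
{
	__atomic_store_n(lock, 0, __ATOMIC_RELEASE);	/* plain store releases */
}

The read-only inner loop keeps contending CPUs out of each other's way: they spin in their local caches and only attempt the exclusive access once the lock looks free.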
diff --git a/arch/hexagon/include/asm/spinlock_types.h b/arch/hexagon/include/asm/spinlock_types.h
index 19d233497ba5..d5f66495b670 100644
--- a/arch/hexagon/include/asm/spinlock_types.h
+++ b/arch/hexagon/include/asm/spinlock_types.h
@@ -8,7 +8,7 @@
 #ifndef _ASM_SPINLOCK_TYPES_H
 #define _ASM_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
 # error "please don't include this file directly"
 #endif
diff --git a/arch/hexagon/include/asm/thread_info.h b/arch/hexagon/include/asm/thread_info.h
index 563da1986464..e90f280b9ce3 100644
--- a/arch/hexagon/include/asm/thread_info.h
+++ b/arch/hexagon/include/asm/thread_info.h
@@ -22,10 +22,6 @@
 
 #ifndef __ASSEMBLY__
 
-typedef struct {
-	unsigned long seg;
-} mm_segment_t;
-
 /*
 * This is union'd with the "bottom" of the kernel stack.
 * It keeps track of thread info which is handy for routines
@@ -37,7 +33,6 @@ struct thread_info {
	unsigned long		flags;		/* low level flags */
	__u32			cpu;		/* current cpu */
	int			preempt_count;	/* 0=>preemptible,<0=>BUG */
-	mm_segment_t		addr_limit;	/* segmentation sux */
	/*
	 * used for syscalls somehow;
	 * seems to have a function pointer and four arguments
@@ -66,7 +61,6 @@ struct thread_info {
	.flags		= 0,			\
	.cpu		= 0,			\
	.preempt_count	= 1,			\
-	.addr_limit	= KERNEL_DS,		\
	.sp		= 0,			\
	.regs		= NULL,			\
 }
@@ -95,6 +89,7 @@ register struct thread_info *__current_thread_info asm(QUOTED_THREADINFO_REG);
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_SINGLESTEP		4	/* restore ss @ return to usr mode */
 #define TIF_RESTORE_SIGMASK	6	/* restore sig mask in do_signal() */
+#define TIF_NOTIFY_SIGNAL	7	/* signal notifications exist */
 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17	/* OOM killer killed process */
 
@@ -103,6 +98,7 @@ register struct thread_info *__current_thread_info asm(QUOTED_THREADINFO_REG);
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
+#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
 
 /* work to do on interrupt/exception return - All but TIF_SYSCALL_TRACE */
 #define _TIF_WORK_MASK		(0x0000FFFF & ~_TIF_SYSCALL_TRACE)
diff --git a/arch/hexagon/include/asm/timer-regs.h b/arch/hexagon/include/asm/timer-regs.h
deleted file mode 100644
index ee6c61423a05..000000000000
--- a/arch/hexagon/include/asm/timer-regs.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Timer support for Hexagon
- *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _ASM_TIMER_REGS_H
-#define _ASM_TIMER_REGS_H
-
-/*  This stuff should go into a platform specific file  */
-#define TCX0_CLK_RATE		19200
-#define TIMER_ENABLE		0
-#define TIMER_CLR_ON_MATCH	1
-
-/*
- * 8x50 HDD Specs 5-8.  Simulator co-sim not fixed until
- * release 1.1, and then it's "adjustable" and probably not defaulted.
- */
-#define RTOS_TIMER_INT		3
-#ifdef CONFIG_HEXAGON_COMET
-#define RTOS_TIMER_REGS_ADDR	0xAB000000UL
-#endif
-#define SLEEP_CLK_RATE		32000
-
-#endif
diff --git a/arch/hexagon/include/asm/timex.h b/arch/hexagon/include/asm/timex.h
index 78338d8ada83..dfe69e118b2b 100644
--- a/arch/hexagon/include/asm/timex.h
+++ b/arch/hexagon/include/asm/timex.h
@@ -7,16 +7,16 @@
 #define _ASM_TIMEX_H
 
 #include <asm-generic/timex.h>
-#include <asm/timer-regs.h>
+#include <asm/hexagon_vm.h>
 
 /* Using TCX0 as our clock.  CLOCK_TICK_RATE scheduled to be removed. */
-#define CLOCK_TICK_RATE              TCX0_CLK_RATE
+#define CLOCK_TICK_RATE              19200
 
 #define ARCH_HAS_READ_CURRENT_TIMER
 
 static inline int read_current_timer(unsigned long *timer_val)
 {
-	*timer_val = (unsigned long) __vmgettime();
+	*timer_val = __vmgettime();
	return 0;
 }
diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h
index 00cb38faad0c..bff77efc0d9a 100644
--- a/arch/hexagon/include/asm/uaccess.h
+++ b/arch/hexagon/include/asm/uaccess.h
@@ -10,35 +10,9 @@
 /*
 * User space memory access functions
 */
-#include <linux/mm.h>
 #include <asm/sections.h>
 
 /*
- * access_ok: - Checks if a user space pointer is valid
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
- * Returns true (nonzero) if the memory block *may* be valid, false (zero)
- * if it is definitely invalid.
- *
- * User address space in Hexagon, like x86, goes to 0xbfffffff, so the
- * simple MSB-based tests used by MIPS won't work.  Some further
- * optimization is probably possible here, but for now, keep it
- * reasonably simple and not *too* slow.  After all, we've got the
- * MMU for backup.
- */
-
-#define __access_ok(addr, size) \
-	((get_fs().seg == KERNEL_DS.seg) || \
-	(((unsigned long)addr < get_fs().seg) && \
-	  (unsigned long)size < (get_fs().seg - (unsigned long)addr)))
-
-/*
 * When a kernel-mode page fault is taken, the faulting instruction
 * address is checked against a table of exception_table_entries.
 * Each entry is a tuple of the address of an instruction that may
@@ -58,38 +32,7 @@ unsigned long raw_copy_to_user(void __user *to, const void *from,
 __kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count);
 #define __clear_user(a, s) __clear_user_hexagon((a), (s))
 
-#define __strncpy_from_user(dst, src, n) hexagon_strncpy_from_user(dst, src, n)
-
-/*  get around the ifndef in asm-generic/uaccess.h  */
-#define __strnlen_user __strnlen_user
-
-extern long __strnlen_user(const char __user *src, long n);
-
-static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
-					     long n);
-
 #include <asm-generic/uaccess.h>
 
-/*  Todo:  an actual accelerated version of this.  */
-static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
-					     long n)
-{
-	long res = __strnlen_user(src, n);
-
-	if (unlikely(!res))
-		return -EFAULT;
-
-	if (res > n) {
-		long left = raw_copy_from_user(dst, src, n);
-		if (unlikely(left))
-			memset(dst + (n - left), 0, left);
-		return n;
-	} else {
-		long left = raw_copy_from_user(dst, src, res);
-		if (unlikely(left))
-			memset(dst + (res - left), 0, left);
-		return res-1;
-	}
-}
 
 #endif
diff --git a/arch/hexagon/include/asm/module.h b/arch/hexagon/include/asm/vermagic.h
index e8de4fe03543..0e8dedc8c486 100644
--- a/arch/hexagon/include/asm/module.h
+++ b/arch/hexagon/include/asm/vermagic.h
@@ -3,11 +3,11 @@
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */
 
-#ifndef _ASM_MODULE_H
-#define _ASM_MODULE_H
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
 
-#include <asm-generic/module.h>
+#include <linux/stringify.h>
 
 #define MODULE_ARCH_VERMAGIC	__stringify(PROCESSOR_MODEL_NAME) " "
 
-#endif
+#endif /* _ASM_VERMAGIC_H */
diff --git a/arch/hexagon/include/asm/vmalloc.h b/arch/hexagon/include/asm/vmalloc.h
new file mode 100644
index 000000000000..7b04609e525c
--- /dev/null
+++ b/arch/hexagon/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_HEXAGON_VMALLOC_H
+#define _ASM_HEXAGON_VMALLOC_H
+
+#endif /* _ASM_HEXAGON_VMALLOC_H */