From b7e5a591502b03b4a1408bb93a15968d6ea446ce Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Mon, 20 Nov 2017 10:33:09 -0800 Subject: RISC-V: Remove __vdso_cmpxchg{32,64} symbol versions These were left over from an earlier version of the port. Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/vdso/vdso.lds.S | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S index 8c9dce95c11d..3ac08eebd11d 100644 --- a/arch/riscv/kernel/vdso/vdso.lds.S +++ b/arch/riscv/kernel/vdso/vdso.lds.S @@ -70,8 +70,6 @@ VERSION LINUX_4.15 { global: __vdso_rt_sigreturn; - __vdso_cmpxchg32; - __vdso_cmpxchg64; local: *; }; } -- cgit From 28dfbe6ed483e8a589cce88095d7787d61bf9c16 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Mon, 23 Oct 2017 15:42:14 -0700 Subject: RISC-V: Add VDSO entries for clock_get/gettimeofday/getcpu For now these are just placeholders that execute the syscall. We will later optimize them to avoid kernel crossings, but we'd like to have the VDSO entries from the first released kernel version to make the ABI simpler. Signed-off-by: Andrew Waterman Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/vdso/Makefile | 6 +++++- arch/riscv/kernel/vdso/clock_getres.S | 26 ++++++++++++++++++++++++++ arch/riscv/kernel/vdso/clock_gettime.S | 26 ++++++++++++++++++++++++++ arch/riscv/kernel/vdso/getcpu.S | 26 ++++++++++++++++++++++++++ arch/riscv/kernel/vdso/gettimeofday.S | 26 ++++++++++++++++++++++++++ arch/riscv/kernel/vdso/vdso.lds.S | 4 ++++ 6 files changed, 113 insertions(+), 1 deletion(-) create mode 100644 arch/riscv/kernel/vdso/clock_getres.S create mode 100644 arch/riscv/kernel/vdso/clock_gettime.S create mode 100644 arch/riscv/kernel/vdso/getcpu.S create mode 100644 arch/riscv/kernel/vdso/gettimeofday.S diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 523d0a8ac8db..2dcc4f3070bc 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -1,7 +1,11 @@ # Copied from arch/tile/kernel/vdso/Makefile # Symbols present in the vdso -vdso-syms = rt_sigreturn +vdso-syms = rt_sigreturn +vdso-syms += gettimeofday +vdso-syms += clock_gettime +vdso-syms += clock_getres +vdso-syms += getcpu # Files to link into the vdso obj-vdso = $(patsubst %, %.o, $(vdso-syms)) diff --git a/arch/riscv/kernel/vdso/clock_getres.S b/arch/riscv/kernel/vdso/clock_getres.S new file mode 100644 index 000000000000..edf7e2339648 --- /dev/null +++ b/arch/riscv/kernel/vdso/clock_getres.S @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2017 SiFive + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + + .text +/* int __vdso_clock_getres(clockid_t clock_id, struct timespec *res); */ +ENTRY(__vdso_clock_getres) + .cfi_startproc + /* For now, just do the syscall. 
*/ + li a7, __NR_clock_getres + ecall + ret + .cfi_endproc +ENDPROC(__vdso_clock_getres) diff --git a/arch/riscv/kernel/vdso/clock_gettime.S b/arch/riscv/kernel/vdso/clock_gettime.S new file mode 100644 index 000000000000..aac65676c6d5 --- /dev/null +++ b/arch/riscv/kernel/vdso/clock_gettime.S @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2017 SiFive + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + + .text +/* int __vdso_clock_gettime(clockid_t clock_id, struct timespec *tp); */ +ENTRY(__vdso_clock_gettime) + .cfi_startproc + /* For now, just do the syscall. */ + li a7, __NR_clock_gettime + ecall + ret + .cfi_endproc +ENDPROC(__vdso_clock_gettime) diff --git a/arch/riscv/kernel/vdso/getcpu.S b/arch/riscv/kernel/vdso/getcpu.S new file mode 100644 index 000000000000..cc7e98924484 --- /dev/null +++ b/arch/riscv/kernel/vdso/getcpu.S @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2017 SiFive + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + + .text +/* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */ +ENTRY(__vdso_getcpu) + .cfi_startproc + /* For now, just do the syscall. */ + li a7, __NR_getcpu + ecall + ret + .cfi_endproc +ENDPROC(__vdso_getcpu) diff --git a/arch/riscv/kernel/vdso/gettimeofday.S b/arch/riscv/kernel/vdso/gettimeofday.S new file mode 100644 index 000000000000..da85d33e8990 --- /dev/null +++ b/arch/riscv/kernel/vdso/gettimeofday.S @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2017 SiFive + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + + .text +/* int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); */ +ENTRY(__vdso_gettimeofday) + .cfi_startproc + /* For now, just do the syscall. 
*/ + li a7, __NR_gettimeofday + ecall + ret + .cfi_endproc +ENDPROC(__vdso_gettimeofday) diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S index 3ac08eebd11d..c7543c6a00f9 100644 --- a/arch/riscv/kernel/vdso/vdso.lds.S +++ b/arch/riscv/kernel/vdso/vdso.lds.S @@ -70,6 +70,10 @@ VERSION LINUX_4.15 { global: __vdso_rt_sigreturn; + __vdso_gettimeofday; + __vdso_clock_gettime; + __vdso_clock_getres; + __vdso_getcpu; local: *; }; } -- cgit From 4650d02ad2d9b2c1c7aa36055166db6aee68f72e Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Tue, 14 Nov 2017 11:35:37 -0800 Subject: RISC-V: Remove unused arguments from ATOMIC_OP Our atomics are generated from a complicated series of preprocessor macros, each of which is slightly different from the last. When writing the macros I'd accidentally left some unused arguments floating around. This patch removes the unused macro arguments. Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/atomic.h | 94 ++++++++++++++++++++--------------------- 1 file changed, 47 insertions(+), 47 deletions(-) diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h index e2e37c57cbeb..40c73dd59c15 100644 --- a/arch/riscv/include/asm/atomic.h +++ b/arch/riscv/include/asm/atomic.h @@ -50,30 +50,30 @@ static __always_inline void atomic64_set(atomic64_t *v, long i) * have the AQ or RL bits set. These don't return anything, so there's only * one version to worry about. */ -#define ATOMIC_OP(op, asm_op, c_op, I, asm_type, c_type, prefix) \ -static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \ -{ \ - __asm__ __volatile__ ( \ - "amo" #asm_op "." #asm_type " zero, %1, %0" \ - : "+A" (v->counter) \ - : "r" (I) \ - : "memory"); \ +#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \ +static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \ +{ \ + __asm__ __volatile__ ( \ + "amo" #asm_op "." #asm_type " zero, %1, %0" \ + : "+A" (v->counter) \ + : "r" (I) \ + : "memory"); \ } #ifdef CONFIG_GENERIC_ATOMIC64 -#define ATOMIC_OPS(op, asm_op, c_op, I) \ - ATOMIC_OP (op, asm_op, c_op, I, w, int, ) +#define ATOMIC_OPS(op, asm_op, I) \ + ATOMIC_OP (op, asm_op, I, w, int, ) #else -#define ATOMIC_OPS(op, asm_op, c_op, I) \ - ATOMIC_OP (op, asm_op, c_op, I, w, int, ) \ - ATOMIC_OP (op, asm_op, c_op, I, d, long, 64) +#define ATOMIC_OPS(op, asm_op, I) \ + ATOMIC_OP (op, asm_op, I, w, int, ) \ + ATOMIC_OP (op, asm_op, I, d, long, 64) #endif -ATOMIC_OPS(add, add, +, i) -ATOMIC_OPS(sub, add, +, -i) -ATOMIC_OPS(and, and, &, i) -ATOMIC_OPS( or, or, |, i) -ATOMIC_OPS(xor, xor, ^, i) +ATOMIC_OPS(add, add, i) +ATOMIC_OPS(sub, add, -i) +ATOMIC_OPS(and, and, i) +ATOMIC_OPS( or, or, i) +ATOMIC_OPS(xor, xor, i) #undef ATOMIC_OP #undef ATOMIC_OPS @@ -83,7 +83,7 @@ ATOMIC_OPS(xor, xor, ^, i) * There's two flavors of these: the arithmatic ops have both fetch and return * versions, while the logical ops only have fetch versions. 
*/ -#define ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix) \ +#define ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, asm_type, c_type, prefix) \ static __always_inline c_type atomic##prefix##_fetch_##op##c_or(c_type i, atomic##prefix##_t *v) \ { \ register c_type ret; \ @@ -103,13 +103,13 @@ static __always_inline c_type atomic##prefix##_##op##_return##c_or(c_type i, ato #ifdef CONFIG_GENERIC_ATOMIC64 #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \ - ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int, ) \ + ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, w, int, ) \ ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int, ) #else #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \ - ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int, ) \ + ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, w, int, ) \ ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int, ) \ - ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, d, long, 64) \ + ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, d, long, 64) \ ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, d, long, 64) #endif @@ -126,28 +126,28 @@ ATOMIC_OPS(sub, add, +, -i, .aqrl, ) #undef ATOMIC_OPS #ifdef CONFIG_GENERIC_ATOMIC64 -#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \ - ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w, int, ) +#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or) \ + ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w, int, ) #else -#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \ - ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w, int, ) \ - ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, d, long, 64) +#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or) \ + ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w, int, ) \ + ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, d, long, 64) #endif -ATOMIC_OPS(and, and, &, i, , _relaxed) -ATOMIC_OPS(and, and, &, i, .aq , _acquire) -ATOMIC_OPS(and, and, &, i, .rl , _release) -ATOMIC_OPS(and, and, &, i, .aqrl, ) +ATOMIC_OPS(and, and, i, , _relaxed) +ATOMIC_OPS(and, and, i, .aq , _acquire) +ATOMIC_OPS(and, and, i, .rl , _release) +ATOMIC_OPS(and, and, i, .aqrl, ) -ATOMIC_OPS( or, or, |, i, , _relaxed) -ATOMIC_OPS( or, or, |, i, .aq , _acquire) -ATOMIC_OPS( or, or, |, i, .rl , _release) -ATOMIC_OPS( or, or, |, i, .aqrl, ) +ATOMIC_OPS( or, or, i, , _relaxed) +ATOMIC_OPS( or, or, i, .aq , _acquire) +ATOMIC_OPS( or, or, i, .rl , _release) +ATOMIC_OPS( or, or, i, .aqrl, ) -ATOMIC_OPS(xor, xor, ^, i, , _relaxed) -ATOMIC_OPS(xor, xor, ^, i, .aq , _acquire) -ATOMIC_OPS(xor, xor, ^, i, .rl , _release) -ATOMIC_OPS(xor, xor, ^, i, .aqrl, ) +ATOMIC_OPS(xor, xor, i, , _relaxed) +ATOMIC_OPS(xor, xor, i, .aq , _acquire) +ATOMIC_OPS(xor, xor, i, .rl , _release) +ATOMIC_OPS(xor, xor, i, .aqrl, ) #undef ATOMIC_OPS @@ -182,13 +182,13 @@ ATOMIC_OPS(add_negative, add, <, 0) #undef ATOMIC_OP #undef ATOMIC_OPS -#define ATOMIC_OP(op, func_op, c_op, I, c_type, prefix) \ +#define ATOMIC_OP(op, func_op, I, c_type, prefix) \ static __always_inline void atomic##prefix##_##op(atomic##prefix##_t *v) \ { \ atomic##prefix##_##func_op(I, v); \ } -#define ATOMIC_FETCH_OP(op, func_op, c_op, I, c_type, prefix) \ +#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix) \ static __always_inline c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v) \ { \ return atomic##prefix##_fetch_##func_op(I, v); \ @@ -202,16 +202,16 @@ static __always_inline c_type atomic##prefix##_##op##_return(atomic##prefix##_t #ifdef CONFIG_GENERIC_ATOMIC64 #define ATOMIC_OPS(op, asm_op, 
c_op, I) \ - ATOMIC_OP (op, asm_op, c_op, I, int, ) \ - ATOMIC_FETCH_OP (op, asm_op, c_op, I, int, ) \ + ATOMIC_OP (op, asm_op, I, int, ) \ + ATOMIC_FETCH_OP (op, asm_op, I, int, ) \ ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, ) #else #define ATOMIC_OPS(op, asm_op, c_op, I) \ - ATOMIC_OP (op, asm_op, c_op, I, int, ) \ - ATOMIC_FETCH_OP (op, asm_op, c_op, I, int, ) \ + ATOMIC_OP (op, asm_op, I, int, ) \ + ATOMIC_FETCH_OP (op, asm_op, I, int, ) \ ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, ) \ - ATOMIC_OP (op, asm_op, c_op, I, long, 64) \ - ATOMIC_FETCH_OP (op, asm_op, c_op, I, long, 64) \ + ATOMIC_OP (op, asm_op, I, long, 64) \ + ATOMIC_FETCH_OP (op, asm_op, I, long, 64) \ ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64) #endif -- cgit From 8286d51a6c244738aeb071fcd7d2e36a3374e150 Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Tue, 28 Nov 2017 14:02:50 -0800 Subject: RISC-V: Comment on why {,cmp}xchg is ordered how it is This is another memory model FIXME. Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/atomic.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h index 40c73dd59c15..e65d1cd89e28 100644 --- a/arch/riscv/include/asm/atomic.h +++ b/arch/riscv/include/asm/atomic.h @@ -300,8 +300,13 @@ static __always_inline long atomic64_inc_not_zero(atomic64_t *v) /* * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as - * {cmp,}xchg and the operations that return, so they need a barrier. We just - * use the other implementations directly. + * {cmp,}xchg and the operations that return, so they need a barrier. + */ +/* + * FIXME: atomic_cmpxchg_{acquire,release,relaxed} are all implemented by + * assigning the same barrier to both the LR and SC operations, but that might + * not make any sense. We're waiting on a memory model specification to + * determine exactly what the right thing to do is here. */ #define ATOMIC_OP(c_t, prefix, c_or, size, asm_or) \ static __always_inline c_t atomic##prefix##_cmpxchg##c_or(atomic##prefix##_t *v, c_t o, c_t n) \ -- cgit From 61a60d35b7d1b0b3a31bc21d15805a3654f60920 Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Tue, 28 Nov 2017 14:03:48 -0800 Subject: RISC-V: Remove __smp_bp__{before,after}_atomic These duplicate the asm-generic definitions are therefor aren't useful. Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/barrier.h | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h index 183534b7c39b..455ee16127fb 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h @@ -38,21 +38,6 @@ #define smp_rmb() RISCV_FENCE(r,r) #define smp_wmb() RISCV_FENCE(w,w) -/* - * These fences exist to enforce ordering around the relaxed AMOs. The - * documentation defines that - * " - * atomic_fetch_add(); - * is equivalent to: - * smp_mb__before_atomic(); - * atomic_fetch_add_relaxed(); - * smp_mb__after_atomic(); - * " - * So we emit full fences on both sides. - */ -#define __smb_mb__before_atomic() smp_mb() -#define __smb_mb__after_atomic() smp_mb() - /* * These barriers prevent accesses performed outside a spinlock from being moved * inside a spinlock. Since RISC-V sets the aq/rl bits on our spinlock only -- cgit From 3343eb6806f365b9e3d451040671fa9336e57513 Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Tue, 28 Nov 2017 14:03:55 -0800 Subject: RISC-V: Remove smb_mb__{before,after}_spinlock() These are obselete. 
Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/barrier.h | 8 -------- 1 file changed, 8 deletions(-) diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h index 455ee16127fb..773c4e039cd7 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h @@ -38,14 +38,6 @@ #define smp_rmb() RISCV_FENCE(r,r) #define smp_wmb() RISCV_FENCE(w,w) -/* - * These barriers prevent accesses performed outside a spinlock from being moved - * inside a spinlock. Since RISC-V sets the aq/rl bits on our spinlock only - * enforce release consistency, we need full fences here. - */ -#define smb_mb__before_spinlock() smp_mb() -#define smb_mb__after_spinlock() smp_mb() - #include #endif /* __ASSEMBLY__ */ -- cgit From 9347ce54cd699db92d37e66191aa4b9a0a92304e Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Tue, 28 Nov 2017 14:04:05 -0800 Subject: RISC-V: __test_and_op_bit_ord should be strongly ordered I mis-read the documentation. After looking at it again the documentation is actually as clear as it can be, it's just that I didn't actually read it in order and therefor did the wrong thing. Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/bitops.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h index 7c281ef1d583..f30daf26f08f 100644 --- a/arch/riscv/include/asm/bitops.h +++ b/arch/riscv/include/asm/bitops.h @@ -67,7 +67,7 @@ : "memory"); #define __test_and_op_bit(op, mod, nr, addr) \ - __test_and_op_bit_ord(op, mod, nr, addr, ) + __test_and_op_bit_ord(op, mod, nr, addr, .aqrl) #define __op_bit(op, mod, nr, addr) \ __op_bit_ord(op, mod, nr, addr, ) -- cgit From 21db403660d1433b8a02b26d5d4084921b857c40 Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Tue, 28 Nov 2017 14:05:04 -0800 Subject: RISC-V: Add READ_ONCE in arch_spin_is_locked() This was just incorrect in the original version. Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/spinlock.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h index 04c71d938afd..a6a005c4f2fb 100644 --- a/arch/riscv/include/asm/spinlock.h +++ b/arch/riscv/include/asm/spinlock.h @@ -24,7 +24,7 @@ /* FIXME: Replace this with a ticket lock, like MIPS. */ -#define arch_spin_is_locked(x) ((x)->lock != 0) +#define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0) static inline void arch_spin_unlock(arch_spinlock_t *lock) { -- cgit From c901e45a999a1935d7adf653e1cf12dfbcd737aa Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Tue, 28 Nov 2017 14:06:17 -0800 Subject: RISC-V: `sfence.vma` orderes the instruction cache This is just a comment change, but it's one that bit me on the mailing list. It turns out that issuing a `sfence.vma` enforces instruction cache ordering in addition to TLB ordering. This isn't explicitly called out in the ISA manual, but Andrew will be making that more clear in a future revision. CC: Andrew Waterman Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/tlbflush.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 5ee4ae370b5e..c79fab3d377d 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -17,7 +17,10 @@ #ifdef CONFIG_MMU -/* Flush entire local TLB */ +/* + * Flush entire local TLB. 
'sfence.vma' implicitly fences with the instruction + * cache as well, so a 'fence.i' is not necessary. + */ static inline void local_flush_tlb_all(void) { __asm__ __volatile__ ("sfence.vma" : : : "memory"); -- cgit From bf730552734372e45b10fe056726de1950fdfdde Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Tue, 28 Nov 2017 14:06:31 -0800 Subject: RISC-V: remove spin_unlock_wait() This was removed from the other architectures in commit 952111d7db02 ("arch: Remove spin_unlock_wait() arch-specific definitions"). That landed between when we got upstream and when our patches were reviewed, so this is a followup patch. Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/spinlock.h | 9 --------- 1 file changed, 9 deletions(-) diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h index a6a005c4f2fb..2fd27e8ef1fd 100644 --- a/arch/riscv/include/asm/spinlock.h +++ b/arch/riscv/include/asm/spinlock.h @@ -58,15 +58,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) } } -static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) -{ - smp_rmb(); - do { - cpu_relax(); - } while (arch_spin_is_locked(lock)); - smp_acquire__after_ctrl_dep(); -} - /***********************************************************/ static inline void arch_read_lock(arch_rwlock_t *lock) -- cgit From 5ddf755e4439833847a21bd5e2dc82d686679911 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 29 Nov 2017 17:55:12 -0800 Subject: RISC-V: use generic serial.h Fixes this from allmodconfig: drivers/tty/serial/earlycon.c:27:10: fatal error: asm/serial.h: No such file or directory Signed-off-by: Olof Johansson Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/Kbuild | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild index 18158be62a2b..970460a0b492 100644 --- a/arch/riscv/include/asm/Kbuild +++ b/arch/riscv/include/asm/Kbuild @@ -40,6 +40,7 @@ generic-y += resource.h generic-y += scatterlist.h generic-y += sections.h generic-y += sembuf.h +generic-y += serial.h generic-y += setup.h generic-y += shmbuf.h generic-y += shmparam.h -- cgit From 5e6f82b0fe7b7b4a204efeb0817fb8b0a2bc0373 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 29 Nov 2017 17:55:13 -0800 Subject: RISC-V: use RISCV_{INT,SHORT} instead of {INT,SHORT} for asm macros INT and SHORT are used by some drivers that pull in the include files, so prefixing helps avoid namespace conflicts. Other constructs in the same file already uses this. 
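As a rough illustration of the clash (sketch only; the driver-side define is taken from the warning quoted below): once a file that defines its own INT macro also pulls in the asm helpers, directly or through other includes, the preprocessor sees two conflicting definitions, while the RISCV_-prefixed spelling stays out of the driver's namespace.

/* asm/asm.h, before this patch */
#define INT		__ASM_STR(.word)

/* e.g. sound/core/pcm_misc.c, compiled in the same unit */
#define INT		__force int	/* -> "INT" redefined warning */

/* after this patch there is nothing left to collide with */
#define RISCV_INT	__ASM_STR(.word)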
Fixes, among others, these warnings with allmodconfig: ../sound/core/pcm_misc.c:43:0: warning: "INT" redefined #define INT __force int Signed-off-by: Olof Johansson Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/asm.h | 12 ++++++------ arch/riscv/include/asm/bug.h | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h index 6cbbb6a68d76..5ad4cb622bed 100644 --- a/arch/riscv/include/asm/asm.h +++ b/arch/riscv/include/asm/asm.h @@ -58,17 +58,17 @@ #endif #if (__SIZEOF_INT__ == 4) -#define INT __ASM_STR(.word) -#define SZINT __ASM_STR(4) -#define LGINT __ASM_STR(2) +#define RISCV_INT __ASM_STR(.word) +#define RISCV_SZINT __ASM_STR(4) +#define RISCV_LGINT __ASM_STR(2) #else #error "Unexpected __SIZEOF_INT__" #endif #if (__SIZEOF_SHORT__ == 2) -#define SHORT __ASM_STR(.half) -#define SZSHORT __ASM_STR(2) -#define LGSHORT __ASM_STR(1) +#define RISCV_SHORT __ASM_STR(.half) +#define RISCV_SZSHORT __ASM_STR(2) +#define RISCV_LGSHORT __ASM_STR(1) #else #error "Unexpected __SIZEOF_SHORT__" #endif diff --git a/arch/riscv/include/asm/bug.h b/arch/riscv/include/asm/bug.h index c3e13764a943..bfc7f099ab1f 100644 --- a/arch/riscv/include/asm/bug.h +++ b/arch/riscv/include/asm/bug.h @@ -27,8 +27,8 @@ typedef u32 bug_insn_t; #ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS -#define __BUG_ENTRY_ADDR INT " 1b - 2b" -#define __BUG_ENTRY_FILE INT " %0 - 2b" +#define __BUG_ENTRY_ADDR RISCV_INT " 1b - 2b" +#define __BUG_ENTRY_FILE RISCV_INT " %0 - 2b" #else #define __BUG_ENTRY_ADDR RISCV_PTR " 1b" #define __BUG_ENTRY_FILE RISCV_PTR " %0" @@ -38,7 +38,7 @@ typedef u32 bug_insn_t; #define __BUG_ENTRY \ __BUG_ENTRY_ADDR "\n\t" \ __BUG_ENTRY_FILE "\n\t" \ - SHORT " %1" + RISCV_SHORT " %1" #else #define __BUG_ENTRY \ __BUG_ENTRY_ADDR -- cgit From fe2726af9fdc93ecf2469b7c512fc1a8936e128c Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 29 Nov 2017 17:55:14 -0800 Subject: RISC-V: io.h: type fixes for warnings include for __iomem definition. Also, add volatile to iounmap() like other architectures have it to avoid "discarding volatile" warnings from some drivers. Finally, explicitly promote the base address for INB/OUTB functions to avoid some old legacy drivers complaining about int-to-ptr promotions. The drivers are unlikely to work but they're included in allmodconfig so the warnings are noisy. 
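For the port-I/O promotion, the problem case looks roughly like this (hypothetical driver call, port number chosen only for illustration):

	unsigned int port = 0x1f0;	/* legacy drivers keep port numbers in plain ints */
	unsigned char buf[16];

	insb(port, buf, sizeof(buf));

	/* old expansion:  __insb((void __iomem *)port, ...)
	 *   -> int-to-pointer cast of different size on 64-bit builds
	 * new expansion:  __insb((void __iomem *)(long)port, ...)
	 *   -> widened to pointer size first, no warning */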
Fixes, among other warnings, these with allmodconfig: ../arch/riscv/include/asm/io.h:24:21: error: expected '=', ',', ';', 'asm' or '__attribute__' before '*' token extern void __iomem *ioremap(phys_addr_t offset, unsigned long size); sound/pci/echoaudio/echoaudio.c: In function 'snd_echo_free': sound/pci/echoaudio/echoaudio.c:1879:10: warning: passing argument 1 of 'iounmap' discards 'volatile' qualifier from pointer target type [-Wdiscarded-qualifiers] Signed-off-by: Olof Johansson Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/io.h | 16 +++++++++------- arch/riscv/mm/ioremap.c | 2 +- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h index c1f32cfcc79b..2a5c7c371e2d 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h @@ -19,6 +19,8 @@ #ifndef _ASM_RISCV_IO_H #define _ASM_RISCV_IO_H +#include + #ifdef CONFIG_MMU extern void __iomem *ioremap(phys_addr_t offset, unsigned long size); @@ -32,7 +34,7 @@ extern void __iomem *ioremap(phys_addr_t offset, unsigned long size); #define ioremap_wc(addr, size) ioremap((addr), (size)) #define ioremap_wt(addr, size) ioremap((addr), (size)) -extern void iounmap(void __iomem *addr); +extern void iounmap(volatile void __iomem *addr); #endif /* CONFIG_MMU */ @@ -266,9 +268,9 @@ __io_reads_ins(reads, u32, l, __io_br(), __io_ar()) __io_reads_ins(ins, u8, b, __io_pbr(), __io_par()) __io_reads_ins(ins, u16, w, __io_pbr(), __io_par()) __io_reads_ins(ins, u32, l, __io_pbr(), __io_par()) -#define insb(addr, buffer, count) __insb((void __iomem *)addr, buffer, count) -#define insw(addr, buffer, count) __insw((void __iomem *)addr, buffer, count) -#define insl(addr, buffer, count) __insl((void __iomem *)addr, buffer, count) +#define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count) +#define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count) +#define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count) __io_writes_outs(writes, u8, b, __io_bw(), __io_aw()) __io_writes_outs(writes, u16, w, __io_bw(), __io_aw()) @@ -280,9 +282,9 @@ __io_writes_outs(writes, u32, l, __io_bw(), __io_aw()) __io_writes_outs(outs, u8, b, __io_pbw(), __io_paw()) __io_writes_outs(outs, u16, w, __io_pbw(), __io_paw()) __io_writes_outs(outs, u32, l, __io_pbw(), __io_paw()) -#define outsb(addr, buffer, count) __outsb((void __iomem *)addr, buffer, count) -#define outsw(addr, buffer, count) __outsw((void __iomem *)addr, buffer, count) -#define outsl(addr, buffer, count) __outsl((void __iomem *)addr, buffer, count) +#define outsb(addr, buffer, count) __outsb((void __iomem *)(long)addr, buffer, count) +#define outsw(addr, buffer, count) __outsw((void __iomem *)(long)addr, buffer, count) +#define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count) #ifdef CONFIG_64BIT __io_reads_ins(reads, u64, q, __io_br(), __io_ar()) diff --git a/arch/riscv/mm/ioremap.c b/arch/riscv/mm/ioremap.c index e99194a4077e..70ef2724cdf6 100644 --- a/arch/riscv/mm/ioremap.c +++ b/arch/riscv/mm/ioremap.c @@ -85,7 +85,7 @@ EXPORT_SYMBOL(ioremap); * * Caller must ensure there is only one unmapping for the same pointer. 
*/ -void iounmap(void __iomem *addr) +void iounmap(volatile void __iomem *addr) { vunmap((void *)((unsigned long)addr & PAGE_MASK)); } -- cgit From 83e7b8769a08987f47117f3a065de153c839a0a8 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 29 Nov 2017 17:55:15 -0800 Subject: RISC-V: move empty_zero_page definition to C and export it Needed by some modules (exported by other architectures). Signed-off-by: Olof Johansson Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/head.S | 3 --- arch/riscv/kernel/setup.c | 3 +++ 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 76af908f87c1..78f670d70133 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -152,6 +152,3 @@ END(_start) __PAGE_ALIGNED_BSS /* Empty zero page */ .balign PAGE_SIZE -ENTRY(empty_zero_page) - .fill (empty_zero_page + PAGE_SIZE) - ., 1, 0x00 -END(empty_zero_page) diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index de7db114c315..4375375ad168 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -60,6 +60,9 @@ static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; unsigned long va_pa_offset; unsigned long pfn_base; +unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; +EXPORT_SYMBOL(empty_zero_page); + /* The lucky hart to first increment this variable will boot the other cores */ atomic_t hart_lottery; -- cgit From 24948b7ec0f31d36dc900088b140c4f9551b6f56 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 29 Nov 2017 17:55:16 -0800 Subject: RISC-V: Export some expected symbols for modules These are the ones needed by current allmodconfig, so add them instead of everything other architectures are exporting -- the rest can be added on demand later if needed. 
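As a sketch of the failure mode (hypothetical module, not part of this series): the compiler may lower ordinary copies and initialisations to calls to memcpy()/memset(), and when that happens inside a module the call has to resolve against an exported kernel symbol, otherwise modpost reports it as undefined.

#include <linux/module.h>
#include <linux/string.h>

static char scratch[64];

static int __init demo_init(void)
{
	/* either of these may compile down to a call to the arch's
	 * assembly memset/memcpy, which must therefore be exported */
	memset(scratch, 0, sizeof(scratch));
	memcpy(scratch, "riscv", 6);
	return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");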
Signed-off-by: Olof Johansson Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/riscv_ksyms.c | 3 +++ arch/riscv/kernel/setup.c | 2 ++ arch/riscv/lib/delay.c | 1 + 3 files changed, 6 insertions(+) diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c index 23cc81ec9e94..551734248748 100644 --- a/arch/riscv/kernel/riscv_ksyms.c +++ b/arch/riscv/kernel/riscv_ksyms.c @@ -12,4 +12,7 @@ /* * Assembly functions that may be used (directly or indirectly) by modules */ +EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__copy_user); +EXPORT_SYMBOL(memset); +EXPORT_SYMBOL(memcpy); diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 4375375ad168..8fbb6749910d 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -58,7 +58,9 @@ static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; #endif /* CONFIG_CMDLINE_BOOL */ unsigned long va_pa_offset; +EXPORT_SYMBOL(va_pa_offset); unsigned long pfn_base; +EXPORT_SYMBOL(pfn_base); unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; EXPORT_SYMBOL(empty_zero_page); diff --git a/arch/riscv/lib/delay.c b/arch/riscv/lib/delay.c index 1cc4ac3964b4..dce8ae24c6d3 100644 --- a/arch/riscv/lib/delay.c +++ b/arch/riscv/lib/delay.c @@ -84,6 +84,7 @@ void __delay(unsigned long cycles) while ((unsigned long)(get_cycles() - t0) < cycles) cpu_relax(); } +EXPORT_SYMBOL(__delay); void udelay(unsigned long usecs) { -- cgit From 4bde63286a6c7c76fe05ff0e03ad253f5260b104 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 29 Nov 2017 17:55:17 -0800 Subject: RISC-V: Provide stub of setup_profiling_timer() Fixes the following on allmodconfig build: profile.c:(.text+0x3e4): undefined reference to `setup_profiling_timer' Signed-off-by: Olof Johansson Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/smp.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c index b4a71ec5906f..17014c68e9fe 100644 --- a/arch/riscv/kernel/smp.c +++ b/arch/riscv/kernel/smp.c @@ -38,6 +38,13 @@ enum ipi_message_type { IPI_MAX }; + +/* Unsupported */ +int setup_profiling_timer(unsigned int multiplier) +{ + return -EINVAL; +} + irqreturn_t handle_ipi(void) { unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits; -- cgit From 4a41d5dbb0bbd0c3faffb2ccd8ef1a7aeb12f978 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 29 Nov 2017 17:55:18 -0800 Subject: RISC-V: Use define for get_cycles like other architectures Signed-off-by: Olof Johansson Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/timex.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h index 3df4932d8964..2f26989cb864 100644 --- a/arch/riscv/include/asm/timex.h +++ b/arch/riscv/include/asm/timex.h @@ -18,7 +18,7 @@ typedef unsigned long cycles_t; -static inline cycles_t get_cycles(void) +static inline cycles_t get_cycles_inline(void) { cycles_t n; @@ -27,6 +27,7 @@ static inline cycles_t get_cycles(void) : "=r" (n)); return n; } +#define get_cycles get_cycles_inline #ifdef CONFIG_64BIT static inline uint64_t get_cycles64(void) -- cgit From 741fc3ff3a49509a7092d6d9eb51da6bd7577278 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 29 Nov 2017 17:55:20 -0800 Subject: RISC-V: Add missing include Fixes: include/asm-generic/mm_hooks.h:20:11: warning: 'struct vm_area_struct' declared inside parameter list will not be visible outside of this definition or declaration 
include/asm-generic/mm_hooks.h:19:38: warning: 'struct mm_struct' declared inside parameter list will not be visible outside of this definition or declaration Signed-off-by: Olof Johansson Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/mmu_context.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h index de1fc1631fc4..1cd5172882c2 100644 --- a/arch/riscv/include/asm/mmu_context.h +++ b/arch/riscv/include/asm/mmu_context.h @@ -14,6 +14,7 @@ #ifndef _ASM_RISCV_MMU_CONTEXT_H #define _ASM_RISCV_MMU_CONTEXT_H +#include #include #include -- cgit From 08f051eda33b51e8ee0f45f05bcfe49d0f0caf6b Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Wed, 25 Oct 2017 14:30:32 -0700 Subject: RISC-V: Flush I$ when making a dirty page executable The RISC-V ISA allows for instruction caches that are not coherent WRT stores, even on a single hart. As a result, we need to explicitly flush the instruction cache whenever marking a dirty page as executable in order to preserve the correct system behavior. Local instruction caches aren't that scary (our implementations actually flush the cache, but RISC-V is defined to allow higher-performance implementations to exist), but RISC-V defines no way to perform an instruction cache shootdown. When explicitly asked to do so we can shoot down remote instruction caches via an IPI, but this is a bit on the slow side. Instead of requiring an IPI to all harts whenever marking a page as executable, we simply flush the currently running harts. In order to maintain correct behavior, we additionally mark every other hart as needing a deferred instruction cache which will be taken before anything runs on it. Signed-off-by: Andrew Waterman Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/cacheflush.h | 24 ++++++++++++--- arch/riscv/include/asm/mmu.h | 4 +++ arch/riscv/include/asm/mmu_context.h | 44 +++++++++++++++++++++++++++ arch/riscv/include/asm/pgtable.h | 58 ++++++++++++++++++++---------------- arch/riscv/include/asm/tlbflush.h | 2 ++ arch/riscv/kernel/smp.c | 48 +++++++++++++++++++++++++++++ arch/riscv/mm/Makefile | 1 + arch/riscv/mm/cacheflush.c | 23 ++++++++++++++ 8 files changed, 174 insertions(+), 30 deletions(-) create mode 100644 arch/riscv/mm/cacheflush.c diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h index 0595585013b0..5c9ed39ee2a2 100644 --- a/arch/riscv/include/asm/cacheflush.h +++ b/arch/riscv/include/asm/cacheflush.h @@ -18,21 +18,37 @@ #undef flush_icache_range #undef flush_icache_user_range +#undef flush_dcache_page static inline void local_flush_icache_all(void) { asm volatile ("fence.i" ::: "memory"); } +#define PG_dcache_clean PG_arch_1 + +static inline void flush_dcache_page(struct page *page) +{ + if (test_bit(PG_dcache_clean, &page->flags)) + clear_bit(PG_dcache_clean, &page->flags); +} + +/* + * RISC-V doesn't have an instruction to flush parts of the instruction cache, + * so instead we just flush the whole thing. 
+ */ +#define flush_icache_range(start, end) flush_icache_all() +#define flush_icache_user_range(vma, pg, addr, len) flush_icache_all() + #ifndef CONFIG_SMP -#define flush_icache_range(start, end) local_flush_icache_all() -#define flush_icache_user_range(vma, pg, addr, len) local_flush_icache_all() +#define flush_icache_all() local_flush_icache_all() +#define flush_icache_mm(mm, local) flush_icache_all() #else /* CONFIG_SMP */ -#define flush_icache_range(start, end) sbi_remote_fence_i(0) -#define flush_icache_user_range(vma, pg, addr, len) sbi_remote_fence_i(0) +#define flush_icache_all() sbi_remote_fence_i(0) +void flush_icache_mm(struct mm_struct *mm, bool local); #endif /* CONFIG_SMP */ diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h index 66805cba9a27..5df2dccdba12 100644 --- a/arch/riscv/include/asm/mmu.h +++ b/arch/riscv/include/asm/mmu.h @@ -19,6 +19,10 @@ typedef struct { void *vdso; +#ifdef CONFIG_SMP + /* A local icache flush is needed before user execution can resume. */ + cpumask_t icache_stale_mask; +#endif } mm_context_t; #endif /* __ASSEMBLY__ */ diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h index de1fc1631fc4..b15b169e3d22 100644 --- a/arch/riscv/include/asm/mmu_context.h +++ b/arch/riscv/include/asm/mmu_context.h @@ -1,5 +1,6 @@ /* * Copyright (C) 2012 Regents of the University of California + * Copyright (C) 2017 SiFive * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -19,6 +20,7 @@ #include #include #include +#include static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *task) @@ -46,12 +48,54 @@ static inline void set_pgdir(pgd_t *pgd) csr_write(sptbr, virt_to_pfn(pgd) | SPTBR_MODE); } +/* + * When necessary, performs a deferred icache flush for the given MM context, + * on the local CPU. RISC-V has no direct mechanism for instruction cache + * shoot downs, so instead we send an IPI that informs the remote harts they + * need to flush their local instruction caches. To avoid pathologically slow + * behavior in a common case (a bunch of single-hart processes on a many-hart + * machine, ie 'make -j') we avoid the IPIs for harts that are not currently + * executing a MM context and instead schedule a deferred local instruction + * cache flush to be performed before execution resumes on each hart. This + * actually performs that local instruction cache flush, which implicitly only + * refers to the current hart. + */ +static inline void flush_icache_deferred(struct mm_struct *mm) +{ +#ifdef CONFIG_SMP + unsigned int cpu = smp_processor_id(); + cpumask_t *mask = &mm->context.icache_stale_mask; + + if (cpumask_test_cpu(cpu, mask)) { + cpumask_clear_cpu(cpu, mask); + /* + * Ensure the remote hart's writes are visible to this hart. + * This pairs with a barrier in flush_icache_mm. + */ + smp_mb(); + local_flush_icache_all(); + } +#endif +} + static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *task) { if (likely(prev != next)) { + /* + * Mark the current MM context as inactive, and the next as + * active. 
This is at least used by the icache flushing + * routines in order to determine who should + */ + unsigned int cpu = smp_processor_id(); + + cpumask_clear_cpu(cpu, mm_cpumask(prev)); + cpumask_set_cpu(cpu, mm_cpumask(next)); + set_pgdir(next->pgd); local_flush_tlb_all(); + + flush_icache_deferred(next); } } diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 3399257780b2..2cbd92ed1629 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -178,28 +178,6 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr) #define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr)) #define pte_unmap(pte) ((void)(pte)) -/* - * Certain architectures need to do special things when PTEs within - * a page table are directly modified. Thus, the following hook is - * made available. - */ -static inline void set_pte(pte_t *ptep, pte_t pteval) -{ - *ptep = pteval; -} - -static inline void set_pte_at(struct mm_struct *mm, - unsigned long addr, pte_t *ptep, pte_t pteval) -{ - set_pte(ptep, pteval); -} - -static inline void pte_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - set_pte_at(mm, addr, ptep, __pte(0)); -} - static inline int pte_present(pte_t pte) { return (pte_val(pte) & _PAGE_PRESENT); @@ -210,21 +188,22 @@ static inline int pte_none(pte_t pte) return (pte_val(pte) == 0); } -/* static inline int pte_read(pte_t pte) */ - static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } +static inline int pte_exec(pte_t pte) +{ + return pte_val(pte) & _PAGE_EXEC; +} + static inline int pte_huge(pte_t pte) { return pte_present(pte) && (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)); } -/* static inline int pte_exec(pte_t pte) */ - static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; @@ -311,6 +290,33 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b) return pte_val(pte_a) == pte_val(pte_b); } +/* + * Certain architectures need to do special things when PTEs within + * a page table are directly modified. Thus, the following hook is + * made available. + */ +static inline void set_pte(pte_t *ptep, pte_t pteval) +{ + *ptep = pteval; +} + +void flush_icache_pte(pte_t pte); + +static inline void set_pte_at(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, pte_t pteval) +{ + if (pte_present(pteval) && pte_exec(pteval)) + flush_icache_pte(pteval); + + set_pte(ptep, pteval); +} + +static inline void pte_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + set_pte_at(mm, addr, ptep, __pte(0)); +} + #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS static inline int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 5ee4ae370b5e..77edf2826c1f 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -17,6 +17,8 @@ #ifdef CONFIG_MMU +#include + /* Flush entire local TLB */ static inline void local_flush_tlb_all(void) { diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c index b4a71ec5906f..1b27ade437b4 100644 --- a/arch/riscv/kernel/smp.c +++ b/arch/riscv/kernel/smp.c @@ -108,3 +108,51 @@ void smp_send_reschedule(int cpu) { send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); } + +/* + * Performs an icache flush for the given MM context. 
RISC-V has no direct + * mechanism for instruction cache shoot downs, so instead we send an IPI that + * informs the remote harts they need to flush their local instruction caches. + * To avoid pathologically slow behavior in a common case (a bunch of + * single-hart processes on a many-hart machine, ie 'make -j') we avoid the + * IPIs for harts that are not currently executing a MM context and instead + * schedule a deferred local instruction cache flush to be performed before + * execution resumes on each hart. + */ +void flush_icache_mm(struct mm_struct *mm, bool local) +{ + unsigned int cpu; + cpumask_t others, *mask; + + preempt_disable(); + + /* Mark every hart's icache as needing a flush for this MM. */ + mask = &mm->context.icache_stale_mask; + cpumask_setall(mask); + /* Flush this hart's I$ now, and mark it as flushed. */ + cpu = smp_processor_id(); + cpumask_clear_cpu(cpu, mask); + local_flush_icache_all(); + + /* + * Flush the I$ of other harts concurrently executing, and mark them as + * flushed. + */ + cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu)); + local |= cpumask_empty(&others); + if (mm != current->active_mm || !local) + sbi_remote_fence_i(others.bits); + else { + /* + * It's assumed that at least one strongly ordered operation is + * performed on this hart between setting a hart's cpumask bit + * and scheduling this MM context on that hart. Sending an SBI + * remote message will do this, but in the case where no + * messages are sent we still need to order this hart's writes + * with flush_icache_deferred(). + */ + smp_mb(); + } + + preempt_enable(); +} diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index 81f7d9ce6d88..eb22ab49b3e0 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile @@ -2,3 +2,4 @@ obj-y += init.o obj-y += fault.o obj-y += extable.o obj-y += ioremap.o +obj-y += cacheflush.o diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c new file mode 100644 index 000000000000..498c0a0814fe --- /dev/null +++ b/arch/riscv/mm/cacheflush.c @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2017 SiFive + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +void flush_icache_pte(pte_t pte) +{ + struct page *page = pte_page(pte); + + if (!test_and_set_bit(PG_dcache_clean, &page->flags)) + flush_icache_all(); +} -- cgit From 921ebd8f2c081b3cf6c3b29ef4103eef3ff26054 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Wed, 25 Oct 2017 14:32:16 -0700 Subject: RISC-V: Allow userspace to flush the instruction cache Despite RISC-V having a direct 'fence.i' instruction available to userspace (which we can't trap!), that's not actually viable when running on Linux because the kernel might schedule a process on another hart. There is no way for userspace to handle this without invoking the kernel (as it doesn't know the thread->hart mappings), so we've defined a RISC-V specific system call to flush the instruction cache. This patch adds both a system call and a VDSO entry. 
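A typical user would be a JIT or dynamic loader that has just written instructions into a buffer; a minimal sketch of the call (assuming the caller has already located __vdso_flush_icache in the vDSO, e.g. with a helper along the lines of the kernel's parse_vdso.c -- the lookup itself is not shown) looks like:

/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
typedef int (*riscv_flush_icache_fn)(void *start, void *end, unsigned long flags);

static void publish_code(riscv_flush_icache_fn flush_icache, void *code, size_t len)
{
	/* ...instructions have just been written to 'code'... */
	flush_icache(code, (char *)code + len, 0);	/* flags == 0: flush on all threads */
}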
If possible, we'd like to avoid having the system call be considered part of the user-facing ABI and instead restrict that to the VDSO entry -- both just in general to avoid having additional user-visible ABI to maintain, and because we'd prefer that users just call the VDSO entry because there might be a better way to do this in the future (ie, one that doesn't require entering the kernel). Signed-off-by: Andrew Waterman Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/cacheflush.h | 6 ++++++ arch/riscv/include/asm/vdso-syscalls.h | 28 ++++++++++++++++++++++++++++ arch/riscv/include/asm/vdso.h | 4 ++++ arch/riscv/kernel/sys_riscv.c | 32 ++++++++++++++++++++++++++++++++ arch/riscv/kernel/syscall_table.c | 2 ++ arch/riscv/kernel/vdso/Makefile | 1 + arch/riscv/kernel/vdso/flush_icache.S | 31 +++++++++++++++++++++++++++++++ arch/riscv/kernel/vdso/vdso.lds.S | 1 + 8 files changed, 105 insertions(+) create mode 100644 arch/riscv/include/asm/vdso-syscalls.h create mode 100644 arch/riscv/kernel/vdso/flush_icache.S diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h index 5c9ed39ee2a2..efd89a88d2d0 100644 --- a/arch/riscv/include/asm/cacheflush.h +++ b/arch/riscv/include/asm/cacheflush.h @@ -52,4 +52,10 @@ void flush_icache_mm(struct mm_struct *mm, bool local); #endif /* CONFIG_SMP */ +/* + * Bits in sys_riscv_flush_icache()'s flags argument. + */ +#define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL +#define SYS_RISCV_FLUSH_ICACHE_ALL (SYS_RISCV_FLUSH_ICACHE_LOCAL) + #endif /* _ASM_RISCV_CACHEFLUSH_H */ diff --git a/arch/riscv/include/asm/vdso-syscalls.h b/arch/riscv/include/asm/vdso-syscalls.h new file mode 100644 index 000000000000..a2ccf1894929 --- /dev/null +++ b/arch/riscv/include/asm/vdso-syscalls.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2017 SiFive + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef _ASM_RISCV_VDSO_SYSCALLS_H +#define _ASM_RISCV_VDSO_SYSCALLS_H + +#ifdef CONFIG_SMP + +/* These syscalls are only used by the vDSO and are not in the uapi. 
*/ +#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15) +__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache) + +#endif + +#endif /* _ASM_RISCV_VDSO_H */ diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h index 602f61257553..541544d64c33 100644 --- a/arch/riscv/include/asm/vdso.h +++ b/arch/riscv/include/asm/vdso.h @@ -38,4 +38,8 @@ struct vdso_data { (void __user *)((unsigned long)(base) + __vdso_##name); \ }) +#ifdef CONFIG_SMP +asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t); +#endif + #endif /* _ASM_RISCV_VDSO_H */ diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c index 4351be7d0533..c6c037eabaf6 100644 --- a/arch/riscv/kernel/sys_riscv.c +++ b/arch/riscv/kernel/sys_riscv.c @@ -16,6 +16,7 @@ #include #include #include +#include static long riscv_sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, @@ -47,3 +48,34 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12); } #endif /* !CONFIG_64BIT */ + +#ifdef CONFIG_SMP +/* + * Allows the instruction cache to be flushed from userspace. Despite RISC-V + * having a direct 'fence.i' instruction available to userspace (which we + * can't trap!), that's not actually viable when running on Linux because the + * kernel might schedule a process on another hart. There is no way for + * userspace to handle this without invoking the kernel (as it doesn't know the + * thread->hart mappings), so we've defined a RISC-V specific system call to + * flush the instruction cache. + * + * sys_riscv_flush_icache() is defined to flush the instruction cache over an + * address range, with the flush applying to either all threads or just the + * caller. We don't currently do anything with the address range, that's just + * in there for forwards compatibility. + */ +SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, + uintptr_t, flags) +{ + struct mm_struct *mm = current->mm; + bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0; + + /* Check the reserved flags. */ + if (unlikely(flags & !SYS_RISCV_FLUSH_ICACHE_ALL)) + return -EINVAL; + + flush_icache_mm(mm, local); + + return 0; +} +#endif diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c index 4e30dc5fb593..a5bd6401f95e 100644 --- a/arch/riscv/kernel/syscall_table.c +++ b/arch/riscv/kernel/syscall_table.c @@ -15,6 +15,7 @@ #include #include #include +#include #undef __SYSCALL #define __SYSCALL(nr, call) [nr] = (call), @@ -22,4 +23,5 @@ void *sys_call_table[__NR_syscalls] = { [0 ... 
__NR_syscalls - 1] = sys_ni_syscall, #include +#include }; diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 2dcc4f3070bc..324568d33921 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -6,6 +6,7 @@ vdso-syms += gettimeofday vdso-syms += clock_gettime vdso-syms += clock_getres vdso-syms += getcpu +vdso-syms += flush_icache # Files to link into the vdso obj-vdso = $(patsubst %, %.o, $(vdso-syms)) diff --git a/arch/riscv/kernel/vdso/flush_icache.S b/arch/riscv/kernel/vdso/flush_icache.S new file mode 100644 index 000000000000..b0fbad74e873 --- /dev/null +++ b/arch/riscv/kernel/vdso/flush_icache.S @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2017 SiFive + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include + + .text +/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */ +ENTRY(__vdso_flush_icache) + .cfi_startproc +#ifdef CONFIG_SMP + li a7, __NR_riscv_flush_icache + ecall +#else + fence.i + li a0, 0 +#endif + ret + .cfi_endproc +ENDPROC(__vdso_flush_icache) diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S index c7543c6a00f9..cd1d47e0724b 100644 --- a/arch/riscv/kernel/vdso/vdso.lds.S +++ b/arch/riscv/kernel/vdso/vdso.lds.S @@ -74,6 +74,7 @@ VERSION __vdso_clock_gettime; __vdso_clock_getres; __vdso_getcpu; + __vdso_flush_icache; local: *; }; } -- cgit From 0e710ac6521f13c68a5c634471f40ae448c31e0a Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Tue, 21 Nov 2017 07:59:28 -0800 Subject: RISC-V: Clean up an unused include We used to have some cmpxchg syscalls. They're no longer there, so we no longer need the include. CC: Christoph Hellwig Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/sys_riscv.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c index c6c037eabaf6..a2ae936a093e 100644 --- a/arch/riscv/kernel/sys_riscv.c +++ b/arch/riscv/kernel/sys_riscv.c @@ -14,7 +14,6 @@ */ #include -#include #include #include -- cgit From 4db2b604c05afc3d2678fe01d3136c015df313ec Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 22 Nov 2017 11:47:28 +0100 Subject: move libgcc.h to include/linux Introducing a new include/lib directory just for this file totally messes up tab completion for include/linux, which is highly annoying. Move it to include/linux where we have headers for all kinds of other lib/ code as well. 
Signed-off-by: Christoph Hellwig Signed-off-by: Palmer Dabbelt --- include/lib/libgcc.h | 43 ------------------------------------------- include/linux/libgcc.h | 43 +++++++++++++++++++++++++++++++++++++++++++ lib/ashldi3.c | 2 +- lib/ashrdi3.c | 2 +- lib/cmpdi2.c | 2 +- lib/lshrdi3.c | 2 +- lib/muldi3.c | 2 +- lib/ucmpdi2.c | 2 +- 8 files changed, 49 insertions(+), 49 deletions(-) delete mode 100644 include/lib/libgcc.h create mode 100644 include/linux/libgcc.h diff --git a/include/lib/libgcc.h b/include/lib/libgcc.h deleted file mode 100644 index 32e1e0f4b2d0..000000000000 --- a/include/lib/libgcc.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * include/lib/libgcc.h - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see the file COPYING, or write - * to the Free Software Foundation, Inc. - */ - -#ifndef __LIB_LIBGCC_H -#define __LIB_LIBGCC_H - -#include - -typedef int word_type __attribute__ ((mode (__word__))); - -#ifdef __BIG_ENDIAN -struct DWstruct { - int high, low; -}; -#elif defined(__LITTLE_ENDIAN) -struct DWstruct { - int low, high; -}; -#else -#error I feel sick. -#endif - -typedef union { - struct DWstruct s; - long long ll; -} DWunion; - -#endif /* __ASM_LIBGCC_H */ diff --git a/include/linux/libgcc.h b/include/linux/libgcc.h new file mode 100644 index 000000000000..32e1e0f4b2d0 --- /dev/null +++ b/include/linux/libgcc.h @@ -0,0 +1,43 @@ +/* + * include/lib/libgcc.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc. + */ + +#ifndef __LIB_LIBGCC_H +#define __LIB_LIBGCC_H + +#include + +typedef int word_type __attribute__ ((mode (__word__))); + +#ifdef __BIG_ENDIAN +struct DWstruct { + int high, low; +}; +#elif defined(__LITTLE_ENDIAN) +struct DWstruct { + int low, high; +}; +#else +#error I feel sick. 
+#endif + +typedef union { + struct DWstruct s; + long long ll; +} DWunion; + +#endif /* __ASM_LIBGCC_H */ diff --git a/lib/ashldi3.c b/lib/ashldi3.c index 1b6087db95a5..3ffc46e3bb6c 100644 --- a/lib/ashldi3.c +++ b/lib/ashldi3.c @@ -16,7 +16,7 @@ #include -#include +#include long long notrace __ashldi3(long long u, word_type b) { diff --git a/lib/ashrdi3.c b/lib/ashrdi3.c index 2e67c97ac65a..ea054550f0e8 100644 --- a/lib/ashrdi3.c +++ b/lib/ashrdi3.c @@ -16,7 +16,7 @@ #include -#include +#include long long notrace __ashrdi3(long long u, word_type b) { diff --git a/lib/cmpdi2.c b/lib/cmpdi2.c index 6d7ebf6c2b86..2250da7e503e 100644 --- a/lib/cmpdi2.c +++ b/lib/cmpdi2.c @@ -16,7 +16,7 @@ #include -#include +#include word_type notrace __cmpdi2(long long a, long long b) { diff --git a/lib/lshrdi3.c b/lib/lshrdi3.c index 8e845f4bb65f..99cfa5721f2d 100644 --- a/lib/lshrdi3.c +++ b/lib/lshrdi3.c @@ -17,7 +17,7 @@ */ #include -#include +#include long long notrace __lshrdi3(long long u, word_type b) { diff --git a/lib/muldi3.c b/lib/muldi3.c index 88938543e10a..54c8b3123376 100644 --- a/lib/muldi3.c +++ b/lib/muldi3.c @@ -15,7 +15,7 @@ */ #include -#include +#include #define W_TYPE_SIZE 32 diff --git a/lib/ucmpdi2.c b/lib/ucmpdi2.c index 49a53505c8e3..25ca2d4c1e19 100644 --- a/lib/ucmpdi2.c +++ b/lib/ucmpdi2.c @@ -15,7 +15,7 @@ */ #include -#include +#include word_type __ucmpdi2(unsigned long long a, unsigned long long b) { -- cgit From da894ff100be9044c490f47f61541481b4f42b1f Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Tue, 28 Nov 2017 14:15:32 -0800 Subject: RISC-V: __io_writes should respect the length argument Whoops -- I must have just been being an idiot again. Thanks to Segher for finding the bug :). CC: Segher Boessenkool Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/io.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h index c1f32cfcc79b..507ee6a16e67 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h @@ -250,7 +250,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) const ctype *buf = buffer; \ \ do { \ - __raw_writeq(*buf++, addr); \ + __raw_write ## len(*buf++, addr); \ } while (--count); \ } \ afence; \ -- cgit
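To make the effect of that last fix concrete, here is a simplified sketch of the generator macro (the in-tree definition takes more parameters and adds fences, but the shape is the same):

#define __io_writes(len, ctype)						\
	static inline void __writes ## len(volatile void __iomem *addr,\
					   const void *buffer,		\
					   unsigned int count)		\
	{								\
		const ctype *buf = buffer;				\
									\
		if (!count)						\
			return;						\
		do {							\
			__raw_write ## len(*buf++, addr);		\
		} while (--count);					\
	}

With the token-pasted call, __io_writes(b, u8) generates per-element __raw_writeb() stores; before the fix every instance expanded to __raw_writeq(), so even writesb() issued a 64-bit MMIO store for each byte it copied.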