Diffstat (limited to 'arch/riscv/include')
-rw-r--r--  arch/riscv/include/asm/arch_hweight.h | 4
-rw-r--r--  arch/riscv/include/asm/asm.h | 10
-rw-r--r--  arch/riscv/include/asm/atomic.h | 165
-rw-r--r--  arch/riscv/include/asm/barrier.h | 21
-rw-r--r--  arch/riscv/include/asm/bitops.h | 142
-rw-r--r--  arch/riscv/include/asm/cacheflush.h | 7
-rw-r--r--  arch/riscv/include/asm/cfi.h | 17
-rw-r--r--  arch/riscv/include/asm/checksum.h | 2
-rw-r--r--  arch/riscv/include/asm/cmpxchg.h | 405
-rw-r--r--  arch/riscv/include/asm/compat.h | 19
-rw-r--r--  arch/riscv/include/asm/cpufeature.h | 35
-rw-r--r--  arch/riscv/include/asm/crash_reserve.h (renamed from arch/riscv/include/asm/crash_core.h) | 4
-rw-r--r--  arch/riscv/include/asm/csr.h | 2
-rw-r--r--  arch/riscv/include/asm/elf.h | 11
-rw-r--r--  arch/riscv/include/asm/errata_list.h | 13
-rw-r--r--  arch/riscv/include/asm/fence.h | 10
-rw-r--r--  arch/riscv/include/asm/ftrace.h | 19
-rw-r--r--  arch/riscv/include/asm/hugetlb.h | 5
-rw-r--r--  arch/riscv/include/asm/hwcap.h | 3
-rw-r--r--  arch/riscv/include/asm/io.h | 8
-rw-r--r--  arch/riscv/include/asm/jump_label.h | 4
-rw-r--r--  arch/riscv/include/asm/membarrier.h | 50
-rw-r--r--  arch/riscv/include/asm/mmio.h | 5
-rw-r--r--  arch/riscv/include/asm/mmiowb.h | 2
-rw-r--r--  arch/riscv/include/asm/page.h | 4
-rw-r--r--  arch/riscv/include/asm/pgalloc.h | 53
-rw-r--r--  arch/riscv/include/asm/pgtable-64.h | 4
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 44
-rw-r--r--  arch/riscv/include/asm/processor.h | 31
-rw-r--r--  arch/riscv/include/asm/ptdump.h | 22
-rw-r--r--  arch/riscv/include/asm/signal.h | 12
-rw-r--r--  arch/riscv/include/asm/simd.h | 4
-rw-r--r--  arch/riscv/include/asm/stacktrace.h | 5
-rw-r--r--  arch/riscv/include/asm/suspend.h | 5
-rw-r--r--  arch/riscv/include/asm/sync_core.h | 29
-rw-r--r--  arch/riscv/include/asm/syscall_wrapper.h | 53
-rw-r--r--  arch/riscv/include/asm/tlb.h | 20
-rw-r--r--  arch/riscv/include/asm/tlbflush.h | 1
-rw-r--r--  arch/riscv/include/asm/vector.h | 11
-rw-r--r--  arch/riscv/include/asm/vendorid_list.h | 2
-rw-r--r--  arch/riscv/include/asm/vmalloc.h | 61
-rw-r--r--  arch/riscv/include/asm/word-at-a-time.h | 3
-rw-r--r--  arch/riscv/include/uapi/asm/hwprobe.h | 1
-rw-r--r--  arch/riscv/include/uapi/asm/kvm.h | 30
44 files changed, 636 insertions(+), 722 deletions(-)
diff --git a/arch/riscv/include/asm/arch_hweight.h b/arch/riscv/include/asm/arch_hweight.h
index c20236a0725b..85b2c443823e 100644
--- a/arch/riscv/include/asm/arch_hweight.h
+++ b/arch/riscv/include/asm/arch_hweight.h
@@ -20,7 +20,7 @@
static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
#ifdef CONFIG_RISCV_ISA_ZBB
- asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@@ -51,7 +51,7 @@ static inline unsigned int __arch_hweight8(unsigned int w)
static __always_inline unsigned long __arch_hweight64(__u64 w)
{
# ifdef CONFIG_RISCV_ISA_ZBB
- asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
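These hunks are part of a tree-wide rename: asm_volatile_goto() was a compatibility wrapper around the compiler's asm goto construct, and the kernel now spells it directly. A minimal sketch of the pattern being used (function and label names illustrative, not from this patch):

    static __always_inline bool zbb_available(void)
    {
            /* Patched to a nop at boot when Zbb is detected; otherwise
             * control transfers to the C fallback label below. */
            asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
                                 RISCV_ISA_EXT_ZBB, 1)
                     : : : : legacy);
            return true;
    legacy:
            return false;
    }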
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index b0487b39e674..776354895b81 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -183,6 +183,16 @@
REG_L x31, PT_T6(sp)
.endm
+/* Annotate a function as being unsuitable for kprobes. */
+#ifdef CONFIG_KPROBES
+#define ASM_NOKPROBE(name) \
+ .pushsection "_kprobe_blacklist", "aw"; \
+ RISCV_PTR name; \
+ .popsection
+#else
+#define ASM_NOKPROBE(name)
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_ASM_H */
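ASM_NOKPROBE() records a symbol in the _kprobe_blacklist section, which the kprobes core consults to refuse probe placement. A hypothetical usage sketch from an assembly file (the function name is illustrative):

    SYM_CODE_START(handle_exception)
            /* ... low-level trap entry, unsafe to single-step ... */
    SYM_CODE_END(handle_exception)
    ASM_NOKPROBE(handle_exception)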
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index f5dfef6c2153..5b96c2f61adb 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -17,7 +17,6 @@
#endif
#include <asm/cmpxchg.h>
-#include <asm/barrier.h>
#define __atomic_acquire_fence() \
__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
@@ -196,22 +195,28 @@ ATOMIC_OPS(xor, xor, i)
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
+#define _arch_atomic_fetch_add_unless(_prev, _rc, counter, _a, _u, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " beq %[p], %[u], 1f\n" \
+ " add %[rc], %[p], %[a]\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : [a]"r" (_a), [u]"r" (_u) \
+ : "memory"); \
+})
+
/* This is required to provide a full barrier on success. */
static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " beq %[p], %[u], 1f\n"
- " add %[rc], %[p], %[a]\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- : [a]"r" (a), [u]"r" (u)
- : "memory");
+ _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w");
+
return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
@@ -222,77 +227,86 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " beq %[p], %[u], 1f\n"
- " add %[rc], %[p], %[a]\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- : [a]"r" (a), [u]"r" (u)
- : "memory");
+ _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "d");
+
return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif
+#define _arch_atomic_inc_unless_negative(_prev, _rc, counter, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " bltz %[p], 1f\n" \
+ " addi %[rc], %[p], 1\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : \
+ : "memory"); \
+})
+
static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " bltz %[p], 1f\n"
- " addi %[rc], %[p], 1\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_inc_unless_negative(prev, rc, v->counter, "w");
+
return !(prev < 0);
}
#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+#define _arch_atomic_dec_unless_positive(_prev, _rc, counter, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " bgtz %[p], 1f\n" \
+ " addi %[rc], %[p], -1\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : \
+ : "memory"); \
+})
+
static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " bgtz %[p], 1f\n"
- " addi %[rc], %[p], -1\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_unless_positive(prev, rc, v->counter, "w");
+
return !(prev > 0);
}
#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+#define _arch_atomic_dec_if_positive(_prev, _rc, counter, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " addi %[rc], %[p], -1\n" \
+ " bltz %[rc], 1f\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : \
+ : "memory"); \
+})
+
static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " addi %[rc], %[p], -1\n"
- " bltz %[rc], 1f\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_if_positive(prev, rc, v->counter, "w");
+
return prev - 1;
}
@@ -304,17 +318,8 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " bltz %[p], 1f\n"
- " addi %[rc], %[p], 1\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_inc_unless_negative(prev, rc, v->counter, "d");
+
return !(prev < 0);
}
@@ -325,17 +330,8 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " bgtz %[p], 1f\n"
- " addi %[rc], %[p], -1\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_unless_positive(prev, rc, v->counter, "d");
+
return !(prev > 0);
}
@@ -346,17 +342,8 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " addi %[rc], %[p], -1\n"
- " bltz %[rc], 1f\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- " fence rw, rw\n"
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_if_positive(prev, rc, v->counter, "d");
+
return prev - 1;
}
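The duplicated LR/SC loops are now generated from one macro per operation, parameterized by the access width ("w" or "d"). As a reading aid, the sequence emitted for arch_atomic_fetch_add_unless() has roughly these C-level semantics (a sketch only; the real code is a single atomic LR/SC retry loop, not separate statements):

    int prev = v->counter;             /* lr.w: load-reserved            */
    if (prev != u) {                   /* beq %[p], %[u], 1f             */
            v->counter = prev + a;     /* add + sc.w.rl, retried until
                                        * the store-conditional succeeds */
            /* fence rw, rw: full barrier on the success path only */
    }
    return prev;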
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index 110752594228..880b56d8480d 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -11,28 +11,27 @@
#define _ASM_RISCV_BARRIER_H
#ifndef __ASSEMBLY__
+#include <asm/fence.h>
#define nop() __asm__ __volatile__ ("nop")
#define __nops(n) ".rept " #n "\nnop\n.endr\n"
#define nops(n) __asm__ __volatile__ (__nops(n))
-#define RISCV_FENCE(p, s) \
- __asm__ __volatile__ ("fence " #p "," #s : : : "memory")
/* These barriers need to enforce ordering on both devices or memory. */
-#define mb() RISCV_FENCE(iorw,iorw)
-#define rmb() RISCV_FENCE(ir,ir)
-#define wmb() RISCV_FENCE(ow,ow)
+#define __mb() RISCV_FENCE(iorw, iorw)
+#define __rmb() RISCV_FENCE(ir, ir)
+#define __wmb() RISCV_FENCE(ow, ow)
/* These barriers do not need to enforce ordering on devices, just memory. */
-#define __smp_mb() RISCV_FENCE(rw,rw)
-#define __smp_rmb() RISCV_FENCE(r,r)
-#define __smp_wmb() RISCV_FENCE(w,w)
+#define __smp_mb() RISCV_FENCE(rw, rw)
+#define __smp_rmb() RISCV_FENCE(r, r)
+#define __smp_wmb() RISCV_FENCE(w, w)
#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
- RISCV_FENCE(rw,w); \
+ RISCV_FENCE(rw, w); \
WRITE_ONCE(*p, v); \
} while (0)
@@ -40,7 +39,7 @@ do { \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
- RISCV_FENCE(r,rw); \
+ RISCV_FENCE(r, rw); \
___p1; \
})
@@ -69,7 +68,7 @@ do { \
* instances the scheduler pairs this with an mb(), so nothing is necessary on
* the new hart.
*/
-#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw)
+#define smp_mb__after_spinlock() RISCV_FENCE(iorw, iorw)
#include <asm-generic/barrier.h>
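With RISCV_FENCE() moved into asm/fence.h, a store-release still lowers to a fence followed by a plain store. A sketch of what __smp_store_release() expands to after this change:

    RISCV_FENCE(rw, w);     /* emits: fence rw,w                */
    WRITE_ONCE(*p, v);      /* the store made visible afterward */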
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 9ffc35537024..880606b0469a 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -22,6 +22,16 @@
#include <asm-generic/bitops/fls.h>
#else
+#define __HAVE_ARCH___FFS
+#define __HAVE_ARCH___FLS
+#define __HAVE_ARCH_FFS
+#define __HAVE_ARCH_FLS
+
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/fls.h>
+
#include <asm/alternative-macros.h>
#include <asm/hwcap.h>
@@ -37,9 +47,7 @@
static __always_inline unsigned long variable__ffs(unsigned long word)
{
- int num;
-
- asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@@ -52,32 +60,7 @@ static __always_inline unsigned long variable__ffs(unsigned long word)
return word;
legacy:
- num = 0;
-#if BITS_PER_LONG == 64
- if ((word & 0xffffffff) == 0) {
- num += 32;
- word >>= 32;
- }
-#endif
- if ((word & 0xffff) == 0) {
- num += 16;
- word >>= 16;
- }
- if ((word & 0xff) == 0) {
- num += 8;
- word >>= 8;
- }
- if ((word & 0xf) == 0) {
- num += 4;
- word >>= 4;
- }
- if ((word & 0x3) == 0) {
- num += 2;
- word >>= 2;
- }
- if ((word & 0x1) == 0)
- num += 1;
- return num;
+ return generic___ffs(word);
}
/**
@@ -93,9 +76,7 @@ legacy:
static __always_inline unsigned long variable__fls(unsigned long word)
{
- int num;
-
- asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@@ -108,32 +89,7 @@ static __always_inline unsigned long variable__fls(unsigned long word)
return BITS_PER_LONG - 1 - word;
legacy:
- num = BITS_PER_LONG - 1;
-#if BITS_PER_LONG == 64
- if (!(word & (~0ul << 32))) {
- num -= 32;
- word <<= 32;
- }
-#endif
- if (!(word & (~0ul << (BITS_PER_LONG - 16)))) {
- num -= 16;
- word <<= 16;
- }
- if (!(word & (~0ul << (BITS_PER_LONG - 8)))) {
- num -= 8;
- word <<= 8;
- }
- if (!(word & (~0ul << (BITS_PER_LONG - 4)))) {
- num -= 4;
- word <<= 4;
- }
- if (!(word & (~0ul << (BITS_PER_LONG - 2)))) {
- num -= 2;
- word <<= 2;
- }
- if (!(word & (~0ul << (BITS_PER_LONG - 1))))
- num -= 1;
- return num;
+ return generic___fls(word);
}
/**
@@ -149,46 +105,23 @@ legacy:
static __always_inline int variable_ffs(int x)
{
- int r;
+ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ RISCV_ISA_EXT_ZBB, 1)
+ : : : : legacy);
if (!x)
return 0;
- asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
- RISCV_ISA_EXT_ZBB, 1)
- : : : : legacy);
-
asm volatile (".option push\n"
".option arch,+zbb\n"
CTZW "%0, %1\n"
".option pop\n"
- : "=r" (r) : "r" (x) :);
+ : "=r" (x) : "r" (x) :);
- return r + 1;
+ return x + 1;
legacy:
- r = 1;
- if (!(x & 0xffff)) {
- x >>= 16;
- r += 16;
- }
- if (!(x & 0xff)) {
- x >>= 8;
- r += 8;
- }
- if (!(x & 0xf)) {
- x >>= 4;
- r += 4;
- }
- if (!(x & 3)) {
- x >>= 2;
- r += 2;
- }
- if (!(x & 1)) {
- x >>= 1;
- r += 1;
- }
- return r;
+ return generic_ffs(x);
}
/**
@@ -204,46 +137,23 @@ legacy:
static __always_inline int variable_fls(unsigned int x)
{
- int r;
+ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ RISCV_ISA_EXT_ZBB, 1)
+ : : : : legacy);
if (!x)
return 0;
- asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
- RISCV_ISA_EXT_ZBB, 1)
- : : : : legacy);
-
asm volatile (".option push\n"
".option arch,+zbb\n"
CLZW "%0, %1\n"
".option pop\n"
- : "=r" (r) : "r" (x) :);
+ : "=r" (x) : "r" (x) :);
- return 32 - r;
+ return 32 - x;
legacy:
- r = 32;
- if (!(x & 0xffff0000u)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xff000000u)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xf0000000u)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xc0000000u)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000u)) {
- x <<= 1;
- r -= 1;
- }
- return r;
+ return generic_fls(x);
}
/**
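The open-coded binary-search fallbacks are replaced by the asm-generic helpers, which compute identical results. Expected values on either path, as a quick sanity sketch:

    __ffs(0x10) == 4        /* index of lowest set bit, 0-based  */
    __fls(0x10) == 4        /* index of highest set bit, 0-based */
    ffs(0x10)   == 5        /* 1-based; ffs(0) == 0              */
    fls(0x10)   == 5        /* 1-based; fls(0) == 0              */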
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index a129dac4521d..dd8d07146116 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -33,8 +33,11 @@ static inline void flush_dcache_page(struct page *page)
* so instead we just flush the whole thing.
*/
#define flush_icache_range(start, end) flush_icache_all()
-#define flush_icache_user_page(vma, pg, addr, len) \
- flush_icache_mm(vma->vm_mm, 0)
+#define flush_icache_user_page(vma, pg, addr, len) \
+do { \
+ if (vma->vm_flags & VM_EXEC) \
+ flush_icache_mm(vma->vm_mm, 0); \
+} while (0)
#ifdef CONFIG_64BIT
#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end)
diff --git a/arch/riscv/include/asm/cfi.h b/arch/riscv/include/asm/cfi.h
index 8f7a62257044..fb9696d7a3f2 100644
--- a/arch/riscv/include/asm/cfi.h
+++ b/arch/riscv/include/asm/cfi.h
@@ -13,11 +13,28 @@ struct pt_regs;
#ifdef CONFIG_CFI_CLANG
enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
+#define __bpfcall
+static inline int cfi_get_offset(void)
+{
+ return 4;
+}
+
+#define cfi_get_offset cfi_get_offset
+extern u32 cfi_bpf_hash;
+extern u32 cfi_bpf_subprog_hash;
+extern u32 cfi_get_func_hash(void *func);
#else
static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
{
return BUG_TRAP_TYPE_NONE;
}
+
+#define cfi_bpf_hash 0U
+#define cfi_bpf_subprog_hash 0U
+static inline u32 cfi_get_func_hash(void *func)
+{
+ return 0;
+}
#endif /* CONFIG_CFI_CLANG */
#endif /* _ASM_RISCV_CFI_H */
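The cfi_get_offset() value of 4 reflects how kCFI lays out code on this architecture: the compiler stores a 32-bit type hash immediately before each indirectly-callable function, so the hash sits 4 bytes before the entry point (a layout sketch under that assumption; the hash value shown is made up):

    .word   0x12345678      /* kCFI type hash of the function's prototype */
    func:
            ...             /* entry point, 4 bytes after the hash        */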
diff --git a/arch/riscv/include/asm/checksum.h b/arch/riscv/include/asm/checksum.h
index a5b60b54b101..88e6f1499e88 100644
--- a/arch/riscv/include/asm/checksum.h
+++ b/arch/riscv/include/asm/checksum.h
@@ -53,7 +53,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
unsigned long fold_temp;
- asm_volatile_goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
+ asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
:
:
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 2f4726d3cfcc..4d23f0c35b94 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -8,143 +8,81 @@
#include <linux/bug.h>
-#include <asm/barrier.h>
#include <asm/fence.h>
-#define __xchg_relaxed(ptr, new, size) \
+#define __arch_xchg_masked(prepend, append, r, p, n) \
+({ \
+ u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \
+ ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \
+ ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \
+ << __s; \
+ ulong __newx = (ulong)(n) << __s; \
+ ulong __retx; \
+ ulong __rc; \
+ \
+ __asm__ __volatile__ ( \
+ prepend \
+ "0: lr.w %0, %2\n" \
+ " and %1, %0, %z4\n" \
+ " or %1, %1, %z3\n" \
+ " sc.w %1, %1, %2\n" \
+ " bnez %1, 0b\n" \
+ append \
+ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
+ : "rJ" (__newx), "rJ" (~__mask) \
+ : "memory"); \
+ \
+ r = (__typeof__(*(p)))((__retx & __mask) >> __s); \
+})
+
+#define __arch_xchg(sfx, prepend, append, r, p, n) \
+({ \
+ __asm__ __volatile__ ( \
+ prepend \
+ " amoswap" sfx " %0, %2, %1\n" \
+ append \
+ : "=r" (r), "+A" (*(p)) \
+ : "r" (n) \
+ : "memory"); \
+})
+
+#define _arch_xchg(ptr, new, sfx, prepend, append) \
({ \
__typeof__(ptr) __ptr = (ptr); \
- __typeof__(new) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- switch (size) { \
+ __typeof__(*(__ptr)) __new = (new); \
+ __typeof__(*(__ptr)) __ret; \
+ \
+ switch (sizeof(*__ptr)) { \
+ case 1: \
+ case 2: \
+ __arch_xchg_masked(prepend, append, \
+ __ret, __ptr, __new); \
+ break; \
case 4: \
- __asm__ __volatile__ ( \
- " amoswap.w %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
+ __arch_xchg(".w" sfx, prepend, append, \
+ __ret, __ptr, __new); \
break; \
case 8: \
- __asm__ __volatile__ ( \
- " amoswap.d %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
+ __arch_xchg(".d" sfx, prepend, append, \
+ __ret, __ptr, __new); \
break; \
default: \
BUILD_BUG(); \
} \
- __ret; \
+ (__typeof__(*(__ptr)))__ret; \
})
#define arch_xchg_relaxed(ptr, x) \
-({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
- _x_, sizeof(*(ptr))); \
-})
-
-#define __xchg_acquire(ptr, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(new) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- " amoswap.w %0, %2, %1\n" \
- RISCV_ACQUIRE_BARRIER \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- " amoswap.d %0, %2, %1\n" \
- RISCV_ACQUIRE_BARRIER \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ _arch_xchg(ptr, x, "", "", "")
#define arch_xchg_acquire(ptr, x) \
-({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg_acquire((ptr), \
- _x_, sizeof(*(ptr))); \
-})
-
-#define __xchg_release(ptr, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(new) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- RISCV_RELEASE_BARRIER \
- " amoswap.w %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- RISCV_RELEASE_BARRIER \
- " amoswap.d %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ _arch_xchg(ptr, x, "", "", RISCV_ACQUIRE_BARRIER)
#define arch_xchg_release(ptr, x) \
-({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg_release((ptr), \
- _x_, sizeof(*(ptr))); \
-})
-
-#define __arch_xchg(ptr, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(new) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- " amoswap.w.aqrl %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- " amoswap.d.aqrl %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ _arch_xchg(ptr, x, "", RISCV_RELEASE_BARRIER, "")
#define arch_xchg(ptr, x) \
-({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __arch_xchg((ptr), _x_, sizeof(*(ptr))); \
-})
+ _arch_xchg(ptr, x, ".aqrl", "", "")
#define xchg32(ptr, x) \
({ \
@@ -163,190 +101,95 @@
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
-#define __cmpxchg_relaxed(ptr, old, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- register unsigned int __rc; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- "0: lr.w %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.w %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" ((long)__old), "rJ" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- "0: lr.d %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.d %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
-#define arch_cmpxchg_relaxed(ptr, o, n) \
+#define __arch_cmpxchg_masked(sc_sfx, prepend, append, r, p, o, n) \
+({ \
+ u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \
+ ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \
+ ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \
+ << __s; \
+ ulong __newx = (ulong)(n) << __s; \
+ ulong __oldx = (ulong)(o) << __s; \
+ ulong __retx; \
+ ulong __rc; \
+ \
+ __asm__ __volatile__ ( \
+ prepend \
+ "0: lr.w %0, %2\n" \
+ " and %1, %0, %z5\n" \
+ " bne %1, %z3, 1f\n" \
+ " and %1, %0, %z6\n" \
+ " or %1, %1, %z4\n" \
+ " sc.w" sc_sfx " %1, %1, %2\n" \
+ " bnez %1, 0b\n" \
+ append \
+ "1:\n" \
+ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
+ : "rJ" ((long)__oldx), "rJ" (__newx), \
+ "rJ" (__mask), "rJ" (~__mask) \
+ : "memory"); \
+ \
+ r = (__typeof__(*(p)))((__retx & __mask) >> __s); \
+})
+
+#define __arch_cmpxchg(lr_sfx, sc_sfx, prepend, append, r, p, co, o, n) \
({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
- _o_, _n_, sizeof(*(ptr))); \
-})
-
-#define __cmpxchg_acquire(ptr, old, new, size) \
+ register unsigned int __rc; \
+ \
+ __asm__ __volatile__ ( \
+ prepend \
+ "0: lr" lr_sfx " %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc" sc_sfx " %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ append \
+ "1:\n" \
+ : "=&r" (r), "=&r" (__rc), "+A" (*(p)) \
+ : "rJ" (co o), "rJ" (n) \
+ : "memory"); \
+})
+
+#define _arch_cmpxchg(ptr, old, new, sc_sfx, prepend, append) \
({ \
__typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- register unsigned int __rc; \
- switch (size) { \
+ __typeof__(*(__ptr)) __old = (old); \
+ __typeof__(*(__ptr)) __new = (new); \
+ __typeof__(*(__ptr)) __ret; \
+ \
+ switch (sizeof(*__ptr)) { \
+ case 1: \
+ case 2: \
+ __arch_cmpxchg_masked(sc_sfx, prepend, append, \
+ __ret, __ptr, __old, __new); \
+ break; \
case 4: \
- __asm__ __volatile__ ( \
- "0: lr.w %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.w %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- RISCV_ACQUIRE_BARRIER \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" ((long)__old), "rJ" (__new) \
- : "memory"); \
+ __arch_cmpxchg(".w", ".w" sc_sfx, prepend, append, \
+ __ret, __ptr, (long), __old, __new); \
break; \
case 8: \
- __asm__ __volatile__ ( \
- "0: lr.d %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.d %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- RISCV_ACQUIRE_BARRIER \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
- : "memory"); \
+ __arch_cmpxchg(".d", ".d" sc_sfx, prepend, append, \
+ __ret, __ptr, /**/, __old, __new); \
break; \
default: \
BUILD_BUG(); \
} \
- __ret; \
+ (__typeof__(*(__ptr)))__ret; \
})
-#define arch_cmpxchg_acquire(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
- _o_, _n_, sizeof(*(ptr))); \
-})
+#define arch_cmpxchg_relaxed(ptr, o, n) \
+ _arch_cmpxchg((ptr), (o), (n), "", "", "")
-#define __cmpxchg_release(ptr, old, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- register unsigned int __rc; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- RISCV_RELEASE_BARRIER \
- "0: lr.w %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.w %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" ((long)__old), "rJ" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- RISCV_RELEASE_BARRIER \
- "0: lr.d %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.d %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+#define arch_cmpxchg_acquire(ptr, o, n) \
+ _arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER)
#define arch_cmpxchg_release(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_release((ptr), \
- _o_, _n_, sizeof(*(ptr))); \
-})
-
-#define __cmpxchg(ptr, old, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- register unsigned int __rc; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- "0: lr.w %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.w.rl %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- " fence rw, rw\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" ((long)__old), "rJ" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- "0: lr.d %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.d.rl %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- " fence rw, rw\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ _arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "")
#define arch_cmpxchg(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), \
- _o_, _n_, sizeof(*(ptr))); \
-})
+ _arch_cmpxchg((ptr), (o), (n), ".rl", "", " fence rw, rw\n")
#define arch_cmpxchg_local(ptr, o, n) \
- (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
+ arch_cmpxchg_relaxed((ptr), (o), (n))
#define arch_cmpxchg64(ptr, o, n) \
({ \
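Byte and halfword operations are emulated on the aligned 32-bit word that contains them. A worked sketch of the mask arithmetic in __arch_cmpxchg_masked() for a u8 at an address p with (p & 3) == 2 (variable names shortened for readability):

    u32 *ptr32 = (u32 *)((ulong)p & ~0x3);  /* the containing aligned word  */
    ulong s    = ((ulong)p & 0x3) * 8;      /* = 16: the byte's bit offset  */
    ulong mask = 0xffUL << s;               /* lane occupied by that byte   */
    ulong newx = (ulong)new << s;           /* new value shifted into place */
    /* The LR/SC loop then compares and swaps only (word & mask), leaving
     * the neighbouring bytes of the word untouched. */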
diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h
index 2ac955b51148..aa103530a5c8 100644
--- a/arch/riscv/include/asm/compat.h
+++ b/arch/riscv/include/asm/compat.h
@@ -14,9 +14,28 @@
static inline int is_compat_task(void)
{
+ if (!IS_ENABLED(CONFIG_COMPAT))
+ return 0;
+
return test_thread_flag(TIF_32BIT);
}
+static inline int is_compat_thread(struct thread_info *thread)
+{
+ if (!IS_ENABLED(CONFIG_COMPAT))
+ return 0;
+
+ return test_ti_thread_flag(thread, TIF_32BIT);
+}
+
+static inline void set_compat_task(bool is_compat)
+{
+ if (is_compat)
+ set_thread_flag(TIF_32BIT);
+ else
+ clear_thread_flag(TIF_32BIT);
+}
+
struct compat_user_regs_struct {
compat_ulong_t pc;
compat_ulong_t ra;
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index 5a626ed2c47a..347805446151 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright 2022-2023 Rivos, Inc
+ * Copyright 2022-2024 Rivos, Inc
*/
#ifndef _ASM_CPUFEATURE_H
@@ -28,29 +28,38 @@ struct riscv_isainfo {
DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
-DECLARE_PER_CPU(long, misaligned_access_speed);
-
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];
void riscv_user_isa_enable(void);
-#ifdef CONFIG_RISCV_MISALIGNED
-bool unaligned_ctl_available(void);
-bool check_unaligned_access_emulated(int cpu);
+#if defined(CONFIG_RISCV_MISALIGNED)
+bool check_unaligned_access_emulated_all_cpus(void);
void unaligned_emulation_finish(void);
+bool unaligned_ctl_available(void);
+DECLARE_PER_CPU(long, misaligned_access_speed);
#else
static inline bool unaligned_ctl_available(void)
{
return false;
}
+#endif
+
+#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
+DECLARE_STATIC_KEY_FALSE(fast_unaligned_access_speed_key);
-static inline bool check_unaligned_access_emulated(int cpu)
+static __always_inline bool has_fast_unaligned_accesses(void)
{
- return false;
+ return static_branch_likely(&fast_unaligned_access_speed_key);
+}
+#else
+static __always_inline bool has_fast_unaligned_accesses(void)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+ return true;
+ else
+ return false;
}
-
-static inline void unaligned_emulation_finish(void) {}
#endif
unsigned long riscv_get_elf_hwcap(void);
@@ -80,7 +89,7 @@ riscv_has_extension_likely(const unsigned long ext)
"ext must be < RISCV_ISA_EXT_MAX");
if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
- asm_volatile_goto(
+ asm goto(
ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1)
:
: [ext] "i" (ext)
@@ -103,7 +112,7 @@ riscv_has_extension_unlikely(const unsigned long ext)
"ext must be < RISCV_ISA_EXT_MAX");
if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
- asm_volatile_goto(
+ asm goto(
ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
:
: [ext] "i" (ext)
@@ -135,6 +144,4 @@ static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsi
return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
}
-DECLARE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
-
#endif
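has_fast_unaligned_accesses() folds the boot-time probe (a static branch under CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) and the Kconfig promise into a single predicate. A hypothetical caller sketch (the helper names are illustrative):

    if (has_fast_unaligned_accesses())
            return csum_words(buf, len);    /* word-at-a-time fast path   */
    return csum_bytes(buf, len);            /* conservative bytewise path */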
diff --git a/arch/riscv/include/asm/crash_core.h b/arch/riscv/include/asm/crash_reserve.h
index e1874b23feaf..013962e63587 100644
--- a/arch/riscv/include/asm/crash_core.h
+++ b/arch/riscv/include/asm/crash_reserve.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _RISCV_CRASH_CORE_H
-#define _RISCV_CRASH_CORE_H
+#ifndef _RISCV_CRASH_RESERVE_H
+#define _RISCV_CRASH_RESERVE_H
#define CRASH_ALIGN PMD_SIZE
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 510014051f5d..2468c55933cd 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -424,6 +424,7 @@
# define CSR_STATUS CSR_MSTATUS
# define CSR_IE CSR_MIE
# define CSR_TVEC CSR_MTVEC
+# define CSR_ENVCFG CSR_MENVCFG
# define CSR_SCRATCH CSR_MSCRATCH
# define CSR_EPC CSR_MEPC
# define CSR_CAUSE CSR_MCAUSE
@@ -448,6 +449,7 @@
# define CSR_STATUS CSR_SSTATUS
# define CSR_IE CSR_SIE
# define CSR_TVEC CSR_STVEC
+# define CSR_ENVCFG CSR_SENVCFG
# define CSR_SCRATCH CSR_SSCRATCH
# define CSR_EPC CSR_SEPC
# define CSR_CAUSE CSR_SCAUSE
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index 06c236bfab53..c7aea7886d22 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -53,13 +53,9 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
#define ELF_ET_DYN_BASE ((DEFAULT_MAP_WINDOW / 3) * 2)
#ifdef CONFIG_64BIT
-#ifdef CONFIG_COMPAT
-#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
+#define STACK_RND_MASK (is_compat_task() ? \
0x7ff >> (PAGE_SHIFT - 12) : \
0x3ffff >> (PAGE_SHIFT - 12))
-#else
-#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
-#endif
#endif
/*
@@ -139,10 +135,7 @@ do { \
#ifdef CONFIG_COMPAT
#define SET_PERSONALITY(ex) \
-do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
- set_thread_flag(TIF_32BIT); \
- else \
- clear_thread_flag(TIF_32BIT); \
+do { set_compat_task((ex).e_ident[EI_CLASS] == ELFCLASS32); \
if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_LINUX | \
(current->personality & (~PER_MASK))); \
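The CONFIG_COMPAT ifdef around STACK_RND_MASK can collapse because is_compat_task() now compiles to a constant 0 when COMPAT is off, letting the compiler discard the first arm. With 4 KiB pages the two masks correspond to (arithmetic sketch):

    0x7ff   -> 11 bits of page-granular stack randomization (~8 MiB span), compat
    0x3ffff -> 18 bits (~1 GiB span), native 64-bit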
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index ea33288f8a25..1f2dbfb8a8bf 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -12,8 +12,8 @@
#include <asm/vendorid_list.h>
#ifdef CONFIG_ERRATA_ANDES
-#define ERRATA_ANDESTECH_NO_IOCP 0
-#define ERRATA_ANDESTECH_NUMBER 1
+#define ERRATA_ANDES_NO_IOCP 0
+#define ERRATA_ANDES_NUMBER 1
#endif
#ifdef CONFIG_ERRATA_SIFIVE
@@ -112,15 +112,6 @@ asm volatile(ALTERNATIVE( \
#define THEAD_C9XX_RV_IRQ_PMU 17
#define THEAD_C9XX_CSR_SCOUNTEROF 0x5c5
-#define ALT_SBI_PMU_OVERFLOW(__ovl) \
-asm volatile(ALTERNATIVE( \
- "csrr %0, " __stringify(CSR_SSCOUNTOVF), \
- "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \
- THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \
- CONFIG_ERRATA_THEAD_PMU) \
- : "=r" (__ovl) : \
- : "memory")
-
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
index 2b443a3a487f..6bcd80325dfc 100644
--- a/arch/riscv/include/asm/fence.h
+++ b/arch/riscv/include/asm/fence.h
@@ -1,12 +1,18 @@
#ifndef _ASM_RISCV_FENCE_H
#define _ASM_RISCV_FENCE_H
+#define RISCV_FENCE_ASM(p, s) "\tfence " #p "," #s "\n"
+#define RISCV_FENCE(p, s) \
+ ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })
+
#ifdef CONFIG_SMP
-#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n"
-#define RISCV_RELEASE_BARRIER "\tfence rw, w\n"
+#define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r, rw)
+#define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw, w)
+#define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw, rw)
#else
#define RISCV_ACQUIRE_BARRIER
#define RISCV_RELEASE_BARRIER
+#define RISCV_FULL_BARRIER
#endif
#endif /* _ASM_RISCV_FENCE_H */
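The header now provides two forms: RISCV_FENCE_ASM() yields a string fragment for splicing into larger inline-asm sequences (as RISCV_ACQUIRE_BARRIER and friends now do), while RISCV_FENCE() is a complete statement. A minimal usage sketch:

    static inline void full_memory_fence(void)
    {
            RISCV_FENCE(rw, rw);    /* expands to: fence rw,rw */
    }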
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index 329172122952..1276d7d9ca8b 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -13,19 +13,14 @@
#endif
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
-/*
- * Clang prior to 13 had "mcount" instead of "_mcount":
- * https://reviews.llvm.org/D98881
- */
-#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000
-#define MCOUNT_NAME _mcount
-#else
-#define MCOUNT_NAME mcount
-#endif
-
#define ARCH_SUPPORTS_FTRACE_OPS 1
#ifndef __ASSEMBLY__
-void MCOUNT_NAME(void);
+
+extern void *return_address(unsigned int level);
+
+#define ftrace_return_address(n) return_address(n)
+
+void _mcount(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
@@ -75,7 +70,7 @@ struct dyn_arch_ftrace {
* both auipc and jalr at the same time.
*/
-#define MCOUNT_ADDR ((unsigned long)MCOUNT_NAME)
+#define MCOUNT_ADDR ((unsigned long)_mcount)
#define JALR_SIGN_MASK (0x00000800)
#define JALR_OFFSET_MASK (0x00000fff)
#define AUIPC_OFFSET_MASK (0xfffff000)
diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
index 4c5b0e929890..22deb7a2a6ec 100644
--- a/arch/riscv/include/asm/hugetlb.h
+++ b/arch/riscv/include/asm/hugetlb.h
@@ -11,6 +11,11 @@ static inline void arch_clear_hugepage_flags(struct page *page)
}
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+bool arch_hugetlb_migration_supported(struct hstate *h);
+#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
+#endif
+
#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define __HAVE_ARCH_HUGE_PTE_CLEAR
void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 5340f818746b..e17d0078a651 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -80,6 +80,9 @@
#define RISCV_ISA_EXT_ZFA 71
#define RISCV_ISA_EXT_ZTSO 72
#define RISCV_ISA_EXT_ZACAS 73
+#define RISCV_ISA_EXT_XANDESPMU 74
+
+#define RISCV_ISA_EXT_XLINUXENVCFG 127
#define RISCV_ISA_EXT_MAX 128
#define RISCV_ISA_EXT_INVALID U32_MAX
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 42497d487a17..1c5c641075d2 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -47,10 +47,10 @@
* sufficient to ensure this works sanely on controllers that support I/O
* writes.
*/
-#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory");
-#define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory");
-#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory");
-#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory");
+#define __io_pbr() RISCV_FENCE(io, i)
+#define __io_par(v) RISCV_FENCE(i, ior)
+#define __io_pbw() RISCV_FENCE(iow, o)
+#define __io_paw() RISCV_FENCE(o, io)
/*
* Accesses from a single hart to a single I/O address must be ordered. This
diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h
index 14a5ea8d8ef0..4a35d787c019 100644
--- a/arch/riscv/include/asm/jump_label.h
+++ b/arch/riscv/include/asm/jump_label.h
@@ -17,7 +17,7 @@
static __always_inline bool arch_static_branch(struct static_key * const key,
const bool branch)
{
- asm_volatile_goto(
+ asm goto(
" .align 2 \n\t"
" .option push \n\t"
" .option norelax \n\t"
@@ -39,7 +39,7 @@ label:
static __always_inline bool arch_static_branch_jump(struct static_key * const key,
const bool branch)
{
- asm_volatile_goto(
+ asm goto(
" .align 2 \n\t"
" .option push \n\t"
" .option norelax \n\t"
diff --git a/arch/riscv/include/asm/membarrier.h b/arch/riscv/include/asm/membarrier.h
new file mode 100644
index 000000000000..47b240d0d596
--- /dev/null
+++ b/arch/riscv/include/asm/membarrier.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_RISCV_MEMBARRIER_H
+#define _ASM_RISCV_MEMBARRIER_H
+
+static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ /*
+ * Only need the full barrier when switching between processes.
+ * Barrier when switching from kernel to userspace is not
+ * required here, given that it is implied by mmdrop(). Barrier
+ * when switching from userspace to kernel is not needed after
+ * store to rq->curr.
+ */
+ if (IS_ENABLED(CONFIG_SMP) &&
+ likely(!(atomic_read(&next->membarrier_state) &
+ (MEMBARRIER_STATE_PRIVATE_EXPEDITED |
+ MEMBARRIER_STATE_GLOBAL_EXPEDITED)) || !prev))
+ return;
+
+ /*
+ * The membarrier system call requires a full memory barrier
+ * after storing to rq->curr, before going back to user-space.
+ *
+ * This barrier is also needed for the SYNC_CORE command when
+ * switching between processes; in particular, on a transition
+ * from a thread belonging to another mm to a thread belonging
+ * to the mm for which a membarrier SYNC_CORE is done on CPU0:
+ *
+ * - [CPU0] sets all bits in the mm icache_stale_mask (in
+ * prepare_sync_core_cmd());
+ *
+ * - [CPU1] stores to rq->curr (by the scheduler);
+ *
+ * - [CPU0] loads rq->curr within membarrier and observes
+ * cpu_rq(1)->curr->mm != mm, so the IPI is skipped on
+ * CPU1; this means membarrier relies on switch_mm() to
+ * issue the sync-core;
+ *
+ * - [CPU1] switch_mm() loads icache_stale_mask; if the bit
+ * is zero, switch_mm() may incorrectly skip the sync-core.
+ *
+ * Matches a full barrier in the proximity of the membarrier
+ * system call entry.
+ */
+ smp_mb();
+}
+
+#endif /* _ASM_RISCV_MEMBARRIER_H */
diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
index 4c58ee7f95ec..06cadfd7a237 100644
--- a/arch/riscv/include/asm/mmio.h
+++ b/arch/riscv/include/asm/mmio.h
@@ -12,6 +12,7 @@
#define _ASM_RISCV_MMIO_H
#include <linux/types.h>
+#include <asm/fence.h>
#include <asm/mmiowb.h>
/* Generic IO read/write. These perform native-endian accesses. */
@@ -131,8 +132,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* doesn't define any ordering between the memory space and the I/O space.
*/
#define __io_br() do {} while (0)
-#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
-#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
+#define __io_ar(v) RISCV_FENCE(i, ir)
+#define __io_bw() RISCV_FENCE(w, o)
#define __io_aw() mmiowb_set_pending()
#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
index 0b2333e71fdc..52ce4a399d9b 100644
--- a/arch/riscv/include/asm/mmiowb.h
+++ b/arch/riscv/include/asm/mmiowb.h
@@ -7,7 +7,7 @@
* "o,w" is sufficient to ensure that all writes to the device have completed
* before the write to the spinlock is allowed to commit.
*/
-#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
+#define mmiowb() RISCV_FENCE(o, w)
#include <linux/smp.h>
#include <asm-generic/mmiowb.h>
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 57e887bfa34c..115ac98b8d72 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -12,7 +12,7 @@
#include <linux/pfn.h>
#include <linux/const.h>
-#define PAGE_SHIFT (12)
+#define PAGE_SHIFT CONFIG_PAGE_SHIFT
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
@@ -89,7 +89,7 @@ typedef struct page *pgtable_t;
#define PTE_FMT "%08lx"
#endif
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
/*
* We override this value as its generic definition uses __pa too early in
* the boot process (before kernel_map.va_pa_offset is set).
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index d169a4f41a2e..deaf971253a2 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -95,7 +95,19 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
__pud_free(mm, pud);
}
-#define __pud_free_tlb(tlb, pud, addr) pud_free((tlb)->mm, pud)
+static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+ unsigned long addr)
+{
+ if (pgtable_l4_enabled) {
+ struct ptdesc *ptdesc = virt_to_ptdesc(pud);
+
+ pagetable_pud_dtor(ptdesc);
+ if (riscv_use_ipi_for_rfence())
+ tlb_remove_page_ptdesc(tlb, ptdesc);
+ else
+ tlb_remove_ptdesc(tlb, ptdesc);
+ }
+}
#define p4d_alloc_one p4d_alloc_one
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -124,7 +136,16 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
__p4d_free(mm, p4d);
}
-#define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d)
+static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
+ unsigned long addr)
+{
+ if (pgtable_l5_enabled) {
+ if (riscv_use_ipi_for_rfence())
+ tlb_remove_page_ptdesc(tlb, virt_to_ptdesc(p4d));
+ else
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
+ }
+}
#endif /* __PAGETABLE_PMD_FOLDED */
static inline void sync_kernel_mappings(pgd_t *pgd)
@@ -149,15 +170,31 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
#ifndef __PAGETABLE_PMD_FOLDED
-#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+ unsigned long addr)
+{
+ struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
+
+ pagetable_pmd_dtor(ptdesc);
+ if (riscv_use_ipi_for_rfence())
+ tlb_remove_page_ptdesc(tlb, ptdesc);
+ else
+ tlb_remove_ptdesc(tlb, ptdesc);
+}
#endif /* __PAGETABLE_PMD_FOLDED */
-#define __pte_free_tlb(tlb, pte, buf) \
-do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), page_ptdesc(pte));\
-} while (0)
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+ unsigned long addr)
+{
+ struct ptdesc *ptdesc = page_ptdesc(pte);
+
+ pagetable_pte_dtor(ptdesc);
+ if (riscv_use_ipi_for_rfence())
+ tlb_remove_page_ptdesc(tlb, ptdesc);
+ else
+ tlb_remove_ptdesc(tlb, ptdesc);
+}
#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_PGALLOC_H */
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index b42017d76924..221a5c1ee287 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -136,7 +136,7 @@ enum napot_cont_order {
* 10010 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Shareable, Non-trustable
*/
#define _PAGE_PMA_THEAD ((1UL << 62) | (1UL << 61) | (1UL << 60))
-#define _PAGE_NOCACHE_THEAD ((1UL < 61) | (1UL << 60))
+#define _PAGE_NOCACHE_THEAD ((1UL << 61) | (1UL << 60))
#define _PAGE_IO_THEAD ((1UL << 63) | (1UL << 60))
#define _PAGE_MTMASK_THEAD (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59))
@@ -190,7 +190,7 @@ static inline int pud_bad(pud_t pud)
}
#define pud_leaf pud_leaf
-static inline int pud_leaf(pud_t pud)
+static inline bool pud_leaf(pud_t pud)
{
return pud_present(pud) && (pud_val(pud) & _PAGE_LEAF);
}
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 0c94260b5d0c..58fd7b70b903 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -84,7 +84,7 @@
* Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
* is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
*/
-#define vmemmap ((struct page *)VMEMMAP_START)
+#define vmemmap ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
#define PCI_IO_SIZE SZ_16M
#define PCI_IO_END VMEMMAP_START
@@ -127,17 +127,11 @@
#define VA_USER_SV48 (UL(1) << (VA_BITS_SV48 - 1))
#define VA_USER_SV57 (UL(1) << (VA_BITS_SV57 - 1))
-#ifdef CONFIG_COMPAT
#define MMAP_VA_BITS_64 ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS_64 (VA_BITS_SV39)
#define MMAP_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
#define MMAP_MIN_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
#else
-#define MMAP_VA_BITS ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
-#define MMAP_MIN_VA_BITS (VA_BITS_SV39)
-#endif /* CONFIG_COMPAT */
-
-#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */
@@ -241,7 +235,7 @@ static inline int pmd_bad(pmd_t pmd)
}
#define pmd_leaf pmd_leaf
-static inline int pmd_leaf(pmd_t pmd)
+static inline bool pmd_leaf(pmd_t pmd)
{
return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}
@@ -439,6 +433,12 @@ static inline pte_t pte_mkhuge(pte_t pte)
return pte;
}
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
+#define pte_leaf_size(pte) (pte_napot(pte) ? \
+ napot_cont_size(napot_cont_order(pte)) :\
+ PAGE_SIZE)
+#endif
+
#ifdef CONFIG_NUMA_BALANCING
/*
* See the comment in include/asm-generic/pgtable.h
@@ -513,23 +513,25 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
WRITE_ONCE(*ptep, pteval);
}
-void flush_icache_pte(pte_t pte);
+void flush_icache_pte(struct mm_struct *mm, pte_t pte);
-static inline void __set_pte_at(pte_t *ptep, pte_t pteval)
+static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval)
{
if (pte_present(pteval) && pte_exec(pteval))
- flush_icache_pte(pteval);
+ flush_icache_pte(mm, pteval);
set_pte(ptep, pteval);
}
+#define PFN_PTE_SHIFT _PAGE_PFN_SHIFT
+
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval, unsigned int nr)
{
page_table_check_ptes_set(mm, ptep, pteval, nr);
for (;;) {
- __set_pte_at(ptep, pteval);
+ __set_pte_at(mm, ptep, pteval);
if (--nr == 0)
break;
ptep++;
@@ -541,7 +543,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
static inline void pte_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- __set_pte_at(ptep, __pte(0));
+ __set_pte_at(mm, ptep, __pte(0));
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* defined in mm/pgtable.c */
@@ -656,6 +658,12 @@ static inline int pmd_write(pmd_t pmd)
return pte_write(pmd_pte(pmd));
}
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+ return pte_write(pud_pte(pud));
+}
+
#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
@@ -707,14 +715,14 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
page_table_check_pmd_set(mm, pmdp, pmd);
- return __set_pte_at((pte_t *)pmdp, pmd_pte(pmd));
+ return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd));
}
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
page_table_check_pud_set(mm, pudp, pud);
- return __set_pte_at((pte_t *)pudp, pud_pte(pud));
+ return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud));
}
#ifdef CONFIG_PAGE_TABLE_CHECK
@@ -865,8 +873,8 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
#ifdef CONFIG_COMPAT
-#define TASK_SIZE_32 (_AC(0x80000000, UL))
-#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)
+#define TASK_SIZE (is_compat_task() ? \
TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE TASK_SIZE_64
@@ -882,7 +890,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
#define PAGE_SHARED __pgprot(0)
#define PAGE_KERNEL __pgprot(0)
#define swapper_pg_dir NULL
-#define TASK_SIZE 0xffffffffUL
+#define TASK_SIZE _AC(-1, UL)
#define VMALLOC_START _AC(0, UL)
#define VMALLOC_END TASK_SIZE
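Two boundary changes in this file are easy to miss; as an arithmetic sketch (assuming 4 KiB pages):

    TASK_SIZE_32:   0x80000000 - PAGE_SIZE == 0x7ffff000
                    /* the last compat user page now ends below 2 GiB */
    !MMU TASK_SIZE: _AC(-1, UL) == ~0UL
                    /* the whole address range, not a 32-bit cap      */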
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index a8509cc31ab2..0faf5f161f1e 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -14,22 +14,21 @@
#include <asm/ptrace.h>
-#ifdef CONFIG_64BIT
-#define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1))
-#define STACK_TOP_MAX TASK_SIZE
-
+/*
+ * addr is a hint to the maximum userspace address that mmap should provide, so
+ * this macro needs to return the largest address space available so that
+ * mmap_end < addr, where mmap_end is the top of that address space.
+ * See Documentation/arch/riscv/vm-layout.rst for more details.
+ */
#define arch_get_mmap_end(addr, len, flags) \
({ \
unsigned long mmap_end; \
typeof(addr) _addr = (addr); \
- if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
- mmap_end = STACK_TOP_MAX; \
- else if ((_addr) >= VA_USER_SV57) \
+ if ((_addr) == 0 || is_compat_task() || \
+ ((_addr + len) > BIT(VA_BITS - 1))) \
mmap_end = STACK_TOP_MAX; \
- else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
- mmap_end = VA_USER_SV48; \
else \
- mmap_end = VA_USER_SV39; \
+ mmap_end = (_addr + len); \
mmap_end; \
})
@@ -39,17 +38,17 @@
typeof(addr) _addr = (addr); \
typeof(base) _base = (base); \
unsigned long rnd_gap = DEFAULT_MAP_WINDOW - (_base); \
- if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
+ if ((_addr) == 0 || is_compat_task() || \
+ ((_addr + len) > BIT(VA_BITS - 1))) \
mmap_base = (_base); \
- else if (((_addr) >= VA_USER_SV57) && (VA_BITS >= VA_BITS_SV57)) \
- mmap_base = VA_USER_SV57 - rnd_gap; \
- else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
- mmap_base = VA_USER_SV48 - rnd_gap; \
else \
- mmap_base = VA_USER_SV39 - rnd_gap; \
+ mmap_base = (_addr + len) - rnd_gap; \
mmap_base; \
})
+#ifdef CONFIG_64BIT
+#define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1))
+#define STACK_TOP_MAX TASK_SIZE_64
#else
#define DEFAULT_MAP_WINDOW TASK_SIZE
#define STACK_TOP_MAX TASK_SIZE
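A worked sketch of the new mmap hint handling on an sv48 kernel (VA_BITS == 48, so the boundary is BIT(47)):

    addr == 0 (no hint)         -> mmap_end = STACK_TOP_MAX
    compat task                 -> mmap_end = STACK_TOP_MAX
    addr + len <= 1UL << 47     -> mmap_end = addr + len  /* hint honoured */
    addr + len >  1UL << 47     -> mmap_end = STACK_TOP_MAX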
diff --git a/arch/riscv/include/asm/ptdump.h b/arch/riscv/include/asm/ptdump.h
deleted file mode 100644
index 3c9ea6dd5af7..000000000000
--- a/arch/riscv/include/asm/ptdump.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2019 SiFive
- */
-
-#ifndef _ASM_RISCV_PTDUMP_H
-#define _ASM_RISCV_PTDUMP_H
-
-void ptdump_check_wx(void);
-
-#ifdef CONFIG_DEBUG_WX
-static inline void debug_checkwx(void)
-{
- ptdump_check_wx();
-}
-#else
-static inline void debug_checkwx(void)
-{
-}
-#endif
-
-#endif /* _ASM_RISCV_PTDUMP_H */
diff --git a/arch/riscv/include/asm/signal.h b/arch/riscv/include/asm/signal.h
deleted file mode 100644
index 956ae0a01bad..000000000000
--- a/arch/riscv/include/asm/signal.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#ifndef __ASM_SIGNAL_H
-#define __ASM_SIGNAL_H
-
-#include <uapi/asm/signal.h>
-#include <uapi/asm/ptrace.h>
-
-asmlinkage __visible
-void do_work_pending(struct pt_regs *regs, unsigned long thread_info_flags);
-
-#endif
diff --git a/arch/riscv/include/asm/simd.h b/arch/riscv/include/asm/simd.h
index 54efbf523d49..adb50f3ec205 100644
--- a/arch/riscv/include/asm/simd.h
+++ b/arch/riscv/include/asm/simd.h
@@ -34,9 +34,9 @@ static __must_check inline bool may_use_simd(void)
return false;
/*
- * Nesting is acheived in preempt_v by spreading the control for
+ * Nesting is achieved in preempt_v by spreading the control for
* preemptible and non-preemptible kernel-mode Vector into two fields.
- * Always try to match with prempt_v if kernel V-context exists. Then,
+ * Always try to match with preempt_v if kernel V-context exists. Then,
* fallback to check non preempt_v if nesting happens, or if the config
* is not set.
*/
diff --git a/arch/riscv/include/asm/stacktrace.h b/arch/riscv/include/asm/stacktrace.h
index f7e8ef2418b9..b1495a7e06ce 100644
--- a/arch/riscv/include/asm/stacktrace.h
+++ b/arch/riscv/include/asm/stacktrace.h
@@ -21,4 +21,9 @@ static inline bool on_thread_stack(void)
return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
}
+
+#ifdef CONFIG_VMAP_STACK
+DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
+#endif /* CONFIG_VMAP_STACK */
+
#endif /* _ASM_RISCV_STACKTRACE_H */
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
index 02f87867389a..4ffb022b097f 100644
--- a/arch/riscv/include/asm/suspend.h
+++ b/arch/riscv/include/asm/suspend.h
@@ -13,7 +13,7 @@ struct suspend_context {
/* Saved and restored by low-level functions */
struct pt_regs regs;
/* Saved and restored by high-level functions */
- unsigned long scratch;
+ unsigned long envcfg;
unsigned long tvec;
unsigned long ie;
#ifdef CONFIG_MMU
@@ -55,4 +55,7 @@ int hibernate_resume_nonboot_cpu_disable(void);
asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp,
unsigned long cpu_resume);
asmlinkage int hibernate_core_restore_code(void);
+bool riscv_sbi_hsm_is_supported(void);
+bool riscv_sbi_suspend_state_is_valid(u32 state);
+int riscv_sbi_hart_suspend(u32 state);
#endif
diff --git a/arch/riscv/include/asm/sync_core.h b/arch/riscv/include/asm/sync_core.h
new file mode 100644
index 000000000000..9153016da8f1
--- /dev/null
+++ b/arch/riscv/include/asm/sync_core.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_SYNC_CORE_H
+#define _ASM_RISCV_SYNC_CORE_H
+
+/*
+ * RISC-V implements return to user-space through an xRET instruction,
+ * which is not core serializing.
+ */
+static inline void sync_core_before_usermode(void)
+{
+ asm volatile ("fence.i" ::: "memory");
+}
+
+#ifdef CONFIG_SMP
+/*
+ * Ensure the next switch_mm() on every CPU issues a core serializing
+ * instruction for the given @mm.
+ */
+static inline void prepare_sync_core_cmd(struct mm_struct *mm)
+{
+ cpumask_setall(&mm->context.icache_stale_mask);
+}
+#else
+static inline void prepare_sync_core_cmd(struct mm_struct *mm)
+{
+}
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_RISCV_SYNC_CORE_H */
diff --git a/arch/riscv/include/asm/syscall_wrapper.h b/arch/riscv/include/asm/syscall_wrapper.h
index eeec04b7dae6..980094c2e976 100644
--- a/arch/riscv/include/asm/syscall_wrapper.h
+++ b/arch/riscv/include/asm/syscall_wrapper.h
@@ -12,25 +12,51 @@
asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
-#define SC_RISCV_REGS_TO_ARGS(x, ...) \
- __MAP(x,__SC_ARGS \
- ,,regs->orig_a0,,regs->a1,,regs->a2 \
+#ifdef CONFIG_64BIT
+
+#define __SYSCALL_SE_DEFINEx(x, prefix, name, ...) \
+ static long __se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static long __se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__))
+
+#define SC_RISCV_REGS_TO_ARGS(x, ...) \
+ __MAP(x,__SC_ARGS \
+ ,,regs->orig_a0,,regs->a1,,regs->a2 \
,,regs->a3,,regs->a4,,regs->a5,,regs->a6)
+#else
+/*
+ * Use type aliasing to ensure registers a0-a6 are correctly passed to the syscall
+ * implementation when >word-size arguments are used.
+ */
+#define __SYSCALL_SE_DEFINEx(x, prefix, name, ...) \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wattribute-alias", \
+ "Type aliasing is used to sanitize syscall arguments"); \
+ static long __se_##prefix##name(ulong, ulong, ulong, ulong, ulong, ulong, \
+ ulong) \
+ __attribute__((alias(__stringify(___se_##prefix##name)))); \
+ __diag_pop(); \
+ static long noinline ___se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static long ___se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__))
+
+#define SC_RISCV_REGS_TO_ARGS(x, ...) \
+ regs->orig_a0,regs->a1,regs->a2,regs->a3,regs->a4,regs->a5,regs->a6
+
+#endif /* CONFIG_64BIT */
+
#ifdef CONFIG_COMPAT
#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__riscv_compat_sys##name, ERRNO); \
- static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
- asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs) \
+ __SYSCALL_SE_DEFINEx(x, compat_sys, name, __VA_ARGS__) \
{ \
- return __se_compat_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
} \
- static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs) \
{ \
- return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
+ return __se_compat_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
} \
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
@@ -51,19 +77,18 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
#define __SYSCALL_DEFINEx(x, name, ...) \
asmlinkage long __riscv_sys##name(const struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__riscv_sys##name, ERRNO); \
- static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
- asmlinkage long __riscv_sys##name(const struct pt_regs *regs) \
- { \
- return __se_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
- } \
- static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ __SYSCALL_SE_DEFINEx(x, sys, name, __VA_ARGS__) \
{ \
long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
__MAP(x,__SC_TEST,__VA_ARGS__); \
__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
return ret; \
} \
+ asmlinkage long __riscv_sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ } \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#define SYSCALL_DEFINE0(sname) \
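
The 32-bit variant exists because of syscalls with 64-bit arguments: the C ABI passes those in an aligned even/odd register pair, so handing all seven registers (orig_a0..a6) through an aliased seven-ulong stub preserves exactly the layout userspace created. A rough sketch of the shape this expands to for a hypothetical SYSCALL_DEFINE2(example, int, fd, loff_t, off), not literal preprocessor output:

	/* Aliased entry, called with the raw a0..a6 register values. */
	static long __se_sys_example(ulong, ulong, ulong, ulong, ulong, ulong,
				     ulong)
		__attribute__((alias("___se_sys_example")));

	/* Real stub: the 64-bit 'off' is rebuilt from the a2/a3 pair,
	 * since the ABI skipped a1 to keep the pair aligned. */
	static long noinline ___se_sys_example(long fd, long long off)
	{
		return __do_sys_example((int)fd, (loff_t)off);
	}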
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index 1eb5682b2af6..1f6c38420d8e 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -10,13 +10,31 @@ struct mmu_gather;
static void tlb_flush(struct mmu_gather *tlb);
+#ifdef CONFIG_MMU
+#include <linux/swap.h>
+
+/*
+ * Platforms where riscv_ipi_for_rfence is true use an IPI to perform the
+ * TLB shootdown, but platforms where it is false do the shootdown through
+ * SBI instead. To keep software page-table walkers safe in the SBI case we
+ * switch to RCU-based table free (MMU_GATHER_RCU_TABLE_FREE). See the
+ * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in
+ * include/asm-generic/tlb.h for more details.
+ */
+static inline void __tlb_remove_table(void *table)
+{
+ free_page_and_swap_cache(table);
+}
+
+#endif /* CONFIG_MMU */
+
#define tlb_flush tlb_flush
#include <asm-generic/tlb.h>
static inline void tlb_flush(struct mmu_gather *tlb)
{
#ifdef CONFIG_MMU
- if (tlb->fullmm || tlb->need_flush_all)
+ if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables)
flush_tlb_mm(tlb->mm);
else
flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
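
With MMU_GATHER_RCU_TABLE_FREE in play, a page-table page reaches __tlb_remove_table() only after it is safe from concurrent lockless walkers such as fast GUP. A sketch of the producing side, with example_pte_free_tlb() as a hypothetical helper:

	static inline void example_pte_free_tlb(struct mmu_gather *tlb,
						struct page *pte)
	{
		/* Defer the free: tlb_remove_table() batches the page, and
		 * __tlb_remove_table() releases it after a grace period (or
		 * an IPI-based sync on IPI-rfence platforms). */
		tlb_remove_table(tlb, pte);
	}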
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 928f096dca21..4112cc8d1d69 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -75,6 +75,7 @@ static inline void flush_tlb_kernel_range(unsigned long start,
#define flush_tlb_mm(mm) flush_tlb_all()
#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
+#define local_flush_tlb_kernel_range(start, end) flush_tlb_all()
#endif /* !CONFIG_SMP || !CONFIG_MMU */
#endif /* _ASM_RISCV_TLBFLUSH_H */
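
The new fallback keeps callers compiling on !SMP or !MMU configurations, at the cost of over-flushing. A sketch of such a caller; patch_kernel_mapping() is hypothetical:

	static void remap_and_flush(unsigned long addr, unsigned long size)
	{
		patch_kernel_mapping(addr, size);	/* hypothetical */
		/* Degrades to flush_tlb_all() on !SMP || !MMU builds. */
		local_flush_tlb_kernel_range(addr, addr + size);
	}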
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index 0cd6f0a027d1..731dcd0ed4de 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -284,4 +284,15 @@ static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
#endif /* CONFIG_RISCV_ISA_V */
+/*
+ * Return the implementation's vlen value, in bits.
+ *
+ * riscv_v_vsize holds the combined size of the 32 vector registers, each
+ * vlenb bytes long, so the vlen value in bits is rebuilt from it.
+ */
+static inline int riscv_vector_vlen(void)
+{
+ return riscv_v_vsize / 32 * 8;
+}
+
#endif /* ! __ASM_RISCV_VECTOR_H */
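
Worked example of the arithmetic: an implementation with VLEN = 128 bits has vlenb = 16 bytes, so riscv_v_vsize = 32 * 16 = 512 bytes and riscv_vector_vlen() = 512 / 32 * 8 = 128. A sketch of the typical gate in an accelerated routine:

	/* Take the vector path only when VLEN meets this algorithm's
	 * minimum; the 128-bit requirement is assumed for illustration. */
	if (riscv_vector_vlen() >= 128)
		do_vector_path();	/* hypothetical */
	else
		do_scalar_path();	/* hypothetical */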
diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h
index e55407ace0c3..2f2bb0c84f9a 100644
--- a/arch/riscv/include/asm/vendorid_list.h
+++ b/arch/riscv/include/asm/vendorid_list.h
@@ -5,7 +5,7 @@
#ifndef ASM_VENDOR_LIST_H
#define ASM_VENDOR_LIST_H
-#define ANDESTECH_VENDOR_ID 0x31e
+#define ANDES_VENDOR_ID 0x31e
#define SIFIVE_VENDOR_ID 0x489
#define THEAD_VENDOR_ID 0x5b7
diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h
index 924d01b56c9a..51f6dfe19745 100644
--- a/arch/riscv/include/asm/vmalloc.h
+++ b/arch/riscv/include/asm/vmalloc.h
@@ -19,65 +19,6 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot)
return true;
}
-#ifdef CONFIG_RISCV_ISA_SVNAPOT
-#include <linux/pgtable.h>
+#endif
-#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
-static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
- u64 pfn, unsigned int max_page_shift)
-{
- unsigned long map_size = PAGE_SIZE;
- unsigned long size, order;
-
- if (!has_svnapot())
- return map_size;
-
- for_each_napot_order_rev(order) {
- if (napot_cont_shift(order) > max_page_shift)
- continue;
-
- size = napot_cont_size(order);
- if (end - addr < size)
- continue;
-
- if (!IS_ALIGNED(addr, size))
- continue;
-
- if (!IS_ALIGNED(PFN_PHYS(pfn), size))
- continue;
-
- map_size = size;
- break;
- }
-
- return map_size;
-}
-
-#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
-static inline int arch_vmap_pte_supported_shift(unsigned long size)
-{
- int shift = PAGE_SHIFT;
- unsigned long order;
-
- if (!has_svnapot())
- return shift;
-
- WARN_ON_ONCE(size >= PMD_SIZE);
-
- for_each_napot_order_rev(order) {
- if (napot_cont_size(order) > size)
- continue;
-
- if (!IS_ALIGNED(size, napot_cont_size(order)))
- continue;
-
- shift = napot_cont_shift(order);
- break;
- }
-
- return shift;
-}
-
-#endif /* CONFIG_RISCV_ISA_SVNAPOT */
-#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#endif /* _ASM_RISCV_VMALLOC_H */
diff --git a/arch/riscv/include/asm/word-at-a-time.h b/arch/riscv/include/asm/word-at-a-time.h
index f3f031e34191..3802cda71ab7 100644
--- a/arch/riscv/include/asm/word-at-a-time.h
+++ b/arch/riscv/include/asm/word-at-a-time.h
@@ -10,7 +10,8 @@
#include <asm/asm-extable.h>
-#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/wordpart.h>
struct word_at_a_time {
const unsigned long one_bits, high_bits;
diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h
index 9f2a8e3ff204..31c570cbd1c5 100644
--- a/arch/riscv/include/uapi/asm/hwprobe.h
+++ b/arch/riscv/include/uapi/asm/hwprobe.h
@@ -59,6 +59,7 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33)
#define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34)
#define RISCV_HWPROBE_EXT_ZICOND (1ULL << 35)
+#define RISCV_HWPROBE_EXT_ZIHINTPAUSE (1ULL << 36)
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
#define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
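
Userspace tests the new bit through the riscv_hwprobe syscall; a sketch, assuming the uapi headers provide __NR_riscv_hwprobe:

	#include <asm/hwprobe.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int have_zihintpause(void)
	{
		struct riscv_hwprobe pair = {
			.key = RISCV_HWPROBE_KEY_IMA_EXT_0,
		};

		/* One key, all CPUs (empty cpuset), no flags. */
		if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
			return 0;

		return !!(pair.value & RISCV_HWPROBE_EXT_ZIHINTPAUSE);
	}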
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index d6b7a5b95874..b1c503c2959c 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -16,7 +16,6 @@
#include <asm/ptrace.h>
#define __KVM_HAVE_IRQ_LINE
-#define __KVM_HAVE_READONLY_MEM
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -139,6 +138,35 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_ZIHPM,
KVM_RISCV_ISA_EXT_SMSTATEEN,
KVM_RISCV_ISA_EXT_ZICOND,
+ KVM_RISCV_ISA_EXT_ZBC,
+ KVM_RISCV_ISA_EXT_ZBKB,
+ KVM_RISCV_ISA_EXT_ZBKC,
+ KVM_RISCV_ISA_EXT_ZBKX,
+ KVM_RISCV_ISA_EXT_ZKND,
+ KVM_RISCV_ISA_EXT_ZKNE,
+ KVM_RISCV_ISA_EXT_ZKNH,
+ KVM_RISCV_ISA_EXT_ZKR,
+ KVM_RISCV_ISA_EXT_ZKSED,
+ KVM_RISCV_ISA_EXT_ZKSH,
+ KVM_RISCV_ISA_EXT_ZKT,
+ KVM_RISCV_ISA_EXT_ZVBB,
+ KVM_RISCV_ISA_EXT_ZVBC,
+ KVM_RISCV_ISA_EXT_ZVKB,
+ KVM_RISCV_ISA_EXT_ZVKG,
+ KVM_RISCV_ISA_EXT_ZVKNED,
+ KVM_RISCV_ISA_EXT_ZVKNHA,
+ KVM_RISCV_ISA_EXT_ZVKNHB,
+ KVM_RISCV_ISA_EXT_ZVKSED,
+ KVM_RISCV_ISA_EXT_ZVKSH,
+ KVM_RISCV_ISA_EXT_ZVKT,
+ KVM_RISCV_ISA_EXT_ZFH,
+ KVM_RISCV_ISA_EXT_ZFHMIN,
+ KVM_RISCV_ISA_EXT_ZIHINTNTL,
+ KVM_RISCV_ISA_EXT_ZVFH,
+ KVM_RISCV_ISA_EXT_ZVFHMIN,
+ KVM_RISCV_ISA_EXT_ZFA,
+ KVM_RISCV_ISA_EXT_ZTSO,
+ KVM_RISCV_ISA_EXT_ZACAS,
KVM_RISCV_ISA_EXT_MAX,
};
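
Each new enum entry becomes addressable through KVM's ISA-extension ONE_REG interface. A userspace sketch probing one of them on an already-created vCPU; vcpu_fd is assumed, and the ioctl fails when the kernel does not know the extension:

	#include <linux/kvm.h>
	#include <stdint.h>
	#include <sys/ioctl.h>

	static int vcpu_has_zacas(int vcpu_fd)
	{
		uint64_t val = 0;
		struct kvm_one_reg reg = {
			.id = KVM_REG_RISCV | KVM_REG_SIZE_ULONG |
			      KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE |
			      KVM_RISCV_ISA_EXT_ZACAS,
			.addr = (uintptr_t)&val,
		};

		if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
			return 0;

		return val == 1;
	}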