Diffstat (limited to 'arch/arm64/include/asm/percpu.h')

 arch/arm64/include/asm/percpu.h | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 53 insertions(+), 12 deletions(-)
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 0b6409b89e5e..9abcc8ef3087 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -10,6 +10,7 @@
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/stack_pointer.h>
+#include <asm/sysreg.h>

static inline void set_my_cpu_offset(unsigned long off)
{
@@ -19,7 +20,16 @@ static inline void set_my_cpu_offset(unsigned long off)
:: "r" (off) : "memory");
}

-static inline unsigned long __my_cpu_offset(void)
+static inline unsigned long __hyp_my_cpu_offset(void)
+{
+ /*
+ * Non-VHE hyp code runs with preemption disabled. No need to hazard
+ * the register access against barrier() as in __kern_my_cpu_offset.
+ */
+ return read_sysreg(tpidr_el2);
+}
+
+static inline unsigned long __kern_my_cpu_offset(void)
{
unsigned long off;
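
The __kern_my_cpu_offset() body elided between these hunks selects
tpidr_el1 or tpidr_el2 with an ALTERNATIVE() for VHE. As a minimal
sketch of the hazard technique it relies on (alternative patching
omitted; not the verbatim kernel body):

static inline unsigned long kern_my_cpu_offset_sketch(void)
{
	unsigned long off;

	/*
	 * Avoid "volatile" so the compiler may cache the offset in a
	 * register; the fake "Q" input on the current stack lets any
	 * barrier() (which clobbers memory) force a re-read, e.g.
	 * around preemption points.
	 */
	asm("mrs %0, tpidr_el1"
	    : "=r" (off)
	    : "Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}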
@@ -35,7 +45,12 @@ static inline unsigned long __my_cpu_offset(void)
return off;
}

-#define __my_cpu_offset __my_cpu_offset()
+
+#ifdef __KVM_NVHE_HYPERVISOR__
+#define __my_cpu_offset __hyp_my_cpu_offset()
+#else
+#define __my_cpu_offset __kern_my_cpu_offset()
+#endif

#define PERCPU_RW_OPS(sz) \
static inline unsigned long __percpu_read_##sz(void *ptr) \
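
For reference, the generic layer consumes this hook roughly as follows
(simplified from include/asm-generic/percpu.h; the exact plumbing
varies by kernel version):

/* A per-CPU pointer is the section address shifted by this offset. */
#ifdef __my_cpu_offset
#define my_cpu_offset		__my_cpu_offset
#endif
#ifndef arch_raw_cpu_ptr
#define arch_raw_cpu_ptr(ptr)	SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
#endif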
@@ -125,17 +140,11 @@ PERCPU_RET_OP(add, add, ldadd)
* re-enabling preemption for preemptible kernels, but doing that in a way
* which builds inside a module would mean messing directly with the preempt
* count. If you do this, peterz and tglx will hunt you down.
+ *
+ * Not to mention it'll break the actual preemption model by missing a
+ * preemption point when TIF_NEED_RESCHED gets set while preemption is
+ * disabled.
*/
-#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
-({ \
- int __ret; \
- preempt_disable_notrace(); \
- __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \
- raw_cpu_ptr(&(ptr2)), \
- o1, o2, n1, n2); \
- preempt_enable_notrace(); \
- __ret; \
-})

#define _pcp_protect(op, pcp, ...) \
({ \
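
Where the removed helper compared-and-swapped two adjacent u64 per-CPU
words, callers can pack both into one u128 and use the 128-bit
primitive added below. A hypothetical sketch (the demo_* names are
illustrative, not part of this patch):

/* u128 is naturally 16-byte aligned, as the CASP/LDXP paths require. */
static DEFINE_PER_CPU(u128, demo_slot);

static inline bool demo_try_update(u128 old, u128 new)
{
	/* Succeeds iff this CPU's slot still held 'old'. */
	return this_cpu_cmpxchg128(demo_slot, old, new) == old;
}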
@@ -225,6 +234,38 @@ PERCPU_RET_OP(add, add, ldadd)
#define this_cpu_cmpxchg_8(pcp, o, n) \
_pcp_protect_return(cmpxchg_relaxed, pcp, o, n)

+#define this_cpu_cmpxchg64(pcp, o, n) this_cpu_cmpxchg_8(pcp, o, n)
+
+#define this_cpu_cmpxchg128(pcp, o, n) \
+({ \
+ typedef typeof(pcp) pcp_op_T__; \
+ u128 old__, new__, ret__; \
+ pcp_op_T__ *ptr__; \
+ old__ = o; \
+ new__ = n; \
+ preempt_disable_notrace(); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
+ ret__ = cmpxchg128_local((void *)ptr__, old__, new__); \
+ preempt_enable_notrace(); \
+ ret__; \
+})
+
+#ifdef __KVM_NVHE_HYPERVISOR__
+extern unsigned long __hyp_per_cpu_offset(unsigned int cpu);
+#define __per_cpu_offset
+#define per_cpu_offset(cpu) __hyp_per_cpu_offset((cpu))
+#endif
+
#include <asm-generic/percpu.h>

+/* Redefine macros for nVHE hyp under DEBUG_PREEMPT to avoid its dependencies. */
+#if defined(__KVM_NVHE_HYPERVISOR__) && defined(CONFIG_DEBUG_PREEMPT)
+#undef this_cpu_ptr
+#define this_cpu_ptr raw_cpu_ptr
+#undef __this_cpu_read
+#define __this_cpu_read raw_cpu_read
+#undef __this_cpu_write
+#define __this_cpu_write raw_cpu_write
+#endif
+
#endif /* __ASM_PERCPU_H */
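
The __hyp_per_cpu_offset() declared above is implemented in the nVHE
hyp object; its shape is roughly the following (simplified sketch
after arch/arm64/kvm/hyp/nvhe/hyp-smp.c): the offset is the hyp VA of
the CPU's per-CPU region minus the start of the ELF per-CPU section.
(The empty __per_cpu_offset definition above it keeps the generic
header from falling back to the kernel's __per_cpu_offset[] array for
the hyp object.)

unsigned long __hyp_per_cpu_offset(unsigned int cpu)
{
	unsigned long *cpu_base_array;
	unsigned long this_cpu_base;
	unsigned long elf_base;

	BUG_ON(cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base));

	/* Per-CPU region bases are recorded by the host at init time. */
	cpu_base_array = (unsigned long *)&kvm_arm_hyp_percpu_base;
	this_cpu_base = kern_hyp_va(cpu_base_array[cpu]);
	elf_base = (unsigned long)&__per_cpu_start;
	return this_cpu_base - elf_base;
}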