author     Nicholas Piggin <npiggin@gmail.com>    2023-04-08 12:17:49 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>  2023-04-20 12:54:22 +1000
commit     dc5dac748af9087e9240bd2ae6ae7db48d5360ae (patch)
tree       59998f55199668c054226f3bc29fc4963fc35741 /arch/powerpc/include/asm
parent     b270bebd34e36fb69363d65e24b00a9d148903e8 (diff)
powerpc/64: Add support to build with prefixed instructions
Add an option to build the kernel and modules with prefixed instructions if
the CPU and toolchain support it.

This is not related to kernel support for userspace execution of prefixed
instructions.

Building with prefixed instructions breaks some extended inline asm memory
addressing, for example it will provide immediates that exceed the range of
a simple load/store displacement. Whether this is a toolchain or a kernel
asm problem remains to be seen. For now, these are replaced with simpler
and less efficient direct register addressing when compiling with prefixed
instructions.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230408021752.862660-4-npiggin@gmail.com
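As a rough illustration of the fallback used throughout this patch, here is a
minimal sketch (not part of the patch; read_word() is a hypothetical helper)
mirroring the arch_atomic_read() hunk below. The generic path lets GCC choose
the memory operand via the "m<>" constraint and the %U/%X operand modifiers;
the CONFIG_PPC_KERNEL_PREFIXED path instead keeps the address in a base
register ("b" constraint) with a fixed zero displacement, so the hand-written
non-prefixed lwz never sees an offset it cannot encode.

    static inline int read_word(const int *p)
    {
        int t;

        if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED)) {
            /* Base-register addressing: always a valid 0(reg) operand. */
            __asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(p));
        } else {
            /* Let GCC pick the operand; %U1/%X1 emit update/indexed forms when needed. */
            __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(*p));
        }
        return t;
    }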
Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--  arch/powerpc/include/asm/atomic.h   | 24
-rw-r--r--  arch/powerpc/include/asm/io.h       | 37
-rw-r--r--  arch/powerpc/include/asm/uaccess.h  | 28
3 files changed, 83 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index b3a53830446b..47228b177478 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -27,14 +27,22 @@ static __inline__ int arch_atomic_read(const atomic_t *v)
{
int t;
- __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
+ /* -mprefixed can generate offsets beyond range, fall back hack */
+ if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+ __asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter));
+ else
+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
return t;
}
static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
- __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
+ /* -mprefixed can generate offsets beyond range, fall back hack */
+ if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+ __asm__ __volatile__("stw %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
+ else
+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}
#define ATOMIC_OP(op, asm_op, suffix, sign, ...) \
@@ -197,14 +205,22 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
s64 t;
- __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
+ /* -mprefixed can generate offsets beyond range, fall back hack */
+ if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+ __asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
+ else
+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
return t;
}
static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
- __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
+ /* -mprefixed can generate offsets beyond range, fall back hack */
+ if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+ __asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
+ else
+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}
#define ATOMIC64_OP(op, asm_op) \
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index fc112a91d0c2..f1e657c9bbe8 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -97,6 +97,42 @@ extern bool isa_io_special;
*
*/
+/* -mprefixed can generate offsets beyond range, fall back hack */
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
+#define DEF_MMIO_IN_X(name, size, insn) \
+static inline u##size name(const volatile u##size __iomem *addr) \
+{ \
+ u##size ret; \
+ __asm__ __volatile__("sync;"#insn" %0,0,%1;twi 0,%0,0;isync" \
+ : "=r" (ret) : "r" (addr) : "memory"); \
+ return ret; \
+}
+
+#define DEF_MMIO_OUT_X(name, size, insn) \
+static inline void name(volatile u##size __iomem *addr, u##size val) \
+{ \
+ __asm__ __volatile__("sync;"#insn" %1,0,%0" \
+ : : "r" (addr), "r" (val) : "memory"); \
+ mmiowb_set_pending(); \
+}
+
+#define DEF_MMIO_IN_D(name, size, insn) \
+static inline u##size name(const volatile u##size __iomem *addr) \
+{ \
+ u##size ret; \
+ __asm__ __volatile__("sync;"#insn" %0,0(%1);twi 0,%0,0;isync"\
+ : "=r" (ret) : "b" (addr) : "memory"); \
+ return ret; \
+}
+
+#define DEF_MMIO_OUT_D(name, size, insn) \
+static inline void name(volatile u##size __iomem *addr, u##size val) \
+{ \
+ __asm__ __volatile__("sync;"#insn" %1,0(%0)" \
+ : : "b" (addr), "r" (val) : "memory"); \
+ mmiowb_set_pending(); \
+}
+#else
#define DEF_MMIO_IN_X(name, size, insn) \
static inline u##size name(const volatile u##size __iomem *addr) \
{ \
@@ -130,6 +166,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
: "=m<>" (*addr) : "r" (val) : "memory"); \
mmiowb_set_pending(); \
}
+#endif
DEF_MMIO_IN_D(in_8, 8, lbz);
DEF_MMIO_OUT_D(out_8, 8, stb);
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 52378e641d38..a2d255aa9627 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -71,14 +71,26 @@ __pu_failed: \
* because we do not write to any memory gcc knows about, so there
* are no aliasing issues.
*/
+/* -mprefixed can generate offsets beyond range, fall back hack */
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
+#define __put_user_asm_goto(x, addr, label, op) \
+ asm_volatile_goto( \
+ "1: " op " %0,0(%1) # put_user\n" \
+ EX_TABLE(1b, %l2) \
+ : \
+ : "r" (x), "b" (addr) \
+ : \
+ : label)
+#else
#define __put_user_asm_goto(x, addr, label, op) \
asm_volatile_goto( \
"1: " op "%U1%X1 %0,%1 # put_user\n" \
EX_TABLE(1b, %l2) \
: \
- : "r" (x), "m<>" (*addr) \
+ : "r" (x), "m<>" (*addr) \
: \
: label)
+#endif
#ifdef __powerpc64__
#define __put_user_asm2_goto(x, ptr, label) \
@@ -131,14 +143,26 @@ do { \
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+/* -mprefixed can generate offsets beyond range, fall back hack */
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
+#define __get_user_asm_goto(x, addr, label, op) \
+ asm_volatile_goto( \
+ "1: "op" %0,0(%1) # get_user\n" \
+ EX_TABLE(1b, %l2) \
+ : "=r" (x) \
+ : "b" (addr) \
+ : \
+ : label)
+#else
#define __get_user_asm_goto(x, addr, label, op) \
asm_volatile_goto( \
"1: "op"%U1%X1 %0, %1 # get_user\n" \
EX_TABLE(1b, %l2) \
: "=r" (x) \
- : "m<>" (*addr) \
+ : "m<>" (*addr) \
: \
: label)
+#endif
#ifdef __powerpc64__
#define __get_user_asm2_goto(x, addr, label) \