Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/asm.h             |  51
-rw-r--r--  arch/s390/include/asm/atomic.h          |  28
-rw-r--r--  arch/s390/include/asm/atomic_ops.h      |  75
-rw-r--r--  arch/s390/include/asm/cmpxchg.h         | 374
-rw-r--r--  arch/s390/include/asm/cpacf.h           |   2
-rw-r--r--  arch/s390/include/asm/cpu_mf.h          |  57
-rw-r--r--  arch/s390/include/asm/facility.h        |  18
-rw-r--r--  arch/s390/include/asm/ftrace.h          |  29
-rw-r--r--  arch/s390/include/asm/gmap.h            |   3
-rw-r--r--  arch/s390/include/asm/io.h              |   2
-rw-r--r--  arch/s390/include/asm/kexec.h           |   3
-rw-r--r--  arch/s390/include/asm/kvm_host.h        |   5
-rw-r--r--  arch/s390/include/asm/lowcore.h         |   3
-rw-r--r--  arch/s390/include/asm/page.h            |  20
-rw-r--r--  arch/s390/include/asm/pai.h             |  10
-rw-r--r--  arch/s390/include/asm/pci.h             |  15
-rw-r--r--  arch/s390/include/asm/pci_clp.h         |  13
-rw-r--r--  arch/s390/include/asm/pci_io.h          |   6
-rw-r--r--  arch/s390/include/asm/perf_event.h      |   7
-rw-r--r--  arch/s390/include/asm/pgtable.h         |   2
-rw-r--r--  arch/s390/include/asm/physmem_info.h    |   3
-rw-r--r--  arch/s390/include/asm/preempt.h         |   9
-rw-r--r--  arch/s390/include/asm/processor.h       |   5
-rw-r--r--  arch/s390/include/asm/ptrace.h          |   2
-rw-r--r--  arch/s390/include/asm/set_memory.h      |   1
-rw-r--r--  arch/s390/include/asm/sigp.h            |  11
-rw-r--r--  arch/s390/include/asm/sparsemem.h       |  18
-rw-r--r--  arch/s390/include/asm/spinlock.h        |  13
-rw-r--r--  arch/s390/include/asm/timex.h           |  32
-rw-r--r--  arch/s390/include/asm/uv.h              | 176
-rw-r--r--  arch/s390/include/asm/vdso.h            |   3
-rw-r--r--  arch/s390/include/asm/vdso/data.h       |  12
-rw-r--r--  arch/s390/include/asm/vdso/time_data.h  |  12
-rw-r--r--  arch/s390/include/asm/vdso/vsyscall.h   |   5
-rw-r--r--  arch/s390/include/uapi/asm/dasd.h       |   2
-rw-r--r--  arch/s390/include/uapi/asm/pkey.h       |  38
-rw-r--r--  arch/s390/include/uapi/asm/uvdevice.h   |  32
37 files changed, 671 insertions, 426 deletions
diff --git a/arch/s390/include/asm/asm.h b/arch/s390/include/asm/asm.h
new file mode 100644
index 000000000000..ec011b94af2a
--- /dev/null
+++ b/arch/s390/include/asm/asm.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_ASM_H
+#define _ASM_S390_ASM_H
+
+#include <linux/stringify.h>
+
+/*
+ * Helper macros to be used for flag output operand handling.
+ * Inline assemblies must use four of the five supplied macros:
+ *
+ * Use CC_IPM(sym) at the end of the inline assembly; this extracts the
+ * condition code and program mask with the ipm instruction and writes it to
+ * the variable with symbolic name [sym] if the compiler has no support for
+ * flag output operands. If the compiler has support for flag output operands
+ * this generates no code.
+ *
+ * Use CC_OUT(sym, var) at the output operand list of an inline assembly. This
+ * defines an output operand with symbolic name [sym] for the variable
+ * [var]. [var] must be an int variable and [sym] must be identical with [sym]
+ * used with CC_IPM().
+ *
+ * Use either CC_CLOBBER or CC_CLOBBER_LIST() for the clobber list. Use
+ * CC_CLOBBER if the clobber list contains only "cc", otherwise use
+ * CC_CLOBBER_LIST() and add all clobbers as argument to the macro.
+ *
+ * Use CC_TRANSFORM() to convert the variable [var] which contains the
+ * extracted condition code. If the condition code is extracted with ipm, the
+ * [var] also contains the program mask. CC_TRANSFORM() moves the condition
+ * code to the two least significant bits and sets all other bits to zero.
+ */
+#if defined(__GCC_ASM_FLAG_OUTPUTS__) && !(IS_ENABLED(CONFIG_GCC_ASM_FLAG_OUTPUT_BROKEN))
+
+#define __HAVE_ASM_FLAG_OUTPUTS__
+
+#define CC_IPM(sym)
+#define CC_OUT(sym, var) "=@cc" (var)
+#define CC_TRANSFORM(cc) ({ cc; })
+#define CC_CLOBBER
+#define CC_CLOBBER_LIST(...) __VA_ARGS__
+
+#else
+
+#define CC_IPM(sym) " ipm %[" __stringify(sym) "]\n"
+#define CC_OUT(sym, var) [sym] "=d" (var)
+#define CC_TRANSFORM(cc) ({ (cc) >> 28; })
+#define CC_CLOBBER "cc"
+#define CC_CLOBBER_LIST(...) "cc", __VA_ARGS__
+
+#endif
+
+#endif /* _ASM_S390_ASM_H */
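As an illustration of how the four macros from asm.h combine (a sketch, not part of the patch; "tprot" merely serves as an example of a condition-code-setting instruction, mirroring the conversions applied to timex.h, page.h and sigp.h below):

/* Hedged sketch: extract the condition code of an example instruction. */
static inline int example_tprot(unsigned long addr)
{
	int cc;

	asm volatile(
		"	tprot	0(%[addr]),0\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc)
		: [addr] "a" (addr)
		: CC_CLOBBER);
	return CC_TRANSFORM(cc);
}

With flag-output support the compiler receives the condition code directly via "=@cc"; without it, the same source compiles to an ipm plus a shift by 28 at the CC_TRANSFORM() site.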
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 0c4cad7d5a5b..6723fca64018 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -72,14 +72,24 @@ ATOMIC_OPS(xor)
#define arch_atomic_fetch_or arch_atomic_fetch_or
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
-#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
+static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
+{
+ return arch_xchg(&v->counter, new);
+}
+#define arch_atomic_xchg arch_atomic_xchg
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
- return __atomic_cmpxchg(&v->counter, old, new);
+ return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg
+static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ return arch_try_cmpxchg(&v->counter, old, new);
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+
#define ATOMIC64_INIT(i) { (i) }
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
@@ -112,14 +122,24 @@ static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
}
#define arch_atomic64_add arch_atomic64_add
-#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
+static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
+{
+ return arch_xchg(&v->counter, new);
+}
+#define arch_atomic64_xchg arch_atomic64_xchg
static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
- return __atomic64_cmpxchg((long *)&v->counter, old, new);
+ return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
+static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ return arch_try_cmpxchg(&v->counter, old, new);
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+
#define ATOMIC64_OPS(op) \
static __always_inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \
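A sketch (not part of the patch) of the caller-side pattern the new try variant enables, assuming the pre-existing arch_atomic_read(); on failure arch_try_cmpxchg() refreshes *old, so the loop needs no explicit re-read:

static inline bool atomic_inc_unless_negative_sketch(atomic_t *v)
{
	int old = arch_atomic_read(v);

	do {
		if (old < 0)
			return false;
	} while (!arch_atomic_try_cmpxchg(v, &old, old + 1));
	return true;
}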
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index 65380da9e75f..1d6b2056fad8 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -169,79 +169,4 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
#endif /* MARCH_HAS_Z196_FEATURES */
-static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new)
-{
- asm volatile(
- " cs %[old],%[new],%[ptr]"
- : [old] "+d" (old), [ptr] "+Q" (*ptr)
- : [new] "d" (new)
- : "cc", "memory");
- return old;
-}
-
-static __always_inline long __atomic64_cmpxchg(long *ptr, long old, long new)
-{
- asm volatile(
- " csg %[old],%[new],%[ptr]"
- : [old] "+d" (old), [ptr] "+QS" (*ptr)
- : [new] "d" (new)
- : "cc", "memory");
- return old;
-}
-
-/* GCC versions before 14.2.0 may die with an ICE in some configurations. */
-#if defined(__GCC_ASM_FLAG_OUTPUTS__) && !(IS_ENABLED(CONFIG_CC_IS_GCC) && (GCC_VERSION < 140200))
-
-static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
-{
- int cc;
-
- asm volatile(
- " cs %[old],%[new],%[ptr]"
- : [old] "+d" (old), [ptr] "+Q" (*ptr), "=@cc" (cc)
- : [new] "d" (new)
- : "memory");
- return cc == 0;
-}
-
-static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
-{
- int cc;
-
- asm volatile(
- " csg %[old],%[new],%[ptr]"
- : [old] "+d" (old), [ptr] "+QS" (*ptr), "=@cc" (cc)
- : [new] "d" (new)
- : "memory");
- return cc == 0;
-}
-
-#else /* __GCC_ASM_FLAG_OUTPUTS__ */
-
-static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
-{
- int old_expected = old;
-
- asm volatile(
- " cs %[old],%[new],%[ptr]"
- : [old] "+d" (old), [ptr] "+Q" (*ptr)
- : [new] "d" (new)
- : "cc", "memory");
- return old == old_expected;
-}
-
-static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
-{
- long old_expected = old;
-
- asm volatile(
- " csg %[old],%[new],%[ptr]"
- : [old] "+d" (old), [ptr] "+QS" (*ptr)
- : [new] "d" (new)
- : "cc", "memory");
- return old == old_expected;
-}
-
-#endif /* __GCC_ASM_FLAG_OUTPUTS__ */
-
#endif /* __ARCH_S390_ATOMIC_OPS__ */
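The removed __atomic_cmpxchg_bool() helpers have no direct replacement in this file; callers are converted to arch_try_cmpxchg() from cmpxchg.h. A minimal equivalence sketch (an assumption for illustration, not from the patch):

static inline bool atomic_cmpxchg_bool_sketch(int *ptr, int old, int new)
{
	/*
	 * arch_try_cmpxchg() may update the local copy of old on failure;
	 * only the boolean result matters to this wrapper.
	 */
	return arch_try_cmpxchg(ptr, &old, new);
}

The spinlock.h hunk below performs exactly this conversion at each former call site.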
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index aae0315374de..a9e2006033b7 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -11,185 +11,231 @@
#include <linux/mmdebug.h>
#include <linux/types.h>
#include <linux/bug.h>
+#include <asm/asm.h>
-void __xchg_called_with_bad_pointer(void);
+void __cmpxchg_called_with_bad_pointer(void);
+
+static __always_inline u32 __cs_asm(u64 ptr, u32 old, u32 new)
+{
+ asm volatile(
+ " cs %[old],%[new],%[ptr]\n"
+ : [old] "+d" (old), [ptr] "+Q" (*(u32 *)ptr)
+ : [new] "d" (new)
+ : "memory", "cc");
+ return old;
+}
+
+static __always_inline u64 __csg_asm(u64 ptr, u64 old, u64 new)
+{
+ asm volatile(
+ " csg %[old],%[new],%[ptr]\n"
+ : [old] "+d" (old), [ptr] "+QS" (*(u64 *)ptr)
+ : [new] "d" (new)
+ : "memory", "cc");
+ return old;
+}
-static __always_inline unsigned long
-__arch_xchg(unsigned long x, unsigned long address, int size)
+static inline u8 __arch_cmpxchg1(u64 ptr, u8 old, u8 new)
{
- unsigned long old;
- int shift;
+ union {
+ u8 b[4];
+ u32 w;
+ } old32, new32;
+ u32 prev;
+ int i;
+
+ i = ptr & 3;
+ ptr &= ~0x3;
+ prev = READ_ONCE(*(u32 *)ptr);
+ do {
+ old32.w = prev;
+ if (old32.b[i] != old)
+ return old32.b[i];
+ new32.w = old32.w;
+ new32.b[i] = new;
+ prev = __cs_asm(ptr, old32.w, new32.w);
+ } while (prev != old32.w);
+ return old;
+}
+
+static inline u16 __arch_cmpxchg2(u64 ptr, u16 old, u16 new)
+{
+ union {
+ u16 b[2];
+ u32 w;
+ } old32, new32;
+ u32 prev;
+ int i;
+
+ i = (ptr & 3) >> 1;
+ ptr &= ~0x3;
+ prev = READ_ONCE(*(u32 *)ptr);
+ do {
+ old32.w = prev;
+ if (old32.b[i] != old)
+ return old32.b[i];
+ new32.w = old32.w;
+ new32.b[i] = new;
+ prev = __cs_asm(ptr, old32.w, new32.w);
+ } while (prev != old32.w);
+ return old;
+}
+static __always_inline u64 __arch_cmpxchg(u64 ptr, u64 old, u64 new, int size)
+{
switch (size) {
- case 1:
- shift = (3 ^ (address & 3)) << 3;
- address ^= address & 3;
- asm volatile(
- " l %0,%1\n"
- "0: lr 0,%0\n"
- " nr 0,%3\n"
- " or 0,%2\n"
- " cs %0,0,%1\n"
- " jl 0b\n"
- : "=&d" (old), "+Q" (*(int *) address)
- : "d" ((x & 0xff) << shift), "d" (~(0xff << shift))
- : "memory", "cc", "0");
- return old >> shift;
- case 2:
- shift = (2 ^ (address & 2)) << 3;
- address ^= address & 2;
- asm volatile(
- " l %0,%1\n"
- "0: lr 0,%0\n"
- " nr 0,%3\n"
- " or 0,%2\n"
- " cs %0,0,%1\n"
- " jl 0b\n"
- : "=&d" (old), "+Q" (*(int *) address)
- : "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift))
- : "memory", "cc", "0");
- return old >> shift;
- case 4:
- asm volatile(
- " l %0,%1\n"
- "0: cs %0,%2,%1\n"
- " jl 0b\n"
- : "=&d" (old), "+Q" (*(int *) address)
- : "d" (x)
- : "memory", "cc");
- return old;
- case 8:
- asm volatile(
- " lg %0,%1\n"
- "0: csg %0,%2,%1\n"
- " jl 0b\n"
- : "=&d" (old), "+QS" (*(long *) address)
- : "d" (x)
- : "memory", "cc");
- return old;
+ case 1: return __arch_cmpxchg1(ptr, old & 0xff, new & 0xff);
+ case 2: return __arch_cmpxchg2(ptr, old & 0xffff, new & 0xffff);
+ case 4: return __cs_asm(ptr, old & 0xffffffff, new & 0xffffffff);
+ case 8: return __csg_asm(ptr, old, new);
+ default: __cmpxchg_called_with_bad_pointer();
}
- __xchg_called_with_bad_pointer();
- return x;
+ return old;
}
-#define arch_xchg(ptr, x) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
- __typeof__(*(ptr)) __ret; \
+ (__typeof__(*(ptr)))__arch_cmpxchg((unsigned long)(ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))); \
+})
+
+#define arch_cmpxchg64 arch_cmpxchg
+#define arch_cmpxchg_local arch_cmpxchg
+#define arch_cmpxchg64_local arch_cmpxchg
+
+#ifdef __HAVE_ASM_FLAG_OUTPUTS__
+
+#define arch_try_cmpxchg(ptr, oldp, new) \
+({ \
+ __typeof__(ptr) __oldp = (__typeof__(ptr))(oldp); \
+ __typeof__(*(ptr)) __old = *__oldp; \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __prev; \
+ int __cc; \
\
- __ret = (__typeof__(*(ptr))) \
- __arch_xchg((unsigned long)(x), (unsigned long)(ptr), \
- sizeof(*(ptr))); \
- __ret; \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ case 2: { \
+ __prev = arch_cmpxchg((ptr), (__old), (__new)); \
+ __cc = (__prev != __old); \
+ if (unlikely(__cc)) \
+ *__oldp = __prev; \
+ break; \
+ } \
+ case 4: { \
+ asm volatile( \
+ " cs %[__old],%[__new],%[__ptr]\n" \
+ : [__old] "+d" (*__oldp), \
+ [__ptr] "+Q" (*(ptr)), \
+ "=@cc" (__cc) \
+ : [__new] "d" (__new) \
+ : "memory"); \
+ break; \
+ } \
+ case 8: { \
+ asm volatile( \
+ " csg %[__old],%[__new],%[__ptr]\n" \
+ : [__old] "+d" (*__oldp), \
+ [__ptr] "+QS" (*(ptr)), \
+ "=@cc" (__cc) \
+ : [__new] "d" (__new) \
+ : "memory"); \
+ break; \
+ } \
+ default: \
+ __cmpxchg_called_with_bad_pointer(); \
+ } \
+ likely(__cc == 0); \
})
-void __cmpxchg_called_with_bad_pointer(void);
+#else /* __HAVE_ASM_FLAG_OUTPUTS__ */
-static __always_inline unsigned long __cmpxchg(unsigned long address,
- unsigned long old,
- unsigned long new, int size)
+#define arch_try_cmpxchg(ptr, oldp, new) \
+({ \
+ __typeof__((ptr)) __oldp = (__typeof__(ptr))(oldp); \
+ __typeof__(*(ptr)) __old = *__oldp; \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __prev; \
+ \
+ __prev = arch_cmpxchg((ptr), (__old), (__new)); \
+ if (unlikely(__prev != __old)) \
+ *__oldp = __prev; \
+ likely(__prev == __old); \
+})
+
+#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */
+
+#define arch_try_cmpxchg64 arch_try_cmpxchg
+#define arch_try_cmpxchg_local arch_try_cmpxchg
+#define arch_try_cmpxchg64_local arch_try_cmpxchg
+
+void __xchg_called_with_bad_pointer(void);
+
+static inline u8 __arch_xchg1(u64 ptr, u8 x)
+{
+ int shift = (3 ^ (ptr & 3)) << 3;
+ u32 mask, old, new;
+
+ ptr &= ~0x3;
+ mask = ~(0xff << shift);
+ old = READ_ONCE(*(u32 *)ptr);
+ do {
+ new = old & mask;
+ new |= x << shift;
+ } while (!arch_try_cmpxchg((u32 *)ptr, &old, new));
+ return old >> shift;
+}
+
+static inline u16 __arch_xchg2(u64 ptr, u16 x)
+{
+ int shift = (2 ^ (ptr & 2)) << 3;
+ u32 mask, old, new;
+
+ ptr &= ~0x3;
+ mask = ~(0xffff << shift);
+ old = READ_ONCE(*(u32 *)ptr);
+ do {
+ new = old & mask;
+ new |= x << shift;
+ } while (!arch_try_cmpxchg((u32 *)ptr, &old, new));
+ return old >> shift;
+}
+
+static __always_inline u64 __arch_xchg(u64 ptr, u64 x, int size)
{
switch (size) {
- case 1: {
- unsigned int prev, shift, mask;
-
- shift = (3 ^ (address & 3)) << 3;
- address ^= address & 3;
- old = (old & 0xff) << shift;
- new = (new & 0xff) << shift;
- mask = ~(0xff << shift);
- asm volatile(
- " l %[prev],%[address]\n"
- " nr %[prev],%[mask]\n"
- " xilf %[mask],0xffffffff\n"
- " or %[new],%[prev]\n"
- " or %[prev],%[tmp]\n"
- "0: lr %[tmp],%[prev]\n"
- " cs %[prev],%[new],%[address]\n"
- " jnl 1f\n"
- " xr %[tmp],%[prev]\n"
- " xr %[new],%[tmp]\n"
- " nr %[tmp],%[mask]\n"
- " jz 0b\n"
- "1:"
- : [prev] "=&d" (prev),
- [address] "+Q" (*(int *)address),
- [tmp] "+&d" (old),
- [new] "+&d" (new),
- [mask] "+&d" (mask)
- :: "memory", "cc");
- return prev >> shift;
- }
- case 2: {
- unsigned int prev, shift, mask;
-
- shift = (2 ^ (address & 2)) << 3;
- address ^= address & 2;
- old = (old & 0xffff) << shift;
- new = (new & 0xffff) << shift;
- mask = ~(0xffff << shift);
- asm volatile(
- " l %[prev],%[address]\n"
- " nr %[prev],%[mask]\n"
- " xilf %[mask],0xffffffff\n"
- " or %[new],%[prev]\n"
- " or %[prev],%[tmp]\n"
- "0: lr %[tmp],%[prev]\n"
- " cs %[prev],%[new],%[address]\n"
- " jnl 1f\n"
- " xr %[tmp],%[prev]\n"
- " xr %[new],%[tmp]\n"
- " nr %[tmp],%[mask]\n"
- " jz 0b\n"
- "1:"
- : [prev] "=&d" (prev),
- [address] "+Q" (*(int *)address),
- [tmp] "+&d" (old),
- [new] "+&d" (new),
- [mask] "+&d" (mask)
- :: "memory", "cc");
- return prev >> shift;
- }
+ case 1:
+ return __arch_xchg1(ptr, x & 0xff);
+ case 2:
+ return __arch_xchg2(ptr, x & 0xffff);
case 4: {
- unsigned int prev = old;
-
- asm volatile(
- " cs %[prev],%[new],%[address]\n"
- : [prev] "+&d" (prev),
- [address] "+Q" (*(int *)address)
- : [new] "d" (new)
- : "memory", "cc");
- return prev;
+ u32 old = READ_ONCE(*(u32 *)ptr);
+
+ do {
+ } while (!arch_try_cmpxchg((u32 *)ptr, &old, x & 0xffffffff));
+ return old;
}
case 8: {
- unsigned long prev = old;
-
- asm volatile(
- " csg %[prev],%[new],%[address]\n"
- : [prev] "+&d" (prev),
- [address] "+QS" (*(long *)address)
- : [new] "d" (new)
- : "memory", "cc");
- return prev;
+ u64 old = READ_ONCE(*(u64 *)ptr);
+
+ do {
+ } while (!arch_try_cmpxchg((u64 *)ptr, &old, x));
+ return old;
}
}
- __cmpxchg_called_with_bad_pointer();
- return old;
+ __xchg_called_with_bad_pointer();
+ return x;
}
-#define arch_cmpxchg(ptr, o, n) \
+#define arch_xchg(ptr, x) \
({ \
- __typeof__(*(ptr)) __ret; \
- \
- __ret = (__typeof__(*(ptr))) \
- __cmpxchg((unsigned long)(ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))); \
- __ret; \
+ (__typeof__(*(ptr)))__arch_xchg((unsigned long)(ptr), \
+ (unsigned long)(x), \
+ sizeof(*(ptr))); \
})
-#define arch_cmpxchg64 arch_cmpxchg
-#define arch_cmpxchg_local arch_cmpxchg
-#define arch_cmpxchg64_local arch_cmpxchg
-
#define system_has_cmpxchg128() 1
static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new)
@@ -203,5 +249,25 @@ static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 n
}
#define arch_cmpxchg128 arch_cmpxchg128
+#define arch_cmpxchg128_local arch_cmpxchg128
+
+#ifdef __HAVE_ASM_FLAG_OUTPUTS__
+
+static __always_inline bool arch_try_cmpxchg128(volatile u128 *ptr, u128 *oldp, u128 new)
+{
+ int cc;
+
+ asm volatile(
+ " cdsg %[old],%[new],%[ptr]\n"
+ : [old] "+d" (*oldp), [ptr] "+QS" (*ptr), "=@cc" (cc)
+ : [new] "d" (new)
+ : "memory");
+ return likely(cc == 0);
+}
+
+#define arch_try_cmpxchg128 arch_try_cmpxchg128
+#define arch_try_cmpxchg128_local arch_try_cmpxchg128
+
+#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */
#endif /* __ASM_CMPXCHG_H */
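The 1- and 2-byte cases above emulate a sub-word compare-and-swap on the containing aligned 32-bit word. Below is a portable model of the __arch_cmpxchg1() technique using compiler builtins in place of the "cs" instruction (a sketch for illustration only; because the union overlays the word's storage, the byte index works independently of endianness):

#include <stdint.h>

static inline uint8_t cmpxchg1_model(uint8_t *p, uint8_t old, uint8_t new)
{
	uint32_t *base = (uint32_t *)((uintptr_t)p & ~(uintptr_t)3);
	unsigned int i = (uintptr_t)p & 3;	/* byte index within the word */
	union { uint8_t b[4]; uint32_t w; } o, n;
	uint32_t prev = __atomic_load_n(base, __ATOMIC_RELAXED);

	do {
		o.w = prev;
		if (o.b[i] != old)	/* target byte changed: fail early */
			return o.b[i];
		n.w = o.w;
		n.b[i] = new;		/* splice the new byte into the word */
	} while (!__atomic_compare_exchange_n(base, &prev, n.w, 0,
					      __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
	return old;
}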
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index 1d3a4b0c650f..59ab1192e2d5 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -56,6 +56,8 @@
#define CPACF_KM_PXTS_256 0x3c
#define CPACF_KM_XTS_128_FULL 0x52
#define CPACF_KM_XTS_256_FULL 0x54
+#define CPACF_KM_PXTS_128_FULL 0x5a
+#define CPACF_KM_PXTS_256_FULL 0x5c
/*
* Function codes for the KMC (CIPHER MESSAGE WITH CHAINING)
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 9e4bbc3e53f8..e1a279e0d6a6 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -13,6 +13,7 @@
#include <linux/kmsan-checks.h>
#include <asm/asm-extable.h>
#include <asm/facility.h>
+#include <asm/asm.h>
asm(".include \"asm/cpu_mf-insn.h\"\n");
@@ -185,11 +186,12 @@ static inline int lcctl(u64 ctl)
int cc;
asm volatile (
- " lcctl %1\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc) : "Q" (ctl) : "cc");
- return cc;
+ " lcctl %[ctl]\n"
+ CC_IPM(cc)
+ : CC_OUT(cc, cc)
+ : [ctl] "Q" (ctl)
+ : CC_CLOBBER);
+ return CC_TRANSFORM(cc);
}
/* Extract CPU counter */
@@ -199,12 +201,13 @@ static inline int __ecctr(u64 ctr, u64 *content)
int cc;
asm volatile (
- " ecctr %0,%2\n"
- " ipm %1\n"
- " srl %1,28\n"
- : "=d" (_content), "=d" (cc) : "d" (ctr) : "cc");
+ " ecctr %[_content],%[ctr]\n"
+ CC_IPM(cc)
+ : CC_OUT(cc, cc), [_content] "=d" (_content)
+ : [ctr] "d" (ctr)
+ : CC_CLOBBER);
*content = _content;
- return cc;
+ return CC_TRANSFORM(cc);
}
/* Extract CPU counter */
@@ -234,18 +237,17 @@ static __always_inline int stcctm(enum stcctm_ctr_set set, u64 range, u64 *dest)
int cc;
asm volatile (
- " STCCTM %2,%3,%1\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc)
- : "Q" (*dest), "d" (range), "i" (set)
- : "cc", "memory");
+ " STCCTM %[range],%[set],%[dest]\n"
+ CC_IPM(cc)
+ : CC_OUT(cc, cc)
+ : [dest] "Q" (*dest), [range] "d" (range), [set] "i" (set)
+ : CC_CLOBBER_LIST("memory"));
/*
* If cc == 2, fewer than RANGE counters are stored, but it's not easy
* to tell how many. Always unpoison the whole range for simplicity.
*/
kmsan_unpoison_memory(dest, range * sizeof(u64));
- return cc;
+ return CC_TRANSFORM(cc);
}
/* Query sampling information */
@@ -265,19 +267,20 @@ static inline int qsi(struct hws_qsi_info_block *info)
/* Load sampling controls */
static inline int lsctl(struct hws_lsctl_request_block *req)
{
- int cc;
+ int cc, exception;
- cc = 1;
+ exception = 1;
asm volatile(
- "0: lsctl 0(%1)\n"
- "1: ipm %0\n"
- " srl %0,28\n"
+ "0: lsctl %[req]\n"
+ "1: lhi %[exc],0\n"
"2:\n"
+ CC_IPM(cc)
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
- : "+d" (cc), "+a" (req)
- : "m" (*req)
- : "cc", "memory");
-
- return cc ? -EINVAL : 0;
+ : CC_OUT(cc, cc), [exc] "+d" (exception)
+ : [req] "Q" (*req)
+ : CC_CLOBBER);
+ if (exception || CC_TRANSFORM(cc))
+ return -EINVAL;
+ return 0;
}
#endif /* _ASM_S390_CPU_MF_H */
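The lsctl() conversion above combines the CC_* macros with the extable-based fault handling that asm-extable.h provides; a generic sketch with a hypothetical instruction "insn" (not a real mnemonic):

static inline int insn_with_extable_sketch(unsigned long *arg)
{
	int cc, exception = 1;

	asm volatile(
		"0:	insn	%[arg]\n"	/* may fault -> branch to 2: */
		"1:	lhi	%[exc],0\n"	/* only reached on success */
		"2:\n"
		CC_IPM(cc)
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: CC_OUT(cc, cc), [exc] "+d" (exception)
		: [arg] "Q" (*arg)
		: CC_CLOBBER);
	if (exception)
		return -EINVAL;
	return CC_TRANSFORM(cc);
}

exception defaults to 1 and is cleared only by the lhi that executes if the instruction completed; a fault branches past it via the exception table, so the then-meaningless condition code is never trusted.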
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 715bcf8fb69a..5f5b1aa6c233 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -88,7 +88,7 @@ static __always_inline bool test_facility(unsigned long nr)
return __test_facility(nr, &stfle_fac_list);
}
-static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
+static inline unsigned long __stfle_asm(u64 *fac_list, int size)
{
unsigned long reg0 = size - 1;
@@ -96,7 +96,7 @@ static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
" lgr 0,%[reg0]\n"
" .insn s,0xb2b00000,%[list]\n" /* stfle */
" lgr %[reg0],0\n"
- : [reg0] "+&d" (reg0), [list] "+Q" (*stfle_fac_list)
+ : [reg0] "+&d" (reg0), [list] "+Q" (*fac_list)
:
: "memory", "cc", "0");
return reg0;
@@ -104,10 +104,10 @@ static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
/**
* stfle - Store facility list extended
- * @stfle_fac_list: array where facility list can be stored
+ * @fac_list: array where facility list can be stored
* @size: size of passed in array in double words
*/
-static inline void __stfle(u64 *stfle_fac_list, int size)
+static inline void __stfle(u64 *fac_list, int size)
{
unsigned long nr;
u32 stfl_fac_list;
@@ -116,20 +116,20 @@ static inline void __stfle(u64 *stfle_fac_list, int size)
" stfl 0(0)\n"
: "=m" (get_lowcore()->stfl_fac_list));
stfl_fac_list = get_lowcore()->stfl_fac_list;
- memcpy(stfle_fac_list, &stfl_fac_list, 4);
+ memcpy(fac_list, &stfl_fac_list, 4);
nr = 4; /* bytes stored by stfl */
if (stfl_fac_list & 0x01000000) {
/* More facility bits available with stfle */
- nr = __stfle_asm(stfle_fac_list, size);
+ nr = __stfle_asm(fac_list, size);
nr = min_t(unsigned long, (nr + 1) * 8, size * 8);
}
- memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
+ memset((char *)fac_list + nr, 0, size * 8 - nr);
}
-static inline void stfle(u64 *stfle_fac_list, int size)
+static inline void stfle(u64 *fac_list, int size)
{
preempt_disable();
- __stfle(stfle_fac_list, size);
+ __stfle(fac_list, size);
preempt_enable();
}
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 406746666eb7..fc97d75dc752 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -51,13 +51,11 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
return addr;
}
-struct ftrace_regs {
- struct pt_regs regs;
-};
+#include <linux/ftrace_regs.h>
static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
{
- struct pt_regs *regs = &fregs->regs;
+ struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;
if (test_pt_regs_flag(regs, PIF_FTRACE_FULL_REGS))
return regs;
@@ -81,32 +79,13 @@ static __always_inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-static __always_inline unsigned long
-ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs)
-{
- return fregs->regs.psw.addr;
-}
-
static __always_inline void
ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
unsigned long ip)
{
- fregs->regs.psw.addr = ip;
+ arch_ftrace_regs(fregs)->regs.psw.addr = ip;
}
-#define ftrace_regs_get_argument(fregs, n) \
- regs_get_kernel_argument(&(fregs)->regs, n)
-#define ftrace_regs_get_stack_pointer(fregs) \
- kernel_stack_pointer(&(fregs)->regs)
-#define ftrace_regs_return_value(fregs) \
- regs_return_value(&(fregs)->regs)
-#define ftrace_regs_set_return_value(fregs, ret) \
- regs_set_return_value(&(fregs)->regs, ret)
-#define ftrace_override_function_with_return(fregs) \
- override_function_with_return(&(fregs)->regs)
-#define ftrace_regs_query_register_offset(name) \
- regs_query_register_offset(name)
-
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
* When an ftrace registered caller is tracing a function that is
@@ -117,7 +96,7 @@ ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
*/
static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr)
{
- struct pt_regs *regs = &fregs->regs;
+ struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;
regs->orig_gpr2 = addr;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index 9725586f4259..64761c78f774 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -107,9 +107,6 @@ void gmap_remove(struct gmap *gmap);
struct gmap *gmap_get(struct gmap *gmap);
void gmap_put(struct gmap *gmap);
-void gmap_enable(struct gmap *gmap);
-void gmap_disable(struct gmap *gmap);
-struct gmap *gmap_get_enabled(void);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 0fbc992d7a5e..fc9933a743d6 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -16,8 +16,10 @@
#include <asm/pci_io.h>
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
+#define kc_xlate_dev_mem_ptr xlate_dev_mem_ptr
void *xlate_dev_mem_ptr(phys_addr_t phys);
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
+#define kc_unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
#define IO_SPACE_LIMIT 0
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index 1bd08eb56d5f..9084b750350d 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -94,6 +94,9 @@ void arch_kexec_protect_crashkres(void);
void arch_kexec_unprotect_crashkres(void);
#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
+
+bool is_kdump_kernel(void);
+#define is_kdump_kernel is_kdump_kernel
#endif
#ifdef CONFIG_KEXEC_FILE
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 8e77afbed58e..51201b4ac93a 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -527,6 +527,9 @@ struct kvm_vcpu_stat {
#define PGM_REGION_FIRST_TRANS 0x39
#define PGM_REGION_SECOND_TRANS 0x3a
#define PGM_REGION_THIRD_TRANS 0x3b
+#define PGM_SECURE_STORAGE_ACCESS 0x3d
+#define PGM_NON_SECURE_STORAGE_ACCESS 0x3e
+#define PGM_SECURE_STORAGE_VIOLATION 0x3f
#define PGM_MONITOR 0x40
#define PGM_PER 0x80
#define PGM_CRYPTO_OPERATION 0x119
@@ -747,8 +750,6 @@ struct kvm_vcpu_arch {
struct hrtimer ckc_timer;
struct kvm_s390_pgm_info pgm;
struct gmap *gmap;
- /* backup location for the currently enabled gmap when scheduled out */
- struct gmap *enabled_gmap;
struct kvm_guestdbg_info_arch guestdbg;
unsigned long pfault_token;
unsigned long pfault_select;
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 48c64716d1f2..42a092fa1029 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -165,8 +165,7 @@ struct lowcore {
__u64 percpu_offset; /* 0x03b8 */
__u8 pad_0x03c0[0x03c8-0x03c0]; /* 0x03c0 */
__u64 machine_flags; /* 0x03c8 */
- __u64 gmap; /* 0x03d0 */
- __u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */
+ __u8 pad_0x03d0[0x0400-0x03d0]; /* 0x03d0 */
__u32 return_lpswe; /* 0x0400 */
__u32 return_mcck_lpswe; /* 0x0404 */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 16d62a4ecccc..b13a46e2e931 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -10,15 +10,10 @@
#include <linux/const.h>
#include <asm/types.h>
+#include <asm/asm.h>
-#define _PAGE_SHIFT CONFIG_PAGE_SHIFT
-#define _PAGE_SIZE (_AC(1, UL) << _PAGE_SHIFT)
-#define _PAGE_MASK (~(_PAGE_SIZE - 1))
+#include <vdso/page.h>
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT _PAGE_SHIFT
-#define PAGE_SIZE _PAGE_SIZE
-#define PAGE_MASK _PAGE_MASK
#define PAGE_DEFAULT_ACC _AC(0, UL)
/* storage-protection override */
#define PAGE_SPO_ACC 9
@@ -148,11 +143,12 @@ static inline int page_reset_referenced(unsigned long addr)
int cc;
asm volatile(
- " rrbe 0,%1\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc) : "a" (addr) : "cc");
- return cc;
+ " rrbe 0,%[addr]\n"
+ CC_IPM(cc)
+ : CC_OUT(cc, cc)
+ : [addr] "a" (addr)
+ : CC_CLOBBER);
+ return CC_TRANSFORM(cc);
}
/* Bits in the storage key */
diff --git a/arch/s390/include/asm/pai.h b/arch/s390/include/asm/pai.h
index 25f2077ba3c9..ebeabd0aaa51 100644
--- a/arch/s390/include/asm/pai.h
+++ b/arch/s390/include/asm/pai.h
@@ -11,6 +11,7 @@
#include <linux/jump_label.h>
#include <asm/lowcore.h>
#include <asm/ptrace.h>
+#include <asm/asm.h>
struct qpaci_info_block {
u64 header;
@@ -33,12 +34,11 @@ static inline int qpaci(struct qpaci_info_block *info)
" lgr 0,%[size]\n"
" .insn s,0xb28f0000,%[info]\n"
" lgr %[size],0\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [info] "=Q" (*info), [size] "+&d" (size)
+ CC_IPM(cc)
+ : CC_OUT(cc, cc), [info] "=Q" (*info), [size] "+&d" (size)
:
- : "0", "cc", "memory");
- return cc ? (size + 1) * sizeof(u64) : 0;
+ : CC_CLOBBER_LIST("0", "memory"));
+ return CC_TRANSFORM(cc) ? (size + 1) * sizeof(u64) : 0;
}
#define PAI_CRYPTO_BASE 0x1000 /* First event number */
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 9d920ced6047..5013a690837e 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -107,9 +107,10 @@ struct zpci_bus {
struct list_head resources;
struct list_head bus_next;
struct resource bus_resource;
- int pchid;
+ int topo; /* TID if topo_is_tid, PCHID otherwise */
int domain_nr;
- bool multifunction;
+ u8 multifunction : 1;
+ u8 topo_is_tid : 1;
enum pci_bus_speed max_bus_speed;
};
@@ -130,9 +131,12 @@ struct zpci_dev {
u16 vfn; /* virtual function number */
u16 pchid; /* physical channel ID */
u16 maxstbl; /* Maximum store block size */
+ u16 rid; /* RID as supplied by firmware */
+ u16 tid; /* Topology for which RID is valid */
u8 pfgid; /* function group ID */
u8 pft; /* pci function type */
u8 port;
+ u8 fidparm;
u8 dtsm; /* Supported DT mask */
u8 rid_available : 1;
u8 has_hp_slot : 1;
@@ -140,7 +144,8 @@ struct zpci_dev {
u8 is_physfn : 1;
u8 util_str_avail : 1;
u8 irqs_registered : 1;
- u8 reserved : 2;
+ u8 tid_avail : 1;
+ u8 reserved : 1;
unsigned int devfn; /* DEVFN part of the RID*/
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
@@ -210,12 +215,14 @@ extern struct airq_iv *zpci_aif_sbv;
----------------------------------------------------------------------------- */
/* Base stuff */
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
+int zpci_add_device(struct zpci_dev *zdev);
int zpci_enable_device(struct zpci_dev *);
int zpci_disable_device(struct zpci_dev *);
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh);
int zpci_deconfigure_device(struct zpci_dev *zdev);
void zpci_device_reserved(struct zpci_dev *zdev);
bool zpci_is_device_configured(struct zpci_dev *zdev);
+int zpci_scan_devices(void);
int zpci_hot_reset_device(struct zpci_dev *zdev);
int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64, u8 *);
@@ -225,7 +232,7 @@ void zpci_update_fh(struct zpci_dev *zdev, u32 fh);
/* CLP */
int clp_setup_writeback_mio(void);
-int clp_scan_pci_devices(void);
+int clp_scan_pci_devices(struct list_head *scan_list);
int clp_query_pci_fn(struct zpci_dev *zdev);
int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as);
int clp_disable_fh(struct zpci_dev *zdev, u32 *fh);
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index f0c677ddd270..3fff2f7095c8 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -110,7 +110,8 @@ struct clp_req_query_pci {
struct clp_rsp_query_pci {
struct clp_rsp_hdr hdr;
u16 vfn; /* virtual fn number */
- u16 : 3;
+ u16 : 2;
+ u16 tid_avail : 1;
u16 rid_avail : 1;
u16 is_physfn : 1;
u16 reserved1 : 1;
@@ -122,16 +123,18 @@ struct clp_rsp_query_pci {
u16 pchid;
__le32 bar[PCI_STD_NUM_BARS];
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
- u16 : 12;
- u16 port : 4;
+ u8 fidparm;
+ u8 reserved3 : 4;
+ u8 port : 4;
u8 fmb_len;
u8 pft; /* pci function type */
u64 sdma; /* start dma as */
u64 edma; /* end dma as */
#define ZPCI_RID_MASK_DEVFN 0x00ff
u16 rid; /* BUS/DEVFN PCI address */
- u16 reserved0;
- u32 reserved[10];
+ u32 reserved0;
+ u16 tid;
+ u32 reserved[9];
u32 uid; /* user defined id */
u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
u32 reserved2[16];
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index 2686bee800e3..43a5ea4ee20f 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -143,7 +143,7 @@ static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max)
static inline int zpci_memcpy_fromio(void *dst,
const volatile void __iomem *src,
- unsigned long n)
+ size_t n)
{
int size, rc = 0;
@@ -162,7 +162,7 @@ static inline int zpci_memcpy_fromio(void *dst,
}
static inline int zpci_memcpy_toio(volatile void __iomem *dst,
- const void *src, unsigned long n)
+ const void *src, size_t n)
{
int size, rc = 0;
@@ -187,7 +187,7 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
}
static inline int zpci_memset_io(volatile void __iomem *dst,
- unsigned char val, size_t count)
+ int val, size_t count)
{
u8 *src = kmalloc(count, GFP_KERNEL);
int rc;
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 66200d4a2134..e53894cedf08 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -37,9 +37,9 @@ extern ssize_t cpumf_events_sysfs_show(struct device *dev,
/* Perf callbacks */
struct pt_regs;
-extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
-extern unsigned long perf_misc_flags(struct pt_regs *regs);
-#define perf_misc_flags(regs) perf_misc_flags(regs)
+extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_arch_misc_flags(struct pt_regs *regs);
+#define perf_arch_misc_flags(regs) perf_arch_misc_flags(regs)
#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
/* Perf pt_regs extension for sample-data-entry indicators */
@@ -49,6 +49,7 @@ struct perf_sf_sde_regs {
};
#define perf_arch_fetch_caller_regs(regs, __ip) do { \
+ (regs)->psw.mask = 0; \
(regs)->psw.addr = (__ip); \
(regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \
offsetof(struct stack_frame, back_chain); \
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 0ffbaf741955..8b67036edb69 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -338,7 +338,7 @@ static inline int is_module_addr(void *addr)
#define _REGION2_INDEX (0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX (0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX (0x7ffUL << _SEGMENT_SHIFT)
-#define _PAGE_INDEX (0xffUL << _PAGE_SHIFT)
+#define _PAGE_INDEX (0xffUL << PAGE_SHIFT)
#define _REGION1_SIZE (1UL << _REGION1_SHIFT)
#define _REGION2_SIZE (1UL << _REGION2_SHIFT)
diff --git a/arch/s390/include/asm/physmem_info.h b/arch/s390/include/asm/physmem_info.h
index f45cfc8bc233..51b68a43e195 100644
--- a/arch/s390/include/asm/physmem_info.h
+++ b/arch/s390/include/asm/physmem_info.h
@@ -9,6 +9,7 @@ enum physmem_info_source {
MEM_DETECT_NONE = 0,
MEM_DETECT_SCLP_STOR_INFO,
MEM_DETECT_DIAG260,
+ MEM_DETECT_DIAG500_STOR_LIMIT,
MEM_DETECT_SCLP_READ_INFO,
MEM_DETECT_BIN_SEARCH
};
@@ -107,6 +108,8 @@ static inline const char *get_physmem_info_source(void)
return "sclp storage info";
case MEM_DETECT_DIAG260:
return "diag260";
+ case MEM_DETECT_DIAG500_STOR_LIMIT:
+ return "diag500 storage limit";
case MEM_DETECT_SCLP_READ_INFO:
return "sclp read info";
case MEM_DETECT_BIN_SEARCH:
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index deca3f221836..0cde7e240373 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -5,6 +5,7 @@
#include <asm/current.h>
#include <linux/thread_info.h>
#include <asm/atomic_ops.h>
+#include <asm/cmpxchg.h>
#include <asm/march.h>
#ifdef MARCH_HAS_Z196_FEATURES
@@ -22,12 +23,10 @@ static __always_inline void preempt_count_set(int pc)
{
int old, new;
+ old = READ_ONCE(get_lowcore()->preempt_count);
do {
- old = READ_ONCE(get_lowcore()->preempt_count);
- new = (old & PREEMPT_NEED_RESCHED) |
- (pc & ~PREEMPT_NEED_RESCHED);
- } while (__atomic_cmpxchg(&get_lowcore()->preempt_count,
- old, new) != old);
+ new = (old & PREEMPT_NEED_RESCHED) | (pc & ~PREEMPT_NEED_RESCHED);
+ } while (!arch_try_cmpxchg(&get_lowcore()->preempt_count, &old, new));
}
static __always_inline void set_preempt_need_resched(void)
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 9a5236acc0a8..8761fd01a9f0 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -39,6 +39,7 @@
#include <asm/runtime_instr.h>
#include <asm/irqflags.h>
#include <asm/alternative.h>
+#include <asm/fault.h>
struct pcpu {
unsigned long ec_mask; /* bit mask for ec_xxx functions */
@@ -187,10 +188,8 @@ struct thread_struct {
unsigned long hardirq_timer; /* task cputime in hardirq context */
unsigned long softirq_timer; /* task cputime in softirq context */
const sys_call_ptr_t *sys_call_table; /* system call table address */
- unsigned long gmap_addr; /* address of last gmap fault. */
- unsigned int gmap_write_flag; /* gmap fault write indication */
+ union teid gmap_teid; /* address and flags of last gmap fault */
unsigned int gmap_int_code; /* int code of last gmap fault */
- unsigned int gmap_pfault; /* signal of a pending guest pfault */
int ufpu_flags; /* user fpu flags */
int kfpu_flags; /* kernel fpu flags */
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 2ad9324f6338..788bc4467445 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -14,11 +14,13 @@
#define PIF_SYSCALL 0 /* inside a system call */
#define PIF_EXECVE_PGSTE_RESTART 1 /* restart execve for PGSTE binaries */
#define PIF_SYSCALL_RET_SET 2 /* return value was set via ptrace */
+#define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */
#define PIF_FTRACE_FULL_REGS 4 /* all register contents valid (ftrace) */
#define _PIF_SYSCALL BIT(PIF_SYSCALL)
#define _PIF_EXECVE_PGSTE_RESTART BIT(PIF_EXECVE_PGSTE_RESTART)
#define _PIF_SYSCALL_RET_SET BIT(PIF_SYSCALL_RET_SET)
+#define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT)
#define _PIF_FTRACE_FULL_REGS BIT(PIF_FTRACE_FULL_REGS)
#define PSW32_MASK_PER _AC(0x40000000, UL)
diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h
index 06fbabe2f66c..cb4cc0f59012 100644
--- a/arch/s390/include/asm/set_memory.h
+++ b/arch/s390/include/asm/set_memory.h
@@ -62,5 +62,6 @@ __SET_MEMORY_FUNC(set_memory_4k, SET_MEMORY_4K)
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
+bool kernel_page_present(struct page *page);
#endif
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index edee63da08e7..472943b77066 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -38,6 +38,8 @@
#ifndef __ASSEMBLY__
+#include <asm/asm.h>
+
static inline int ____pcpu_sigp(u16 addr, u8 order, unsigned long parm,
u32 *status)
{
@@ -46,13 +48,12 @@ static inline int ____pcpu_sigp(u16 addr, u8 order, unsigned long parm,
asm volatile(
" sigp %[r1],%[addr],0(%[order])\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=&d" (cc), [r1] "+&d" (r1.pair)
+ CC_IPM(cc)
+ : CC_OUT(cc, cc), [r1] "+d" (r1.pair)
: [addr] "d" (addr), [order] "a" (order)
- : "cc");
+ : CC_CLOBBER);
*status = r1.even;
- return cc;
+ return CC_TRANSFORM(cc);
}
static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h
index c549893602ea..668dfc5de538 100644
--- a/arch/s390/include/asm/sparsemem.h
+++ b/arch/s390/include/asm/sparsemem.h
@@ -2,7 +2,23 @@
#ifndef _ASM_S390_SPARSEMEM_H
#define _ASM_S390_SPARSEMEM_H
-#define SECTION_SIZE_BITS 28
+#define SECTION_SIZE_BITS 27
#define MAX_PHYSMEM_BITS CONFIG_MAX_PHYSMEM_BITS
+#ifdef CONFIG_NUMA
+
+static inline int memory_add_physaddr_to_nid(u64 addr)
+{
+ return 0;
+}
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+
+static inline int phys_to_target_node(u64 start)
+{
+ return 0;
+}
+#define phys_to_target_node phys_to_target_node
+
+#endif /* CONFIG_NUMA */
+
#endif /* _ASM_S390_SPARSEMEM_H */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 77d5e804af93..ac868a9bb0d1 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -57,8 +57,10 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lp)
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
+ int old = 0;
+
barrier();
- return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
+ return likely(arch_try_cmpxchg(&lp->lock, &old, SPINLOCK_LOCKVAL));
}
static inline void arch_spin_lock(arch_spinlock_t *lp)
@@ -118,7 +120,9 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
static inline void arch_write_lock(arch_rwlock_t *rw)
{
- if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
+ int old = 0;
+
+ if (!arch_try_cmpxchg(&rw->cnts, &old, 0x30000))
arch_write_lock_wait(rw);
}
@@ -133,8 +137,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
int old;
old = READ_ONCE(rw->cnts);
- return (!(old & 0xffff0000) &&
- __atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
+ return (!(old & 0xffff0000) && arch_try_cmpxchg(&rw->cnts, &old, old + 1));
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -142,7 +145,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
int old;
old = READ_ONCE(rw->cnts);
- return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
+ return !old && arch_try_cmpxchg(&rw->cnts, &old, 0x30000);
}
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 640901f2fbc3..8fe56456feab 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -13,6 +13,7 @@
#include <linux/preempt.h>
#include <linux/time64.h>
#include <asm/lowcore.h>
+#include <asm/asm.h>
/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
@@ -44,11 +45,12 @@ static inline int set_tod_clock(__u64 time)
int cc;
asm volatile(
- " sck %1\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc) : "Q" (time) : "cc");
- return cc;
+ " sck %[time]\n"
+ CC_IPM(cc)
+ : CC_OUT(cc, cc)
+ : [time] "Q" (time)
+ : CC_CLOBBER);
+ return CC_TRANSFORM(cc);
}
static inline int store_tod_clock_ext_cc(union tod_clock *clk)
@@ -56,11 +58,12 @@ static inline int store_tod_clock_ext_cc(union tod_clock *clk)
int cc;
asm volatile(
- " stcke %1\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc), "=Q" (*clk) : : "cc");
- return cc;
+ " stcke %[clk]\n"
+ CC_IPM(cc)
+ : CC_OUT(cc, cc), [clk] "=Q" (*clk)
+ :
+ : CC_CLOBBER);
+ return CC_TRANSFORM(cc);
}
static __always_inline void store_tod_clock_ext(union tod_clock *tod)
@@ -149,12 +152,11 @@ struct ptff_qui {
" lgr 0,%[reg0]\n" \
" lgr 1,%[reg1]\n" \
" ptff\n" \
- " ipm %[rc]\n" \
- " srl %[rc],28\n" \
- : [rc] "=&d" (rc), "+m" (*(struct addrtype *)reg1) \
+ CC_IPM(rc) \
+ : CC_OUT(rc, rc), "+m" (*(struct addrtype *)reg1) \
: [reg0] "d" (reg0), [reg1] "d" (reg1) \
- : "cc", "0", "1"); \
- rc; \
+ : CC_CLOBBER_LIST("0", "1")); \
+ CC_TRANSFORM(rc); \
})
static inline unsigned long local_tick_disable(void)
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index 153d93468b77..dc332609f2c3 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -2,7 +2,7 @@
/*
* Ultravisor Interfaces
*
- * Copyright IBM Corp. 2019, 2022
+ * Copyright IBM Corp. 2019, 2024
*
* Author(s):
* Vasily Gorbik <gor@linux.ibm.com>
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/gmap.h>
+#include <asm/asm.h>
#define UVC_CC_OK 0
#define UVC_CC_ERROR 1
@@ -28,9 +29,11 @@
#define UVC_RC_INV_STATE 0x0003
#define UVC_RC_INV_LEN 0x0005
#define UVC_RC_NO_RESUME 0x0007
+#define UVC_RC_MORE_DATA 0x0100
#define UVC_RC_NEED_DESTROY 0x8000
#define UVC_CMD_QUI 0x0001
+#define UVC_CMD_QUERY_KEYS 0x0002
#define UVC_CMD_INIT_UV 0x000f
#define UVC_CMD_CREATE_SEC_CONF 0x0100
#define UVC_CMD_DESTROY_SEC_CONF 0x0101
@@ -61,6 +64,7 @@
#define UVC_CMD_ADD_SECRET 0x1031
#define UVC_CMD_LIST_SECRETS 0x1033
#define UVC_CMD_LOCK_SECRETS 0x1034
+#define UVC_CMD_RETR_SECRET 0x1035
/* Bits in installed uv calls */
enum uv_cmds_inst {
@@ -94,6 +98,8 @@ enum uv_cmds_inst {
BIT_UVC_CMD_ADD_SECRET = 29,
BIT_UVC_CMD_LIST_SECRETS = 30,
BIT_UVC_CMD_LOCK_SECRETS = 31,
+ BIT_UVC_CMD_RETR_SECRET = 33,
+ BIT_UVC_CMD_QUERY_KEYS = 34,
};
enum uv_feat_ind {
@@ -140,11 +146,27 @@ struct uv_cb_qui {
u64 reservedf0; /* 0x00f0 */
u64 supp_add_secret_req_ver; /* 0x00f8 */
u64 supp_add_secret_pcf; /* 0x0100 */
- u64 supp_secret_types; /* 0x0180 */
- u16 max_secrets; /* 0x0110 */
- u8 reserved112[0x120 - 0x112]; /* 0x0112 */
+ u64 supp_secret_types; /* 0x0108 */
+ u16 max_assoc_secrets; /* 0x0110 */
+ u16 max_retr_secrets; /* 0x0112 */
+ u8 reserved114[0x120 - 0x114]; /* 0x0114 */
} __packed __aligned(8);
+struct uv_key_hash {
+ u64 dword[4];
+} __packed __aligned(8);
+
+#define UVC_QUERY_KEYS_IDX_HK 0
+#define UVC_QUERY_KEYS_IDX_BACK_HK 1
+
+/* Query Ultravisor Keys */
+struct uv_cb_query_keys {
+ struct uv_cb_header header; /* 0x0000 */
+ u64 reserved08[3]; /* 0x0008 */
+ struct uv_key_hash key_hashes[15]; /* 0x0020 */
+} __packed __aligned(8);
+static_assert(sizeof(struct uv_cb_query_keys) == 0x200);
+
/* Initialize Ultravisor */
struct uv_cb_init {
struct uv_cb_header header;
@@ -317,7 +339,6 @@ struct uv_cb_dump_complete {
* A common UV call struct for pv guests that contains a single address
* Examples:
* Add Secret
- * List Secrets
*/
struct uv_cb_guest_addr {
struct uv_cb_header header;
@@ -326,18 +347,102 @@ struct uv_cb_guest_addr {
u64 reserved28[4];
} __packed __aligned(8);
+#define UVC_RC_RETR_SECR_BUF_SMALL 0x0109
+#define UVC_RC_RETR_SECR_STORE_EMPTY 0x010f
+#define UVC_RC_RETR_SECR_INV_IDX 0x0110
+#define UVC_RC_RETR_SECR_INV_SECRET 0x0111
+
+struct uv_cb_retr_secr {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u16 secret_idx;
+ u16 reserved1a;
+ u32 buf_size;
+ u64 buf_addr;
+ u64 reserved28[4];
+} __packed __aligned(8);
+
+struct uv_cb_list_secrets {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u8 reserved18[6];
+ u16 start_idx;
+ u64 list_addr;
+ u64 reserved28[4];
+} __packed __aligned(8);
+
+enum uv_secret_types {
+ UV_SECRET_INVAL = 0x0,
+ UV_SECRET_NULL = 0x1,
+ UV_SECRET_ASSOCIATION = 0x2,
+ UV_SECRET_PLAIN = 0x3,
+ UV_SECRET_AES_128 = 0x4,
+ UV_SECRET_AES_192 = 0x5,
+ UV_SECRET_AES_256 = 0x6,
+ UV_SECRET_AES_XTS_128 = 0x7,
+ UV_SECRET_AES_XTS_256 = 0x8,
+ UV_SECRET_HMAC_SHA_256 = 0x9,
+ UV_SECRET_HMAC_SHA_512 = 0xa,
+ /* 0x0b - 0x10 reserved */
+ UV_SECRET_ECDSA_P256 = 0x11,
+ UV_SECRET_ECDSA_P384 = 0x12,
+ UV_SECRET_ECDSA_P521 = 0x13,
+ UV_SECRET_ECDSA_ED25519 = 0x14,
+ UV_SECRET_ECDSA_ED448 = 0x15,
+};
+
+/**
+ * struct uv_secret_list_item_hdr - UV secret metadata.
+ * @index: Index of the secret in the secret list.
+ * @type: Type of the secret. See `enum uv_secret_types`.
+ * @length: Length of the stored secret.
+ */
+struct uv_secret_list_item_hdr {
+ u16 index;
+ u16 type;
+ u32 length;
+} __packed __aligned(8);
+
+#define UV_SECRET_ID_LEN 32
+/**
+ * struct uv_secret_list_item - UV secret entry.
+ * @hdr: The metadata of this secret.
+ * @id: The ID of this secret, not the secret itself.
+ */
+struct uv_secret_list_item {
+ struct uv_secret_list_item_hdr hdr;
+ u64 reserverd08;
+ u8 id[UV_SECRET_ID_LEN];
+} __packed __aligned(8);
+
+/**
+ * struct uv_secret_list - UV secret-metadata list.
+ * @num_secr_stored: Number of secrets stored in this list.
+ * @total_num_secrets: Number of secrets stored in the UV for this guest.
+ * @next_secret_idx: Positive number if more secrets are available, zero otherwise.
+ * @secrets: Up to 85 UV-secret metadata entries.
+ */
+struct uv_secret_list {
+ u16 num_secr_stored;
+ u16 total_num_secrets;
+ u16 next_secret_idx;
+ u16 reserved_06;
+ u64 reserved_08;
+ struct uv_secret_list_item secrets[85];
+} __packed __aligned(8);
+static_assert(sizeof(struct uv_secret_list) == PAGE_SIZE);
+
static inline int __uv_call(unsigned long r1, unsigned long r2)
{
int cc;
asm volatile(
- " .insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc)
+ " .insn rrf,0xb9a40000,%[r1],%[r2],0,0\n"
+ CC_IPM(cc)
+ : CC_OUT(cc, cc)
: [r1] "a" (r1), [r2] "a" (r2)
- : "memory", "cc");
- return cc;
+ : CC_CLOBBER_LIST("memory"));
+ return CC_TRANSFORM(cc);
}
static inline int uv_call(unsigned long r1, unsigned long r2)
@@ -382,6 +487,48 @@ static inline int uv_cmd_nodata(u64 handle, u16 cmd, u16 *rc, u16 *rrc)
return cc ? -EINVAL : 0;
}
+/**
+ * uv_list_secrets() - Do a List Secrets UVC.
+ *
+ * @buf: Buffer to write list into; size of one page.
+ * @start_idx: The smallest index that should be included in the list.
+ *             For the first invocation use 0.
+ * @rc: Pointer to store the return code or NULL.
+ * @rrc: Pointer to store the return reason code or NULL.
+ *
+ * This function calls the List Secrets UVC. The result is written into `buf`,
+ * which needs to be at least one page of writable memory. `buf` consists of
+ * the %struct uv_secret_list header fields followed by up to 85
+ * %struct uv_secret_list_item entries.
+ *
+ * For `start_idx` use _0_ for the first call. If more secrets are available
+ * than fit into the page, `rc` is `UVC_RC_MORE_DATA`; in this case use
+ * `uv_secret_list.next_secret_idx` as `start_idx` for the next call.
+ *
+ * Context: might sleep.
+ *
+ * Return: The UVC condition code.
+ */
+static inline int uv_list_secrets(struct uv_secret_list *buf, u16 start_idx,
+ u16 *rc, u16 *rrc)
+{
+ struct uv_cb_list_secrets uvcb = {
+ .header.len = sizeof(uvcb),
+ .header.cmd = UVC_CMD_LIST_SECRETS,
+ .start_idx = start_idx,
+ .list_addr = (u64)buf,
+ };
+ int cc = uv_call_sched(0, (u64)&uvcb);
+
+ if (rc)
+ *rc = uvcb.header.rc;
+ if (rrc)
+ *rrc = uvcb.header.rrc;
+
+ return cc;
+}
+
struct uv_info {
unsigned long inst_calls_list[4];
unsigned long uv_base_stor_len;
@@ -402,7 +549,8 @@ struct uv_info {
unsigned long supp_add_secret_req_ver;
unsigned long supp_add_secret_pcf;
unsigned long supp_secret_types;
- unsigned short max_secrets;
+ unsigned short max_assoc_secrets;
+ unsigned short max_retr_secrets;
};
extern struct uv_info uv_info;
@@ -468,6 +616,10 @@ static inline int uv_remove_shared(unsigned long addr)
return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
}
+int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN],
+ struct uv_secret_list_item_hdr *secret);
+int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size);
+
extern int prot_virt_host;
static inline int is_prot_virt_host(void)
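A pagination sketch for the uv_list_secrets() kernel-doc above (an assumption for illustration, not part of the patch; "handle" stands in for an arbitrary consumer):

static int uv_walk_secrets_sketch(struct uv_secret_list *buf,
				  void (*handle)(struct uv_secret_list_item *item))
{
	u16 rc, rrc, start_idx = 0;
	int i, cc;

	do {
		cc = uv_list_secrets(buf, start_idx, &rc, &rrc);
		if (cc)
			return -EIO;
		for (i = 0; i < buf->num_secr_stored; i++)
			handle(&buf->secrets[i]);
		start_idx = buf->next_secret_idx;
	} while (rc == UVC_RC_MORE_DATA);
	return 0;
}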
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 91061f0279be..92c73e4d97a9 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -12,9 +12,6 @@ int vdso_getcpu_init(void);
#endif /* __ASSEMBLY__ */
-/* Default link address for the vDSO */
-#define VDSO_LBASE 0
-
#define __VVAR_PAGES 2
#define VDSO_VERSION_STRING LINUX_2.6.29
diff --git a/arch/s390/include/asm/vdso/data.h b/arch/s390/include/asm/vdso/data.h
deleted file mode 100644
index 0e2b40ef69b0..000000000000
--- a/arch/s390/include/asm/vdso/data.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __S390_ASM_VDSO_DATA_H
-#define __S390_ASM_VDSO_DATA_H
-
-#include <linux/types.h>
-
-struct arch_vdso_data {
- __s64 tod_steering_delta;
- __u64 tod_steering_end;
-};
-
-#endif /* __S390_ASM_VDSO_DATA_H */
diff --git a/arch/s390/include/asm/vdso/time_data.h b/arch/s390/include/asm/vdso/time_data.h
new file mode 100644
index 000000000000..8a08752422e6
--- /dev/null
+++ b/arch/s390/include/asm/vdso/time_data.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __S390_ASM_VDSO_TIME_DATA_H
+#define __S390_ASM_VDSO_TIME_DATA_H
+
+#include <linux/types.h>
+
+struct arch_vdso_time_data {
+ __s64 tod_steering_delta;
+ __u64 tod_steering_end;
+};
+
+#endif /* __S390_ASM_VDSO_TIME_DATA_H */
diff --git a/arch/s390/include/asm/vdso/vsyscall.h b/arch/s390/include/asm/vdso/vsyscall.h
index 3c5d5e47814e..3eb576ecd3bd 100644
--- a/arch/s390/include/asm/vdso/vsyscall.h
+++ b/arch/s390/include/asm/vdso/vsyscall.h
@@ -7,7 +7,6 @@
#ifndef __ASSEMBLY__
#include <linux/hrtimer.h>
-#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>
@@ -17,10 +16,6 @@ enum vvar_pages {
VVAR_NR_PAGES
};
-/*
- * Update the vDSO data page to keep in sync with kernel timekeeping.
- */
-
static __always_inline struct vdso_data *__s390_get_k_vdso_data(void)
{
return vdso_data;
diff --git a/arch/s390/include/uapi/asm/dasd.h b/arch/s390/include/uapi/asm/dasd.h
index b11d98800458..7c364b33c84d 100644
--- a/arch/s390/include/uapi/asm/dasd.h
+++ b/arch/s390/include/uapi/asm/dasd.h
@@ -294,7 +294,7 @@ struct dasd_snid_ioctl_data {
/********************************************************************************
* SECTION: Definition of IOCTLs
*
- * Here ist how the ioctl-nr should be used:
+ * Here is how the ioctl-nr should be used:
* 0 - 31 DASD driver itself
* 32 - 239 still open
* 240 - 255 reserved for EMC
diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h
index 60431d00e6bd..ca42e941675d 100644
--- a/arch/s390/include/uapi/asm/pkey.h
+++ b/arch/s390/include/uapi/asm/pkey.h
@@ -48,21 +48,22 @@
/* the newer ioctls use a pkey_key_type enum for type information */
enum pkey_key_type {
- PKEY_TYPE_CCA_DATA = (__u32) 1,
- PKEY_TYPE_CCA_CIPHER = (__u32) 2,
- PKEY_TYPE_EP11 = (__u32) 3,
- PKEY_TYPE_CCA_ECC = (__u32) 0x1f,
- PKEY_TYPE_EP11_AES = (__u32) 6,
- PKEY_TYPE_EP11_ECC = (__u32) 7,
- PKEY_TYPE_PROTKEY = (__u32) 8,
+ PKEY_TYPE_CCA_DATA = (__u32)1,
+ PKEY_TYPE_CCA_CIPHER = (__u32)2,
+ PKEY_TYPE_EP11 = (__u32)3,
+ PKEY_TYPE_CCA_ECC = (__u32)0x1f,
+ PKEY_TYPE_EP11_AES = (__u32)6,
+ PKEY_TYPE_EP11_ECC = (__u32)7,
+ PKEY_TYPE_PROTKEY = (__u32)8,
+ PKEY_TYPE_UVSECRET = (__u32)9,
};
/* the newer ioctls use a pkey_key_size enum for key size information */
enum pkey_key_size {
- PKEY_SIZE_AES_128 = (__u32) 128,
- PKEY_SIZE_AES_192 = (__u32) 192,
- PKEY_SIZE_AES_256 = (__u32) 256,
- PKEY_SIZE_UNKNOWN = (__u32) 0xFFFFFFFF,
+ PKEY_SIZE_AES_128 = (__u32)128,
+ PKEY_SIZE_AES_192 = (__u32)192,
+ PKEY_SIZE_AES_256 = (__u32)256,
+ PKEY_SIZE_UNKNOWN = (__u32)0xFFFFFFFF,
};
/* some of the newer ioctls use these flags */
@@ -125,6 +126,7 @@ struct pkey_genseck {
__u32 keytype; /* in: key type to generate */
struct pkey_seckey seckey; /* out: the secure key blob */
};
+
#define PKEY_GENSECK _IOWR(PKEY_IOCTL_MAGIC, 0x01, struct pkey_genseck)
/*
@@ -137,6 +139,7 @@ struct pkey_clr2seck {
struct pkey_clrkey clrkey; /* in: the clear key value */
struct pkey_seckey seckey; /* out: the secure key blob */
};
+
#define PKEY_CLR2SECK _IOWR(PKEY_IOCTL_MAGIC, 0x02, struct pkey_clr2seck)
/*
@@ -148,6 +151,7 @@ struct pkey_sec2protk {
struct pkey_seckey seckey; /* in: the secure key blob */
struct pkey_protkey protkey; /* out: the protected key */
};
+
#define PKEY_SEC2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x03, struct pkey_sec2protk)
/*
@@ -158,6 +162,7 @@ struct pkey_clr2protk {
struct pkey_clrkey clrkey; /* in: the clear key value */
struct pkey_protkey protkey; /* out: the protected key */
};
+
#define PKEY_CLR2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x04, struct pkey_clr2protk)
/*
@@ -169,6 +174,7 @@ struct pkey_findcard {
__u16 cardnr; /* out: card number */
__u16 domain; /* out: domain number */
};
+
#define PKEY_FINDCARD _IOWR(PKEY_IOCTL_MAGIC, 0x05, struct pkey_findcard)
/*
@@ -178,6 +184,7 @@ struct pkey_skey2pkey {
struct pkey_seckey seckey; /* in: the secure key blob */
struct pkey_protkey protkey; /* out: the protected key */
};
+
#define PKEY_SKEY2PKEY _IOWR(PKEY_IOCTL_MAGIC, 0x06, struct pkey_skey2pkey)
/*
@@ -195,6 +202,7 @@ struct pkey_verifykey {
__u16 keysize; /* out: key size in bits */
__u32 attributes; /* out: attribute bits */
};
+
#define PKEY_VERIFYKEY _IOWR(PKEY_IOCTL_MAGIC, 0x07, struct pkey_verifykey)
#define PKEY_VERIFY_ATTR_AES 0x00000001 /* key is an AES key */
#define PKEY_VERIFY_ATTR_OLD_MKVP 0x00000100 /* key has old MKVP value */
@@ -226,6 +234,7 @@ struct pkey_kblob2pkey {
__u32 keylen; /* in: the key blob length */
struct pkey_protkey protkey; /* out: the protected key */
};
+
#define PKEY_KBLOB2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x0A, struct pkey_kblob2pkey)
/*
@@ -258,6 +267,7 @@ struct pkey_genseck2 {
__u32 keylen; /* in: available key blob buffer size */
/* out: actual key blob size */
};
+
#define PKEY_GENSECK2 _IOWR(PKEY_IOCTL_MAGIC, 0x11, struct pkey_genseck2)
/*
@@ -292,6 +302,7 @@ struct pkey_clr2seck2 {
__u32 keylen; /* in: available key blob buffer size */
/* out: actual key blob size */
};
+
#define PKEY_CLR2SECK2 _IOWR(PKEY_IOCTL_MAGIC, 0x12, struct pkey_clr2seck2)
/*
@@ -329,6 +340,7 @@ struct pkey_verifykey2 {
enum pkey_key_size size; /* out: the key size */
__u32 flags; /* out: additional key info flags */
};
+
#define PKEY_VERIFYKEY2 _IOWR(PKEY_IOCTL_MAGIC, 0x17, struct pkey_verifykey2)
/*
@@ -351,6 +363,7 @@ struct pkey_kblob2pkey2 {
__u32 apqn_entries; /* in: # of apqn target list entries */
struct pkey_protkey protkey; /* out: the protected key */
};
+
#define PKEY_KBLOB2PROTK2 _IOWR(PKEY_IOCTL_MAGIC, 0x1A, struct pkey_kblob2pkey2)
/*
@@ -387,6 +400,7 @@ struct pkey_apqns4key {
__u32 apqn_entries; /* in: max # of apqn entries in the list */
/* out: # apqns stored into the list */
};
+
#define PKEY_APQNS4K _IOWR(PKEY_IOCTL_MAGIC, 0x1B, struct pkey_apqns4key)
/*
@@ -426,6 +440,7 @@ struct pkey_apqns4keytype {
__u32 apqn_entries; /* in: max # of apqn entries in the list */
/* out: # apqns stored into the list */
};
+
#define PKEY_APQNS4KT _IOWR(PKEY_IOCTL_MAGIC, 0x1C, struct pkey_apqns4keytype)
/*
@@ -452,6 +467,7 @@ struct pkey_kblob2pkey3 {
__u32 pkeylen; /* in/out: size of pkey buffer/actual len of pkey */
__u8 __user *pkey; /* in: pkey blob buffer space ptr */
};
+
#define PKEY_KBLOB2PROTK3 _IOWR(PKEY_IOCTL_MAGIC, 0x1D, struct pkey_kblob2pkey3)
#endif /* _UAPI_PKEY_H */
diff --git a/arch/s390/include/uapi/asm/uvdevice.h b/arch/s390/include/uapi/asm/uvdevice.h
index b9c2f14a6af3..4947f26ad9fb 100644
--- a/arch/s390/include/uapi/asm/uvdevice.h
+++ b/arch/s390/include/uapi/asm/uvdevice.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
- * Copyright IBM Corp. 2022
+ * Copyright IBM Corp. 2022, 2024
* Author(s): Steffen Eiden <seiden@linux.ibm.com>
*/
#ifndef __S390_ASM_UVDEVICE_H
@@ -52,7 +52,7 @@ struct uvio_uvdev_info {
__u64 supp_uvio_cmds;
/*
* If bit `n` is set, the Ultravisor(UV) supports the UV-call
- * corresponding to the IOCTL with nr `n` in the calling contextx (host
+ * corresponding to the IOCTL with nr `n` in the calling context (host
* or guest). The value is only valid if the corresponding bit in
* @supp_uvio_cmds is set as well.
*/
@@ -71,6 +71,7 @@ struct uvio_uvdev_info {
#define UVIO_ATT_ADDITIONAL_MAX_LEN 0x8000
#define UVIO_ADD_SECRET_MAX_LEN 0x100000
#define UVIO_LIST_SECRETS_LEN 0x1000
+#define UVIO_RETR_SECRET_MAX_LEN 0x2000
#define UVIO_DEVICE_NAME "uv"
#define UVIO_TYPE_UVC 'u'
@@ -81,22 +82,25 @@ enum UVIO_IOCTL_NR {
UVIO_IOCTL_ADD_SECRET_NR,
UVIO_IOCTL_LIST_SECRETS_NR,
UVIO_IOCTL_LOCK_SECRETS_NR,
+ UVIO_IOCTL_RETR_SECRET_NR,
/* must be the last entry */
UVIO_IOCTL_NUM_IOCTLS
};
-#define UVIO_IOCTL(nr) _IOWR(UVIO_TYPE_UVC, nr, struct uvio_ioctl_cb)
-#define UVIO_IOCTL_UVDEV_INFO UVIO_IOCTL(UVIO_IOCTL_UVDEV_INFO_NR)
-#define UVIO_IOCTL_ATT UVIO_IOCTL(UVIO_IOCTL_ATT_NR)
-#define UVIO_IOCTL_ADD_SECRET UVIO_IOCTL(UVIO_IOCTL_ADD_SECRET_NR)
-#define UVIO_IOCTL_LIST_SECRETS UVIO_IOCTL(UVIO_IOCTL_LIST_SECRETS_NR)
-#define UVIO_IOCTL_LOCK_SECRETS UVIO_IOCTL(UVIO_IOCTL_LOCK_SECRETS_NR)
+#define UVIO_IOCTL(nr) _IOWR(UVIO_TYPE_UVC, nr, struct uvio_ioctl_cb)
+#define UVIO_IOCTL_UVDEV_INFO UVIO_IOCTL(UVIO_IOCTL_UVDEV_INFO_NR)
+#define UVIO_IOCTL_ATT UVIO_IOCTL(UVIO_IOCTL_ATT_NR)
+#define UVIO_IOCTL_ADD_SECRET UVIO_IOCTL(UVIO_IOCTL_ADD_SECRET_NR)
+#define UVIO_IOCTL_LIST_SECRETS UVIO_IOCTL(UVIO_IOCTL_LIST_SECRETS_NR)
+#define UVIO_IOCTL_LOCK_SECRETS UVIO_IOCTL(UVIO_IOCTL_LOCK_SECRETS_NR)
+#define UVIO_IOCTL_RETR_SECRET UVIO_IOCTL(UVIO_IOCTL_RETR_SECRET_NR)
-#define UVIO_SUPP_CALL(nr) (1ULL << (nr))
-#define UVIO_SUPP_UDEV_INFO UVIO_SUPP_CALL(UVIO_IOCTL_UDEV_INFO_NR)
-#define UVIO_SUPP_ATT UVIO_SUPP_CALL(UVIO_IOCTL_ATT_NR)
-#define UVIO_SUPP_ADD_SECRET UVIO_SUPP_CALL(UVIO_IOCTL_ADD_SECRET_NR)
-#define UVIO_SUPP_LIST_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LIST_SECRETS_NR)
-#define UVIO_SUPP_LOCK_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LOCK_SECRETS_NR)
+#define UVIO_SUPP_CALL(nr) (1ULL << (nr))
+#define UVIO_SUPP_UDEV_INFO UVIO_SUPP_CALL(UVIO_IOCTL_UDEV_INFO_NR)
+#define UVIO_SUPP_ATT UVIO_SUPP_CALL(UVIO_IOCTL_ATT_NR)
+#define UVIO_SUPP_ADD_SECRET UVIO_SUPP_CALL(UVIO_IOCTL_ADD_SECRET_NR)
+#define UVIO_SUPP_LIST_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LIST_SECRETS_NR)
+#define UVIO_SUPP_LOCK_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LOCK_SECRETS_NR)
+#define UVIO_SUPP_RETR_SECRET UVIO_SUPP_CALL(UVIO_IOCTL_RETR_SECRET_NR)
#endif /* __S390_ASM_UVDEVICE_H */
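User space would probe for the new Retrieve Secret ioctl the same way as for the existing ones, by testing the corresponding bit in supp_uvio_cmds returned by UVIO_IOCTL_UVDEV_INFO (a sketch, not part of the patch):

static inline int uvdev_supports_retr_secret(const struct uvio_uvdev_info *info)
{
	return (info->supp_uvio_cmds & UVIO_SUPP_RETR_SECRET) != 0;
}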