Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/s390/include/asm/alternative.h6
-rw-r--r--arch/s390/include/asm/arch_hweight.h15
-rw-r--r--arch/s390/include/asm/atomic_ops.h7
-rw-r--r--arch/s390/include/asm/barrier.h4
-rw-r--r--arch/s390/include/asm/cpacf.h207
-rw-r--r--arch/s390/include/asm/ctlreg.h5
-rw-r--r--arch/s390/include/asm/diag.h9
-rw-r--r--arch/s390/include/asm/facility.h37
-rw-r--r--arch/s390/include/asm/fpu-insn-asm.h22
-rw-r--r--arch/s390/include/asm/ftrace.h17
-rw-r--r--arch/s390/include/asm/hiperdispatch.h14
-rw-r--r--arch/s390/include/asm/irq.h2
-rw-r--r--arch/s390/include/asm/lowcore.h4
-rw-r--r--arch/s390/include/asm/march.h38
-rw-r--r--arch/s390/include/asm/mmzone.h17
-rw-r--r--arch/s390/include/asm/module.h14
-rw-r--r--arch/s390/include/asm/page.h2
-rw-r--r--arch/s390/include/asm/pci.h9
-rw-r--r--arch/s390/include/asm/percpu.h7
-rw-r--r--arch/s390/include/asm/perf_event.h24
-rw-r--r--arch/s390/include/asm/pgtable.h1
-rw-r--r--arch/s390/include/asm/pkey.h4
-rw-r--r--arch/s390/include/asm/preempt.h7
-rw-r--r--arch/s390/include/asm/processor.h1
-rw-r--r--arch/s390/include/asm/sclp.h1
-rw-r--r--arch/s390/include/asm/setup.h4
-rw-r--r--arch/s390/include/asm/smp.h4
-rw-r--r--arch/s390/include/asm/topology.h3
-rw-r--r--arch/s390/include/asm/trace/hiperdispatch.h58
-rw-r--r--arch/s390/include/asm/vdso-symbols.h17
-rw-r--r--arch/s390/include/asm/vdso.h12
-rw-r--r--arch/s390/include/asm/vdso/getrandom.h40
-rw-r--r--arch/s390/include/asm/vdso/vsyscall.h15
34 files changed, 480 insertions, 148 deletions
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 4b904110d27c..297bf7157968 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -7,3 +7,4 @@ generated-y += unistd_nr.h
generic-y += asm-offsets.h
generic-y += kvm_types.h
generic-y += mcs_spinlock.h
+generic-y += mmzone.h
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
index de980c938a3e..73e781b56bfe 100644
--- a/arch/s390/include/asm/alternative.h
+++ b/arch/s390/include/asm/alternative.h
@@ -39,11 +39,7 @@
#define ALT_TYPE_SHIFT 20
#define ALT_CTX_SHIFT 28
-#define ALT_FACILITY_EARLY(facility) (ALT_CTX_EARLY << ALT_CTX_SHIFT | \
- ALT_TYPE_FACILITY << ALT_TYPE_SHIFT | \
- (facility) << ALT_DATA_SHIFT)
-
-#define ALT_FACILITY(facility) (ALT_CTX_LATE << ALT_CTX_SHIFT | \
+#define ALT_FACILITY(facility) (ALT_CTX_EARLY << ALT_CTX_SHIFT | \
ALT_TYPE_FACILITY << ALT_TYPE_SHIFT | \
(facility) << ALT_DATA_SHIFT)
diff --git a/arch/s390/include/asm/arch_hweight.h b/arch/s390/include/asm/arch_hweight.h
index 50e23ce854e5..aca08b0acbc1 100644
--- a/arch/s390/include/asm/arch_hweight.h
+++ b/arch/s390/include/asm/arch_hweight.h
@@ -4,6 +4,7 @@
#define _ASM_S390_ARCH_HWEIGHT_H
#include <linux/types.h>
+#include <asm/march.h>
static __always_inline unsigned long popcnt_z196(unsigned long w)
{
@@ -29,9 +30,9 @@ static __always_inline unsigned long popcnt_z15(unsigned long w)
static __always_inline unsigned long __arch_hweight64(__u64 w)
{
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES))
+ if (__is_defined(MARCH_HAS_Z15_FEATURES))
return popcnt_z15(w);
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) {
+ if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
w = popcnt_z196(w);
w += w >> 32;
w += w >> 16;
@@ -43,9 +44,9 @@ static __always_inline unsigned long __arch_hweight64(__u64 w)
static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES))
+ if (__is_defined(MARCH_HAS_Z15_FEATURES))
return popcnt_z15(w);
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) {
+ if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
w = popcnt_z196(w);
w += w >> 16;
w += w >> 8;
@@ -56,9 +57,9 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w)
static __always_inline unsigned int __arch_hweight16(unsigned int w)
{
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z15_FEATURES))
+ if (__is_defined(MARCH_HAS_Z15_FEATURES))
return popcnt_z15((unsigned short)w);
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES)) {
+ if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
w = popcnt_z196(w);
w += w >> 8;
return w & 0xff;
@@ -68,7 +69,7 @@ static __always_inline unsigned int __arch_hweight16(unsigned int w)
static __always_inline unsigned int __arch_hweight8(unsigned int w)
{
- if (IS_ENABLED(CONFIG_HAVE_MARCH_Z196_FEATURES))
+ if (__is_defined(MARCH_HAS_Z196_FEATURES))
return popcnt_z196((unsigned char)w);
return __sw_hweight8(w);
}
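
A minimal user-space sketch of the byte-count fold used in __arch_hweight64() above, assuming POPCNT's pre-z15 behaviour of producing one population count per byte; all names below are illustrative, not kernel API:

#include <stdint.h>
#include <stdio.h>

/* model of what POPCNT returns before z15: a per-byte popcount in each byte */
static uint64_t popcnt_per_byte(uint64_t w)
{
        uint64_t r = 0;

        for (int i = 0; i < 8; i++)
                r |= (uint64_t)__builtin_popcountll((w >> (i * 8)) & 0xff) << (i * 8);
        return r;
}

static unsigned int hweight64_model(uint64_t w)
{
        w = popcnt_per_byte(w);
        w += w >> 32;           /* fold the eight byte counts ... */
        w += w >> 16;
        w += w >> 8;            /* ... down into the lowest byte */
        return w & 0xff;
}

int main(void)
{
        printf("%u\n", hweight64_model(0xf0f0f0f0f0f0f0f0ULL)); /* prints 32 */
        return 0;
}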
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index 742c7919cbcd..65380da9e75f 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -9,6 +9,7 @@
#define __ARCH_S390_ATOMIC_OPS__
#include <linux/limits.h>
+#include <asm/march.h>
static __always_inline int __atomic_read(const atomic_t *v)
{
@@ -56,7 +57,7 @@ static __always_inline void __atomic64_set(atomic64_t *v, s64 i)
}
}
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifdef MARCH_HAS_Z196_FEATURES
#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
static __always_inline op_type op_name(op_type val, op_type *ptr) \
@@ -107,7 +108,7 @@ __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
#undef __ATOMIC_CONST_OPS
#undef __ATOMIC_CONST_OP
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#else /* MARCH_HAS_Z196_FEATURES */
#define __ATOMIC_OP(op_name, op_string) \
static __always_inline int op_name(int val, int *ptr) \
@@ -166,7 +167,7 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
#define __atomic64_add_const(val, ptr) __atomic64_add(val, ptr)
#define __atomic64_add_const_barrier(val, ptr) __atomic64_add(val, ptr)
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#endif /* MARCH_HAS_Z196_FEATURES */
static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
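
The two branches above boil down to a native interlocked add ("laa"/"laag" on z196 and later) versus a compare-and-swap retry loop on older machines. A minimal C11 model of that difference, purely illustrative:

#include <stdatomic.h>
#include <stdio.h>

static int add_native(atomic_int *v, int i)
{
        return atomic_fetch_add(v, i);          /* z196+: one interlocked operation */
}

static int add_cas_loop(atomic_int *v, int i)
{
        int old = atomic_load(v);

        while (!atomic_compare_exchange_weak(v, &old, old + i))
                ;                               /* pre-z196: retry until the CS succeeds */
        return old;
}

int main(void)
{
        atomic_int v = 40;

        printf("%d\n", add_native(&v, 1));      /* prints 40 (old value) */
        printf("%d\n", add_cas_loop(&v, 1));    /* prints 41 (old value) */
        printf("%d\n", atomic_load(&v));        /* prints 42 */
        return 0;
}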
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 82de2a7c4160..d82130d7f2b6 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -8,13 +8,15 @@
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
+#include <asm/march.h>
+
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifdef MARCH_HAS_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define __ASM_BCR_SERIALIZE "bcr 14,0\n"
#else
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index dae8843b164f..1d3a4b0c650f 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -54,6 +54,8 @@
#define CPACF_KM_XTS_256 0x34
#define CPACF_KM_PXTS_128 0x3a
#define CPACF_KM_PXTS_256 0x3c
+#define CPACF_KM_XTS_128_FULL 0x52
+#define CPACF_KM_XTS_256_FULL 0x54
/*
* Function codes for the KMC (CIPHER MESSAGE WITH CHAINING)
@@ -121,23 +123,31 @@
#define CPACF_KMAC_DEA 0x01
#define CPACF_KMAC_TDEA_128 0x02
#define CPACF_KMAC_TDEA_192 0x03
+#define CPACF_KMAC_HMAC_SHA_224 0x70
+#define CPACF_KMAC_HMAC_SHA_256 0x71
+#define CPACF_KMAC_HMAC_SHA_384 0x72
+#define CPACF_KMAC_HMAC_SHA_512 0x73
/*
* Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT)
* instruction
*/
-#define CPACF_PCKMO_QUERY 0x00
-#define CPACF_PCKMO_ENC_DES_KEY 0x01
-#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02
-#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03
-#define CPACF_PCKMO_ENC_AES_128_KEY 0x12
-#define CPACF_PCKMO_ENC_AES_192_KEY 0x13
-#define CPACF_PCKMO_ENC_AES_256_KEY 0x14
-#define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20
-#define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21
-#define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22
-#define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28
-#define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29
+#define CPACF_PCKMO_QUERY 0x00
+#define CPACF_PCKMO_ENC_DES_KEY 0x01
+#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02
+#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03
+#define CPACF_PCKMO_ENC_AES_128_KEY 0x12
+#define CPACF_PCKMO_ENC_AES_192_KEY 0x13
+#define CPACF_PCKMO_ENC_AES_256_KEY 0x14
+#define CPACF_PCKMO_ENC_AES_XTS_128_DOUBLE_KEY 0x15
+#define CPACF_PCKMO_ENC_AES_XTS_256_DOUBLE_KEY 0x16
+#define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20
+#define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21
+#define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22
+#define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28
+#define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29
+#define CPACF_PCKMO_ENC_HMAC_512_KEY 0x76
+#define CPACF_PCKMO_ENC_HMAC_1024_KEY 0x7a
/*
* Function codes for the PRNO (PERFORM RANDOM NUMBER OPERATION)
@@ -165,7 +175,40 @@
#define CPACF_KMA_LAAD 0x200 /* Last-AAD */
#define CPACF_KMA_HS 0x400 /* Hash-subkey Supplied */
+/*
+ * Flags for the KIMD/KLMD (COMPUTE INTERMEDIATE/LAST MESSAGE DIGEST)
+ * instructions
+ */
+#define CPACF_KIMD_NIP 0x8000
+#define CPACF_KLMD_DUFOP 0x4000
+#define CPACF_KLMD_NIP 0x8000
+
+/*
+ * Function codes for KDSA (COMPUTE DIGITAL SIGNATURE AUTHENTICATION)
+ * instruction
+ */
+#define CPACF_KDSA_QUERY 0x00
+#define CPACF_KDSA_ECDSA_VERIFY_P256 0x01
+#define CPACF_KDSA_ECDSA_VERIFY_P384 0x02
+#define CPACF_KDSA_ECDSA_VERIFY_P521 0x03
+#define CPACF_KDSA_ECDSA_SIGN_P256 0x09
+#define CPACF_KDSA_ECDSA_SIGN_P384 0x0a
+#define CPACF_KDSA_ECDSA_SIGN_P521 0x0b
+#define CPACF_KDSA_ENC_ECDSA_SIGN_P256 0x11
+#define CPACF_KDSA_ENC_ECDSA_SIGN_P384 0x12
+#define CPACF_KDSA_ENC_ECDSA_SIGN_P521 0x13
+#define CPACF_KDSA_EDDSA_VERIFY_ED25519 0x20
+#define CPACF_KDSA_EDDSA_VERIFY_ED448 0x24
+#define CPACF_KDSA_EDDSA_SIGN_ED25519 0x28
+#define CPACF_KDSA_EDDSA_SIGN_ED448 0x2c
+#define CPACF_KDSA_ENC_EDDSA_SIGN_ED25519 0x30
+#define CPACF_KDSA_ENC_EDDSA_SIGN_ED448 0x34
+
+#define CPACF_FC_QUERY 0x00
+#define CPACF_FC_QUERY_AUTH_INFO 0x7F
+
typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
+typedef struct { unsigned char bytes[256]; } cpacf_qai_t;
/*
* Prototype for a not existing function to produce a link
@@ -175,80 +218,85 @@ typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
void __cpacf_bad_opcode(void);
static __always_inline void __cpacf_query_rre(u32 opc, u8 r1, u8 r2,
- cpacf_mask_t *mask)
+ u8 *pb, u8 fc)
{
asm volatile(
- " la %%r1,%[mask]\n"
- " xgr %%r0,%%r0\n"
+ " la %%r1,%[pb]\n"
+ " lghi %%r0,%[fc]\n"
" .insn rre,%[opc] << 16,%[r1],%[r2]\n"
- : [mask] "=R" (*mask)
- : [opc] "i" (opc),
+ : [pb] "=R" (*pb)
+ : [opc] "i" (opc), [fc] "i" (fc),
[r1] "i" (r1), [r2] "i" (r2)
- : "cc", "r0", "r1");
+ : "cc", "memory", "r0", "r1");
}
-static __always_inline void __cpacf_query_rrf(u32 opc,
- u8 r1, u8 r2, u8 r3, u8 m4,
- cpacf_mask_t *mask)
+static __always_inline void __cpacf_query_rrf(u32 opc, u8 r1, u8 r2, u8 r3,
+ u8 m4, u8 *pb, u8 fc)
{
asm volatile(
- " la %%r1,%[mask]\n"
- " xgr %%r0,%%r0\n"
+ " la %%r1,%[pb]\n"
+ " lghi %%r0,%[fc]\n"
" .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]\n"
- : [mask] "=R" (*mask)
- : [opc] "i" (opc), [r1] "i" (r1), [r2] "i" (r2),
- [r3] "i" (r3), [m4] "i" (m4)
- : "cc", "r0", "r1");
+ : [pb] "=R" (*pb)
+ : [opc] "i" (opc), [fc] "i" (fc), [r1] "i" (r1),
+ [r2] "i" (r2), [r3] "i" (r3), [m4] "i" (m4)
+ : "cc", "memory", "r0", "r1");
}
-static __always_inline void __cpacf_query(unsigned int opcode,
- cpacf_mask_t *mask)
+static __always_inline void __cpacf_query_insn(unsigned int opcode, void *pb,
+ u8 fc)
{
switch (opcode) {
case CPACF_KDSA:
- __cpacf_query_rre(CPACF_KDSA, 0, 2, mask);
+ __cpacf_query_rre(CPACF_KDSA, 0, 2, pb, fc);
break;
case CPACF_KIMD:
- __cpacf_query_rre(CPACF_KIMD, 0, 2, mask);
+ __cpacf_query_rre(CPACF_KIMD, 0, 2, pb, fc);
break;
case CPACF_KLMD:
- __cpacf_query_rre(CPACF_KLMD, 0, 2, mask);
+ __cpacf_query_rre(CPACF_KLMD, 0, 2, pb, fc);
break;
case CPACF_KM:
- __cpacf_query_rre(CPACF_KM, 2, 4, mask);
+ __cpacf_query_rre(CPACF_KM, 2, 4, pb, fc);
break;
case CPACF_KMA:
- __cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, mask);
+ __cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, pb, fc);
break;
case CPACF_KMAC:
- __cpacf_query_rre(CPACF_KMAC, 0, 2, mask);
+ __cpacf_query_rre(CPACF_KMAC, 0, 2, pb, fc);
break;
case CPACF_KMC:
- __cpacf_query_rre(CPACF_KMC, 2, 4, mask);
+ __cpacf_query_rre(CPACF_KMC, 2, 4, pb, fc);
break;
case CPACF_KMCTR:
- __cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, mask);
+ __cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, pb, fc);
break;
case CPACF_KMF:
- __cpacf_query_rre(CPACF_KMF, 2, 4, mask);
+ __cpacf_query_rre(CPACF_KMF, 2, 4, pb, fc);
break;
case CPACF_KMO:
- __cpacf_query_rre(CPACF_KMO, 2, 4, mask);
+ __cpacf_query_rre(CPACF_KMO, 2, 4, pb, fc);
break;
case CPACF_PCC:
- __cpacf_query_rre(CPACF_PCC, 0, 0, mask);
+ __cpacf_query_rre(CPACF_PCC, 0, 0, pb, fc);
break;
case CPACF_PCKMO:
- __cpacf_query_rre(CPACF_PCKMO, 0, 0, mask);
+ __cpacf_query_rre(CPACF_PCKMO, 0, 0, pb, fc);
break;
case CPACF_PRNO:
- __cpacf_query_rre(CPACF_PRNO, 2, 4, mask);
+ __cpacf_query_rre(CPACF_PRNO, 2, 4, pb, fc);
break;
default:
__cpacf_bad_opcode();
}
}
+static __always_inline void __cpacf_query(unsigned int opcode,
+ cpacf_mask_t *mask)
+{
+ __cpacf_query_insn(opcode, mask, CPACF_FC_QUERY);
+}
+
static __always_inline int __cpacf_check_opcode(unsigned int opcode)
{
switch (opcode) {
@@ -269,6 +317,8 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
return test_facility(57); /* check for MSA5 */
case CPACF_KMA:
return test_facility(146); /* check for MSA8 */
+ case CPACF_KDSA:
+ return test_facility(155); /* check for MSA9 */
default:
__cpacf_bad_opcode();
return 0;
@@ -276,14 +326,15 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
}
/**
- * cpacf_query() - check if a specific CPACF function is available
+ * cpacf_query() - Query the function code mask for this CPACF opcode
* @opcode: the opcode of the crypto instruction
- * @func: the function code to test for
+ * @mask: ptr to struct cpacf_mask_t
*
* Executes the query function for the given crypto instruction @opcode
* and checks if @func is available
*
- * Returns 1 if @func is available for @opcode, 0 otherwise
+ * On success 1 is returned and the mask is filled with the function
+ * code mask for this CPACF opcode, otherwise 0 is returned.
*/
static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
{
@@ -300,7 +351,8 @@ static inline int cpacf_test_func(cpacf_mask_t *mask, unsigned int func)
return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0;
}
-static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int func)
+static __always_inline int cpacf_query_func(unsigned int opcode,
+ unsigned int func)
{
cpacf_mask_t mask;
@@ -309,6 +361,32 @@ static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int fu
return 0;
}
+static __always_inline void __cpacf_qai(unsigned int opcode, cpacf_qai_t *qai)
+{
+ __cpacf_query_insn(opcode, qai, CPACF_FC_QUERY_AUTH_INFO);
+}
+
+/**
+ * cpacf_qai() - Get the query authentication information for a CPACF opcode
+ * @opcode: the opcode of the crypto instruction
+ * @mask: ptr to struct cpacf_qai_t
+ *
+ * Executes the query authentication information function for the given crypto
+ * instruction @opcode and checks if @func is available
+ *
+ * On success 1 is returned and the mask is filled with the query authentication
+ * information for this CPACF opcode, otherwise 0 is returned.
+ */
+static __always_inline int cpacf_qai(unsigned int opcode, cpacf_qai_t *qai)
+{
+ if (cpacf_query_func(opcode, CPACF_FC_QUERY_AUTH_INFO)) {
+ __cpacf_qai(opcode, qai);
+ return 1;
+ }
+ memset(qai, 0, sizeof(*qai));
+ return 0;
+}
+
/**
* cpacf_km() - executes the KM (CIPHER MESSAGE) instruction
* @func: the function code passed to KM; see CPACF_KM_xxx defines
@@ -391,7 +469,7 @@ static inline void cpacf_kimd(unsigned long func, void *param,
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
- "0: .insn rre,%[opc] << 16,0,%[src]\n"
+ "0: .insn rrf,%[opc] << 16,0,%[src],8,0\n"
" brc 1,0b\n" /* handle partial completion */
: [src] "+&d" (s.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)(param)),
@@ -416,7 +494,7 @@ static inline void cpacf_klmd(unsigned long func, void *param,
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
- "0: .insn rre,%[opc] << 16,0,%[src]\n"
+ "0: .insn rrf,%[opc] << 16,0,%[src],8,0\n"
" brc 1,0b\n" /* handle partial completion */
: [src] "+&d" (s.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
@@ -425,29 +503,30 @@ static inline void cpacf_klmd(unsigned long func, void *param,
}
/**
- * cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
- * instruction
- * @func: the function code passed to KM; see CPACF_KMAC_xxx defines
+ * _cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
+ * instruction and updates flags in gr0
+ * @gr0: pointer to gr0 (fc and flags) passed to KMAC; see CPACF_KMAC_xxx defines
* @param: address of parameter block; see POP for details on each func
* @src: address of source memory area
* @src_len: length of src operand in bytes
*
* Returns 0 for the query func, number of processed bytes for digest funcs
*/
-static inline int cpacf_kmac(unsigned long func, void *param,
- const u8 *src, long src_len)
+static inline int _cpacf_kmac(unsigned long *gr0, void *param,
+ const u8 *src, long src_len)
{
union register_pair s;
s.even = (unsigned long)src;
s.odd = (unsigned long)src_len;
asm volatile(
- " lgr 0,%[fc]\n"
+ " lgr 0,%[r0]\n"
" lgr 1,%[pba]\n"
"0: .insn rre,%[opc] << 16,0,%[src]\n"
" brc 1,0b\n" /* handle partial completion */
- : [src] "+&d" (s.pair)
- : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+ " lgr %[r0],0\n"
+ : [r0] "+d" (*gr0), [src] "+&d" (s.pair)
+ : [pba] "d" ((unsigned long)param),
[opc] "i" (CPACF_KMAC)
: "cc", "memory", "0", "1");
@@ -455,6 +534,22 @@ static inline int cpacf_kmac(unsigned long func, void *param,
}
/**
+ * cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
+ * instruction
+ * @func: function code passed to KMAC; see CPACF_KMAC_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ *
+ * Returns 0 for the query func, number of processed bytes for digest funcs
+ */
+static inline int cpacf_kmac(unsigned long func, void *param,
+ const u8 *src, long src_len)
+{
+ return _cpacf_kmac(&func, param, src, src_len);
+}
+
+/**
* cpacf_kmctr() - executes the KMCTR (CIPHER MESSAGE WITH COUNTER) instruction
* @func: the function code passed to KMCTR; see CPACF_KMCTR_xxx defines
* @param: address of parameter block; see POP for details on each func
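
A small user-space model of how a CPACF function code indexes the 16-byte status word returned by the query function, mirroring the expression in cpacf_test_func() above; the struct and names are illustrative, not kernel API:

#include <stdio.h>

struct mask16 { unsigned char bytes[16]; };     /* stands in for cpacf_mask_t */

/* bit 0 is the MSB of byte 0, so code fc maps to byte fc >> 3, bit 0x80 >> (fc & 7) */
static int test_func(const struct mask16 *mask, unsigned int fc)
{
        return (mask->bytes[fc >> 3] & (0x80 >> (fc & 7))) != 0;
}

int main(void)
{
        struct mask16 m = { .bytes = { 0 } };
        unsigned int fc = 0x71;                 /* CPACF_KMAC_HMAC_SHA_256 */

        m.bytes[fc >> 3] |= 0x80 >> (fc & 7);   /* pretend the query reported it */
        printf("%d\n", test_func(&m, fc));      /* prints 1 */
        return 0;
}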
diff --git a/arch/s390/include/asm/ctlreg.h b/arch/s390/include/asm/ctlreg.h
index 72a9556d04f3..e6527f51ad0b 100644
--- a/arch/s390/include/asm/ctlreg.h
+++ b/arch/s390/include/asm/ctlreg.h
@@ -202,8 +202,9 @@ union ctlreg0 {
unsigned long : 3;
unsigned long ccc : 1; /* Cryptography counter control */
unsigned long pec : 1; /* PAI extension control */
- unsigned long : 17;
- unsigned long : 3;
+ unsigned long : 15;
+ unsigned long wti : 1; /* Warning-track */
+ unsigned long : 4;
unsigned long lap : 1; /* Low-address-protection control */
unsigned long : 4;
unsigned long edat : 1; /* Enhanced-DAT-enablement control */
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index c0d43512f4fc..e1316e181230 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -38,6 +38,7 @@ enum diag_stat_enum {
DIAG_STAT_X308,
DIAG_STAT_X318,
DIAG_STAT_X320,
+ DIAG_STAT_X49C,
DIAG_STAT_X500,
NR_DIAG_STAT
};
@@ -363,4 +364,12 @@ void _diag0c_amode31(unsigned long rx);
void _diag308_reset_amode31(void);
int _diag8c_amode31(struct diag8c *addr, struct ccw_dev_id *devno, size_t len);
+/* diag 49c subcodes */
+enum diag49c_sc {
+ DIAG49C_SUBC_ACK = 0,
+ DIAG49C_SUBC_REG = 1
+};
+
+int diag49c(unsigned long subcode);
+
#endif /* _ASM_S390_DIAG_H */
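
A hedged usage sketch for the new diag 0x49c wrapper (kernel context assumed; the pr_warn() and the assumption that a non-zero return means failure are mine, not taken from this patch):

        /* register for warning-track notifications */
        if (diag49c(DIAG49C_SUBC_REG))
                pr_warn("diag 0x49c registration failed\n");

        /* later, acknowledge a warning-track interrupt */
        diag49c(DIAG49C_SUBC_ACK);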
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index b7d234838a36..715bcf8fb69a 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -14,7 +14,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/preempt.h>
-
+#include <asm/alternative.h>
#include <asm/lowcore.h>
#define MAX_FACILITY_BIT (sizeof(stfle_fac_list) * 8)
@@ -39,28 +39,51 @@ static inline void __clear_facility(unsigned long nr, void *facilities)
ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
}
-static inline int __test_facility(unsigned long nr, void *facilities)
+static __always_inline bool __test_facility(unsigned long nr, void *facilities)
{
unsigned char *ptr;
if (nr >= MAX_FACILITY_BIT)
- return 0;
+ return false;
ptr = (unsigned char *) facilities + (nr >> 3);
return (*ptr & (0x80 >> (nr & 7))) != 0;
}
/*
+ * __test_facility_constant() generates a single instruction branch. If the
+ * tested facility is available (likely) the branch is patched into a nop.
+ *
+ * Do not use this function unless you know what you are doing. All users are
+ * supposed to use test_facility() which will do the right thing.
+ */
+static __always_inline bool __test_facility_constant(unsigned long nr)
+{
+ asm goto(
+ ALTERNATIVE("brcl 15,%l[l_no]", "brcl 0,0", ALT_FACILITY(%[nr]))
+ :
+ : [nr] "i" (nr)
+ :
+ : l_no);
+ return true;
+l_no:
+ return false;
+}
+
+/*
* The test_facility function uses the bit ordering where the MSB is bit 0.
* That makes it easier to query facility bits with the bit number as
* documented in the Principles of Operation.
*/
-static inline int test_facility(unsigned long nr)
+static __always_inline bool test_facility(unsigned long nr)
{
unsigned long facilities_als[] = { FACILITIES_ALS };
- if (__builtin_constant_p(nr) && nr < sizeof(facilities_als) * 8) {
- if (__test_facility(nr, &facilities_als))
- return 1;
+ if (!__is_defined(__DECOMPRESSOR) && __builtin_constant_p(nr)) {
+ if (nr < sizeof(facilities_als) * 8) {
+ if (__test_facility(nr, &facilities_als))
+ return true;
+ }
+ return __test_facility_constant(nr);
}
return __test_facility(nr, &stfle_fac_list);
}
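
With this change a test_facility() call on a constant facility number needs no lowcore access at run time: it is either resolved against the compile-time ALS list or patched into a single branch/nop via the alternative. A hedged caller-side sketch (kernel context assumed; the helper functions are illustrative):

        if (test_facility(129))         /* vector facility, as an example */
                do_vector_path();       /* illustrative helper, not kernel API */
        else
                do_scalar_path();       /* illustrative helper, not kernel API */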
diff --git a/arch/s390/include/asm/fpu-insn-asm.h b/arch/s390/include/asm/fpu-insn-asm.h
index 02ccfe46050a..d296322be4bc 100644
--- a/arch/s390/include/asm/fpu-insn-asm.h
+++ b/arch/s390/include/asm/fpu-insn-asm.h
@@ -407,6 +407,28 @@
MRXBOPC 0, 0x0E, v1
.endm
+/* VECTOR STORE BYTE REVERSED ELEMENTS */
+ .macro VSTBR vr1, disp, index="%r0", base, m
+ VX_NUM v1, \vr1
+ GR_NUM x2, \index
+ GR_NUM b2, \base
+ .word 0xE600 | ((v1&15) << 4) | (x2&15)
+ .word (b2 << 12) | (\disp)
+ MRXBOPC \m, 0x0E, v1
+.endm
+.macro VSTBRH vr1, disp, index="%r0", base
+ VSTBR \vr1, \disp, \index, \base, 1
+.endm
+.macro VSTBRF vr1, disp, index="%r0", base
+ VSTBR \vr1, \disp, \index, \base, 2
+.endm
+.macro VSTBRG vr1, disp, index="%r0", base
+ VSTBR \vr1, \disp, \index, \base, 3
+.endm
+.macro VSTBRQ vr1, disp, index="%r0", base
+ VSTBR \vr1, \disp, \index, \base, 4
+.endm
+
/* VECTOR STORE MULTIPLE */
.macro VSTM vfrom, vto, disp, base, hint=3
VX_NUM v1, \vfrom
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index fbadca645af7..406746666eb7 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -6,8 +6,23 @@
#define MCOUNT_INSN_SIZE 6
#ifndef __ASSEMBLY__
+#include <asm/stacktrace.h>
-unsigned long return_address(unsigned int n);
+static __always_inline unsigned long return_address(unsigned int n)
+{
+ struct stack_frame *sf;
+
+ if (!n)
+ return (unsigned long)__builtin_return_address(0);
+
+ sf = (struct stack_frame *)current_frame_address();
+ do {
+ sf = (struct stack_frame *)sf->back_chain;
+ if (!sf)
+ return 0;
+ } while (--n);
+ return sf->gprs[8];
+}
#define ftrace_return_address(n) return_address(n)
void ftrace_caller(void);
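
The new inline return_address() just follows the back chain of the stack frames and picks up the saved return address. A user-space model with a plain linked structure; the struct and field names are illustrative stand-ins for struct stack_frame:

#include <stdio.h>
#include <stddef.h>

struct frame {
        struct frame *back_chain;
        unsigned long ret_addr;                 /* stands in for sf->gprs[8] */
};

static unsigned long return_address_model(struct frame *sf, unsigned int n)
{
        if (!n)
                return 0;       /* the real code returns __builtin_return_address(0) here */
        do {
                sf = sf->back_chain;
                if (!sf)
                        return 0;
        } while (--n);
        return sf->ret_addr;
}

int main(void)
{
        struct frame f2 = { NULL, 0x2000 };
        struct frame f1 = { &f2, 0x1000 };
        struct frame f0 = { &f1, 0 };

        printf("%#lx\n", return_address_model(&f0, 2)); /* prints 0x2000 */
        return 0;
}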
diff --git a/arch/s390/include/asm/hiperdispatch.h b/arch/s390/include/asm/hiperdispatch.h
new file mode 100644
index 000000000000..27e23aa27a24
--- /dev/null
+++ b/arch/s390/include/asm/hiperdispatch.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2024
+ */
+
+#ifndef _ASM_HIPERDISPATCH_H
+#define _ASM_HIPERDISPATCH_H
+
+void hd_reset_state(void);
+void hd_add_core(int cpu);
+void hd_disable_hiperdispatch(void);
+int hd_enable_hiperdispatch(void);
+
+#endif /* _ASM_HIPERDISPATCH_H */
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 54b42817f70a..d9e705f4a697 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -47,6 +47,7 @@ enum interruption_class {
IRQEXT_CMS,
IRQEXT_CMC,
IRQEXT_FTP,
+ IRQEXT_WTI,
IRQIO_CIO,
IRQIO_DAS,
IRQIO_C15,
@@ -99,6 +100,7 @@ int unregister_external_irq(u16 code, ext_int_handler_t handler);
enum irq_subclass {
IRQ_SUBCLASS_MEASUREMENT_ALERT = 5,
IRQ_SUBCLASS_SERVICE_SIGNAL = 9,
+ IRQ_SUBCLASS_WARNING_TRACK = 33,
};
#define CR0_IRQ_SUBCLASS_MASK \
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 183ac29afaf8..48c64716d1f2 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -98,8 +98,8 @@ struct lowcore {
psw_t io_new_psw; /* 0x01f0 */
/* Save areas. */
- __u64 save_area_sync[8]; /* 0x0200 */
- __u64 save_area_async[8]; /* 0x0240 */
+ __u64 save_area[8]; /* 0x0200 */
+ __u8 pad_0x0240[0x0280-0x0240]; /* 0x0240 */
__u64 save_area_restart[1]; /* 0x0280 */
__u64 pcpu; /* 0x0288 */
diff --git a/arch/s390/include/asm/march.h b/arch/s390/include/asm/march.h
new file mode 100644
index 000000000000..fd9eef3be44c
--- /dev/null
+++ b/arch/s390/include/asm/march.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_S390_MARCH_H
+#define __ASM_S390_MARCH_H
+
+#include <linux/kconfig.h>
+
+#define MARCH_HAS_Z10_FEATURES 1
+
+#ifndef __DECOMPRESSOR
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#define MARCH_HAS_Z196_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+#define MARCH_HAS_ZEC12_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
+#define MARCH_HAS_Z13_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_Z14_FEATURES
+#define MARCH_HAS_Z14_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_Z15_FEATURES
+#define MARCH_HAS_Z15_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_Z16_FEATURES
+#define MARCH_HAS_Z16_FEATURES 1
+#endif
+
+#endif /* __DECOMPRESSOR */
+
+#endif /* __ASM_S390_MARCH_H */
diff --git a/arch/s390/include/asm/mmzone.h b/arch/s390/include/asm/mmzone.h
deleted file mode 100644
index 73e3e7c6976c..000000000000
--- a/arch/s390/include/asm/mmzone.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * NUMA support for s390
- *
- * Copyright IBM Corp. 2015
- */
-
-#ifndef _ASM_S390_MMZONE_H
-#define _ASM_S390_MMZONE_H
-
-#ifdef CONFIG_NUMA
-
-extern struct pglist_data *node_data[];
-#define NODE_DATA(nid) (node_data[nid])
-
-#endif /* CONFIG_NUMA */
-#endif /* _ASM_S390_MMZONE_H */
diff --git a/arch/s390/include/asm/module.h b/arch/s390/include/asm/module.h
index 9f1eea15872c..916ab59e458a 100644
--- a/arch/s390/include/asm/module.h
+++ b/arch/s390/include/asm/module.h
@@ -38,4 +38,18 @@ struct mod_arch_specific {
#endif /* CONFIG_FUNCTION_TRACER */
};
+static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
+{
+ const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ const Elf_Shdr *s, *se;
+
+ for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+ if (strcmp(name, secstrs + s->sh_name) == 0)
+ return s;
+ }
+ return NULL;
+}
+
#endif /* _ASM_S390_MODULE_H */
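
A hedged usage sketch for the new find_section() helper (module-loader context assumed; the section name and the pr_info() are only examples):

        const Elf_Shdr *s = find_section(hdr, sechdrs, ".altinstructions");

        if (s)
                pr_info("module carries %lu bytes of alternatives\n",
                        (unsigned long)s->sh_size);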
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 16e4caa931f1..73e1e03317b4 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -176,8 +176,6 @@ static inline int devmem_is_allowed(unsigned long pfn)
int arch_make_folio_accessible(struct folio *folio);
#define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
-int arch_make_page_accessible(struct page *page);
-#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
struct vm_layout {
unsigned long kaslr_offset;
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 30820a649e6e..9d920ced6047 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -191,7 +191,14 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
return (zdev->fh & (1UL << 31)) ? true : false;
}
-extern const struct attribute_group *zpci_attr_groups[];
+extern const struct attribute_group zpci_attr_group;
+extern const struct attribute_group pfip_attr_group;
+extern const struct attribute_group zpci_ident_attr_group;
+
+#define ARCH_PCI_DEV_GROUPS &zpci_attr_group, \
+ &pfip_attr_group, \
+ &zpci_ident_attr_group,
+
extern unsigned int s390_pci_force_floating __initdata;
extern unsigned int s390_pci_no_rid;
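
The trailing comma in ARCH_PCI_DEV_GROUPS is intentional: it lets common code splice the three arch groups straight into an attribute_group array. A hedged sketch of such a consumer (illustrative, not the actual drivers/pci code):

static const struct attribute_group *example_dev_groups[] = {
        ARCH_PCI_DEV_GROUPS             /* expands to the three s390 groups, each with a comma */
        NULL,
};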
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 89a28740b6ab..84f6b8357b45 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -4,6 +4,7 @@
#include <linux/preempt.h>
#include <asm/cmpxchg.h>
+#include <asm/march.h>
/*
* s390 uses its own implementation for per cpu data, the offset of
@@ -50,7 +51,7 @@
#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
-#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifndef MARCH_HAS_Z196_FEATURES
#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
@@ -61,7 +62,7 @@
#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#else /* MARCH_HAS_Z196_FEATURES */
#define arch_this_cpu_add(pcp, val, op1, op2, szcast) \
{ \
@@ -129,7 +130,7 @@
#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lao")
#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, "laog")
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#endif /* MARCH_HAS_Z196_FEATURES */
#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
({ \
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 9917e2717b2b..66200d4a2134 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -48,30 +48,6 @@ struct perf_sf_sde_regs {
unsigned long reserved:63; /* reserved */
};
-/* Perf PMU definitions for the counter facility */
-#define PERF_CPUM_CF_MAX_CTR 0xffffUL /* Max ctr for ECCTR */
-
-/* Perf PMU definitions for the sampling facility */
-#define PERF_CPUM_SF_MAX_CTR 2
-#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */
-#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */
-#define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */
-#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */
-#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
-#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \
- PERF_CPUM_SF_DIAG_MODE)
-#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */
-
-#define REG_NONE 0
-#define REG_OVERFLOW 1
-#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config)
-#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc)
-#define TEAR_REG(hwc) ((hwc)->last_tag)
-#define SAMPL_RATE(hwc) ((hwc)->event_base)
-#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
-#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
-#define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
-
#define perf_arch_fetch_caller_regs(regs, __ip) do { \
(regs)->psw.addr = (__ip); \
(regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 3fa280d0672a..0ffbaf741955 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -955,6 +955,7 @@ static inline int pte_unused(pte_t pte)
* young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
* must not be set.
*/
+#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
diff --git a/arch/s390/include/asm/pkey.h b/arch/s390/include/asm/pkey.h
index 47d80a7451a6..5dca1a46a9f6 100644
--- a/arch/s390/include/asm/pkey.h
+++ b/arch/s390/include/asm/pkey.h
@@ -22,7 +22,7 @@
* @param protkey pointer to buffer receiving the protected key
* @return 0 on success, negative errno value on failure
*/
-int pkey_keyblob2pkey(const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+int pkey_key2protkey(const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
#endif /* _KAPI_PKEY_H */
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index 3ae5f31c665d..deca3f221836 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -5,8 +5,9 @@
#include <asm/current.h>
#include <linux/thread_info.h>
#include <asm/atomic_ops.h>
+#include <asm/march.h>
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifdef MARCH_HAS_Z196_FEATURES
/* We use the MSB mostly because its available */
#define PREEMPT_NEED_RESCHED 0x80000000
@@ -75,7 +76,7 @@ static __always_inline bool should_resched(int preempt_offset)
preempt_offset);
}
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#else /* MARCH_HAS_Z196_FEATURES */
#define PREEMPT_ENABLED (0)
@@ -123,7 +124,7 @@ static __always_inline bool should_resched(int preempt_offset)
tif_need_resched());
}
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#endif /* MARCH_HAS_Z196_FEATURES */
#define init_task_preempt_count(p) do { } while (0)
/* Deferred to CPU bringup time */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 5ecd442535b9..9a5236acc0a8 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -44,6 +44,7 @@ struct pcpu {
unsigned long ec_mask; /* bit mask for ec_xxx functions */
unsigned long ec_clk; /* sigp timestamp for ec_xxx */
unsigned long flags; /* per CPU flags */
+ unsigned long capacity; /* cpu capacity for scheduler */
signed char state; /* physical cpu state */
signed char polarization; /* physical polarization */
u16 address; /* physical cpu address */
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index da3dad18fe50..eb00fa1771da 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -72,6 +72,7 @@ struct sclp_info {
unsigned char has_core_type : 1;
unsigned char has_sprp : 1;
unsigned char has_hvs : 1;
+ unsigned char has_wti : 1;
unsigned char has_esca : 1;
unsigned char has_sief2 : 1;
unsigned char has_64bscao : 1;
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 8505737712ee..70b920b32827 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -34,6 +34,7 @@
#define MACHINE_FLAG_SCC BIT(17)
#define MACHINE_FLAG_PCI_MIO BIT(18)
#define MACHINE_FLAG_RDP BIT(19)
+#define MACHINE_FLAG_SEQ_INSN BIT(20)
#define LPP_MAGIC BIT(31)
#define LPP_PID_MASK _AC(0xffffffff, UL)
@@ -95,6 +96,7 @@ extern unsigned long mio_wb_bit_mask;
#define MACHINE_HAS_SCC (get_lowcore()->machine_flags & MACHINE_FLAG_SCC)
#define MACHINE_HAS_PCI_MIO (get_lowcore()->machine_flags & MACHINE_FLAG_PCI_MIO)
#define MACHINE_HAS_RDP (get_lowcore()->machine_flags & MACHINE_FLAG_RDP)
+#define MACHINE_HAS_SEQ_INSN (get_lowcore()->machine_flags & MACHINE_FLAG_SEQ_INSN)
/*
* Console mode. Override with conmode=
@@ -115,6 +117,8 @@ extern unsigned int console_irq;
#define SET_CONSOLE_VT220 do { console_mode = 4; } while (0)
#define SET_CONSOLE_HVC do { console_mode = 5; } while (0)
+void register_early_console(void);
+
#ifdef CONFIG_VMCP
void vmcp_cma_reserve(void);
#else
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index cd835f4fb11a..7feca96c48c6 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -12,6 +12,7 @@
#include <asm/processor.h>
#define raw_smp_processor_id() (get_lowcore()->cpu_nr)
+#define arch_scale_cpu_capacity smp_cpu_get_capacity
extern struct mutex smp_cpu_state_mutex;
extern unsigned int smp_cpu_mt_shift;
@@ -34,6 +35,9 @@ extern void smp_save_dump_secondary_cpus(void);
extern void smp_yield_cpu(int cpu);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
+extern void smp_cpu_set_capacity(int cpu, unsigned long val);
+extern void smp_set_core_capacity(int cpu, unsigned long val);
+extern unsigned long smp_cpu_get_capacity(int cpu);
extern int smp_cpu_get_cpu_address(int cpu);
extern void smp_fill_possible_mask(void);
extern void smp_detect_cpus(void);
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 3a0ac0c7a9a3..cef06bffad80 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -67,6 +67,9 @@ static inline void topology_expect_change(void) { }
#define POLARIZATION_VM (2)
#define POLARIZATION_VH (3)
+#define CPU_CAPACITY_HIGH SCHED_CAPACITY_SCALE
+#define CPU_CAPACITY_LOW (SCHED_CAPACITY_SCALE >> 3)
+
#define SD_BOOK_INIT SD_CPU_INIT
#ifdef CONFIG_NUMA
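
With the usual SCHED_CAPACITY_SCALE of 1024 this yields 1024 for high-capacity cores and 128 for the rest. A hedged sketch of how hiperdispatch might apply these values through the new smp capacity setter (variable names are illustrative):

        smp_set_core_capacity(cpu, cpu_is_highcap ? CPU_CAPACITY_HIGH   /* 1024 */
                                                   : CPU_CAPACITY_LOW); /* 1024 >> 3 == 128 */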
diff --git a/arch/s390/include/asm/trace/hiperdispatch.h b/arch/s390/include/asm/trace/hiperdispatch.h
new file mode 100644
index 000000000000..46462ee645b0
--- /dev/null
+++ b/arch/s390/include/asm/trace/hiperdispatch.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Tracepoint header for hiperdispatch
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM s390
+
+#if !defined(_TRACE_S390_HIPERDISPATCH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_S390_HIPERDISPATCH_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH asm/trace
+#define TRACE_INCLUDE_FILE hiperdispatch
+
+TRACE_EVENT(s390_hd_work_fn,
+ TP_PROTO(int steal_time_percentage,
+ int entitled_core_count,
+ int highcap_core_count),
+ TP_ARGS(steal_time_percentage,
+ entitled_core_count,
+ highcap_core_count),
+ TP_STRUCT__entry(__field(int, steal_time_percentage)
+ __field(int, entitled_core_count)
+ __field(int, highcap_core_count)),
+ TP_fast_assign(__entry->steal_time_percentage = steal_time_percentage;
+ __entry->entitled_core_count = entitled_core_count;
+ __entry->highcap_core_count = highcap_core_count;),
+ TP_printk("steal: %d entitled_core_count: %d highcap_core_count: %d",
+ __entry->steal_time_percentage,
+ __entry->entitled_core_count,
+ __entry->highcap_core_count)
+);
+
+TRACE_EVENT(s390_hd_rebuild_domains,
+ TP_PROTO(int current_highcap_core_count,
+ int new_highcap_core_count),
+ TP_ARGS(current_highcap_core_count,
+ new_highcap_core_count),
+ TP_STRUCT__entry(__field(int, current_highcap_core_count)
+ __field(int, new_highcap_core_count)),
+ TP_fast_assign(__entry->current_highcap_core_count = current_highcap_core_count;
+ __entry->new_highcap_core_count = new_highcap_core_count),
+ TP_printk("change highcap_core_count: %u -> %u",
+ __entry->current_highcap_core_count,
+ __entry->new_highcap_core_count)
+);
+
+#endif /* _TRACE_S390_HIPERDISPATCH_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
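
A hedged usage sketch for these tracepoints (kernel context assumed; the variable names are illustrative). One compilation unit instantiates them, call sites then use the generated trace_* helpers:

/* in exactly one .c file: */
#define CREATE_TRACE_POINTS
#include <asm/trace/hiperdispatch.h>

/* at the call sites: */
        trace_s390_hd_work_fn(steal_percentage, entitled_cores, highcap_cores);
        trace_s390_hd_rebuild_domains(cur_highcap_cores, new_highcap_cores);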
diff --git a/arch/s390/include/asm/vdso-symbols.h b/arch/s390/include/asm/vdso-symbols.h
new file mode 100644
index 000000000000..0df17574d788
--- /dev/null
+++ b/arch/s390/include/asm/vdso-symbols.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __S390_VDSO_SYMBOLS_H__
+#define __S390_VDSO_SYMBOLS_H__
+
+#include <generated/vdso64-offsets.h>
+#ifdef CONFIG_COMPAT
+#include <generated/vdso32-offsets.h>
+#endif
+
+#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name))
+#ifdef CONFIG_COMPAT
+#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name))
+#else
+#define VDSO32_SYMBOL(tsk, name) (-1UL)
+#endif
+
+#endif /* __S390_VDSO_SYMBOLS_H__ */
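
A hedged usage sketch (kernel context assumed): resolving a vDSO entry point for the current task, assuming the 64-bit vDSO exports an rt_sigreturn trampoline as the s390 signal code expects:

        unsigned long restorer = VDSO64_SYMBOL(current, rt_sigreturn);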
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 53165aa7813a..91061f0279be 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -6,18 +6,6 @@
#ifndef __ASSEMBLY__
-#include <generated/vdso64-offsets.h>
-#ifdef CONFIG_COMPAT
-#include <generated/vdso32-offsets.h>
-#endif
-
-#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name))
-#ifdef CONFIG_COMPAT
-#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name))
-#else
-#define VDSO32_SYMBOL(tsk, name) (-1UL)
-#endif
-
extern struct vdso_data *vdso_data;
int vdso_getcpu_init(void);
diff --git a/arch/s390/include/asm/vdso/getrandom.h b/arch/s390/include/asm/vdso/getrandom.h
new file mode 100644
index 000000000000..36355af7160b
--- /dev/null
+++ b/arch/s390/include/asm/vdso/getrandom.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_VDSO_GETRANDOM_H
+#define __ASM_VDSO_GETRANDOM_H
+
+#ifndef __ASSEMBLY__
+
+#include <vdso/datapage.h>
+#include <asm/vdso/vsyscall.h>
+#include <asm/syscall.h>
+#include <asm/unistd.h>
+#include <asm/page.h>
+
+/**
+ * getrandom_syscall - Invoke the getrandom() syscall.
+ * @buffer: Destination buffer to fill with random bytes.
+ * @len: Size of @buffer in bytes.
+ * @flags: Zero or more GRND_* flags.
+ * Returns: The number of random bytes written to @buffer, or a negative value indicating an error.
+ */
+static __always_inline ssize_t getrandom_syscall(void *buffer, size_t len, unsigned int flags)
+{
+ return syscall3(__NR_getrandom, (long)buffer, (long)len, (long)flags);
+}
+
+static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void)
+{
+ /*
+ * The RNG data is in the real VVAR data page, but if a task belongs to a time namespace
+ * then VVAR_DATA_PAGE_OFFSET points to the namespace-specific VVAR page and VVAR_TIMENS_
+ * PAGE_OFFSET points to the real VVAR page.
+ */
+ if (IS_ENABLED(CONFIG_TIME_NS) && _vdso_data->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ return (void *)&_vdso_rng_data + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
+ return &_vdso_rng_data;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETRANDOM_H */
diff --git a/arch/s390/include/asm/vdso/vsyscall.h b/arch/s390/include/asm/vdso/vsyscall.h
index 6c67c08cefdd..3c5d5e47814e 100644
--- a/arch/s390/include/asm/vdso/vsyscall.h
+++ b/arch/s390/include/asm/vdso/vsyscall.h
@@ -2,12 +2,21 @@
#ifndef __ASM_VDSO_VSYSCALL_H
#define __ASM_VDSO_VSYSCALL_H
+#define __VDSO_RND_DATA_OFFSET 768
+
#ifndef __ASSEMBLY__
#include <linux/hrtimer.h>
#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>
+
+enum vvar_pages {
+ VVAR_DATA_PAGE_OFFSET,
+ VVAR_TIMENS_PAGE_OFFSET,
+ VVAR_NR_PAGES
+};
+
/*
* Update the vDSO data page to keep in sync with kernel timekeeping.
*/
@@ -18,6 +27,12 @@ static __always_inline struct vdso_data *__s390_get_k_vdso_data(void)
}
#define __arch_get_k_vdso_data __s390_get_k_vdso_data
+static __always_inline struct vdso_rng_data *__s390_get_k_vdso_rnd_data(void)
+{
+ return (void *)vdso_data + __VDSO_RND_DATA_OFFSET;
+}
+#define __arch_get_k_vdso_rng_data __s390_get_k_vdso_rnd_data
+
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>