Diffstat (limited to 'arch/s390/include')
-rw-r--r-- arch/s390/include/asm/Kbuild | 1
-rw-r--r-- arch/s390/include/asm/abs_lowcore.h | 8
-rw-r--r-- arch/s390/include/asm/access-regs.h | 38
-rw-r--r-- arch/s390/include/asm/alternative-asm.h | 56
-rw-r--r-- arch/s390/include/asm/alternative.h | 151
-rw-r--r-- arch/s390/include/asm/ap.h | 30
-rw-r--r-- arch/s390/include/asm/appldata.h | 4
-rw-r--r-- arch/s390/include/asm/arch_hweight.h | 77
-rw-r--r-- arch/s390/include/asm/asm-extable.h | 14
-rw-r--r-- arch/s390/include/asm/asm-prototypes.h | 3
-rw-r--r-- arch/s390/include/asm/asm.h | 51
-rw-r--r-- arch/s390/include/asm/atomic.h | 140
-rw-r--r-- arch/s390/include/asm/atomic_ops.h | 168
-rw-r--r-- arch/s390/include/asm/barrier.h | 4
-rw-r--r-- arch/s390/include/asm/bitops.h | 216
-rw-r--r-- arch/s390/include/asm/boot_data.h | 51
-rw-r--r-- arch/s390/include/asm/bug.h | 4
-rw-r--r-- arch/s390/include/asm/ccwdev.h | 5
-rw-r--r-- arch/s390/include/asm/checksum.h | 29
-rw-r--r-- arch/s390/include/asm/chsc.h | 15
-rw-r--r-- arch/s390/include/asm/cio.h | 9
-rw-r--r-- arch/s390/include/asm/cmpxchg.h | 374
-rw-r--r-- arch/s390/include/asm/cpacf.h | 255
-rw-r--r-- arch/s390/include/asm/cpu_mf.h | 63
-rw-r--r-- arch/s390/include/asm/css_chars.h | 2
-rw-r--r-- arch/s390/include/asm/ctlreg.h | 5
-rw-r--r-- arch/s390/include/asm/current.h | 2
-rw-r--r-- arch/s390/include/asm/dat-bits.h | 170
-rw-r--r-- arch/s390/include/asm/debug.h | 15
-rw-r--r-- arch/s390/include/asm/diag.h | 34
-rw-r--r-- arch/s390/include/asm/dma-types.h | 103
-rw-r--r-- arch/s390/include/asm/dwarf.h | 1
-rw-r--r-- arch/s390/include/asm/eadm.h | 5
-rw-r--r-- arch/s390/include/asm/ebcdic.h | 16
-rw-r--r-- arch/s390/include/asm/elf.h | 8
-rw-r--r-- arch/s390/include/asm/entry-common.h | 7
-rw-r--r-- arch/s390/include/asm/extmem.h | 7
-rw-r--r-- arch/s390/include/asm/facility.h | 60
-rw-r--r-- arch/s390/include/asm/fcx.h | 13
-rw-r--r-- arch/s390/include/asm/fprobe.h | 10
-rw-r--r-- arch/s390/include/asm/fpu-insn-asm.h (renamed from arch/s390/include/asm/vx-insn-asm.h) | 93
-rw-r--r-- arch/s390/include/asm/fpu-insn.h | 479
-rw-r--r-- arch/s390/include/asm/fpu-types.h | 51
-rw-r--r-- arch/s390/include/asm/fpu.h | 295
-rw-r--r-- arch/s390/include/asm/fpu/api.h | 126
-rw-r--r-- arch/s390/include/asm/fpu/internal.h | 67
-rw-r--r-- arch/s390/include/asm/fpu/types.h | 38
-rw-r--r-- arch/s390/include/asm/ftrace.h | 85
-rw-r--r-- arch/s390/include/asm/futex.h | 107
-rw-r--r-- arch/s390/include/asm/gmap.h | 28
-rw-r--r-- arch/s390/include/asm/hardirq.h | 6
-rw-r--r-- arch/s390/include/asm/hiperdispatch.h | 14
-rw-r--r-- arch/s390/include/asm/hugetlb.h | 106
-rw-r--r-- arch/s390/include/asm/idals.h | 176
-rw-r--r-- arch/s390/include/asm/io.h | 17
-rw-r--r-- arch/s390/include/asm/irq.h | 2
-rw-r--r-- arch/s390/include/asm/irqflags.h | 17
-rw-r--r-- arch/s390/include/asm/kexec.h | 3
-rw-r--r-- arch/s390/include/asm/kmsan.h | 59
-rw-r--r-- arch/s390/include/asm/kvm_host.h | 42
-rw-r--r-- arch/s390/include/asm/lowcore.h | 44
-rw-r--r-- arch/s390/include/asm/march.h | 38
-rw-r--r-- arch/s390/include/asm/mmu.h | 5
-rw-r--r-- arch/s390/include/asm/mmu_context.h | 9
-rw-r--r-- arch/s390/include/asm/mmzone.h | 17
-rw-r--r-- arch/s390/include/asm/module.h | 14
-rw-r--r-- arch/s390/include/asm/nospec-branch.h | 29
-rw-r--r-- arch/s390/include/asm/nospec-insn.h | 13
-rw-r--r-- arch/s390/include/asm/os_info.h | 29
-rw-r--r-- arch/s390/include/asm/page-states.h | 3
-rw-r--r-- arch/s390/include/asm/page.h | 110
-rw-r--r-- arch/s390/include/asm/pai.h | 28
-rw-r--r-- arch/s390/include/asm/pci.h | 31
-rw-r--r-- arch/s390/include/asm/pci_clp.h | 13
-rw-r--r-- arch/s390/include/asm/pci_io.h | 6
-rw-r--r-- arch/s390/include/asm/percpu.h | 9
-rw-r--r-- arch/s390/include/asm/perf_event.h | 31
-rw-r--r-- arch/s390/include/asm/pgalloc.h | 44
-rw-r--r-- arch/s390/include/asm/pgtable.h | 357
-rw-r--r-- arch/s390/include/asm/physmem_info.h | 10
-rw-r--r-- arch/s390/include/asm/pkey.h | 4
-rw-r--r-- arch/s390/include/asm/preempt.h | 139
-rw-r--r-- arch/s390/include/asm/processor.h | 65
-rw-r--r-- arch/s390/include/asm/ptdump.h | 14
-rw-r--r-- arch/s390/include/asm/ptrace.h | 4
-rw-r--r-- arch/s390/include/asm/qdio.h | 17
-rw-r--r-- arch/s390/include/asm/runtime-const.h | 77
-rw-r--r-- arch/s390/include/asm/sclp.h | 38
-rw-r--r-- arch/s390/include/asm/scsw.h | 7
-rw-r--r-- arch/s390/include/asm/set_memory.h | 2
-rw-r--r-- arch/s390/include/asm/setup.h | 54
-rw-r--r-- arch/s390/include/asm/sigp.h | 11
-rw-r--r-- arch/s390/include/asm/smp.h | 9
-rw-r--r-- arch/s390/include/asm/softirq_stack.h | 2
-rw-r--r-- arch/s390/include/asm/sparsemem.h | 18
-rw-r--r-- arch/s390/include/asm/spinlock.h | 24
-rw-r--r-- arch/s390/include/asm/spinlock_types.h | 2
-rw-r--r-- arch/s390/include/asm/stacktrace.h | 14
-rw-r--r-- arch/s390/include/asm/stp.h | 1
-rw-r--r-- arch/s390/include/asm/string.h | 20
-rw-r--r-- arch/s390/include/asm/switch_to.h | 49
-rw-r--r-- arch/s390/include/asm/thread_info.h | 24
-rw-r--r-- arch/s390/include/asm/timex.h | 48
-rw-r--r-- arch/s390/include/asm/tlb.h | 42
-rw-r--r-- arch/s390/include/asm/tlbflush.h | 5
-rw-r--r-- arch/s390/include/asm/topology.h | 3
-rw-r--r-- arch/s390/include/asm/trace/hiperdispatch.h | 58
-rw-r--r-- arch/s390/include/asm/uaccess.h | 559
-rw-r--r-- arch/s390/include/asm/unistd.h | 1
-rw-r--r-- arch/s390/include/asm/uv.h | 221
-rw-r--r-- arch/s390/include/asm/vdso-symbols.h | 17
-rw-r--r-- arch/s390/include/asm/vdso.h | 15
-rw-r--r-- arch/s390/include/asm/vdso/data.h | 13
-rw-r--r-- arch/s390/include/asm/vdso/getrandom.h | 40
-rw-r--r-- arch/s390/include/asm/vdso/gettimeofday.h | 7
-rw-r--r-- arch/s390/include/asm/vdso/time_data.h | 12
-rw-r--r-- arch/s390/include/asm/vdso/vsyscall.h | 18
-rw-r--r-- arch/s390/include/asm/vtime.h | 18
-rw-r--r-- arch/s390/include/asm/vx-insn.h | 19
-rw-r--r-- arch/s390/include/asm/word-at-a-time.h | 3
-rw-r--r-- arch/s390/include/uapi/asm/dasd.h | 2
-rw-r--r-- arch/s390/include/uapi/asm/diag.h | 32
-rw-r--r-- arch/s390/include/uapi/asm/kvm.h | 318
-rw-r--r-- arch/s390/include/uapi/asm/pkey.h | 41
-rw-r--r-- arch/s390/include/uapi/asm/uvdevice.h | 32
125 files changed, 4911 insertions, 2083 deletions
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 4b904110d27c..297bf7157968 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -7,3 +7,4 @@ generated-y += unistd_nr.h
generic-y += asm-offsets.h
generic-y += kvm_types.h
generic-y += mcs_spinlock.h
+generic-y += mmzone.h
diff --git a/arch/s390/include/asm/abs_lowcore.h b/arch/s390/include/asm/abs_lowcore.h
index 6f264b79e377..004d17ea05cf 100644
--- a/arch/s390/include/asm/abs_lowcore.h
+++ b/arch/s390/include/asm/abs_lowcore.h
@@ -2,6 +2,7 @@
#ifndef _ASM_S390_ABS_LOWCORE_H
#define _ASM_S390_ABS_LOWCORE_H
+#include <linux/smp.h>
#include <asm/lowcore.h>
#define ABS_LOWCORE_MAP_SIZE (NR_CPUS * sizeof(struct lowcore))
@@ -24,4 +25,11 @@ static inline void put_abs_lowcore(struct lowcore *lc)
put_cpu();
}
+extern int relocate_lowcore;
+
+static inline int have_relocated_lowcore(void)
+{
+ return relocate_lowcore;
+}
+
#endif /* _ASM_S390_ABS_LOWCORE_H */
diff --git a/arch/s390/include/asm/access-regs.h b/arch/s390/include/asm/access-regs.h
new file mode 100644
index 000000000000..1a6412d9f5ad
--- /dev/null
+++ b/arch/s390/include/asm/access-regs.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 1999, 2024
+ */
+
+#ifndef __ASM_S390_ACCESS_REGS_H
+#define __ASM_S390_ACCESS_REGS_H
+
+#include <linux/instrumented.h>
+#include <asm/sigcontext.h>
+
+struct access_regs {
+ unsigned int regs[NUM_ACRS];
+};
+
+static inline void save_access_regs(unsigned int *acrs)
+{
+ struct access_regs *regs = (struct access_regs *)acrs;
+
+ instrument_write(regs, sizeof(*regs));
+ asm volatile("stamy 0,15,%[regs]"
+ : [regs] "=QS" (*regs)
+ :
+ : "memory");
+}
+
+static inline void restore_access_regs(unsigned int *acrs)
+{
+ struct access_regs *regs = (struct access_regs *)acrs;
+
+ instrument_read(regs, sizeof(*regs));
+ asm volatile("lamy 0,15,%[regs]"
+ :
+ : [regs] "QS" (*regs)
+ : "memory");
+}
+
+#endif /* __ASM_S390_ACCESS_REGS_H */
diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h
deleted file mode 100644
index 7db046596b93..000000000000
--- a/arch/s390/include/asm/alternative-asm.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_S390_ALTERNATIVE_ASM_H
-#define _ASM_S390_ALTERNATIVE_ASM_H
-
-#ifdef __ASSEMBLY__
-
-/*
- * Issue one struct alt_instr descriptor entry (need to put it into
- * the section .altinstructions, see below). This entry contains
- * enough information for the alternatives patching code to patch an
- * instruction. See apply_alternatives().
- */
-.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
- .long \orig_start - .
- .long \alt_start - .
- .word \feature
- .byte \orig_end - \orig_start
- .org . - ( \orig_end - \orig_start ) + ( \alt_end - \alt_start )
- .org . - ( \alt_end - \alt_start ) + ( \orig_end - \orig_start )
-.endm
-
-/*
- * Define an alternative between two instructions. If @feature is
- * present, early code in apply_alternatives() replaces @oldinstr with
- * @newinstr.
- */
-.macro ALTERNATIVE oldinstr, newinstr, feature
- .pushsection .altinstr_replacement,"ax"
-770: \newinstr
-771: .popsection
-772: \oldinstr
-773: .pushsection .altinstructions,"a"
- alt_entry 772b, 773b, 770b, 771b, \feature
- .popsection
-.endm
-
-/*
- * Define an alternative between two instructions. If @feature is
- * present, early code in apply_alternatives() replaces @oldinstr with
- * @newinstr.
- */
-.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
- .pushsection .altinstr_replacement,"ax"
-770: \newinstr1
-771: \newinstr2
-772: .popsection
-773: \oldinstr
-774: .pushsection .altinstructions,"a"
- alt_entry 773b, 774b, 770b, 771b,\feature1
- alt_entry 773b, 774b, 771b, 772b,\feature2
- .popsection
-.endm
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_S390_ALTERNATIVE_ASM_H */
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
index 904dd049f954..73e781b56bfe 100644
--- a/arch/s390/include/asm/alternative.h
+++ b/arch/s390/include/asm/alternative.h
@@ -2,6 +2,54 @@
#ifndef _ASM_S390_ALTERNATIVE_H
#define _ASM_S390_ALTERNATIVE_H
+/*
+ * Each alternative comes with a 32 bit feature field:
+ * union {
+ * u32 feature;
+ * struct {
+ * u32 ctx : 4;
+ * u32 type : 8;
+ * u32 data : 20;
+ * };
+ * }
+ *
+ * @ctx is a bitfield, where only one bit must be set. Each bit defines
+ * in which context an alternative is supposed to be applied to the
+ * kernel image:
+ *
+ * - from the decompressor before the kernel itself is executed
+ * - from early kernel code from within the kernel
+ *
+ * @type is a number which selects the type of the alternative and,
+ * with that, the type specific patching method.
+ *
+ * @data is additional type specific information which defines if an
+ * alternative should be applied.
+ */
+
+#define ALT_CTX_EARLY 1
+#define ALT_CTX_LATE 2
+#define ALT_CTX_ALL (ALT_CTX_EARLY | ALT_CTX_LATE)
+
+#define ALT_TYPE_FACILITY 0
+#define ALT_TYPE_SPEC 1
+#define ALT_TYPE_LOWCORE 2
+
+#define ALT_DATA_SHIFT 0
+#define ALT_TYPE_SHIFT 20
+#define ALT_CTX_SHIFT 28
+
+#define ALT_FACILITY(facility) (ALT_CTX_EARLY << ALT_CTX_SHIFT | \
+ ALT_TYPE_FACILITY << ALT_TYPE_SHIFT | \
+ (facility) << ALT_DATA_SHIFT)
+
+#define ALT_SPEC(facility) (ALT_CTX_LATE << ALT_CTX_SHIFT | \
+ ALT_TYPE_SPEC << ALT_TYPE_SHIFT | \
+ (facility) << ALT_DATA_SHIFT)
+
+#define ALT_LOWCORE (ALT_CTX_EARLY << ALT_CTX_SHIFT | \
+ ALT_TYPE_LOWCORE << ALT_TYPE_SHIFT)
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -11,12 +59,30 @@
struct alt_instr {
s32 instr_offset; /* original instruction */
s32 repl_offset; /* offset to replacement instruction */
- u16 facility; /* facility bit set for replacement */
+ union {
+ u32 feature; /* feature required for replacement */
+ struct {
+ u32 ctx : 4; /* context */
+ u32 type : 8; /* type of alternative */
+ u32 data : 20; /* patching information */
+ };
+ };
u8 instrlen; /* length of original instruction */
} __packed;
-void apply_alternative_instructions(void);
-void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+
+void __apply_alternatives(struct alt_instr *start, struct alt_instr *end, unsigned int ctx);
+
+static inline void apply_alternative_instructions(void)
+{
+ __apply_alternatives(__alt_instructions, __alt_instructions_end, ALT_CTX_LATE);
+}
+
+static inline void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
+{
+ __apply_alternatives(start, end, ALT_CTX_ALL);
+}
/*
* +---------------------------------+
@@ -48,11 +114,12 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
#define OLDINSTR(oldinstr) \
"661:\n\t" oldinstr "\n662:\n"
-#define ALTINSTR_ENTRY(facility, num) \
+#define ALTINSTR_ENTRY(feature, num) \
"\t.long 661b - .\n" /* old instruction */ \
"\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
- "\t.word " __stringify(facility) "\n" /* facility bit */ \
+ "\t.long " __stringify(feature) "\n" /* feature */ \
"\t.byte " oldinstr_len "\n" /* instruction len */ \
+ "\t.org . - (" oldinstr_len ") & 1\n" \
"\t.org . - (" oldinstr_len ") + (" altinstr_len(num) ")\n" \
"\t.org . - (" altinstr_len(num) ") + (" oldinstr_len ")\n"
@@ -60,24 +127,24 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n"
/* alternative assembly primitive: */
-#define ALTERNATIVE(oldinstr, altinstr, facility) \
+#define ALTERNATIVE(oldinstr, altinstr, feature) \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(altinstr, 1) \
".popsection\n" \
OLDINSTR(oldinstr) \
".pushsection .altinstructions,\"a\"\n" \
- ALTINSTR_ENTRY(facility, 1) \
+ ALTINSTR_ENTRY(feature, 1) \
".popsection\n"
-#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
+#define ALTERNATIVE_2(oldinstr, altinstr1, feature1, altinstr2, feature2)\
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(altinstr1, 1) \
ALTINSTR_REPLACEMENT(altinstr2, 2) \
".popsection\n" \
OLDINSTR(oldinstr) \
".pushsection .altinstructions,\"a\"\n" \
- ALTINSTR_ENTRY(facility1, 1) \
- ALTINSTR_ENTRY(facility2, 2) \
+ ALTINSTR_ENTRY(feature1, 1) \
+ ALTINSTR_ENTRY(feature2, 2) \
".popsection\n"
/*
@@ -92,12 +159,12 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
* For non barrier like inlines please define new variants
* without volatile and memory clobber.
*/
-#define alternative(oldinstr, altinstr, facility) \
- asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
+#define alternative(oldinstr, altinstr, feature) \
+ asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, feature) : : : "memory")
-#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
- asm_inline volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
- altinstr2, facility2) ::: "memory")
+#define alternative_2(oldinstr, altinstr1, feature1, altinstr2, feature2) \
+ asm_inline volatile(ALTERNATIVE_2(oldinstr, altinstr1, feature1, \
+ altinstr2, feature2) ::: "memory")
/* Alternative inline assembly with input. */
#define alternative_input(oldinstr, newinstr, feature, input...) \
@@ -105,8 +172,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
: : input)
/* Like alternative_input, but with a single output argument */
-#define alternative_io(oldinstr, altinstr, facility, output, input...) \
- asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, facility) \
+#define alternative_io(oldinstr, altinstr, feature, output, input...) \
+ asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, feature) \
: output : input)
/* Use this macro if more than one output parameter is needed. */
@@ -115,6 +182,56 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
/* Use this macro if clobbers are needed without inputs. */
#define ASM_NO_INPUT_CLOBBER(clobber...) : clobber
+#else /* __ASSEMBLY__ */
+
+/*
+ * Issue one struct alt_instr descriptor entry (need to put it into
+ * the section .altinstructions, see below). This entry contains
+ * enough information for the alternatives patching code to patch an
+ * instruction. See apply_alternatives().
+ */
+.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
+ .long \orig_start - .
+ .long \alt_start - .
+ .long \feature
+ .byte \orig_end - \orig_start
+ .org . - ( \orig_end - \orig_start ) & 1
+ .org . - ( \orig_end - \orig_start ) + ( \alt_end - \alt_start )
+ .org . - ( \alt_end - \alt_start ) + ( \orig_end - \orig_start )
+.endm
+
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr.
+ */
+.macro ALTERNATIVE oldinstr, newinstr, feature
+ .pushsection .altinstr_replacement,"ax"
+770: \newinstr
+771: .popsection
+772: \oldinstr
+773: .pushsection .altinstructions,"a"
+ alt_entry 772b, 773b, 770b, 771b, \feature
+ .popsection
+.endm
+
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr.
+ */
+.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
+ .pushsection .altinstr_replacement,"ax"
+770: \newinstr1
+771: \newinstr2
+772: .popsection
+773: \oldinstr
+774: .pushsection .altinstructions,"a"
+ alt_entry 773b, 774b, 770b, 771b,\feature1
+ alt_entry 773b, 774b, 771b, 772b,\feature2
+ .popsection
+.endm
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_ALTERNATIVE_H */
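Illustrative sketch, not part of the patch: with the reworked macros a caller passes a composed feature word such as ALT_FACILITY(nr) instead of a bare facility bit. The facility number and instructions below are chosen only for the example.

static inline void example_fast_serialize(void)
{
	/* Sketch: use the fast-BCR serialization only if the example facility bit 45 is installed. */
	alternative("bcr 15,0", "bcr 14,0", ALT_FACILITY(45));
}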
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index 43ac4a64f49b..395b02d6a133 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -223,13 +223,18 @@ static inline struct ap_queue_status ap_zapq(ap_qid_t qid, int fbit)
* config info as returned by the ap_qci() function.
*/
struct ap_config_info {
- unsigned int apsc : 1; /* S bit */
- unsigned int apxa : 1; /* N bit */
- unsigned int qact : 1; /* C bit */
- unsigned int rc8a : 1; /* R bit */
- unsigned int : 4;
- unsigned int apsb : 1; /* B bit */
- unsigned int : 23;
+ union {
+ unsigned int flags;
+ struct {
+ unsigned int apsc : 1; /* S bit */
+ unsigned int apxa : 1; /* N bit */
+ unsigned int qact : 1; /* C bit */
+ unsigned int rc8a : 1; /* R bit */
+ unsigned int : 4;
+ unsigned int apsb : 1; /* B bit */
+ unsigned int : 23;
+ };
+ };
unsigned char na; /* max # of APs - 1 */
unsigned char nd; /* max # of Domains - 1 */
unsigned char _reserved0[10];
@@ -544,15 +549,4 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
return reg1.status;
}
-/*
- * Interface to tell the AP bus code that a configuration
- * change has happened. The bus code should at least do
- * an ap bus resource rescan.
- */
-#if IS_ENABLED(CONFIG_ZCRYPT)
-void ap_bus_cfg_chg(void);
-#else
-static inline void ap_bus_cfg_chg(void){}
-#endif
-
#endif /* _ASM_S390_AP_H_ */
diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h
index f2240392c708..a92ebbc7aa7a 100644
--- a/arch/s390/include/asm/appldata.h
+++ b/arch/s390/include/asm/appldata.h
@@ -54,13 +54,13 @@ static inline int appldata_asm(struct appldata_parameter_list *parm_list,
parm_list->function = fn;
parm_list->parlist_length = sizeof(*parm_list);
parm_list->buffer_length = length;
- parm_list->product_id_addr = (unsigned long) id;
+ parm_list->product_id_addr = virt_to_phys(id);
parm_list->buffer_addr = virt_to_phys(buffer);
diag_stat_inc(DIAG_STAT_X0DC);
asm volatile(
" diag %1,%0,0xdc"
: "=d" (ry)
- : "d" (parm_list), "m" (*parm_list), "m" (*id)
+ : "d" (virt_to_phys(parm_list)), "m" (*parm_list), "m" (*id)
: "cc");
return ry;
}
diff --git a/arch/s390/include/asm/arch_hweight.h b/arch/s390/include/asm/arch_hweight.h
new file mode 100644
index 000000000000..aca08b0acbc1
--- /dev/null
+++ b/arch/s390/include/asm/arch_hweight.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_S390_ARCH_HWEIGHT_H
+#define _ASM_S390_ARCH_HWEIGHT_H
+
+#include <linux/types.h>
+#include <asm/march.h>
+
+static __always_inline unsigned long popcnt_z196(unsigned long w)
+{
+ unsigned long cnt;
+
+ asm volatile(".insn rrf,0xb9e10000,%[cnt],%[w],0,0"
+ : [cnt] "=d" (cnt)
+ : [w] "d" (w)
+ : "cc");
+ return cnt;
+}
+
+static __always_inline unsigned long popcnt_z15(unsigned long w)
+{
+ unsigned long cnt;
+
+ asm volatile(".insn rrf,0xb9e10000,%[cnt],%[w],8,0"
+ : [cnt] "=d" (cnt)
+ : [w] "d" (w)
+ : "cc");
+ return cnt;
+}
+
+static __always_inline unsigned long __arch_hweight64(__u64 w)
+{
+ if (__is_defined(MARCH_HAS_Z15_FEATURES))
+ return popcnt_z15(w);
+ if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
+ w = popcnt_z196(w);
+ w += w >> 32;
+ w += w >> 16;
+ w += w >> 8;
+ return w & 0xff;
+ }
+ return __sw_hweight64(w);
+}
+
+static __always_inline unsigned int __arch_hweight32(unsigned int w)
+{
+ if (__is_defined(MARCH_HAS_Z15_FEATURES))
+ return popcnt_z15(w);
+ if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
+ w = popcnt_z196(w);
+ w += w >> 16;
+ w += w >> 8;
+ return w & 0xff;
+ }
+ return __sw_hweight32(w);
+}
+
+static __always_inline unsigned int __arch_hweight16(unsigned int w)
+{
+ if (__is_defined(MARCH_HAS_Z15_FEATURES))
+ return popcnt_z15((unsigned short)w);
+ if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
+ w = popcnt_z196(w);
+ w += w >> 8;
+ return w & 0xff;
+ }
+ return __sw_hweight16(w);
+}
+
+static __always_inline unsigned int __arch_hweight8(unsigned int w)
+{
+ if (__is_defined(MARCH_HAS_Z196_FEATURES))
+ return popcnt_z196((unsigned char)w);
+ return __sw_hweight8(w);
+}
+
+#endif /* _ASM_S390_ARCH_HWEIGHT_H */
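Illustrative sketch, not part of the patch: the pre-z15 POPCNT variant counts bits per byte of the operand, which is why __arch_hweight64() above folds the result. A plain-C model of that folding, under that assumption:

static inline unsigned int model_popcnt_z196_fold(unsigned long w)
{
	unsigned long per_byte = 0;
	int i;

	/* Model of POPCNT with m3 == 0: each result byte holds the bit count of the matching input byte. */
	for (i = 0; i < 8; i++)
		per_byte |= (unsigned long)__builtin_popcountl((w >> (i * 8)) & 0xff) << (i * 8);
	/* Fold the eight per-byte counts into the lowest byte, as done above. */
	per_byte += per_byte >> 32;
	per_byte += per_byte >> 16;
	per_byte += per_byte >> 8;
	return per_byte & 0xff;
}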
diff --git a/arch/s390/include/asm/asm-extable.h b/arch/s390/include/asm/asm-extable.h
index 4a6b0a8b6412..2e829c16fd8a 100644
--- a/arch/s390/include/asm/asm-extable.h
+++ b/arch/s390/include/asm/asm-extable.h
@@ -9,11 +9,11 @@
#define EX_TYPE_NONE 0
#define EX_TYPE_FIXUP 1
#define EX_TYPE_BPF 2
-#define EX_TYPE_UA_STORE 3
-#define EX_TYPE_UA_LOAD_MEM 4
+#define EX_TYPE_UA_FAULT 3
#define EX_TYPE_UA_LOAD_REG 5
#define EX_TYPE_UA_LOAD_REGPAIR 6
#define EX_TYPE_ZEROPAD 7
+#define EX_TYPE_FPC 8
#define EX_DATA_REG_ERR_SHIFT 0
#define EX_DATA_REG_ERR GENMASK(3, 0)
@@ -69,11 +69,8 @@
#define EX_TABLE_AMODE31(_fault, _target) \
__EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP, __stringify(%%r0), __stringify(%%r0), 0)
-#define EX_TABLE_UA_STORE(_fault, _target, _regerr) \
- __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0)
-
-#define EX_TABLE_UA_LOAD_MEM(_fault, _target, _regerr, _regmem, _len) \
- __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len)
+#define EX_TABLE_UA_FAULT(_fault, _target, _regerr) \
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_FAULT, _regerr, _regerr, 0)
#define EX_TABLE_UA_LOAD_REG(_fault, _target, _regerr, _regzero) \
__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0)
@@ -84,4 +81,7 @@
#define EX_TABLE_ZEROPAD(_fault, _target, _regdata, _regaddr) \
__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_ZEROPAD, _regdata, _regaddr, 0)
+#define EX_TABLE_FPC(_fault, _target) \
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FPC, __stringify(%%r0), __stringify(%%r0), 0)
+
#endif /* __ASM_EXTABLE_H */
diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h
index a873e873e1ee..f662eb4b9246 100644
--- a/arch/s390/include/asm/asm-prototypes.h
+++ b/arch/s390/include/asm/asm-prototypes.h
@@ -3,7 +3,8 @@
#include <linux/kvm_host.h>
#include <linux/ftrace.h>
-#include <asm/fpu/api.h>
+#include <asm/fpu.h>
+#include <asm/nospec-branch.h>
#include <asm-generic/asm-prototypes.h>
__int128_t __ashlti3(__int128_t a, int b);
diff --git a/arch/s390/include/asm/asm.h b/arch/s390/include/asm/asm.h
new file mode 100644
index 000000000000..e9062b01e2a2
--- /dev/null
+++ b/arch/s390/include/asm/asm.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_ASM_H
+#define _ASM_S390_ASM_H
+
+#include <linux/stringify.h>
+
+/*
+ * Helper macros to be used for flag output operand handling.
+ * Inline assemblies must use four of the five supplied macros:
+ *
+ * Use CC_IPM(sym) at the end of the inline assembly; this extracts the
+ * condition code and program mask with the ipm instruction and writes it to
+ * the variable with symbolic name [sym] if the compiler has no support for
+ * flag output operands. If the compiler has support for flag output operands
+ * this generates no code.
+ *
+ * Use CC_OUT(sym, var) at the output operand list of an inline assembly. This
+ * defines an output operand with symbolic name [sym] for the variable
+ * [var]. [var] must be an int variable and [sym] must be identical with [sym]
+ * used with CC_IPM().
+ *
+ * Use either CC_CLOBBER or CC_CLOBBER_LIST() for the clobber list. Use
+ * CC_CLOBBER if the clobber list contains only "cc", otherwise use
+ * CC_CLOBBER_LIST() and add all clobbers as argument to the macro.
+ *
+ * Use CC_TRANSFORM() to convert the variable [var] which contains the
+ * extracted condition code. If the condition code is extracted with ipm, the
+ * [var] also contains the program mask. CC_TRANSFORM() moves the condition
+ * code to the two least significant bits and sets all other bits to zero.
+ */
+#if defined(__GCC_ASM_FLAG_OUTPUTS__) && !(IS_ENABLED(CONFIG_CC_ASM_FLAG_OUTPUT_BROKEN))
+
+#define __HAVE_ASM_FLAG_OUTPUTS__
+
+#define CC_IPM(sym)
+#define CC_OUT(sym, var) "=@cc" (var)
+#define CC_TRANSFORM(cc) ({ cc; })
+#define CC_CLOBBER
+#define CC_CLOBBER_LIST(...) __VA_ARGS__
+
+#else
+
+#define CC_IPM(sym) " ipm %[" __stringify(sym) "]\n"
+#define CC_OUT(sym, var) [sym] "=d" (var)
+#define CC_TRANSFORM(cc) ({ (cc) >> 28; })
+#define CC_CLOBBER "cc"
+#define CC_CLOBBER_LIST(...) "cc", __VA_ARGS__
+
+#endif
+
+#endif /* _ASM_S390_ASM_H */
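A minimal usage sketch for the helpers above, not part of the patch; the TEST UNDER MASK instruction and mask are arbitrary examples. The same inline assembly compiles with and without compiler flag-output support:

static inline int example_bit0_set(unsigned long word)
{
	int cc;

	asm volatile(
		"	tmll	%[word],1\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc)
		: [word] "d" (word)
		: CC_CLOBBER);
	/* With a single-bit mask, TEST UNDER MASK sets condition code 3 if the bit is one. */
	return CC_TRANSFORM(cc) == 3;
}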
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 7138d189cc42..b36dd6a1d652 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -15,46 +15,76 @@
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
-static inline int arch_atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
{
- return __atomic_read(v);
+ return __atomic_read(&v->counter);
}
#define arch_atomic_read arch_atomic_read
-static inline void arch_atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
- __atomic_set(v, i);
+ __atomic_set(&v->counter, i);
}
#define arch_atomic_set arch_atomic_set
-static inline int arch_atomic_add_return(int i, atomic_t *v)
+static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
return __atomic_add_barrier(i, &v->counter) + i;
}
#define arch_atomic_add_return arch_atomic_add_return
-static inline int arch_atomic_fetch_add(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
return __atomic_add_barrier(i, &v->counter);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add
-static inline void arch_atomic_add(int i, atomic_t *v)
+static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
__atomic_add(i, &v->counter);
}
#define arch_atomic_add arch_atomic_add
+static __always_inline void arch_atomic_inc(atomic_t *v)
+{
+ __atomic_add_const(1, &v->counter);
+}
+#define arch_atomic_inc arch_atomic_inc
+
+static __always_inline void arch_atomic_dec(atomic_t *v)
+{
+ __atomic_add_const(-1, &v->counter);
+}
+#define arch_atomic_dec arch_atomic_dec
+
+static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
+{
+ return __atomic_add_and_test_barrier(-i, &v->counter);
+}
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
+
+static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
+{
+ return __atomic_add_const_and_test_barrier(-1, &v->counter);
+}
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
+
+static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
+{
+ return __atomic_add_const_and_test_barrier(1, &v->counter);
+}
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
+
#define arch_atomic_sub(_i, _v) arch_atomic_add(-(int)(_i), _v)
#define arch_atomic_sub_return(_i, _v) arch_atomic_add_return(-(int)(_i), _v)
#define arch_atomic_fetch_sub(_i, _v) arch_atomic_fetch_add(-(int)(_i), _v)
#define ATOMIC_OPS(op) \
-static inline void arch_atomic_##op(int i, atomic_t *v) \
+static __always_inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
__atomic_##op(i, &v->counter); \
} \
-static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
+static __always_inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
return __atomic_##op##_barrier(i, &v->counter); \
}
@@ -72,62 +102,112 @@ ATOMIC_OPS(xor)
#define arch_atomic_fetch_or arch_atomic_fetch_or
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
-#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
+static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
+{
+ return arch_xchg(&v->counter, new);
+}
+#define arch_atomic_xchg arch_atomic_xchg
-static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
- return __atomic_cmpxchg(&v->counter, old, new);
+ return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg
+static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ return arch_try_cmpxchg(&v->counter, old, new);
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+
#define ATOMIC64_INIT(i) { (i) }
-static inline s64 arch_atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
- return __atomic64_read(v);
+ return __atomic64_read((long *)&v->counter);
}
#define arch_atomic64_read arch_atomic64_read
-static inline void arch_atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
- __atomic64_set(v, i);
+ __atomic64_set((long *)&v->counter, i);
}
#define arch_atomic64_set arch_atomic64_set
-static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}
#define arch_atomic64_add_return arch_atomic64_add_return
-static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
return __atomic64_add_barrier(i, (long *)&v->counter);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
-static inline void arch_atomic64_add(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
__atomic64_add(i, (long *)&v->counter);
}
#define arch_atomic64_add arch_atomic64_add
-#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
+static __always_inline void arch_atomic64_inc(atomic64_t *v)
+{
+ __atomic64_add_const(1, (long *)&v->counter);
+}
+#define arch_atomic64_inc arch_atomic64_inc
+
+static __always_inline void arch_atomic64_dec(atomic64_t *v)
+{
+ __atomic64_add_const(-1, (long *)&v->counter);
+}
+#define arch_atomic64_dec arch_atomic64_dec
-static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+static __always_inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
- return __atomic64_cmpxchg((long *)&v->counter, old, new);
+ return __atomic64_add_and_test_barrier(-i, (long *)&v->counter);
+}
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
+
+static __always_inline bool arch_atomic64_dec_and_test(atomic64_t *v)
+{
+ return __atomic64_add_const_and_test_barrier(-1, (long *)&v->counter);
+}
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
+
+static __always_inline bool arch_atomic64_inc_and_test(atomic64_t *v)
+{
+ return __atomic64_add_const_and_test_barrier(1, (long *)&v->counter);
+}
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
+
+static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
+{
+ return arch_xchg(&v->counter, new);
+}
+#define arch_atomic64_xchg arch_atomic64_xchg
+
+static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+ return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
-#define ATOMIC64_OPS(op) \
-static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
-{ \
- __atomic64_##op(i, (long *)&v->counter); \
-} \
-static inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
-{ \
- return __atomic64_##op##_barrier(i, (long *)&v->counter); \
+static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ return arch_try_cmpxchg(&v->counter, old, new);
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+
+#define ATOMIC64_OPS(op) \
+static __always_inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
+{ \
+ __atomic64_##op(i, (long *)&v->counter); \
+} \
+static __always_inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
+{ \
+ return __atomic64_##op##_barrier(i, (long *)&v->counter); \
}
ATOMIC64_OPS(and)
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index 50510e08b893..585678bbcd7a 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -8,44 +8,60 @@
#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__
-static inline int __atomic_read(const atomic_t *v)
+#include <linux/limits.h>
+#include <asm/march.h>
+#include <asm/asm.h>
+
+static __always_inline int __atomic_read(const int *ptr)
{
- int c;
+ int val;
asm volatile(
- " l %0,%1\n"
- : "=d" (c) : "R" (v->counter));
- return c;
+ " l %[val],%[ptr]\n"
+ : [val] "=d" (val) : [ptr] "R" (*ptr));
+ return val;
}
-static inline void __atomic_set(atomic_t *v, int i)
+static __always_inline void __atomic_set(int *ptr, int val)
{
- asm volatile(
- " st %1,%0\n"
- : "=R" (v->counter) : "d" (i));
+ if (__builtin_constant_p(val) && val >= S16_MIN && val <= S16_MAX) {
+ asm volatile(
+ " mvhi %[ptr],%[val]\n"
+ : [ptr] "=Q" (*ptr) : [val] "K" (val));
+ } else {
+ asm volatile(
+ " st %[val],%[ptr]\n"
+ : [ptr] "=R" (*ptr) : [val] "d" (val));
+ }
}
-static inline s64 __atomic64_read(const atomic64_t *v)
+static __always_inline long __atomic64_read(const long *ptr)
{
- s64 c;
+ long val;
asm volatile(
- " lg %0,%1\n"
- : "=d" (c) : "RT" (v->counter));
- return c;
+ " lg %[val],%[ptr]\n"
+ : [val] "=d" (val) : [ptr] "RT" (*ptr));
+ return val;
}
-static inline void __atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void __atomic64_set(long *ptr, long val)
{
- asm volatile(
- " stg %1,%0\n"
- : "=RT" (v->counter) : "d" (i));
+ if (__builtin_constant_p(val) && val >= S16_MIN && val <= S16_MAX) {
+ asm volatile(
+ " mvghi %[ptr],%[val]\n"
+ : [ptr] "=Q" (*ptr) : [val] "K" (val));
+ } else {
+ asm volatile(
+ " stg %[val],%[ptr]\n"
+ : [ptr] "=RT" (*ptr) : [val] "d" (val));
+ }
}
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifdef MARCH_HAS_Z196_FEATURES
#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
-static inline op_type op_name(op_type val, op_type *ptr) \
+static __always_inline op_type op_name(op_type val, op_type *ptr) \
{ \
op_type old; \
\
@@ -58,7 +74,7 @@ static inline op_type op_name(op_type val, op_type *ptr) \
} \
#define __ATOMIC_OPS(op_name, op_type, op_string) \
- __ATOMIC_OP(op_name, op_type, op_string, "\n") \
+ __ATOMIC_OP(op_name, op_type, op_string, "") \
__ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
__ATOMIC_OPS(__atomic_add, int, "laa")
@@ -84,7 +100,7 @@ static __always_inline void op_name(op_type val, op_type *ptr) \
}
#define __ATOMIC_CONST_OPS(op_name, op_type, op_string) \
- __ATOMIC_CONST_OP(op_name, op_type, op_string, "\n") \
+ __ATOMIC_CONST_OP(op_name, op_type, op_string, "") \
__ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
__ATOMIC_CONST_OPS(__atomic_add_const, int, "asi")
@@ -93,10 +109,10 @@ __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
#undef __ATOMIC_CONST_OPS
#undef __ATOMIC_CONST_OP
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#else /* MARCH_HAS_Z196_FEATURES */
#define __ATOMIC_OP(op_name, op_string) \
-static inline int op_name(int val, int *ptr) \
+static __always_inline int op_name(int val, int *ptr) \
{ \
int old, new; \
\
@@ -122,7 +138,7 @@ __ATOMIC_OPS(__atomic_xor, "xr")
#undef __ATOMIC_OPS
#define __ATOMIC64_OP(op_name, op_string) \
-static inline long op_name(long val, long *ptr) \
+static __always_inline long op_name(long val, long *ptr) \
{ \
long old, new; \
\
@@ -152,50 +168,78 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
#define __atomic64_add_const(val, ptr) __atomic64_add(val, ptr)
#define __atomic64_add_const_barrier(val, ptr) __atomic64_add(val, ptr)
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#endif /* MARCH_HAS_Z196_FEATURES */
-static inline int __atomic_cmpxchg(int *ptr, int old, int new)
-{
- asm volatile(
- " cs %[old],%[new],%[ptr]"
- : [old] "+d" (old), [ptr] "+Q" (*ptr)
- : [new] "d" (new)
- : "cc", "memory");
- return old;
-}
+#if defined(MARCH_HAS_Z196_FEATURES) && defined(__HAVE_ASM_FLAG_OUTPUTS__)
-static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
-{
- int old_expected = old;
+#define __ATOMIC_TEST_OP(op_name, op_type, op_string, op_barrier) \
+static __always_inline bool op_name(op_type val, op_type *ptr) \
+{ \
+ op_type tmp; \
+ int cc; \
+ \
+ asm volatile( \
+ op_string " %[tmp],%[val],%[ptr]\n" \
+ op_barrier \
+ : "=@cc" (cc), [tmp] "=d" (tmp), [ptr] "+QS" (*ptr) \
+ : [val] "d" (val) \
+ : "memory"); \
+ return (cc == 0) || (cc == 2); \
+} \
- asm volatile(
- " cs %[old],%[new],%[ptr]"
- : [old] "+d" (old), [ptr] "+Q" (*ptr)
- : [new] "d" (new)
- : "cc", "memory");
- return old == old_expected;
-}
+#define __ATOMIC_TEST_OPS(op_name, op_type, op_string) \
+ __ATOMIC_TEST_OP(op_name, op_type, op_string, "") \
+ __ATOMIC_TEST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
-static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
-{
- asm volatile(
- " csg %[old],%[new],%[ptr]"
- : [old] "+d" (old), [ptr] "+QS" (*ptr)
- : [new] "d" (new)
- : "cc", "memory");
- return old;
+__ATOMIC_TEST_OPS(__atomic_add_and_test, int, "laal")
+__ATOMIC_TEST_OPS(__atomic64_add_and_test, long, "laalg")
+
+#undef __ATOMIC_TEST_OPS
+#undef __ATOMIC_TEST_OP
+
+#define __ATOMIC_CONST_TEST_OP(op_name, op_type, op_string, op_barrier) \
+static __always_inline bool op_name(op_type val, op_type *ptr) \
+{ \
+ int cc; \
+ \
+ asm volatile( \
+ op_string " %[ptr],%[val]\n" \
+ op_barrier \
+ : "=@cc" (cc), [ptr] "+QS" (*ptr) \
+ : [val] "i" (val) \
+ : "memory"); \
+ return (cc == 0) || (cc == 2); \
}
-static inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
-{
- long old_expected = old;
+#define __ATOMIC_CONST_TEST_OPS(op_name, op_type, op_string) \
+ __ATOMIC_CONST_TEST_OP(op_name, op_type, op_string, "") \
+ __ATOMIC_CONST_TEST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
- asm volatile(
- " csg %[old],%[new],%[ptr]"
- : [old] "+d" (old), [ptr] "+QS" (*ptr)
- : [new] "d" (new)
- : "cc", "memory");
- return old == old_expected;
+__ATOMIC_CONST_TEST_OPS(__atomic_add_const_and_test, int, "alsi")
+__ATOMIC_CONST_TEST_OPS(__atomic64_add_const_and_test, long, "algsi")
+
+#undef __ATOMIC_CONST_TEST_OPS
+#undef __ATOMIC_CONST_TEST_OP
+
+#else /* defined(MARCH_HAS_Z196_FEATURES) && defined(__HAVE_ASM_FLAG_OUTPUTS__) */
+
+#define __ATOMIC_TEST_OP(op_name, op_func, op_type) \
+static __always_inline bool op_name(op_type val, op_type *ptr) \
+{ \
+ return op_func(val, ptr) == -val; \
}
+__ATOMIC_TEST_OP(__atomic_add_and_test, __atomic_add, int)
+__ATOMIC_TEST_OP(__atomic_add_and_test_barrier, __atomic_add_barrier, int)
+__ATOMIC_TEST_OP(__atomic_add_const_and_test, __atomic_add, int)
+__ATOMIC_TEST_OP(__atomic_add_const_and_test_barrier, __atomic_add_barrier, int)
+__ATOMIC_TEST_OP(__atomic64_add_and_test, __atomic64_add, long)
+__ATOMIC_TEST_OP(__atomic64_add_and_test_barrier, __atomic64_add_barrier, long)
+__ATOMIC_TEST_OP(__atomic64_add_const_and_test, __atomic64_add, long)
+__ATOMIC_TEST_OP(__atomic64_add_const_and_test_barrier, __atomic64_add_barrier, long)
+
+#undef __ATOMIC_TEST_OP
+
+#endif /* defined(MARCH_HAS_Z196_FEATURES) && defined(__HAVE_ASM_FLAG_OUTPUTS__) */
+
#endif /* __ARCH_S390_ATOMIC_OPS__ */
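Consumer-side sketch, not part of the patch: the new *_and_test helpers report whether the interlocked addition produced zero, either via the condition code (0 or 2 after LAAL/ALSI means a zero result) or, in the fallback, by comparing the fetched old value against -val.

static inline bool example_put_ref(int *refcount)
{
	/* True when this decrement brought the counter to zero. */
	return __atomic_add_const_and_test_barrier(-1, refcount);
}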
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 82de2a7c4160..d82130d7f2b6 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -8,13 +8,15 @@
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
+#include <asm/march.h>
+
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifdef MARCH_HAS_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define __ASM_BCR_SERIALIZE "bcr 14,0\n"
#else
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index c467dffa8c12..a5ca0a947691 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -36,184 +36,45 @@
#include <linux/typecheck.h>
#include <linux/compiler.h>
#include <linux/types.h>
-#include <asm/atomic_ops.h>
-#include <asm/barrier.h>
-
-#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
-
-static inline unsigned long *
-__bitops_word(unsigned long nr, const volatile unsigned long *ptr)
-{
- unsigned long addr;
-
- addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
- return (unsigned long *)addr;
-}
-
-static inline unsigned long __bitops_mask(unsigned long nr)
-{
- return 1UL << (nr & (BITS_PER_LONG - 1));
-}
-
-static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
-{
- unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long mask = __bitops_mask(nr);
-
- __atomic64_or(mask, (long *)addr);
-}
-
-static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
-{
- unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long mask = __bitops_mask(nr);
-
- __atomic64_and(~mask, (long *)addr);
-}
-
-static __always_inline void arch_change_bit(unsigned long nr,
- volatile unsigned long *ptr)
-{
- unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long mask = __bitops_mask(nr);
-
- __atomic64_xor(mask, (long *)addr);
-}
-
-static inline bool arch_test_and_set_bit(unsigned long nr,
- volatile unsigned long *ptr)
-{
- unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long mask = __bitops_mask(nr);
- unsigned long old;
-
- old = __atomic64_or_barrier(mask, (long *)addr);
- return old & mask;
-}
-
-static inline bool arch_test_and_clear_bit(unsigned long nr,
- volatile unsigned long *ptr)
-{
- unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long mask = __bitops_mask(nr);
- unsigned long old;
-
- old = __atomic64_and_barrier(~mask, (long *)addr);
- return old & mask;
-}
-
-static inline bool arch_test_and_change_bit(unsigned long nr,
- volatile unsigned long *ptr)
-{
- unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long mask = __bitops_mask(nr);
- unsigned long old;
-
- old = __atomic64_xor_barrier(mask, (long *)addr);
- return old & mask;
-}
-
-static __always_inline void
-arch___set_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long *p = __bitops_word(nr, addr);
- unsigned long mask = __bitops_mask(nr);
-
- *p |= mask;
-}
-
-static __always_inline void
-arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long *p = __bitops_word(nr, addr);
- unsigned long mask = __bitops_mask(nr);
-
- *p &= ~mask;
-}
-
-static __always_inline void
-arch___change_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long *p = __bitops_word(nr, addr);
- unsigned long mask = __bitops_mask(nr);
-
- *p ^= mask;
-}
-
-static __always_inline bool
-arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long *p = __bitops_word(nr, addr);
- unsigned long mask = __bitops_mask(nr);
- unsigned long old;
-
- old = *p;
- *p |= mask;
- return old & mask;
-}
-
-static __always_inline bool
-arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long *p = __bitops_word(nr, addr);
- unsigned long mask = __bitops_mask(nr);
- unsigned long old;
-
- old = *p;
- *p &= ~mask;
- return old & mask;
-}
-
-static __always_inline bool
-arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
-{
- unsigned long *p = __bitops_word(nr, addr);
- unsigned long mask = __bitops_mask(nr);
- unsigned long old;
-
- old = *p;
- *p ^= mask;
- return old & mask;
-}
-
-#define arch_test_bit generic_test_bit
-#define arch_test_bit_acquire generic_test_bit_acquire
-
-static inline bool arch_test_and_set_bit_lock(unsigned long nr,
- volatile unsigned long *ptr)
-{
- if (arch_test_bit(nr, ptr))
- return true;
- return arch_test_and_set_bit(nr, ptr);
-}
-
-static inline void arch_clear_bit_unlock(unsigned long nr,
- volatile unsigned long *ptr)
-{
- smp_mb__before_atomic();
- arch_clear_bit(nr, ptr);
-}
-
-static inline void arch___clear_bit_unlock(unsigned long nr,
- volatile unsigned long *ptr)
-{
- smp_mb();
- arch___clear_bit(nr, ptr);
-}
-
-static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
- volatile unsigned long *ptr)
-{
- unsigned long old;
-
- old = __atomic64_xor_barrier(mask, (long *)ptr);
- return old & BIT(7);
+#include <asm/asm.h>
+
+#define arch___set_bit generic___set_bit
+#define arch___clear_bit generic___clear_bit
+#define arch___change_bit generic___change_bit
+#define arch___test_and_set_bit generic___test_and_set_bit
+#define arch___test_and_clear_bit generic___test_and_clear_bit
+#define arch___test_and_change_bit generic___test_and_change_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
+
+static __always_inline bool arch_test_bit(unsigned long nr, const volatile unsigned long *ptr)
+{
+#ifdef __HAVE_ASM_FLAG_OUTPUTS__
+ const volatile unsigned char *addr;
+ unsigned long mask;
+ int cc;
+
+ /*
+ * With CONFIG_PROFILE_ALL_BRANCHES enabled gcc fails to
+ * handle __builtin_constant_p() in some cases.
+ */
+ if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && __builtin_constant_p(nr)) {
+ addr = (const volatile unsigned char *)ptr;
+ addr += (nr ^ (BITS_PER_LONG - BITS_PER_BYTE)) / BITS_PER_BYTE;
+ mask = 1UL << (nr & (BITS_PER_BYTE - 1));
+ asm volatile(
+ " tm %[addr],%[mask]\n"
+ : "=@cc" (cc)
+ : [addr] "Q" (*addr), [mask] "I" (mask)
+ );
+ return cc == 3;
+ }
+#endif
+ return generic_test_bit(nr, ptr);
}
-#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte
-#include <asm-generic/bitops/instrumented-atomic.h>
-#include <asm-generic/bitops/instrumented-non-atomic.h>
-#include <asm-generic/bitops/instrumented-lock.h>
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+#include <asm-generic/bitops/lock.h>
/*
* Functions which use MSB0 bit numbering.
@@ -379,8 +240,9 @@ static inline int fls(unsigned int word)
return fls64(word);
}
+#include <asm/arch_hweight.h>
+#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/ffz.h>
-#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
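Illustrative sketch, not part of the patch: a plain-C model of the address and mask arithmetic used by the TM-based arch_test_bit() fast path above. On big-endian s390, XOR-ing the bit number with BITS_PER_LONG - BITS_PER_BYTE selects the correct byte; the low three bits select the bit within that byte.

static inline bool model_test_bit(unsigned long nr, const unsigned long *ptr)
{
	const unsigned char *addr = (const unsigned char *)ptr;

	addr += (nr ^ (BITS_PER_LONG - BITS_PER_BYTE)) / BITS_PER_BYTE;
	return (*addr >> (nr & (BITS_PER_BYTE - 1))) & 1;
}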
diff --git a/arch/s390/include/asm/boot_data.h b/arch/s390/include/asm/boot_data.h
index f7eed27b3220..f55f8227058e 100644
--- a/arch/s390/include/asm/boot_data.h
+++ b/arch/s390/include/asm/boot_data.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_BOOT_DATA_H
+#include <linux/string.h>
#include <asm/setup.h>
#include <asm/ipl.h>
@@ -15,4 +16,54 @@ extern unsigned long ipl_cert_list_size;
extern unsigned long early_ipl_comp_list_addr;
extern unsigned long early_ipl_comp_list_size;
+extern char boot_rb[PAGE_SIZE * 2];
+extern bool boot_earlyprintk;
+extern size_t boot_rb_off;
+extern char bootdebug_filter[128];
+extern bool bootdebug;
+
+#define boot_rb_foreach(cb) \
+ do { \
+ size_t off = boot_rb_off + strlen(boot_rb + boot_rb_off) + 1; \
+ size_t len; \
+ for (; off < sizeof(boot_rb) && (len = strlen(boot_rb + off)); off += len + 1) \
+ cb(boot_rb + off); \
+ for (off = 0; off < boot_rb_off && (len = strlen(boot_rb + off)); off += len + 1) \
+ cb(boot_rb + off); \
+ } while (0)
+
+/*
+ * bootdebug_filter is a comma separated list of strings,
+ * where each string can be a prefix of the message.
+ */
+static inline bool bootdebug_filter_match(const char *buf)
+{
+ char *p = bootdebug_filter, *s;
+ char *end;
+
+ if (!*p)
+ return true;
+
+ end = p + strlen(p);
+ while (p < end) {
+ p = skip_spaces(p);
+ s = memscan(p, ',', end - p);
+ if (!strncmp(p, buf, s - p))
+ return true;
+ p = s + 1;
+ }
+ return false;
+}
+
+static inline const char *skip_timestamp(const char *buf)
+{
+#ifdef CONFIG_PRINTK_TIME
+ const char *p = memchr(buf, ']', strlen(buf));
+
+ if (p && p[1] == ' ')
+ return p + 2;
+#endif
+ return buf;
+}
+
#endif /* _ASM_S390_BOOT_DATA_H */
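Usage sketch for the additions above, not part of the patch; the function names are made up. boot_rb_foreach() replays the boot ring buffer oldest line first, and bootdebug_filter_match() accepts a message whose text starts with any of the comma separated prefixes in bootdebug_filter:

static void example_emit(const char *line)
{
	/* Forward only lines whose text (after an optional timestamp) matches the filter. */
	if (!bootdebug_filter_match(skip_timestamp(line)))
		return;
	/* a real consumer would print or store the line here */
}

static void example_replay_boot_messages(void)
{
	boot_rb_foreach(example_emit);
}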
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index aebe1e22c7be..c500d45fb465 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -14,7 +14,7 @@
".section .rodata.str,\"aMS\",@progbits,1\n" \
"1: .asciz \""__FILE__"\"\n" \
".previous\n" \
- ".section __bug_table,\"awM\",@progbits,%2\n" \
+ ".section __bug_table,\"aw\"\n" \
"2: .long 0b-.\n" \
" .long 1b-.\n" \
" .short %0,%1\n" \
@@ -30,7 +30,7 @@
#define __EMIT_BUG(x) do { \
asm_inline volatile( \
"0: mc 0,0\n" \
- ".section __bug_table,\"awM\",@progbits,%1\n" \
+ ".section __bug_table,\"aw\"\n" \
"1: .long 0b-.\n" \
" .short %0\n" \
" .org 1b+%1\n" \
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 91d261751d25..e3afcece375e 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -210,14 +210,15 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
#define get_ccwdev_lock(x) (x)->ccwlock
#define to_ccwdev(n) container_of(n, struct ccw_device, dev)
-#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
+#define to_ccwdrv(n) container_of_const(n, struct ccw_driver, driver)
extern struct ccw_device *ccw_device_create_console(struct ccw_driver *);
extern void ccw_device_destroy_console(struct ccw_device *);
extern int ccw_device_enable_console(struct ccw_device *);
extern void ccw_device_wait_idle(struct ccw_device *);
-extern void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size);
+extern void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size,
+ dma32_t *dma_handle);
extern void ccw_device_dma_free(struct ccw_device *cdev,
void *cpu_addr, size_t size);
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index 69837eec2ff5..d86dea5900e7 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -12,29 +12,19 @@
#ifndef _S390_CHECKSUM_H
#define _S390_CHECKSUM_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
+#include <linux/kmsan-checks.h>
#include <linux/in6.h>
-/*
- * Computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit).
- *
- * Returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic.
- *
- * This function must be called with even lengths, except
- * for the last fragment, which may be odd.
- *
- * It's best to have buff aligned on a 32-bit boundary.
- */
-static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
+static inline __wsum cksm(const void *buff, int len, __wsum sum)
{
union register_pair rp = {
- .even = (unsigned long) buff,
- .odd = (unsigned long) len,
+ .even = (unsigned long)buff,
+ .odd = (unsigned long)len,
};
- kasan_check_read(buff, len);
+ instrument_read(buff, len);
+ kmsan_check_memory(buff, len);
asm volatile(
"0: cksm %[sum],%[rp]\n"
" jo 0b\n"
@@ -42,6 +32,11 @@ static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
return sum;
}
+__wsum csum_partial(const void *buff, int len, __wsum sum);
+
+#define _HAVE_ARCH_CSUM_AND_COPY
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
+
/*
* Fold a partial checksum without adding pseudo headers.
*/
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h
index bb48ea380c0d..bb78159d8042 100644
--- a/arch/s390/include/asm/chsc.h
+++ b/arch/s390/include/asm/chsc.h
@@ -11,6 +11,9 @@
#include <uapi/asm/chsc.h>
+/* struct from linux/notifier.h */
+struct notifier_block;
+
/**
* Operation codes for CHSC PNSO:
* PNSO_OC_NET_BRIDGE_INFO - only addresses that are visible to a bridgeport
@@ -66,4 +69,16 @@ struct chsc_pnso_area {
struct chsc_pnso_naid_l2 entries[];
} __packed __aligned(PAGE_SIZE);
+/*
+ * notifier interface - registered notifiers get called on
+ * the following events:
+ * - ap config changed (CHSC_NOTIFY_AP_CFG)
+ */
+enum chsc_notify_type {
+ CHSC_NOTIFY_AP_CFG = 3,
+};
+
+int chsc_notifier_register(struct notifier_block *nb);
+int chsc_notifier_unregister(struct notifier_block *nb);
+
#endif /* _ASM_S390_CHSC_H */
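Illustrative sketch, not part of the patch; names are made up and struct notifier_block comes from <linux/notifier.h>. A consumer of the new interface registers a notifier and reacts to CHSC_NOTIFY_AP_CFG roughly like this:

static int example_chsc_event(struct notifier_block *nb, unsigned long event, void *data)
{
	if (event != CHSC_NOTIFY_AP_CFG)
		return NOTIFY_DONE;
	/* e.g. trigger an AP bus resource rescan here */
	return NOTIFY_OK;
}

static struct notifier_block example_chsc_nb = {
	.notifier_call = example_chsc_event,
};

static int __init example_init(void)
{
	return chsc_notifier_register(&example_chsc_nb);
}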
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 1c4f585dd39b..b6b619f340a5 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -7,6 +7,7 @@
#include <linux/bitops.h>
#include <linux/genalloc.h>
+#include <asm/dma-types.h>
#include <asm/types.h>
#include <asm/tpi.h>
@@ -32,7 +33,7 @@ struct ccw1 {
__u8 cmd_code;
__u8 flags;
__u16 count;
- __u32 cda;
+ dma32_t cda;
} __attribute__ ((packed,aligned(8)));
/**
@@ -152,8 +153,8 @@ struct sublog {
struct esw0 {
struct sublog sublog;
struct erw erw;
- __u32 faddr[2];
- __u32 saddr;
+ dma32_t faddr[2];
+ dma32_t saddr;
} __attribute__ ((packed));
/**
@@ -364,6 +365,8 @@ extern struct device *cio_get_dma_css_dev(void);
void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
size_t size);
+void *__cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size, dma32_t *dma_handle);
void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size);
void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev);
struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages);
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index aae0315374de..a9e2006033b7 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -11,185 +11,231 @@
#include <linux/mmdebug.h>
#include <linux/types.h>
#include <linux/bug.h>
+#include <asm/asm.h>
-void __xchg_called_with_bad_pointer(void);
+void __cmpxchg_called_with_bad_pointer(void);
+
+static __always_inline u32 __cs_asm(u64 ptr, u32 old, u32 new)
+{
+ asm volatile(
+ " cs %[old],%[new],%[ptr]\n"
+ : [old] "+d" (old), [ptr] "+Q" (*(u32 *)ptr)
+ : [new] "d" (new)
+ : "memory", "cc");
+ return old;
+}
+
+static __always_inline u64 __csg_asm(u64 ptr, u64 old, u64 new)
+{
+ asm volatile(
+ " csg %[old],%[new],%[ptr]\n"
+ : [old] "+d" (old), [ptr] "+QS" (*(u64 *)ptr)
+ : [new] "d" (new)
+ : "memory", "cc");
+ return old;
+}
-static __always_inline unsigned long
-__arch_xchg(unsigned long x, unsigned long address, int size)
+static inline u8 __arch_cmpxchg1(u64 ptr, u8 old, u8 new)
{
- unsigned long old;
- int shift;
+ union {
+ u8 b[4];
+ u32 w;
+ } old32, new32;
+ u32 prev;
+ int i;
+
+ i = ptr & 3;
+ ptr &= ~0x3;
+ prev = READ_ONCE(*(u32 *)ptr);
+ do {
+ old32.w = prev;
+ if (old32.b[i] != old)
+ return old32.b[i];
+ new32.w = old32.w;
+ new32.b[i] = new;
+ prev = __cs_asm(ptr, old32.w, new32.w);
+ } while (prev != old32.w);
+ return old;
+}
+
+static inline u16 __arch_cmpxchg2(u64 ptr, u16 old, u16 new)
+{
+ union {
+ u16 b[2];
+ u32 w;
+ } old32, new32;
+ u32 prev;
+ int i;
+
+ i = (ptr & 3) >> 1;
+ ptr &= ~0x3;
+ prev = READ_ONCE(*(u32 *)ptr);
+ do {
+ old32.w = prev;
+ if (old32.b[i] != old)
+ return old32.b[i];
+ new32.w = old32.w;
+ new32.b[i] = new;
+ prev = __cs_asm(ptr, old32.w, new32.w);
+ } while (prev != old32.w);
+ return old;
+}
+static __always_inline u64 __arch_cmpxchg(u64 ptr, u64 old, u64 new, int size)
+{
switch (size) {
- case 1:
- shift = (3 ^ (address & 3)) << 3;
- address ^= address & 3;
- asm volatile(
- " l %0,%1\n"
- "0: lr 0,%0\n"
- " nr 0,%3\n"
- " or 0,%2\n"
- " cs %0,0,%1\n"
- " jl 0b\n"
- : "=&d" (old), "+Q" (*(int *) address)
- : "d" ((x & 0xff) << shift), "d" (~(0xff << shift))
- : "memory", "cc", "0");
- return old >> shift;
- case 2:
- shift = (2 ^ (address & 2)) << 3;
- address ^= address & 2;
- asm volatile(
- " l %0,%1\n"
- "0: lr 0,%0\n"
- " nr 0,%3\n"
- " or 0,%2\n"
- " cs %0,0,%1\n"
- " jl 0b\n"
- : "=&d" (old), "+Q" (*(int *) address)
- : "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift))
- : "memory", "cc", "0");
- return old >> shift;
- case 4:
- asm volatile(
- " l %0,%1\n"
- "0: cs %0,%2,%1\n"
- " jl 0b\n"
- : "=&d" (old), "+Q" (*(int *) address)
- : "d" (x)
- : "memory", "cc");
- return old;
- case 8:
- asm volatile(
- " lg %0,%1\n"
- "0: csg %0,%2,%1\n"
- " jl 0b\n"
- : "=&d" (old), "+QS" (*(long *) address)
- : "d" (x)
- : "memory", "cc");
- return old;
+ case 1: return __arch_cmpxchg1(ptr, old & 0xff, new & 0xff);
+ case 2: return __arch_cmpxchg2(ptr, old & 0xffff, new & 0xffff);
+ case 4: return __cs_asm(ptr, old & 0xffffffff, new & 0xffffffff);
+ case 8: return __csg_asm(ptr, old, new);
+ default: __cmpxchg_called_with_bad_pointer();
}
- __xchg_called_with_bad_pointer();
- return x;
+ return old;
}
-#define arch_xchg(ptr, x) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
- __typeof__(*(ptr)) __ret; \
+ (__typeof__(*(ptr)))__arch_cmpxchg((unsigned long)(ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))); \
+})
+
+#define arch_cmpxchg64 arch_cmpxchg
+#define arch_cmpxchg_local arch_cmpxchg
+#define arch_cmpxchg64_local arch_cmpxchg
+
+#ifdef __HAVE_ASM_FLAG_OUTPUTS__
+
+#define arch_try_cmpxchg(ptr, oldp, new) \
+({ \
+ __typeof__(ptr) __oldp = (__typeof__(ptr))(oldp); \
+ __typeof__(*(ptr)) __old = *__oldp; \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __prev; \
+ int __cc; \
\
- __ret = (__typeof__(*(ptr))) \
- __arch_xchg((unsigned long)(x), (unsigned long)(ptr), \
- sizeof(*(ptr))); \
- __ret; \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ case 2: { \
+ __prev = arch_cmpxchg((ptr), (__old), (__new)); \
+ __cc = (__prev != __old); \
+ if (unlikely(__cc)) \
+ *__oldp = __prev; \
+ break; \
+ } \
+ case 4: { \
+ asm volatile( \
+ " cs %[__old],%[__new],%[__ptr]\n" \
+ : [__old] "+d" (*__oldp), \
+ [__ptr] "+Q" (*(ptr)), \
+ "=@cc" (__cc) \
+ : [__new] "d" (__new) \
+ : "memory"); \
+ break; \
+ } \
+ case 8: { \
+ asm volatile( \
+ " csg %[__old],%[__new],%[__ptr]\n" \
+ : [__old] "+d" (*__oldp), \
+ [__ptr] "+QS" (*(ptr)), \
+ "=@cc" (__cc) \
+ : [__new] "d" (__new) \
+ : "memory"); \
+ break; \
+ } \
+ default: \
+ __cmpxchg_called_with_bad_pointer(); \
+ } \
+ likely(__cc == 0); \
})
-void __cmpxchg_called_with_bad_pointer(void);
+#else /* __HAVE_ASM_FLAG_OUTPUTS__ */
-static __always_inline unsigned long __cmpxchg(unsigned long address,
- unsigned long old,
- unsigned long new, int size)
+#define arch_try_cmpxchg(ptr, oldp, new) \
+({ \
+ __typeof__((ptr)) __oldp = (__typeof__(ptr))(oldp); \
+ __typeof__(*(ptr)) __old = *__oldp; \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __prev; \
+ \
+ __prev = arch_cmpxchg((ptr), (__old), (__new)); \
+ if (unlikely(__prev != __old)) \
+ *__oldp = __prev; \
+ likely(__prev == __old); \
+})
+
+#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */
+
+#define arch_try_cmpxchg64 arch_try_cmpxchg
+#define arch_try_cmpxchg_local arch_try_cmpxchg
+#define arch_try_cmpxchg64_local arch_try_cmpxchg
+
+void __xchg_called_with_bad_pointer(void);
+
+static inline u8 __arch_xchg1(u64 ptr, u8 x)
+{
+ int shift = (3 ^ (ptr & 3)) << 3;
+ u32 mask, old, new;
+
+ ptr &= ~0x3;
+ mask = ~(0xff << shift);
+ old = READ_ONCE(*(u32 *)ptr);
+ do {
+ new = old & mask;
+ new |= x << shift;
+ } while (!arch_try_cmpxchg((u32 *)ptr, &old, new));
+ return old >> shift;
+}
+
+static inline u16 __arch_xchg2(u64 ptr, u16 x)
+{
+ int shift = (2 ^ (ptr & 2)) << 3;
+ u32 mask, old, new;
+
+ ptr &= ~0x3;
+ mask = ~(0xffff << shift);
+ old = READ_ONCE(*(u32 *)ptr);
+ do {
+ new = old & mask;
+ new |= x << shift;
+ } while (!arch_try_cmpxchg((u32 *)ptr, &old, new));
+ return old >> shift;
+}
+
+static __always_inline u64 __arch_xchg(u64 ptr, u64 x, int size)
{
switch (size) {
- case 1: {
- unsigned int prev, shift, mask;
-
- shift = (3 ^ (address & 3)) << 3;
- address ^= address & 3;
- old = (old & 0xff) << shift;
- new = (new & 0xff) << shift;
- mask = ~(0xff << shift);
- asm volatile(
- " l %[prev],%[address]\n"
- " nr %[prev],%[mask]\n"
- " xilf %[mask],0xffffffff\n"
- " or %[new],%[prev]\n"
- " or %[prev],%[tmp]\n"
- "0: lr %[tmp],%[prev]\n"
- " cs %[prev],%[new],%[address]\n"
- " jnl 1f\n"
- " xr %[tmp],%[prev]\n"
- " xr %[new],%[tmp]\n"
- " nr %[tmp],%[mask]\n"
- " jz 0b\n"
- "1:"
- : [prev] "=&d" (prev),
- [address] "+Q" (*(int *)address),
- [tmp] "+&d" (old),
- [new] "+&d" (new),
- [mask] "+&d" (mask)
- :: "memory", "cc");
- return prev >> shift;
- }
- case 2: {
- unsigned int prev, shift, mask;
-
- shift = (2 ^ (address & 2)) << 3;
- address ^= address & 2;
- old = (old & 0xffff) << shift;
- new = (new & 0xffff) << shift;
- mask = ~(0xffff << shift);
- asm volatile(
- " l %[prev],%[address]\n"
- " nr %[prev],%[mask]\n"
- " xilf %[mask],0xffffffff\n"
- " or %[new],%[prev]\n"
- " or %[prev],%[tmp]\n"
- "0: lr %[tmp],%[prev]\n"
- " cs %[prev],%[new],%[address]\n"
- " jnl 1f\n"
- " xr %[tmp],%[prev]\n"
- " xr %[new],%[tmp]\n"
- " nr %[tmp],%[mask]\n"
- " jz 0b\n"
- "1:"
- : [prev] "=&d" (prev),
- [address] "+Q" (*(int *)address),
- [tmp] "+&d" (old),
- [new] "+&d" (new),
- [mask] "+&d" (mask)
- :: "memory", "cc");
- return prev >> shift;
- }
+ case 1:
+ return __arch_xchg1(ptr, x & 0xff);
+ case 2:
+ return __arch_xchg2(ptr, x & 0xffff);
case 4: {
- unsigned int prev = old;
-
- asm volatile(
- " cs %[prev],%[new],%[address]\n"
- : [prev] "+&d" (prev),
- [address] "+Q" (*(int *)address)
- : [new] "d" (new)
- : "memory", "cc");
- return prev;
+ u32 old = READ_ONCE(*(u32 *)ptr);
+
+ do {
+ } while (!arch_try_cmpxchg((u32 *)ptr, &old, x & 0xffffffff));
+ return old;
}
case 8: {
- unsigned long prev = old;
-
- asm volatile(
- " csg %[prev],%[new],%[address]\n"
- : [prev] "+&d" (prev),
- [address] "+QS" (*(long *)address)
- : [new] "d" (new)
- : "memory", "cc");
- return prev;
+ u64 old = READ_ONCE(*(u64 *)ptr);
+
+ do {
+ } while (!arch_try_cmpxchg((u64 *)ptr, &old, x));
+ return old;
}
}
- __cmpxchg_called_with_bad_pointer();
- return old;
+ __xchg_called_with_bad_pointer();
+ return x;
}
-#define arch_cmpxchg(ptr, o, n) \
+#define arch_xchg(ptr, x) \
({ \
- __typeof__(*(ptr)) __ret; \
- \
- __ret = (__typeof__(*(ptr))) \
- __cmpxchg((unsigned long)(ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))); \
- __ret; \
+ (__typeof__(*(ptr)))__arch_xchg((unsigned long)(ptr), \
+ (unsigned long)(x), \
+ sizeof(*(ptr))); \
})
-#define arch_cmpxchg64 arch_cmpxchg
-#define arch_cmpxchg_local arch_cmpxchg
-#define arch_cmpxchg64_local arch_cmpxchg
-
#define system_has_cmpxchg128() 1
static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new)
@@ -203,5 +249,25 @@ static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 n
}
#define arch_cmpxchg128 arch_cmpxchg128
+#define arch_cmpxchg128_local arch_cmpxchg128
+
+#ifdef __HAVE_ASM_FLAG_OUTPUTS__
+
+static __always_inline bool arch_try_cmpxchg128(volatile u128 *ptr, u128 *oldp, u128 new)
+{
+ int cc;
+
+ asm volatile(
+ " cdsg %[old],%[new],%[ptr]\n"
+ : [old] "+d" (*oldp), [ptr] "+QS" (*ptr), "=@cc" (cc)
+ : [new] "d" (new)
+ : "memory");
+ return likely(cc == 0);
+}
+
+#define arch_try_cmpxchg128 arch_try_cmpxchg128
+#define arch_try_cmpxchg128_local arch_try_cmpxchg128
+
+#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */
#endif /* __ASM_CMPXCHG_H */
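A usage sketch, not part of the patch: arch_try_cmpxchg() returns true when the swap succeeded and otherwise refreshes the expected value through the oldp pointer, which is what allows the empty-bodied retry loops used in __arch_xchg() above. Callers outside arch code would normally go through the generic try_cmpxchg() wrapper; the function name below is made up for illustration.

static inline void example_set_bits(u32 *ptr, u32 bits)
{
	u32 old = READ_ONCE(*ptr);

	do {
		/* on failure "old" has been updated to the current memory value */
	} while (!arch_try_cmpxchg(ptr, &old, old | bits));
}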
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index b378e2b57ad8..59ab1192e2d5 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -12,6 +12,7 @@
#define _ASM_S390_CPACF_H
#include <asm/facility.h>
+#include <linux/kmsan-checks.h>
/*
* Instruction opcodes for the CPACF instructions
@@ -53,6 +54,10 @@
#define CPACF_KM_XTS_256 0x34
#define CPACF_KM_PXTS_128 0x3a
#define CPACF_KM_PXTS_256 0x3c
+#define CPACF_KM_XTS_128_FULL 0x52
+#define CPACF_KM_XTS_256_FULL 0x54
+#define CPACF_KM_PXTS_128_FULL 0x5a
+#define CPACF_KM_PXTS_256_FULL 0x5c
/*
* Function codes for the KMC (CIPHER MESSAGE WITH CHAINING)
@@ -120,23 +125,31 @@
#define CPACF_KMAC_DEA 0x01
#define CPACF_KMAC_TDEA_128 0x02
#define CPACF_KMAC_TDEA_192 0x03
+#define CPACF_KMAC_HMAC_SHA_224 0x70
+#define CPACF_KMAC_HMAC_SHA_256 0x71
+#define CPACF_KMAC_HMAC_SHA_384 0x72
+#define CPACF_KMAC_HMAC_SHA_512 0x73
/*
* Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT)
* instruction
*/
-#define CPACF_PCKMO_QUERY 0x00
-#define CPACF_PCKMO_ENC_DES_KEY 0x01
-#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02
-#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03
-#define CPACF_PCKMO_ENC_AES_128_KEY 0x12
-#define CPACF_PCKMO_ENC_AES_192_KEY 0x13
-#define CPACF_PCKMO_ENC_AES_256_KEY 0x14
-#define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20
-#define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21
-#define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22
-#define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28
-#define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29
+#define CPACF_PCKMO_QUERY 0x00
+#define CPACF_PCKMO_ENC_DES_KEY 0x01
+#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02
+#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03
+#define CPACF_PCKMO_ENC_AES_128_KEY 0x12
+#define CPACF_PCKMO_ENC_AES_192_KEY 0x13
+#define CPACF_PCKMO_ENC_AES_256_KEY 0x14
+#define CPACF_PCKMO_ENC_AES_XTS_128_DOUBLE_KEY 0x15
+#define CPACF_PCKMO_ENC_AES_XTS_256_DOUBLE_KEY 0x16
+#define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20
+#define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21
+#define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22
+#define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28
+#define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29
+#define CPACF_PCKMO_ENC_HMAC_512_KEY 0x76
+#define CPACF_PCKMO_ENC_HMAC_1024_KEY 0x7a
/*
* Function codes for the PRNO (PERFORM RANDOM NUMBER OPERATION)
@@ -164,30 +177,126 @@
#define CPACF_KMA_LAAD 0x200 /* Last-AAD */
#define CPACF_KMA_HS 0x400 /* Hash-subkey Supplied */
+/*
+ * Flags for the KIMD/KLMD (COMPUTE INTERMEDIATE/LAST MESSAGE DIGEST)
+ * instructions
+ */
+#define CPACF_KIMD_NIP 0x8000
+#define CPACF_KLMD_DUFOP 0x4000
+#define CPACF_KLMD_NIP 0x8000
+
+/*
+ * Function codes for KDSA (COMPUTE DIGITAL SIGNATURE AUTHENTICATION)
+ * instruction
+ */
+#define CPACF_KDSA_QUERY 0x00
+#define CPACF_KDSA_ECDSA_VERIFY_P256 0x01
+#define CPACF_KDSA_ECDSA_VERIFY_P384 0x02
+#define CPACF_KDSA_ECDSA_VERIFY_P521 0x03
+#define CPACF_KDSA_ECDSA_SIGN_P256 0x09
+#define CPACF_KDSA_ECDSA_SIGN_P384 0x0a
+#define CPACF_KDSA_ECDSA_SIGN_P521 0x0b
+#define CPACF_KDSA_ENC_ECDSA_SIGN_P256 0x11
+#define CPACF_KDSA_ENC_ECDSA_SIGN_P384 0x12
+#define CPACF_KDSA_ENC_ECDSA_SIGN_P521 0x13
+#define CPACF_KDSA_EDDSA_VERIFY_ED25519 0x20
+#define CPACF_KDSA_EDDSA_VERIFY_ED448 0x24
+#define CPACF_KDSA_EDDSA_SIGN_ED25519 0x28
+#define CPACF_KDSA_EDDSA_SIGN_ED448 0x2c
+#define CPACF_KDSA_ENC_EDDSA_SIGN_ED25519 0x30
+#define CPACF_KDSA_ENC_EDDSA_SIGN_ED448 0x34
+
+#define CPACF_FC_QUERY 0x00
+#define CPACF_FC_QUERY_AUTH_INFO 0x7F
+
typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
+typedef struct { unsigned char bytes[256]; } cpacf_qai_t;
-/**
- * cpacf_query() - check if a specific CPACF function is available
- * @opcode: the opcode of the crypto instruction
- * @func: the function code to test for
- *
- * Executes the query function for the given crypto instruction @opcode
- * and checks if @func is available
- *
- * Returns 1 if @func is available for @opcode, 0 otherwise
+/*
+ * Prototype for a non-existent function, used to produce a link
+ * error if __cpacf_query() or __cpacf_check_opcode() is used
+ * with an invalid compile-time constant opcode.
*/
-static __always_inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
+void __cpacf_bad_opcode(void);
+
+static __always_inline void __cpacf_query_rre(u32 opc, u8 r1, u8 r2,
+ u8 *pb, u8 fc)
{
asm volatile(
- " lghi 0,0\n" /* query function */
- " lgr 1,%[mask]\n"
- " spm 0\n" /* pckmo doesn't change the cc */
- /* Parameter regs are ignored, but must be nonzero and unique */
- "0: .insn rrf,%[opc] << 16,2,4,6,0\n"
- " brc 1,0b\n" /* handle partial completion */
- : "=m" (*mask)
- : [mask] "d" ((unsigned long)mask), [opc] "i" (opcode)
- : "cc", "0", "1");
+ " la %%r1,%[pb]\n"
+ " lghi %%r0,%[fc]\n"
+ " .insn rre,%[opc] << 16,%[r1],%[r2]\n"
+ : [pb] "=R" (*pb)
+ : [opc] "i" (opc), [fc] "i" (fc),
+ [r1] "i" (r1), [r2] "i" (r2)
+ : "cc", "memory", "r0", "r1");
+}
+
+static __always_inline void __cpacf_query_rrf(u32 opc, u8 r1, u8 r2, u8 r3,
+ u8 m4, u8 *pb, u8 fc)
+{
+ asm volatile(
+ " la %%r1,%[pb]\n"
+ " lghi %%r0,%[fc]\n"
+ " .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]\n"
+ : [pb] "=R" (*pb)
+ : [opc] "i" (opc), [fc] "i" (fc), [r1] "i" (r1),
+ [r2] "i" (r2), [r3] "i" (r3), [m4] "i" (m4)
+ : "cc", "memory", "r0", "r1");
+}
+
+static __always_inline void __cpacf_query_insn(unsigned int opcode, void *pb,
+ u8 fc)
+{
+ switch (opcode) {
+ case CPACF_KDSA:
+ __cpacf_query_rre(CPACF_KDSA, 0, 2, pb, fc);
+ break;
+ case CPACF_KIMD:
+ __cpacf_query_rre(CPACF_KIMD, 0, 2, pb, fc);
+ break;
+ case CPACF_KLMD:
+ __cpacf_query_rre(CPACF_KLMD, 0, 2, pb, fc);
+ break;
+ case CPACF_KM:
+ __cpacf_query_rre(CPACF_KM, 2, 4, pb, fc);
+ break;
+ case CPACF_KMA:
+ __cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, pb, fc);
+ break;
+ case CPACF_KMAC:
+ __cpacf_query_rre(CPACF_KMAC, 0, 2, pb, fc);
+ break;
+ case CPACF_KMC:
+ __cpacf_query_rre(CPACF_KMC, 2, 4, pb, fc);
+ break;
+ case CPACF_KMCTR:
+ __cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, pb, fc);
+ break;
+ case CPACF_KMF:
+ __cpacf_query_rre(CPACF_KMF, 2, 4, pb, fc);
+ break;
+ case CPACF_KMO:
+ __cpacf_query_rre(CPACF_KMO, 2, 4, pb, fc);
+ break;
+ case CPACF_PCC:
+ __cpacf_query_rre(CPACF_PCC, 0, 0, pb, fc);
+ break;
+ case CPACF_PCKMO:
+ __cpacf_query_rre(CPACF_PCKMO, 0, 0, pb, fc);
+ break;
+ case CPACF_PRNO:
+ __cpacf_query_rre(CPACF_PRNO, 2, 4, pb, fc);
+ break;
+ default:
+ __cpacf_bad_opcode();
+ }
+}
+
+static __always_inline void __cpacf_query(unsigned int opcode,
+ cpacf_mask_t *mask)
+{
+ __cpacf_query_insn(opcode, mask, CPACF_FC_QUERY);
}
static __always_inline int __cpacf_check_opcode(unsigned int opcode)
@@ -210,11 +319,25 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
return test_facility(57); /* check for MSA5 */
case CPACF_KMA:
return test_facility(146); /* check for MSA8 */
+ case CPACF_KDSA:
+ return test_facility(155); /* check for MSA9 */
default:
- BUG();
+ __cpacf_bad_opcode();
+ return 0;
}
}
+/**
+ * cpacf_query() - Query the function code mask for this CPACF opcode
+ * @opcode: the opcode of the crypto instruction
+ * @mask: pointer to a cpacf_mask_t to be filled with the function code mask
+ *
+ * Executes the query function for the given crypto instruction @opcode,
+ * provided the instruction itself is available.
+ *
+ * On success 1 is returned and @mask is filled with the function
+ * code mask for this CPACF opcode, otherwise 0 is returned.
+ */
static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
{
if (__cpacf_check_opcode(opcode)) {
@@ -230,7 +353,8 @@ static inline int cpacf_test_func(cpacf_mask_t *mask, unsigned int func)
return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0;
}
-static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int func)
+static __always_inline int cpacf_query_func(unsigned int opcode,
+ unsigned int func)
{
cpacf_mask_t mask;
@@ -239,6 +363,32 @@ static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int fu
return 0;
}
+static __always_inline void __cpacf_qai(unsigned int opcode, cpacf_qai_t *qai)
+{
+ __cpacf_query_insn(opcode, qai, CPACF_FC_QUERY_AUTH_INFO);
+}
+
+/**
+ * cpacf_qai() - Get the query authentication information for a CPACF opcode
+ * @opcode: the opcode of the crypto instruction
+ * @qai: pointer to a cpacf_qai_t to be filled with the query authentication
+ *       information
+ *
+ * Executes the query authentication information function for the given
+ * crypto instruction @opcode, provided that function code is available.
+ *
+ * On success 1 is returned and @qai is filled with the query authentication
+ * information for this CPACF opcode, otherwise 0 is returned.
+ */
+static __always_inline int cpacf_qai(unsigned int opcode, cpacf_qai_t *qai)
+{
+ if (cpacf_query_func(opcode, CPACF_FC_QUERY_AUTH_INFO)) {
+ __cpacf_qai(opcode, qai);
+ return 1;
+ }
+ memset(qai, 0, sizeof(*qai));
+ return 0;
+}
+
/**
* cpacf_km() - executes the KM (CIPHER MESSAGE) instruction
* @func: the function code passed to KM; see CPACF_KM_xxx defines
@@ -321,7 +471,7 @@ static inline void cpacf_kimd(unsigned long func, void *param,
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
- "0: .insn rre,%[opc] << 16,0,%[src]\n"
+ "0: .insn rrf,%[opc] << 16,0,%[src],8,0\n"
" brc 1,0b\n" /* handle partial completion */
: [src] "+&d" (s.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)(param)),
@@ -346,7 +496,7 @@ static inline void cpacf_klmd(unsigned long func, void *param,
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[pba]\n"
- "0: .insn rre,%[opc] << 16,0,%[src]\n"
+ "0: .insn rrf,%[opc] << 16,0,%[src],8,0\n"
" brc 1,0b\n" /* handle partial completion */
: [src] "+&d" (s.pair)
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
@@ -355,29 +505,30 @@ static inline void cpacf_klmd(unsigned long func, void *param,
}
/**
- * cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
- * instruction
- * @func: the function code passed to KM; see CPACF_KMAC_xxx defines
+ * _cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
+ * instruction and updates flags in gr0
+ * @gr0: pointer to gr0 (fc and flags) passed to KMAC; see CPACF_KMAC_xxx defines
* @param: address of parameter block; see POP for details on each func
* @src: address of source memory area
* @src_len: length of src operand in bytes
*
* Returns 0 for the query func, number of processed bytes for digest funcs
*/
-static inline int cpacf_kmac(unsigned long func, void *param,
- const u8 *src, long src_len)
+static inline int _cpacf_kmac(unsigned long *gr0, void *param,
+ const u8 *src, long src_len)
{
union register_pair s;
s.even = (unsigned long)src;
s.odd = (unsigned long)src_len;
asm volatile(
- " lgr 0,%[fc]\n"
+ " lgr 0,%[r0]\n"
" lgr 1,%[pba]\n"
"0: .insn rre,%[opc] << 16,0,%[src]\n"
" brc 1,0b\n" /* handle partial completion */
- : [src] "+&d" (s.pair)
- : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+ " lgr %[r0],0\n"
+ : [r0] "+d" (*gr0), [src] "+&d" (s.pair)
+ : [pba] "d" ((unsigned long)param),
[opc] "i" (CPACF_KMAC)
: "cc", "memory", "0", "1");
@@ -385,6 +536,22 @@ static inline int cpacf_kmac(unsigned long func, void *param,
}
/**
+ * cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
+ * instruction
+ * @func: function code passed to KMAC; see CPACF_KMAC_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ *
+ * Returns 0 for the query func, number of processed bytes for digest funcs
+ */
+static inline int cpacf_kmac(unsigned long func, void *param,
+ const u8 *src, long src_len)
+{
+ return _cpacf_kmac(&func, param, src, src_len);
+}
+
+/**
* cpacf_kmctr() - executes the KMCTR (CIPHER MESSAGE WITH COUNTER) instruction
* @func: the function code passed to KMCTR; see CPACF_KMCTR_xxx defines
* @param: address of parameter block; see POP for details on each func
@@ -473,6 +640,8 @@ static inline void cpacf_trng(u8 *ucbuf, unsigned long ucbuf_len,
: [ucbuf] "+&d" (u.pair), [cbuf] "+&d" (c.pair)
: [fc] "K" (CPACF_PRNO_TRNG), [opc] "i" (CPACF_PRNO)
: "cc", "memory", "0");
+ kmsan_unpoison_memory(ucbuf, ucbuf_len);
+ kmsan_unpoison_memory(cbuf, cbuf_len);
}
/**
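A usage sketch, not part of the patch, for the new query-authentication-information interface: check the opcode/function code first, then fetch the authentication block. Only CPACF_KMAC, CPACF_KMAC_HMAC_SHA_256, cpacf_query_func(), cpacf_qai() and cpacf_qai_t from the hunks above are used; the function name is made up.

#include <asm/cpacf.h>

static bool example_have_kmac_hmac_sha256(void)
{
	cpacf_qai_t qai;

	if (!cpacf_query_func(CPACF_KMAC, CPACF_KMAC_HMAC_SHA_256))
		return false;
	/* optionally retrieve the query authentication information block */
	if (cpacf_qai(CPACF_KMAC, &qai)) {
		/* inspect qai.bytes[] as needed */
	}
	return true;
}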
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index a0de5b9b02ea..e1a279e0d6a6 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -10,8 +10,10 @@
#define _ASM_S390_CPU_MF_H
#include <linux/errno.h>
+#include <linux/kmsan-checks.h>
#include <asm/asm-extable.h>
#include <asm/facility.h>
+#include <asm/asm.h>
asm(".include \"asm/cpu_mf-insn.h\"\n");
@@ -184,11 +186,12 @@ static inline int lcctl(u64 ctl)
int cc;
asm volatile (
- " lcctl %1\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc) : "Q" (ctl) : "cc");
- return cc;
+ " lcctl %[ctl]\n"
+ CC_IPM(cc)
+ : CC_OUT(cc, cc)
+ : [ctl] "Q" (ctl)
+ : CC_CLOBBER);
+ return CC_TRANSFORM(cc);
}
/* Extract CPU counter */
@@ -198,12 +201,13 @@ static inline int __ecctr(u64 ctr, u64 *content)
int cc;
asm volatile (
- " ecctr %0,%2\n"
- " ipm %1\n"
- " srl %1,28\n"
- : "=d" (_content), "=d" (cc) : "d" (ctr) : "cc");
+ " ecctr %[_content],%[ctr]\n"
+ CC_IPM(cc)
+ : CC_OUT(cc, cc), [_content] "=d" (_content)
+ : [ctr] "d" (ctr)
+ : CC_CLOBBER);
*content = _content;
- return cc;
+ return CC_TRANSFORM(cc);
}
/* Extract CPU counter */
@@ -233,13 +237,17 @@ static __always_inline int stcctm(enum stcctm_ctr_set set, u64 range, u64 *dest)
int cc;
asm volatile (
- " STCCTM %2,%3,%1\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc)
- : "Q" (*dest), "d" (range), "i" (set)
- : "cc", "memory");
- return cc;
+ " STCCTM %[range],%[set],%[dest]\n"
+ CC_IPM(cc)
+ : CC_OUT(cc, cc)
+ : [dest] "Q" (*dest), [range] "d" (range), [set] "i" (set)
+ : CC_CLOBBER_LIST("memory"));
+ /*
+ * If cc == 2, less than RANGE counters are stored, but it's not easy
+ * to tell how many. Always unpoison the whole range for simplicity.
+ */
+ kmsan_unpoison_memory(dest, range * sizeof(u64));
+ return CC_TRANSFORM(cc);
}
/* Query sampling information */
@@ -259,19 +267,20 @@ static inline int qsi(struct hws_qsi_info_block *info)
/* Load sampling controls */
static inline int lsctl(struct hws_lsctl_request_block *req)
{
- int cc;
+ int cc, exception;
- cc = 1;
+ exception = 1;
asm volatile(
- "0: lsctl 0(%1)\n"
- "1: ipm %0\n"
- " srl %0,28\n"
+ "0: lsctl %[req]\n"
+ "1: lhi %[exc],0\n"
"2:\n"
+ CC_IPM(cc)
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
- : "+d" (cc), "+a" (req)
- : "m" (*req)
- : "cc", "memory");
-
- return cc ? -EINVAL : 0;
+ : CC_OUT(cc, cc), [exc] "+d" (exception)
+ : [req] "Q" (*req)
+ : CC_CLOBBER);
+ if (exception || CC_TRANSFORM(cc))
+ return -EINVAL;
+ return 0;
}
#endif /* _ASM_S390_CPU_MF_H */
diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h
index 638137d46c85..a03f64033760 100644
--- a/arch/s390/include/asm/css_chars.h
+++ b/arch/s390/include/asm/css_chars.h
@@ -25,7 +25,7 @@ struct css_general_char {
u64 : 2;
u64 : 3;
- u64 aif_osa : 1; /* bit 67 */
+ u64 aif_qdio : 1;/* bit 67 */
u64 : 12;
u64 eadm_rf : 1; /* bit 80 */
u64 : 1;
diff --git a/arch/s390/include/asm/ctlreg.h b/arch/s390/include/asm/ctlreg.h
index 72a9556d04f3..e6527f51ad0b 100644
--- a/arch/s390/include/asm/ctlreg.h
+++ b/arch/s390/include/asm/ctlreg.h
@@ -202,8 +202,9 @@ union ctlreg0 {
unsigned long : 3;
unsigned long ccc : 1; /* Cryptography counter control */
unsigned long pec : 1; /* PAI extension control */
- unsigned long : 17;
- unsigned long : 3;
+ unsigned long : 15;
+ unsigned long wti : 1; /* Warning-track */
+ unsigned long : 4;
unsigned long lap : 1; /* Low-address-protection control */
unsigned long : 4;
unsigned long edat : 1; /* Enhanced-DAT-enablement control */
diff --git a/arch/s390/include/asm/current.h b/arch/s390/include/asm/current.h
index 68f84315277c..d03a922c641e 100644
--- a/arch/s390/include/asm/current.h
+++ b/arch/s390/include/asm/current.h
@@ -14,6 +14,6 @@
struct task_struct;
-#define current ((struct task_struct *const)S390_lowcore.current_task)
+#define current ((struct task_struct *const)get_lowcore()->current_task)
#endif /* !(_S390_CURRENT_H) */
diff --git a/arch/s390/include/asm/dat-bits.h b/arch/s390/include/asm/dat-bits.h
new file mode 100644
index 000000000000..8d65eec2f124
--- /dev/null
+++ b/arch/s390/include/asm/dat-bits.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DAT table and related structures
+ *
+ * Copyright IBM Corp. 2024
+ *
+ */
+
+#ifndef _S390_DAT_BITS_H
+#define _S390_DAT_BITS_H
+
+union asce {
+ unsigned long val;
+ struct {
+ unsigned long rsto: 52;/* Region- or Segment-Table Origin */
+ unsigned long : 2;
+ unsigned long g : 1; /* Subspace Group control */
+ unsigned long p : 1; /* Private Space control */
+ unsigned long s : 1; /* Storage-Alteration-Event control */
+ unsigned long x : 1; /* Space-Switch-Event control */
+ unsigned long r : 1; /* Real-Space control */
+ unsigned long : 1;
+ unsigned long dt : 2; /* Designation-Type control */
+ unsigned long tl : 2; /* Region- or Segment-Table Length */
+ };
+};
+
+enum {
+ ASCE_TYPE_SEGMENT = 0,
+ ASCE_TYPE_REGION3 = 1,
+ ASCE_TYPE_REGION2 = 2,
+ ASCE_TYPE_REGION1 = 3
+};
+
+union region1_table_entry {
+ unsigned long val;
+ struct {
+ unsigned long rto: 52;/* Region-Table Origin */
+ unsigned long : 2;
+ unsigned long p : 1; /* DAT-Protection Bit */
+ unsigned long : 1;
+ unsigned long tf : 2; /* Region-Second-Table Offset */
+ unsigned long i : 1; /* Region-Invalid Bit */
+ unsigned long : 1;
+ unsigned long tt : 2; /* Table-Type Bits */
+ unsigned long tl : 2; /* Region-Second-Table Length */
+ };
+};
+
+union region2_table_entry {
+ unsigned long val;
+ struct {
+ unsigned long rto: 52;/* Region-Table Origin */
+ unsigned long : 2;
+ unsigned long p : 1; /* DAT-Protection Bit */
+ unsigned long : 1;
+ unsigned long tf : 2; /* Region-Third-Table Offset */
+ unsigned long i : 1; /* Region-Invalid Bit */
+ unsigned long : 1;
+ unsigned long tt : 2; /* Table-Type Bits */
+ unsigned long tl : 2; /* Region-Third-Table Length */
+ };
+};
+
+struct region3_table_entry_fc0 {
+ unsigned long sto: 52;/* Segment-Table Origin */
+ unsigned long : 1;
+ unsigned long fc : 1; /* Format-Control */
+ unsigned long p : 1; /* DAT-Protection Bit */
+ unsigned long : 1;
+ unsigned long tf : 2; /* Segment-Table Offset */
+ unsigned long i : 1; /* Region-Invalid Bit */
+ unsigned long cr : 1; /* Common-Region Bit */
+ unsigned long tt : 2; /* Table-Type Bits */
+ unsigned long tl : 2; /* Segment-Table Length */
+};
+
+struct region3_table_entry_fc1 {
+ unsigned long rfaa: 33;/* Region-Frame Absolute Address */
+ unsigned long : 14;
+ unsigned long av : 1; /* ACCF-Validity Control */
+ unsigned long acc : 4; /* Access-Control Bits */
+ unsigned long f : 1; /* Fetch-Protection Bit */
+ unsigned long fc : 1; /* Format-Control */
+ unsigned long p : 1; /* DAT-Protection Bit */
+ unsigned long iep : 1; /* Instruction-Execution-Protection */
+ unsigned long : 2;
+ unsigned long i : 1; /* Region-Invalid Bit */
+ unsigned long cr : 1; /* Common-Region Bit */
+ unsigned long tt : 2; /* Table-Type Bits */
+ unsigned long : 2;
+};
+
+union region3_table_entry {
+ unsigned long val;
+ struct region3_table_entry_fc0 fc0;
+ struct region3_table_entry_fc1 fc1;
+ struct {
+ unsigned long : 53;
+ unsigned long fc: 1; /* Format-Control */
+ unsigned long : 4;
+ unsigned long i : 1; /* Region-Invalid Bit */
+ unsigned long cr: 1; /* Common-Region Bit */
+ unsigned long tt: 2; /* Table-Type Bits */
+ unsigned long : 2;
+ };
+};
+
+struct segment_table_entry_fc0 {
+ unsigned long pto: 53;/* Page-Table Origin */
+ unsigned long fc : 1; /* Format-Control */
+ unsigned long p : 1; /* DAT-Protection Bit */
+ unsigned long : 3;
+ unsigned long i : 1; /* Segment-Invalid Bit */
+ unsigned long cs : 1; /* Common-Segment Bit */
+ unsigned long tt : 2; /* Table-Type Bits */
+ unsigned long : 2;
+};
+
+struct segment_table_entry_fc1 {
+ unsigned long sfaa: 44;/* Segment-Frame Absolute Address */
+ unsigned long : 3;
+ unsigned long av : 1; /* ACCF-Validity Control */
+ unsigned long acc : 4; /* Access-Control Bits */
+ unsigned long f : 1; /* Fetch-Protection Bit */
+ unsigned long fc : 1; /* Format-Control */
+ unsigned long p : 1; /* DAT-Protection Bit */
+ unsigned long iep : 1; /* Instruction-Execution-Protection */
+ unsigned long : 2;
+ unsigned long i : 1; /* Segment-Invalid Bit */
+ unsigned long cs : 1; /* Common-Segment Bit */
+ unsigned long tt : 2; /* Table-Type Bits */
+ unsigned long : 2;
+};
+
+union segment_table_entry {
+ unsigned long val;
+ struct segment_table_entry_fc0 fc0;
+ struct segment_table_entry_fc1 fc1;
+ struct {
+ unsigned long : 53;
+ unsigned long fc: 1; /* Format-Control */
+ unsigned long : 4;
+ unsigned long i : 1; /* Segment-Invalid Bit */
+ unsigned long cs: 1; /* Common-Segment Bit */
+ unsigned long tt: 2; /* Table-Type Bits */
+ unsigned long : 2;
+ };
+};
+
+union page_table_entry {
+ unsigned long val;
+ struct {
+ unsigned long pfra: 52;/* Page-Frame Real Address */
+ unsigned long z : 1; /* Zero Bit */
+ unsigned long i : 1; /* Page-Invalid Bit */
+ unsigned long p : 1; /* DAT-Protection Bit */
+ unsigned long iep : 1; /* Instruction-Execution-Protection */
+ unsigned long : 8;
+ };
+};
+
+enum {
+ TABLE_TYPE_SEGMENT = 0,
+ TABLE_TYPE_REGION3 = 1,
+ TABLE_TYPE_REGION2 = 2,
+ TABLE_TYPE_REGION1 = 3
+};
+
+#endif /* _S390_DAT_BITS_H */
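A small sketch, not part of the patch, of how the new bitfield unions are meant to be used: given a raw ASCE value, pick out the designation type and the table origin. Only union asce and the ASCE_TYPE_* constants come from the header above; the helper name is made up.

#include <asm/dat-bits.h>

static unsigned long example_asce_table_origin(unsigned long val, unsigned int *dt)
{
	union asce asce = { .val = val };

	*dt = asce.dt;				/* one of ASCE_TYPE_* */
	return (unsigned long)asce.rsto << 12;	/* origin is stored without the low 12 bits */
}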
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index ccd4e148b5ed..6375276d94ea 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -66,14 +66,15 @@ typedef int (debug_header_proc_t) (debug_info_t *id,
struct debug_view *view,
int area,
debug_entry_t *entry,
- char *out_buf);
+ char *out_buf, size_t out_buf_size);
typedef int (debug_format_proc_t) (debug_info_t *id,
struct debug_view *view, char *out_buf,
+ size_t out_buf_size,
const char *in_buf);
typedef int (debug_prolog_proc_t) (debug_info_t *id,
struct debug_view *view,
- char *out_buf);
+ char *out_buf, size_t out_buf_size);
typedef int (debug_input_proc_t) (debug_info_t *id,
struct debug_view *view,
struct file *file,
@@ -81,8 +82,13 @@ typedef int (debug_input_proc_t) (debug_info_t *id,
size_t in_buf_size, loff_t *offset);
int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
- int area, debug_entry_t *entry, char *out_buf);
+ int area, debug_entry_t *entry,
+ char *out_buf, size_t out_buf_size);
+#define DEBUG_SPRINTF_MAX_ARGS 10
+int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view,
+ char *out_buf, size_t out_buf_size,
+ const char *inbuf);
struct debug_view {
char name[DEBUG_MAX_NAME_LEN];
debug_prolog_proc_t *prolog_proc;
@@ -112,6 +118,9 @@ debug_info_t *debug_register_mode(const char *name, int pages, int nr_areas,
int buf_size, umode_t mode, uid_t uid,
gid_t gid);
+ssize_t debug_dump(debug_info_t *id, struct debug_view *view,
+ char *buf, size_t buf_size, bool reverse);
+
void debug_unregister(debug_info_t *id);
void debug_set_level(debug_info_t *id, int new_level);
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index bed804137537..5790630e31f0 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -12,6 +12,7 @@
#include <linux/if_ether.h>
#include <linux/percpu.h>
#include <asm/asm-extable.h>
+#include <asm/sclp.h>
#include <asm/cio.h>
enum diag_stat_enum {
@@ -35,8 +36,11 @@ enum diag_stat_enum {
DIAG_STAT_X2FC,
DIAG_STAT_X304,
DIAG_STAT_X308,
+ DIAG_STAT_X310,
DIAG_STAT_X318,
DIAG_STAT_X320,
+ DIAG_STAT_X324,
+ DIAG_STAT_X49C,
DIAG_STAT_X500,
NR_DIAG_STAT
};
@@ -44,6 +48,13 @@ enum diag_stat_enum {
void diag_stat_inc(enum diag_stat_enum nr);
void diag_stat_inc_norecursion(enum diag_stat_enum nr);
+struct hypfs_diag0c_entry;
+
+/*
+ * Diagnose 0c: Pseudo Timer
+ */
+void diag0c(struct hypfs_diag0c_entry *data);
+
/*
* Diagnose 10: Release page range
*/
@@ -110,6 +121,8 @@ enum diag204_sc {
};
#define DIAG204_SUBCODE_MASK 0xffff
+#define DIAG204_BIF_BIT 0x80000000
+#define DIAG204_BUSY_WAIT (HZ / 10)
/* The two available diag 204 data formats */
enum diag204_format {
@@ -319,6 +332,11 @@ union diag318_info {
};
};
+static inline bool diag204_has_bif(void)
+{
+ return sclp.has_diag204_bif;
+}
+
int diag204(unsigned long subcode, unsigned long size, void *addr);
int diag224(void *ptr);
int diag26c(void *req, void *resp, enum diag26c_sc subcode);
@@ -331,10 +349,10 @@ struct hypfs_diag0c_entry;
*/
struct diag_ops {
int (*diag210)(struct diag210 *addr);
- int (*diag26c)(void *req, void *resp, enum diag26c_sc subcode);
+ int (*diag26c)(unsigned long rx, unsigned long rx1, enum diag26c_sc subcode);
int (*diag14)(unsigned long rx, unsigned long ry1, unsigned long subcode);
int (*diag8c)(struct diag8c *addr, struct ccw_dev_id *devno, size_t len);
- void (*diag0c)(struct hypfs_diag0c_entry *entry);
+ void (*diag0c)(unsigned long rx);
void (*diag308_reset)(void);
};
@@ -342,10 +360,18 @@ extern struct diag_ops diag_amode31_ops;
extern struct diag210 *__diag210_tmp_amode31;
int _diag210_amode31(struct diag210 *addr);
-int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode);
+int _diag26c_amode31(unsigned long rx, unsigned long rx1, enum diag26c_sc subcode);
int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode);
-void _diag0c_amode31(struct hypfs_diag0c_entry *entry);
+void _diag0c_amode31(unsigned long rx);
void _diag308_reset_amode31(void);
int _diag8c_amode31(struct diag8c *addr, struct ccw_dev_id *devno, size_t len);
+/* diag 49c subcodes */
+enum diag49c_sc {
+ DIAG49C_SUBC_ACK = 0,
+ DIAG49C_SUBC_REG = 1
+};
+
+int diag49c(unsigned long subcode);
+
#endif /* _ASM_S390_DIAG_H */
diff --git a/arch/s390/include/asm/dma-types.h b/arch/s390/include/asm/dma-types.h
new file mode 100644
index 000000000000..5c5734e6946c
--- /dev/null
+++ b/arch/s390/include/asm/dma-types.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_S390_DMA_TYPES_H_
+#define _ASM_S390_DMA_TYPES_H_
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+/*
+ * typedef dma32_t
+ * Contains a 31 bit absolute address to a DMA capable piece of storage.
+ *
+ * For CIO, DMA addresses are always absolute addresses. These addresses tend
+ * to be used in architectured memory blocks (like ORB, IDAW, MIDAW). Some of
+ * these blocks require 31 bit wide addresses, i.e. the address must fit
+ * into 31 bits.
+ *
+ * This type is to be used when such an address field can be modelled as
+ * 32 bits wide.
+ */
+typedef u32 __bitwise dma32_t;
+
+/*
+ * typedef dma64_t
+ * Contains a 64 bit absolute address to a DMA capable piece of storage.
+ *
+ * For CIO, DMA addresses are always absolute addresses. These addresses tend
+ * to be used in architectured memory blocks (like ORB, IDAW, MIDAW).
+ *
+ * This type is to be used to model such 64 bit wide fields.
+ */
+typedef u64 __bitwise dma64_t;
+
+/*
+ * Although DMA addresses should be obtained using the DMA API, this function
+ * can be used safely when the argument is known to hold a virtual address
+ * that points to DMA-able, 31 bit addressable storage.
+ */
+static inline dma32_t virt_to_dma32(void *ptr)
+{
+ return (__force dma32_t)__pa32(ptr);
+}
+
+static inline void *dma32_to_virt(dma32_t addr)
+{
+ return __va((__force unsigned long)addr);
+}
+
+static inline dma32_t u32_to_dma32(u32 addr)
+{
+ return (__force dma32_t)addr;
+}
+
+static inline u32 dma32_to_u32(dma32_t addr)
+{
+ return (__force u32)addr;
+}
+
+static inline dma32_t dma32_add(dma32_t a, u32 b)
+{
+ return (__force dma32_t)((__force u32)a + b);
+}
+
+static inline dma32_t dma32_and(dma32_t a, u32 b)
+{
+ return (__force dma32_t)((__force u32)a & b);
+}
+
+/*
+ * Although DMA addresses should be obtained using the DMA API, this function
+ * can be used safely when the argument is known to hold a virtual address
+ * that points to DMA-able storage.
+ */
+static inline dma64_t virt_to_dma64(void *ptr)
+{
+ return (__force dma64_t)__pa(ptr);
+}
+
+static inline void *dma64_to_virt(dma64_t addr)
+{
+ return __va((__force unsigned long)addr);
+}
+
+static inline dma64_t u64_to_dma64(u64 addr)
+{
+ return (__force dma64_t)addr;
+}
+
+static inline u64 dma64_to_u64(dma64_t addr)
+{
+ return (__force u64)addr;
+}
+
+static inline dma64_t dma64_add(dma64_t a, u64 b)
+{
+ return (__force dma64_t)((__force u64)a + b);
+}
+
+static inline dma64_t dma64_and(dma64_t a, u64 b)
+{
+ return (__force dma64_t)((__force u64)a & b);
+}
+
+#endif /* _ASM_S390_DMA_TYPES_H_ */
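A sketch, not part of the patch: the __bitwise types force explicit conversions, so channel-program addresses are produced at well-defined points instead of by silently mixing plain integers. struct ccw1 with its dma32_t cda field comes from the cio.h hunk at the top of this diff; the helper name is made up.

#include <asm/cio.h>
#include <asm/dma-types.h>

static void example_fill_ccw(struct ccw1 *ccw, u8 cmd, void *buf, u16 len)
{
	ccw->cmd_code = cmd;
	ccw->flags = 0;
	ccw->count = len;
	/* buf must point to DMA-able, 31 bit addressable storage */
	ccw->cda = virt_to_dma32(buf);
}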
diff --git a/arch/s390/include/asm/dwarf.h b/arch/s390/include/asm/dwarf.h
index 4f21ae561e4d..390906b8e386 100644
--- a/arch/s390/include/asm/dwarf.h
+++ b/arch/s390/include/asm/dwarf.h
@@ -9,6 +9,7 @@
#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
#define CFI_RESTORE .cfi_restore
+#define CFI_REL_OFFSET .cfi_rel_offset
#ifdef CONFIG_AS_CFI_VAL_OFFSET
#define CFI_VAL_OFFSET .cfi_val_offset
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
index 06f795855af7..c4589ec4505e 100644
--- a/arch/s390/include/asm/eadm.h
+++ b/arch/s390/include/asm/eadm.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <linux/device.h>
#include <linux/blk_types.h>
+#include <asm/dma-types.h>
struct arqb {
u64 data;
@@ -45,7 +46,7 @@ struct msb {
u16:12;
u16 bs:4;
u32 blk_count;
- u64 data_addr;
+ dma64_t data_addr;
u64 scm_addr;
u64:64;
} __packed;
@@ -54,7 +55,7 @@ struct aidaw {
u8 flags;
u32 :24;
u32 :32;
- u64 data_addr;
+ dma64_t data_addr;
} __packed;
#define MSB_OC_CLEAR 0
diff --git a/arch/s390/include/asm/ebcdic.h b/arch/s390/include/asm/ebcdic.h
index efb50fc6866c..7164cb658435 100644
--- a/arch/s390/include/asm/ebcdic.h
+++ b/arch/s390/include/asm/ebcdic.h
@@ -22,18 +22,18 @@ extern __u8 _ebc_toupper[256]; /* EBCDIC -> uppercase */
static inline void
codepage_convert(const __u8 *codepage, volatile char *addr, unsigned long nr)
{
- if (nr-- <= 0)
+ if (!nr--)
return;
asm volatile(
- " bras 1,1f\n"
- " tr 0(1,%0),0(%2)\n"
- "0: tr 0(256,%0),0(%2)\n"
+ " j 2f\n"
+ "0: tr 0(1,%0),0(%2)\n"
+ "1: tr 0(256,%0),0(%2)\n"
" la %0,256(%0)\n"
- "1: ahi %1,-256\n"
- " jnm 0b\n"
- " ex %1,0(1)"
+ "2: aghi %1,-256\n"
+ " jnm 1b\n"
+ " exrl %1,0b"
: "+&a" (addr), "+&a" (nr)
- : "a" (codepage) : "cc", "memory", "1");
+ : "a" (codepage) : "cc", "memory");
}
#define ASCEBC(addr,nr) codepage_convert(_ascebc, addr, nr)
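A usage sketch, not part of the patch: the exrl-based rework keeps the calling convention, so conversions still go through the ASCEBC() wrapper defined above. The function name is made up for illustration.

static void example_label_to_ebcdic(char *label, unsigned long len)
{
	/* in-place ASCII -> EBCDIC translation of len bytes */
	ASCEBC(label, len);
}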
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 70a30ae258b7..8f2c23cc52b6 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -91,6 +91,14 @@
/* Keep this the last entry. */
#define R_390_NUM 61
+/*
+ * HWCAP flags - for AT_HWCAP
+ *
+ * Bits 32-63 are reserved for use by libc.
+ * Bit 31 is reserved and will be used by libc to determine if a second
+ * argument is passed to IFUNC resolvers. This will be implemented when
+ * there is a need for AT_HWCAP2.
+ */
enum {
HWCAP_NR_ESAN3 = 0,
HWCAP_NR_ZARCH = 1,
diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
index fdd319a622b0..35555c944630 100644
--- a/arch/s390/include/asm/entry-common.h
+++ b/arch/s390/include/asm/entry-common.h
@@ -8,7 +8,7 @@
#include <linux/processor.h>
#include <linux/uaccess.h>
#include <asm/timex.h>
-#include <asm/fpu/api.h>
+#include <asm/fpu.h>
#include <asm/pai.h>
#define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_GUARDED_STORAGE | _TIF_PER_TRAP)
@@ -41,8 +41,7 @@ static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
static __always_inline void arch_exit_to_user_mode(void)
{
- if (test_cpu_flag(CIF_FPU))
- __load_fpu_regs();
+ load_user_fpu_regs();
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
debug_user_asce(1);
@@ -55,7 +54,7 @@ static __always_inline void arch_exit_to_user_mode(void)
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
unsigned long ti_work)
{
- choose_random_kstack_offset(get_tod_clock_fast() & 0xff);
+ choose_random_kstack_offset(get_tod_clock_fast());
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
diff --git a/arch/s390/include/asm/extmem.h b/arch/s390/include/asm/extmem.h
index 568fd81bb77b..e0a06060afdd 100644
--- a/arch/s390/include/asm/extmem.h
+++ b/arch/s390/include/asm/extmem.h
@@ -8,6 +8,13 @@
#define _ASM_S390X_DCSS_H
#ifndef __ASSEMBLY__
+/*
+ * A DCSS segment is defined as a contiguous range of pages using the DEFSEG
+ * command. The range start and end are page numbers, each with a value less
+ * than or equal to 0x7ffffff (see CP Commands and Utilities Reference).
+ */
+#define MAX_DCSS_ADDR (512UL * SZ_1G)
+
/* possible values for segment type as returned by segment_info */
#define SEG_TYPE_SW 0
#define SEG_TYPE_EW 1
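For reference, the arithmetic behind the new limit (not part of the patch): the largest allowed page number is 0x7ffffff, so a DCSS covers at most 0x8000000 pages of 4 KiB each, i.e. 2^27 * 2^12 = 2^39 bytes = 512 GiB, which is exactly the 512UL * SZ_1G used for MAX_DCSS_ADDR.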
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 796007125dff..5f5b1aa6c233 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -14,13 +14,12 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/preempt.h>
-
+#include <asm/alternative.h>
#include <asm/lowcore.h>
#define MAX_FACILITY_BIT (sizeof(stfle_fac_list) * 8)
extern u64 stfle_fac_list[16];
-extern u64 alt_stfle_fac_list[16];
static inline void __set_facility(unsigned long nr, void *facilities)
{
@@ -40,33 +39,56 @@ static inline void __clear_facility(unsigned long nr, void *facilities)
ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
}
-static inline int __test_facility(unsigned long nr, void *facilities)
+static __always_inline bool __test_facility(unsigned long nr, void *facilities)
{
unsigned char *ptr;
if (nr >= MAX_FACILITY_BIT)
- return 0;
+ return false;
ptr = (unsigned char *) facilities + (nr >> 3);
return (*ptr & (0x80 >> (nr & 7))) != 0;
}
/*
+ * __test_facility_constant() generates a single instruction branch. If the
+ * tested facility is available (likely) the branch is patched into a nop.
+ *
+ * Do not use this function unless you know what you are doing. All users are
+ * supposed to use test_facility() which will do the right thing.
+ */
+static __always_inline bool __test_facility_constant(unsigned long nr)
+{
+ asm goto(
+ ALTERNATIVE("brcl 15,%l[l_no]", "brcl 0,0", ALT_FACILITY(%[nr]))
+ :
+ : [nr] "i" (nr)
+ :
+ : l_no);
+ return true;
+l_no:
+ return false;
+}
+
+/*
* The test_facility function uses the bit ordering where the MSB is bit 0.
* That makes it easier to query facility bits with the bit number as
* documented in the Principles of Operation.
*/
-static inline int test_facility(unsigned long nr)
+static __always_inline bool test_facility(unsigned long nr)
{
unsigned long facilities_als[] = { FACILITIES_ALS };
- if (__builtin_constant_p(nr) && nr < sizeof(facilities_als) * 8) {
- if (__test_facility(nr, &facilities_als))
- return 1;
+ if (!__is_defined(__DECOMPRESSOR) && __builtin_constant_p(nr)) {
+ if (nr < sizeof(facilities_als) * 8) {
+ if (__test_facility(nr, &facilities_als))
+ return true;
+ }
+ return __test_facility_constant(nr);
}
return __test_facility(nr, &stfle_fac_list);
}
-static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
+static inline unsigned long __stfle_asm(u64 *fac_list, int size)
{
unsigned long reg0 = size - 1;
@@ -74,7 +96,7 @@ static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
" lgr 0,%[reg0]\n"
" .insn s,0xb2b00000,%[list]\n" /* stfle */
" lgr %[reg0],0\n"
- : [reg0] "+&d" (reg0), [list] "+Q" (*stfle_fac_list)
+ : [reg0] "+&d" (reg0), [list] "+Q" (*fac_list)
:
: "memory", "cc", "0");
return reg0;
@@ -82,32 +104,32 @@ static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
/**
* stfle - Store facility list extended
- * @stfle_fac_list: array where facility list can be stored
+ * @fac_list: array where facility list can be stored
* @size: size of passed in array in double words
*/
-static inline void __stfle(u64 *stfle_fac_list, int size)
+static inline void __stfle(u64 *fac_list, int size)
{
unsigned long nr;
u32 stfl_fac_list;
asm volatile(
" stfl 0(0)\n"
- : "=m" (S390_lowcore.stfl_fac_list));
- stfl_fac_list = S390_lowcore.stfl_fac_list;
- memcpy(stfle_fac_list, &stfl_fac_list, 4);
+ : "=m" (get_lowcore()->stfl_fac_list));
+ stfl_fac_list = get_lowcore()->stfl_fac_list;
+ memcpy(fac_list, &stfl_fac_list, 4);
nr = 4; /* bytes stored by stfl */
if (stfl_fac_list & 0x01000000) {
/* More facility bits available with stfle */
- nr = __stfle_asm(stfle_fac_list, size);
+ nr = __stfle_asm(fac_list, size);
nr = min_t(unsigned long, (nr + 1) * 8, size * 8);
}
- memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
+ memset((char *)fac_list + nr, 0, size * 8 - nr);
}
-static inline void stfle(u64 *stfle_fac_list, int size)
+static inline void stfle(u64 *fac_list, int size)
{
preempt_disable();
- __stfle(stfle_fac_list, size);
+ __stfle(fac_list, size);
preempt_enable();
}
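A usage sketch, not part of the patch: with a compile-time constant facility number the reworked test_facility() either resolves against FACILITIES_ALS at build time or compiles down to the single alternative-patched branch from __test_facility_constant(). Facility bit 129 (the vector facility) is used as an example; the function name is made up.

static bool example_have_vector_facility(void)
{
	/* bit numbering follows the Principles of Operation (MSB = bit 0) */
	return test_facility(129);
}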
diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h
index 29784b4b44f6..80f82a739b45 100644
--- a/arch/s390/include/asm/fcx.h
+++ b/arch/s390/include/asm/fcx.h
@@ -10,6 +10,7 @@
#define _ASM_S390_FCX_H
#include <linux/types.h>
+#include <asm/dma-types.h>
#define TCW_FORMAT_DEFAULT 0
#define TCW_TIDAW_FORMAT_DEFAULT 0
@@ -43,16 +44,16 @@ struct tcw {
u32 r:1;
u32 w:1;
u32 :16;
- u64 output;
- u64 input;
- u64 tsb;
- u64 tccb;
+ dma64_t output;
+ dma64_t input;
+ dma64_t tsb;
+ dma64_t tccb;
u32 output_count;
u32 input_count;
u32 :32;
u32 :32;
u32 :32;
- u32 intrg;
+ dma32_t intrg;
} __attribute__ ((packed, aligned(64)));
#define TIDAW_FLAGS_LAST (1 << (7 - 0))
@@ -73,7 +74,7 @@ struct tidaw {
u32 flags:8;
u32 :24;
u32 count;
- u64 addr;
+ dma64_t addr;
} __attribute__ ((packed, aligned(16)));
/**
diff --git a/arch/s390/include/asm/fprobe.h b/arch/s390/include/asm/fprobe.h
new file mode 100644
index 000000000000..5ef600b372f4
--- /dev/null
+++ b/arch/s390/include/asm/fprobe.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_FPROBE_H
+#define _ASM_S390_FPROBE_H
+
+#include <asm-generic/fprobe.h>
+
+#undef FPROBE_HEADER_MSB_PATTERN
+#define FPROBE_HEADER_MSB_PATTERN 0
+
+#endif /* _ASM_S390_FPROBE_H */
diff --git a/arch/s390/include/asm/vx-insn-asm.h b/arch/s390/include/asm/fpu-insn-asm.h
index 360f8b36d962..d296322be4bc 100644
--- a/arch/s390/include/asm/vx-insn-asm.h
+++ b/arch/s390/include/asm/fpu-insn-asm.h
@@ -9,11 +9,11 @@
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
-#ifndef __ASM_S390_VX_INSN_INTERNAL_H
-#define __ASM_S390_VX_INSN_INTERNAL_H
+#ifndef __ASM_S390_FPU_INSN_ASM_H
+#define __ASM_S390_FPU_INSN_ASM_H
-#ifndef __ASM_S390_VX_INSN_H
-#error only <asm/vx-insn.h> can be included directly
+#ifndef __ASM_S390_FPU_INSN_H
+#error only <asm/fpu-insn.h> can be included directly
#endif
#ifdef __ASSEMBLY__
@@ -195,10 +195,26 @@
/* RXB - Compute most significant bit used vector registers
*
* @rxb: Operand to store computed RXB value
- * @v1: First vector register designated operand
- * @v2: Second vector register designated operand
- * @v3: Third vector register designated operand
- * @v4: Fourth vector register designated operand
+ * @v1: Vector register designated operand whose MSB is stored in
+ * RXB bit 0 (instruction bit 36) and whose remaining bits
+ * are stored in instruction bits 8-11.
+ * @v2: Vector register designated operand whose MSB is stored in
+ * RXB bit 1 (instruction bit 37) and whose remaining bits
+ * are stored in instruction bits 12-15.
+ * @v3: Vector register designated operand whose MSB is stored in
+ * RXB bit 2 (instruction bit 38) and whose remaining bits
+ * are stored in instruction bits 16-19.
+ * @v4: Vector register designated operand whose MSB is stored in
+ * RXB bit 3 (instruction bit 39) and whose remaining bits
+ * are stored in instruction bits 32-35.
+ *
+ * Note: In most vector instruction formats [1] V1, V2, V3, and V4 directly
+ * correspond to @v1, @v2, @v3, and @v4. But there are exceptions, such as but
+ * not limited to the vector instruction formats VRR-g, VRR-h, VRS-a, VRS-d,
+ * and VSI.
+ *
+ * [1] IBM z/Architecture Principles of Operation, chapter "Program
+ * Execution", section "Instructions", subsection "Instruction Formats".
*/
.macro RXB rxb v1 v2=0 v3=0 v4=0
\rxb = 0
@@ -223,6 +239,9 @@
* @v2: Second vector register designated operand (for RXB)
* @v3: Third vector register designated operand (for RXB)
* @v4: Fourth vector register designated operand (for RXB)
+ *
+ * Note: For @v1, @v2, @v3, and @v4 also refer to the RXB macro
+ * description for further details.
*/
.macro MRXB m v1 v2=0 v3=0 v4=0
rxb = 0
@@ -238,6 +257,9 @@
* @v2: Second vector register designated operand (for RXB)
* @v3: Third vector register designated operand (for RXB)
* @v4: Fourth vector register designated operand (for RXB)
+ *
+ * Note: For @v1, @v2, @v3, and @v4 also refer to the RXB macro
+ * description for further details.
*/
.macro MRXBOPC m opc v1 v2=0 v3=0 v4=0
MRXB \m, \v1, \v2, \v3, \v4
@@ -350,7 +372,7 @@
VX_NUM v3, \vr
.word 0xE700 | (r1 << 4) | (v3&15)
.word (b2 << 12) | (\disp)
- MRXBOPC \m, 0x21, v3
+ MRXBOPC \m, 0x21, 0, v3
.endm
.macro VLGVB gr, vr, disp, base="%r0"
VLGV \gr, \vr, \disp, \base, 0
@@ -385,6 +407,28 @@
MRXBOPC 0, 0x0E, v1
.endm
+/* VECTOR STORE BYTE REVERSED ELEMENTS */
+ .macro VSTBR vr1, disp, index="%r0", base, m
+ VX_NUM v1, \vr1
+ GR_NUM x2, \index
+ GR_NUM b2, \base
+ .word 0xE600 | ((v1&15) << 4) | (x2&15)
+ .word (b2 << 12) | (\disp)
+ MRXBOPC \m, 0x0E, v1
+.endm
+.macro VSTBRH vr1, disp, index="%r0", base
+ VSTBR \vr1, \disp, \index, \base, 1
+.endm
+.macro VSTBRF vr1, disp, index="%r0", base
+ VSTBR \vr1, \disp, \index, \base, 2
+.endm
+.macro VSTBRG vr1, disp, index="%r0", base
+ VSTBR \vr1, \disp, \index, \base, 3
+.endm
+.macro VSTBRQ vr1, disp, index="%r0", base
+ VSTBR \vr1, \disp, \index, \base, 4
+.endm
+
/* VECTOR STORE MULTIPLE */
.macro VSTM vfrom, vto, disp, base, hint=3
VX_NUM v1, \vfrom
@@ -499,6 +543,25 @@
VMRL \vr1, \vr2, \vr3, 3
.endm
+/* VECTOR LOAD WITH LENGTH */
+.macro VLL v, gr, disp, base
+ VX_NUM v1, \v
+ GR_NUM b2, \base
+ GR_NUM r3, \gr
+ .word 0xE700 | ((v1&15) << 4) | r3
+ .word (b2 << 12) | (\disp)
+ MRXBOPC 0, 0x37, v1
+.endm
+
+/* VECTOR STORE WITH LENGTH */
+.macro VSTL v, gr, disp, base
+ VX_NUM v1, \v
+ GR_NUM b2, \base
+ GR_NUM r3, \gr
+ .word 0xE700 | ((v1&15) << 4) | r3
+ .word (b2 << 12) | (\disp)
+ MRXBOPC 0, 0x3f, v1
+.endm
/* Vector integer instructions */
@@ -512,6 +575,16 @@
MRXBOPC 0, 0x68, v1, v2, v3
.endm
+/* VECTOR CHECKSUM */
+.macro VCKSM vr1, vr2, vr3
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC 0, 0x66, v1, v2, v3
+.endm
+
/* VECTOR EXCLUSIVE OR */
.macro VX vr1, vr2, vr3
VX_NUM v1, \vr1
@@ -678,4 +751,4 @@
.endm
#endif /* __ASSEMBLY__ */
-#endif /* __ASM_S390_VX_INSN_INTERNAL_H */
+#endif /* __ASM_S390_FPU_INSN_ASM_H */
diff --git a/arch/s390/include/asm/fpu-insn.h b/arch/s390/include/asm/fpu-insn.h
new file mode 100644
index 000000000000..f668bffd6dd3
--- /dev/null
+++ b/arch/s390/include/asm/fpu-insn.h
@@ -0,0 +1,479 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Floating Point and Vector Instructions
+ *
+ */
+
+#ifndef __ASM_S390_FPU_INSN_H
+#define __ASM_S390_FPU_INSN_H
+
+#include <asm/fpu-insn-asm.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/instrumented.h>
+#include <asm/asm-extable.h>
+
+asm(".include \"asm/fpu-insn-asm.h\"\n");
+
+/*
+ * Various small helper functions, which can and should be used within
+ * kernel fpu code sections. Each function represents only one floating
+ * point or vector instruction (except for helper functions which require
+ * exception handling).
+ *
+ * This makes it possible to use floating point and vector instructions like
+ * C functions, which has the advantage that all supporting code, such as
+ * loops, can be written in easy to read C code.
+ *
+ * Each of the helper functions provides support for code instrumentation,
+ * e.g. KASAN, so instrumentation is also covered automatically when using
+ * these functions.
+ *
+ * In order to ensure that code generated with the helper functions stays
+ * within kernel fpu sections, which are guarded with kernel_fpu_begin()
+ * and kernel_fpu_end() calls, each function has a mandatory "memory"
+ * barrier.
+ */
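A usage sketch, not part of the patch: byte-wise add two 16-byte blocks with the helpers defined below. The caller must run this inside a kernel_fpu_begin()/kernel_fpu_end() section that saves vector registers 0-2; that interface lives in the reworked asm/fpu.h, which is not shown in this excerpt. The function name is made up.

static void example_vab_block(u8 *dst, const u8 *src)
{
	fpu_vl(0, dst);		/* V0 = 16 bytes at dst */
	fpu_vl(1, src);		/* V1 = 16 bytes at src */
	fpu_vab(2, 0, 1);	/* V2 = V0 + V1, byte elements */
	fpu_vst(2, dst);	/* store V2 back to dst */
}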
+
+static __always_inline void fpu_cefbr(u8 f1, s32 val)
+{
+ asm volatile("cefbr %[f1],%[val]\n"
+ :
+ : [f1] "I" (f1), [val] "d" (val)
+ : "memory");
+}
+
+static __always_inline unsigned long fpu_cgebr(u8 f2, u8 mode)
+{
+ unsigned long val;
+
+ asm volatile("cgebr %[val],%[mode],%[f2]\n"
+ : [val] "=d" (val)
+ : [f2] "I" (f2), [mode] "I" (mode)
+ : "memory");
+ return val;
+}
+
+static __always_inline void fpu_debr(u8 f1, u8 f2)
+{
+ asm volatile("debr %[f1],%[f2]\n"
+ :
+ : [f1] "I" (f1), [f2] "I" (f2)
+ : "memory");
+}
+
+static __always_inline void fpu_ld(unsigned short fpr, freg_t *reg)
+{
+ instrument_read(reg, sizeof(*reg));
+ asm volatile("ld %[fpr],%[reg]\n"
+ :
+ : [fpr] "I" (fpr), [reg] "Q" (reg->ui)
+ : "memory");
+}
+
+static __always_inline void fpu_ldgr(u8 f1, u32 val)
+{
+ asm volatile("ldgr %[f1],%[val]\n"
+ :
+ : [f1] "I" (f1), [val] "d" (val)
+ : "memory");
+}
+
+static __always_inline void fpu_lfpc(unsigned int *fpc)
+{
+ instrument_read(fpc, sizeof(*fpc));
+ asm volatile("lfpc %[fpc]"
+ :
+ : [fpc] "Q" (*fpc)
+ : "memory");
+}
+
+/**
+ * fpu_lfpc_safe - Load floating point control register safely.
+ * @fpc: new value for floating point control register
+ *
+ * Load floating point control register. This may lead to an exception,
+ * since a saved value may have been modified by user space (ptrace,
+ * signal return, kvm registers) to an invalid value. In such a case
+ * set the floating point control register to zero.
+ */
+static inline void fpu_lfpc_safe(unsigned int *fpc)
+{
+ instrument_read(fpc, sizeof(*fpc));
+ asm_inline volatile(
+ " lfpc %[fpc]\n"
+ "0: nopr %%r7\n"
+ EX_TABLE_FPC(0b, 0b)
+ :
+ : [fpc] "Q" (*fpc)
+ : "memory");
+}
+
+static __always_inline void fpu_std(unsigned short fpr, freg_t *reg)
+{
+ instrument_write(reg, sizeof(*reg));
+ asm volatile("std %[fpr],%[reg]\n"
+ : [reg] "=Q" (reg->ui)
+ : [fpr] "I" (fpr)
+ : "memory");
+}
+
+static __always_inline void fpu_sfpc(unsigned int fpc)
+{
+ asm volatile("sfpc %[fpc]"
+ :
+ : [fpc] "d" (fpc)
+ : "memory");
+}
+
+static __always_inline void fpu_stfpc(unsigned int *fpc)
+{
+ instrument_write(fpc, sizeof(*fpc));
+ asm volatile("stfpc %[fpc]"
+ : [fpc] "=Q" (*fpc)
+ :
+ : "memory");
+}
+
+static __always_inline void fpu_vab(u8 v1, u8 v2, u8 v3)
+{
+ asm volatile("VAB %[v1],%[v2],%[v3]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+ : "memory");
+}
+
+static __always_inline void fpu_vcksm(u8 v1, u8 v2, u8 v3)
+{
+ asm volatile("VCKSM %[v1],%[v2],%[v3]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+ : "memory");
+}
+
+static __always_inline void fpu_vesravb(u8 v1, u8 v2, u8 v3)
+{
+ asm volatile("VESRAVB %[v1],%[v2],%[v3]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+ : "memory");
+}
+
+static __always_inline void fpu_vgfmag(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+ asm volatile("VGFMAG %[v1],%[v2],%[v3],%[v4]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3), [v4] "I" (v4)
+ : "memory");
+}
+
+static __always_inline void fpu_vgfmg(u8 v1, u8 v2, u8 v3)
+{
+ asm volatile("VGFMG %[v1],%[v2],%[v3]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+ : "memory");
+}
+
+#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
+
+static __always_inline void fpu_vl(u8 v1, const void *vxr)
+{
+ instrument_read(vxr, sizeof(__vector128));
+ asm volatile("VL %[v1],%O[vxr],,%R[vxr]\n"
+ :
+ : [vxr] "Q" (*(__vector128 *)vxr),
+ [v1] "I" (v1)
+ : "memory");
+}
+
+#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
+
+static __always_inline void fpu_vl(u8 v1, const void *vxr)
+{
+ instrument_read(vxr, sizeof(__vector128));
+ asm volatile(
+ " la 1,%[vxr]\n"
+ " VL %[v1],0,,1\n"
+ :
+ : [vxr] "R" (*(__vector128 *)vxr),
+ [v1] "I" (v1)
+ : "memory", "1");
+}
+
+#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
+
+static __always_inline void fpu_vleib(u8 v, s16 val, u8 index)
+{
+ asm volatile("VLEIB %[v],%[val],%[index]"
+ :
+ : [v] "I" (v), [val] "K" (val), [index] "I" (index)
+ : "memory");
+}
+
+static __always_inline void fpu_vleig(u8 v, s16 val, u8 index)
+{
+ asm volatile("VLEIG %[v],%[val],%[index]"
+ :
+ : [v] "I" (v), [val] "K" (val), [index] "I" (index)
+ : "memory");
+}
+
+static __always_inline u64 fpu_vlgvf(u8 v, u16 index)
+{
+ u64 val;
+
+ asm volatile("VLGVF %[val],%[v],%[index]"
+ : [val] "=d" (val)
+ : [v] "I" (v), [index] "L" (index)
+ : "memory");
+ return val;
+}
+
+#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
+
+static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
+{
+ unsigned int size;
+
+ size = min(index + 1, sizeof(__vector128));
+ instrument_read(vxr, size);
+ asm volatile("VLL %[v1],%[index],%O[vxr],%R[vxr]\n"
+ :
+ : [vxr] "Q" (*(u8 *)vxr),
+ [index] "d" (index),
+ [v1] "I" (v1)
+ : "memory");
+}
+
+#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
+
+static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
+{
+ unsigned int size;
+
+ size = min(index + 1, sizeof(__vector128));
+ instrument_read(vxr, size);
+ asm volatile(
+ " la 1,%[vxr]\n"
+ " VLL %[v1],%[index],0,1\n"
+ :
+ : [vxr] "R" (*(u8 *)vxr),
+ [index] "d" (index),
+ [v1] "I" (v1)
+ : "memory", "1");
+}
+
+#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
+
+#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
+
+#define fpu_vlm(_v1, _v3, _vxrs) \
+({ \
+ unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \
+ struct { \
+ __vector128 _v[(_v3) - (_v1) + 1]; \
+ } *_v = (void *)(_vxrs); \
+ \
+ instrument_read(_v, size); \
+ asm volatile("VLM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
+ : \
+ : [vxrs] "Q" (*_v), \
+ [v1] "I" (_v1), [v3] "I" (_v3) \
+ : "memory"); \
+ (_v3) - (_v1) + 1; \
+})
+
+#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
+
+#define fpu_vlm(_v1, _v3, _vxrs) \
+({ \
+ unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \
+ struct { \
+ __vector128 _v[(_v3) - (_v1) + 1]; \
+ } *_v = (void *)(_vxrs); \
+ \
+ instrument_read(_v, size); \
+ asm volatile( \
+ " la 1,%[vxrs]\n" \
+ " VLM %[v1],%[v3],0,1\n" \
+ : \
+ : [vxrs] "R" (*_v), \
+ [v1] "I" (_v1), [v3] "I" (_v3) \
+ : "memory", "1"); \
+ (_v3) - (_v1) + 1; \
+})
+
+#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
+
+static __always_inline void fpu_vlr(u8 v1, u8 v2)
+{
+ asm volatile("VLR %[v1],%[v2]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2)
+ : "memory");
+}
+
+static __always_inline void fpu_vlvgf(u8 v, u32 val, u16 index)
+{
+ asm volatile("VLVGF %[v],%[val],%[index]"
+ :
+ : [v] "I" (v), [val] "d" (val), [index] "L" (index)
+ : "memory");
+}
+
+static __always_inline void fpu_vn(u8 v1, u8 v2, u8 v3)
+{
+ asm volatile("VN %[v1],%[v2],%[v3]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+ : "memory");
+}
+
+static __always_inline void fpu_vperm(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+ asm volatile("VPERM %[v1],%[v2],%[v3],%[v4]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3), [v4] "I" (v4)
+ : "memory");
+}
+
+static __always_inline void fpu_vrepib(u8 v1, s16 i2)
+{
+ asm volatile("VREPIB %[v1],%[i2]"
+ :
+ : [v1] "I" (v1), [i2] "K" (i2)
+ : "memory");
+}
+
+static __always_inline void fpu_vsrlb(u8 v1, u8 v2, u8 v3)
+{
+ asm volatile("VSRLB %[v1],%[v2],%[v3]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+ : "memory");
+}
+
+#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
+
+static __always_inline void fpu_vst(u8 v1, const void *vxr)
+{
+ instrument_write(vxr, sizeof(__vector128));
+ asm volatile("VST %[v1],%O[vxr],,%R[vxr]\n"
+ : [vxr] "=Q" (*(__vector128 *)vxr)
+ : [v1] "I" (v1)
+ : "memory");
+}
+
+#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
+
+static __always_inline void fpu_vst(u8 v1, const void *vxr)
+{
+ instrument_write(vxr, sizeof(__vector128));
+ asm volatile(
+ " la 1,%[vxr]\n"
+ " VST %[v1],0,,1\n"
+ : [vxr] "=R" (*(__vector128 *)vxr)
+ : [v1] "I" (v1)
+ : "memory", "1");
+}
+
+#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
+
+#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
+
+static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
+{
+ unsigned int size;
+
+ size = min(index + 1, sizeof(__vector128));
+ instrument_write(vxr, size);
+ asm volatile("VSTL %[v1],%[index],%O[vxr],%R[vxr]\n"
+ : [vxr] "=Q" (*(u8 *)vxr)
+ : [index] "d" (index), [v1] "I" (v1)
+ : "memory");
+}
+
+#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
+
+static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
+{
+ unsigned int size;
+
+ size = min(index + 1, sizeof(__vector128));
+ instrument_write(vxr, size);
+ asm volatile(
+ " la 1,%[vxr]\n"
+ " VSTL %[v1],%[index],0,1\n"
+ : [vxr] "=R" (*(u8 *)vxr)
+ : [index] "d" (index), [v1] "I" (v1)
+ : "memory", "1");
+}
+
+#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
+
+#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
+
+#define fpu_vstm(_v1, _v3, _vxrs) \
+({ \
+ unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \
+ struct { \
+ __vector128 _v[(_v3) - (_v1) + 1]; \
+ } *_v = (void *)(_vxrs); \
+ \
+ instrument_write(_v, size); \
+ asm volatile("VSTM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
+ : [vxrs] "=Q" (*_v) \
+ : [v1] "I" (_v1), [v3] "I" (_v3) \
+ : "memory"); \
+ (_v3) - (_v1) + 1; \
+})
+
+#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
+
+#define fpu_vstm(_v1, _v3, _vxrs) \
+({ \
+ unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \
+ struct { \
+ __vector128 _v[(_v3) - (_v1) + 1]; \
+ } *_v = (void *)(_vxrs); \
+ \
+ instrument_write(_v, size); \
+ asm volatile( \
+ " la 1,%[vxrs]\n" \
+ " VSTM %[v1],%[v3],0,1\n" \
+ : [vxrs] "=R" (*_v) \
+ : [v1] "I" (_v1), [v3] "I" (_v3) \
+ : "memory", "1"); \
+ (_v3) - (_v1) + 1; \
+})
+
+#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
+
+static __always_inline void fpu_vupllf(u8 v1, u8 v2)
+{
+ asm volatile("VUPLLF %[v1],%[v2]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2)
+ : "memory");
+}
+
+static __always_inline void fpu_vx(u8 v1, u8 v2, u8 v3)
+{
+ asm volatile("VX %[v1],%[v2],%[v3]"
+ :
+ : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
+ : "memory");
+}
+
+static __always_inline void fpu_vzero(u8 v)
+{
+ asm volatile("VZERO %[v]"
+ :
+ : [v] "I" (v)
+ : "memory");
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_S390_FPU_INSN_H */
diff --git a/arch/s390/include/asm/fpu-types.h b/arch/s390/include/asm/fpu-types.h
new file mode 100644
index 000000000000..8d58d5a95399
--- /dev/null
+++ b/arch/s390/include/asm/fpu-types.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * FPU data structures
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+
+#ifndef _ASM_S390_FPU_TYPES_H
+#define _ASM_S390_FPU_TYPES_H
+
+#include <asm/sigcontext.h>
+
+struct fpu {
+ u32 fpc;
+ __vector128 vxrs[__NUM_VXRS] __aligned(8);
+};
+
+struct kernel_fpu_hdr {
+ int mask;
+ u32 fpc;
+};
+
+struct kernel_fpu {
+ struct kernel_fpu_hdr hdr;
+ __vector128 vxrs[] __aligned(8);
+};
+
+#define KERNEL_FPU_STRUCT(vxr_size) \
+struct kernel_fpu_##vxr_size { \
+ struct kernel_fpu_hdr hdr; \
+ __vector128 vxrs[vxr_size] __aligned(8); \
+}
+
+KERNEL_FPU_STRUCT(8);
+KERNEL_FPU_STRUCT(16);
+KERNEL_FPU_STRUCT(32);
+
+#define DECLARE_KERNEL_FPU_ONSTACK(vxr_size, name) \
+ struct kernel_fpu_##vxr_size name __uninitialized
+
+#define DECLARE_KERNEL_FPU_ONSTACK8(name) \
+ DECLARE_KERNEL_FPU_ONSTACK(8, name)
+
+#define DECLARE_KERNEL_FPU_ONSTACK16(name) \
+ DECLARE_KERNEL_FPU_ONSTACK(16, name)
+
+#define DECLARE_KERNEL_FPU_ONSTACK32(name) \
+ DECLARE_KERNEL_FPU_ONSTACK(32, name)
+
+#endif /* _ASM_S390_FPU_TYPES_H */
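
For illustration only (not part of the patch): DECLARE_KERNEL_FPU_ONSTACK16(vxstate) selects the sized type generated by KERNEL_FPU_STRUCT(16) above, so it expands roughly to the following on-stack declaration.

	struct kernel_fpu_16 {
		struct kernel_fpu_hdr hdr;
		__vector128 vxrs[16] __aligned(8);
	} vxstate __uninitialized;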
diff --git a/arch/s390/include/asm/fpu.h b/arch/s390/include/asm/fpu.h
new file mode 100644
index 000000000000..c84cb33913e2
--- /dev/null
+++ b/arch/s390/include/asm/fpu.h
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * In-kernel FPU support functions
+ *
+ *
+ * Consider these guidelines before using in-kernel FPU functions:
+ *
+ * 1. Use kernel_fpu_begin() and kernel_fpu_end() to enclose all in-kernel
+ * use of floating-point or vector registers and instructions.
+ *
+ * 2. For kernel_fpu_begin(), specify the vector register range you want to
+ * use with the KERNEL_VXR_* constants. Consider these usage guidelines:
+ *
+ * a) If your function typically runs in process-context, use the lower
+ * half of the vector registers, for example, specify KERNEL_VXR_LOW.
+ * b) If your function typically runs in soft-irq or hard-irq context,
+ * prefer using the upper half of the vector registers, for example,
+ * specify KERNEL_VXR_HIGH.
+ *
+ * If you adhere to these guidelines, an interrupted process context
+ * does not need to save and restore vector registers because the
+ * register ranges are disjoint.
+ *
+ * Also note that the __kernel_fpu_begin()/__kernel_fpu_end() functions
+ * include logic to save and restore up to 16 vector registers at once.
+ *
+ * 3. You can nest kernel_fpu_begin()/kernel_fpu_end() by using different
+ * struct kernel_fpu states. Vector registers that are in use by outer
+ * levels are saved and restored. You can minimize the save and restore
+ * effort by choosing disjoint vector register ranges.
+ *
+ * 4. To use vector floating-point instructions, specify the KERNEL_FPC
+ * flag to save and restore floating-point controls in addition to any
+ * vector register range.
+ *
+ * 5. To use floating-point registers and instructions only, specify the
+ * KERNEL_FPR flag. This flag triggers a save and restore of vector
+ * registers V0 to V15 and floating-point controls.
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
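
A minimal usage sketch, illustrative only and not part of the patch (the caller and function name are made up): a function in some other file that follows guideline 2a and uses the lower half of the vector registers from process context. kernel_fpu_begin()/kernel_fpu_end() are defined later in this header and DECLARE_KERNEL_FPU_ONSTACK16() in asm/fpu-types.h above.

	#include <asm/fpu.h>

	static void vxr_low_example(void)
	{
		DECLARE_KERNEL_FPU_ONSTACK16(vxstate);

		kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW);
		/* Only V0..V15 may be used within this section. */
		fpu_vzero(0);
		fpu_vx(1, 0, 0);
		kernel_fpu_end(&vxstate, KERNEL_VXR_LOW);
	}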
+
+#ifndef _ASM_S390_FPU_H
+#define _ASM_S390_FPU_H
+
+#include <linux/processor.h>
+#include <linux/preempt.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <asm/sigcontext.h>
+#include <asm/fpu-types.h>
+#include <asm/fpu-insn.h>
+#include <asm/facility.h>
+
+static inline bool cpu_has_vx(void)
+{
+ return likely(test_facility(129));
+}
+
+enum {
+ KERNEL_FPC_BIT = 0,
+ KERNEL_VXR_V0V7_BIT,
+ KERNEL_VXR_V8V15_BIT,
+ KERNEL_VXR_V16V23_BIT,
+ KERNEL_VXR_V24V31_BIT,
+};
+
+#define KERNEL_FPC BIT(KERNEL_FPC_BIT)
+#define KERNEL_VXR_V0V7 BIT(KERNEL_VXR_V0V7_BIT)
+#define KERNEL_VXR_V8V15 BIT(KERNEL_VXR_V8V15_BIT)
+#define KERNEL_VXR_V16V23 BIT(KERNEL_VXR_V16V23_BIT)
+#define KERNEL_VXR_V24V31 BIT(KERNEL_VXR_V24V31_BIT)
+
+#define KERNEL_VXR_LOW (KERNEL_VXR_V0V7 | KERNEL_VXR_V8V15)
+#define KERNEL_VXR_MID (KERNEL_VXR_V8V15 | KERNEL_VXR_V16V23)
+#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23 | KERNEL_VXR_V24V31)
+
+#define KERNEL_VXR (KERNEL_VXR_LOW | KERNEL_VXR_HIGH)
+#define KERNEL_FPR (KERNEL_FPC | KERNEL_VXR_LOW)
+
+void load_fpu_state(struct fpu *state, int flags);
+void save_fpu_state(struct fpu *state, int flags);
+void __kernel_fpu_begin(struct kernel_fpu *state, int flags);
+void __kernel_fpu_end(struct kernel_fpu *state, int flags);
+
+static __always_inline void save_vx_regs(__vector128 *vxrs)
+{
+ fpu_vstm(0, 15, &vxrs[0]);
+ fpu_vstm(16, 31, &vxrs[16]);
+}
+
+static __always_inline void load_vx_regs(__vector128 *vxrs)
+{
+ fpu_vlm(0, 15, &vxrs[0]);
+ fpu_vlm(16, 31, &vxrs[16]);
+}
+
+static __always_inline void __save_fp_regs(freg_t *fprs, unsigned int offset)
+{
+ fpu_std(0, &fprs[0 * offset]);
+ fpu_std(1, &fprs[1 * offset]);
+ fpu_std(2, &fprs[2 * offset]);
+ fpu_std(3, &fprs[3 * offset]);
+ fpu_std(4, &fprs[4 * offset]);
+ fpu_std(5, &fprs[5 * offset]);
+ fpu_std(6, &fprs[6 * offset]);
+ fpu_std(7, &fprs[7 * offset]);
+ fpu_std(8, &fprs[8 * offset]);
+ fpu_std(9, &fprs[9 * offset]);
+ fpu_std(10, &fprs[10 * offset]);
+ fpu_std(11, &fprs[11 * offset]);
+ fpu_std(12, &fprs[12 * offset]);
+ fpu_std(13, &fprs[13 * offset]);
+ fpu_std(14, &fprs[14 * offset]);
+ fpu_std(15, &fprs[15 * offset]);
+}
+
+static __always_inline void __load_fp_regs(freg_t *fprs, unsigned int offset)
+{
+ fpu_ld(0, &fprs[0 * offset]);
+ fpu_ld(1, &fprs[1 * offset]);
+ fpu_ld(2, &fprs[2 * offset]);
+ fpu_ld(3, &fprs[3 * offset]);
+ fpu_ld(4, &fprs[4 * offset]);
+ fpu_ld(5, &fprs[5 * offset]);
+ fpu_ld(6, &fprs[6 * offset]);
+ fpu_ld(7, &fprs[7 * offset]);
+ fpu_ld(8, &fprs[8 * offset]);
+ fpu_ld(9, &fprs[9 * offset]);
+ fpu_ld(10, &fprs[10 * offset]);
+ fpu_ld(11, &fprs[11 * offset]);
+ fpu_ld(12, &fprs[12 * offset]);
+ fpu_ld(13, &fprs[13 * offset]);
+ fpu_ld(14, &fprs[14 * offset]);
+ fpu_ld(15, &fprs[15 * offset]);
+}
+
+static __always_inline void save_fp_regs(freg_t *fprs)
+{
+ __save_fp_regs(fprs, sizeof(freg_t) / sizeof(freg_t));
+}
+
+static __always_inline void load_fp_regs(freg_t *fprs)
+{
+ __load_fp_regs(fprs, sizeof(freg_t) / sizeof(freg_t));
+}
+
+static __always_inline void save_fp_regs_vx(__vector128 *vxrs)
+{
+ freg_t *fprs = (freg_t *)&vxrs[0].high;
+
+ __save_fp_regs(fprs, sizeof(__vector128) / sizeof(freg_t));
+}
+
+static __always_inline void load_fp_regs_vx(__vector128 *vxrs)
+{
+ freg_t *fprs = (freg_t *)&vxrs[0].high;
+
+ __load_fp_regs(fprs, sizeof(__vector128) / sizeof(freg_t));
+}
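
An illustrative note, not from the patch: the offset argument is the stride between consecutive floating-point register slots, in freg_t units. It is 1 for a plain freg_t array and sizeof(__vector128)/sizeof(freg_t) == 2 when the registers are embedded in the high halves of a __vector128 array, so for example fpu_std(1, &fprs[2]) stores FPR1 into vxrs[1].high. The sequence is written out rather than looped because, like the other fpu_* helpers in this file, the register number is presumably an immediate operand in the underlying inline assembly and must be a compile-time constant.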
+
+static inline void load_user_fpu_regs(void)
+{
+ struct thread_struct *thread = &current->thread;
+
+ if (!thread->ufpu_flags)
+ return;
+ load_fpu_state(&thread->ufpu, thread->ufpu_flags);
+ thread->ufpu_flags = 0;
+}
+
+static __always_inline void __save_user_fpu_regs(struct thread_struct *thread, int flags)
+{
+ save_fpu_state(&thread->ufpu, flags);
+ __atomic_or(flags, &thread->ufpu_flags);
+}
+
+static inline void save_user_fpu_regs(void)
+{
+ struct thread_struct *thread = &current->thread;
+ int mask, flags;
+
+ mask = __atomic_or(KERNEL_FPC | KERNEL_VXR, &thread->kfpu_flags);
+ flags = ~READ_ONCE(thread->ufpu_flags) & (KERNEL_FPC | KERNEL_VXR);
+ if (flags)
+ __save_user_fpu_regs(thread, flags);
+ barrier();
+ WRITE_ONCE(thread->kfpu_flags, mask);
+}
+
+static __always_inline void _kernel_fpu_begin(struct kernel_fpu *state, int flags)
+{
+ struct thread_struct *thread = &current->thread;
+ int mask, uflags;
+
+ mask = __atomic_or(flags, &thread->kfpu_flags);
+ state->hdr.mask = mask;
+ uflags = READ_ONCE(thread->ufpu_flags);
+ if ((uflags & flags) != flags)
+ __save_user_fpu_regs(thread, ~uflags & flags);
+ if (mask & flags)
+ __kernel_fpu_begin(state, flags);
+}
+
+static __always_inline void _kernel_fpu_end(struct kernel_fpu *state, int flags)
+{
+ int mask = state->hdr.mask;
+
+ if (mask & flags)
+ __kernel_fpu_end(state, flags);
+ barrier();
+ WRITE_ONCE(current->thread.kfpu_flags, mask);
+}
+
+void __kernel_fpu_invalid_size(void);
+
+static __always_inline void kernel_fpu_check_size(int flags, unsigned int size)
+{
+ unsigned int cnt = 0;
+
+ if (flags & KERNEL_VXR_V0V7)
+ cnt += 8;
+ if (flags & KERNEL_VXR_V8V15)
+ cnt += 8;
+ if (flags & KERNEL_VXR_V16V23)
+ cnt += 8;
+ if (flags & KERNEL_VXR_V24V31)
+ cnt += 8;
+ if (cnt != size)
+ __kernel_fpu_invalid_size();
+}
+
+#define kernel_fpu_begin(state, flags) \
+{ \
+ typeof(state) s = (state); \
+ int _flags = (flags); \
+ \
+ kernel_fpu_check_size(_flags, ARRAY_SIZE(s->vxrs)); \
+ _kernel_fpu_begin((struct kernel_fpu *)s, _flags); \
+}
+
+#define kernel_fpu_end(state, flags) \
+{ \
+ typeof(state) s = (state); \
+ int _flags = (flags); \
+ \
+ kernel_fpu_check_size(_flags, ARRAY_SIZE(s->vxrs)); \
+ _kernel_fpu_end((struct kernel_fpu *)s, _flags); \
+}
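
An illustrative sketch of the nesting described in guideline 3 above (not part of the patch; function names are made up). The inner section uses a register range disjoint from the outer one, so no vector registers of the outer section need to be saved and restored.

	static void inner(void)
	{
		DECLARE_KERNEL_FPU_ONSTACK16(st);

		kernel_fpu_begin(&st, KERNEL_VXR_HIGH);	/* V16..V31 */
		fpu_vzero(16);
		kernel_fpu_end(&st, KERNEL_VXR_HIGH);
	}

	static void outer(void)
	{
		DECLARE_KERNEL_FPU_ONSTACK16(st);

		kernel_fpu_begin(&st, KERNEL_VXR_LOW);	/* V0..V15 */
		inner();	/* disjoint range: nothing extra to save */
		kernel_fpu_end(&st, KERNEL_VXR_LOW);
	}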
+
+static inline void save_kernel_fpu_regs(struct thread_struct *thread)
+{
+ if (!thread->kfpu_flags)
+ return;
+ save_fpu_state(&thread->kfpu, thread->kfpu_flags);
+}
+
+static inline void restore_kernel_fpu_regs(struct thread_struct *thread)
+{
+ if (!thread->kfpu_flags)
+ return;
+ load_fpu_state(&thread->kfpu, thread->kfpu_flags);
+}
+
+static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs)
+{
+ int i;
+
+ for (i = 0; i < __NUM_FPRS; i++)
+ fprs[i].ui = vxrs[i].high;
+}
+
+static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
+{
+ int i;
+
+ for (i = 0; i < __NUM_FPRS; i++)
+ vxrs[i].high = fprs[i].ui;
+}
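
A brief note for context, not from the patch: on machines with the vector facility, floating-point register i is architecturally the leftmost doubleword of vector register i, which is why these converters copy vxrs[i].high rather than using a separate floating-point save area.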
+
+static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
+{
+ fpregs->pad = 0;
+ fpregs->fpc = fpu->fpc;
+ convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
+}
+
+static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
+{
+ fpu->fpc = fpregs->fpc;
+ convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
+}
+
+#endif /* _ASM_S390_FPU_H */
diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
deleted file mode 100644
index d6ca8bc6ca68..000000000000
--- a/arch/s390/include/asm/fpu/api.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * In-kernel FPU support functions
- *
- *
- * Consider these guidelines before using in-kernel FPU functions:
- *
- * 1. Use kernel_fpu_begin() and kernel_fpu_end() to enclose all in-kernel
- * use of floating-point or vector registers and instructions.
- *
- * 2. For kernel_fpu_begin(), specify the vector register range you want to
- * use with the KERNEL_VXR_* constants. Consider these usage guidelines:
- *
- * a) If your function typically runs in process-context, use the lower
- * half of the vector registers, for example, specify KERNEL_VXR_LOW.
- * b) If your function typically runs in soft-irq or hard-irq context,
- * prefer using the upper half of the vector registers, for example,
- * specify KERNEL_VXR_HIGH.
- *
- * If you adhere to these guidelines, an interrupted process context
- * does not require to save and restore vector registers because of
- * disjoint register ranges.
- *
- * Also note that the __kernel_fpu_begin()/__kernel_fpu_end() functions
- * includes logic to save and restore up to 16 vector registers at once.
- *
- * 3. You can nest kernel_fpu_begin()/kernel_fpu_end() by using different
- * struct kernel_fpu states. Vector registers that are in use by outer
- * levels are saved and restored. You can minimize the save and restore
- * effort by choosing disjoint vector register ranges.
- *
- * 5. To use vector floating-point instructions, specify the KERNEL_FPC
- * flag to save and restore floating-point controls in addition to any
- * vector register range.
- *
- * 6. To use floating-point registers and instructions only, specify the
- * KERNEL_FPR flag. This flag triggers a save and restore of vector
- * registers V0 to V15 and floating-point controls.
- *
- * Copyright IBM Corp. 2015
- * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- */
-
-#ifndef _ASM_S390_FPU_API_H
-#define _ASM_S390_FPU_API_H
-
-#include <linux/preempt.h>
-#include <asm/asm-extable.h>
-#include <asm/fpu/internal.h>
-
-void save_fpu_regs(void);
-void load_fpu_regs(void);
-void __load_fpu_regs(void);
-
-/**
- * sfpc_safe - Set floating point control register safely.
- * @fpc: new value for floating point control register
- *
- * Set floating point control register. This may lead to an exception,
- * since a saved value may have been modified by user space (ptrace,
- * signal return, kvm registers) to an invalid value. In such a case
- * set the floating point control register to zero.
- */
-static inline void sfpc_safe(u32 fpc)
-{
- asm volatile("\n"
- "0: sfpc %[fpc]\n"
- "1: nopr %%r7\n"
- ".pushsection .fixup, \"ax\"\n"
- "2: lghi %[fpc],0\n"
- " jg 0b\n"
- ".popsection\n"
- EX_TABLE(1b, 2b)
- : [fpc] "+d" (fpc)
- : : "memory");
-}
-
-#define KERNEL_FPC 1
-#define KERNEL_VXR_V0V7 2
-#define KERNEL_VXR_V8V15 4
-#define KERNEL_VXR_V16V23 8
-#define KERNEL_VXR_V24V31 16
-
-#define KERNEL_VXR_LOW (KERNEL_VXR_V0V7|KERNEL_VXR_V8V15)
-#define KERNEL_VXR_MID (KERNEL_VXR_V8V15|KERNEL_VXR_V16V23)
-#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)
-
-#define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
-#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_LOW)
-
-struct kernel_fpu;
-
-/*
- * Note the functions below must be called with preemption disabled.
- * Do not enable preemption before calling __kernel_fpu_end() to prevent
- * an corruption of an existing kernel FPU state.
- *
- * Prefer using the kernel_fpu_begin()/kernel_fpu_end() pair of functions.
- */
-void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags);
-void __kernel_fpu_end(struct kernel_fpu *state, u32 flags);
-
-
-static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
-{
- preempt_disable();
- state->mask = S390_lowcore.fpu_flags;
- if (!test_cpu_flag(CIF_FPU))
- /* Save user space FPU state and register contents */
- save_fpu_regs();
- else if (state->mask & flags)
- /* Save FPU/vector register in-use by the kernel */
- __kernel_fpu_begin(state, flags);
- S390_lowcore.fpu_flags |= flags;
-}
-
-static inline void kernel_fpu_end(struct kernel_fpu *state, u32 flags)
-{
- S390_lowcore.fpu_flags = state->mask;
- if (state->mask & flags)
- /* Restore FPU/vector register in-use by the kernel */
- __kernel_fpu_end(state, flags);
- preempt_enable();
-}
-
-#endif /* _ASM_S390_FPU_API_H */
diff --git a/arch/s390/include/asm/fpu/internal.h b/arch/s390/include/asm/fpu/internal.h
deleted file mode 100644
index d511c4cf5afb..000000000000
--- a/arch/s390/include/asm/fpu/internal.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * FPU state and register content conversion primitives
- *
- * Copyright IBM Corp. 2015
- * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- */
-
-#ifndef _ASM_S390_FPU_INTERNAL_H
-#define _ASM_S390_FPU_INTERNAL_H
-
-#include <linux/string.h>
-#include <asm/facility.h>
-#include <asm/fpu/types.h>
-
-static inline bool cpu_has_vx(void)
-{
- return likely(test_facility(129));
-}
-
-static inline void save_vx_regs(__vector128 *vxrs)
-{
- asm volatile(
- " la 1,%0\n"
- " .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
- " .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
- : "=Q" (*(struct vx_array *) vxrs) : : "1");
-}
-
-static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs)
-{
- int i;
-
- for (i = 0; i < __NUM_FPRS; i++)
- fprs[i].ui = vxrs[i].high;
-}
-
-static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
-{
- int i;
-
- for (i = 0; i < __NUM_FPRS; i++)
- vxrs[i].high = fprs[i].ui;
-}
-
-static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
-{
- fpregs->pad = 0;
- fpregs->fpc = fpu->fpc;
- if (cpu_has_vx())
- convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
- else
- memcpy((freg_t *)&fpregs->fprs, fpu->fprs,
- sizeof(fpregs->fprs));
-}
-
-static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
-{
- fpu->fpc = fpregs->fpc;
- if (cpu_has_vx())
- convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
- else
- memcpy(fpu->fprs, (freg_t *)&fpregs->fprs,
- sizeof(fpregs->fprs));
-}
-
-#endif /* _ASM_S390_FPU_INTERNAL_H */
diff --git a/arch/s390/include/asm/fpu/types.h b/arch/s390/include/asm/fpu/types.h
deleted file mode 100644
index d889e9436865..000000000000
--- a/arch/s390/include/asm/fpu/types.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * FPU data structures
- *
- * Copyright IBM Corp. 2015
- * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- */
-
-#ifndef _ASM_S390_FPU_TYPES_H
-#define _ASM_S390_FPU_TYPES_H
-
-#include <asm/sigcontext.h>
-
-struct fpu {
- __u32 fpc; /* Floating-point control */
- void *regs; /* Pointer to the current save area */
- union {
- /* Floating-point register save area */
- freg_t fprs[__NUM_FPRS];
- /* Vector register save area */
- __vector128 vxrs[__NUM_VXRS];
- };
-};
-
-/* VX array structure for address operand constraints in inline assemblies */
-struct vx_array { __vector128 _[__NUM_VXRS]; };
-
-/* In-kernel FPU state structure */
-struct kernel_fpu {
- u32 mask;
- u32 fpc;
- union {
- freg_t fprs[__NUM_FPRS];
- __vector128 vxrs[__NUM_VXRS];
- };
-};
-
-#endif /* _ASM_S390_FPU_TYPES_H */
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 5a82b08f03cd..185331e91f83 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -2,18 +2,28 @@
#ifndef _ASM_S390_FTRACE_H
#define _ASM_S390_FTRACE_H
-#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#define ARCH_SUPPORTS_FTRACE_OPS 1
#define MCOUNT_INSN_SIZE 6
#ifndef __ASSEMBLY__
+#include <asm/stacktrace.h>
-#ifdef CONFIG_CC_IS_CLANG
-/* https://bugs.llvm.org/show_bug.cgi?id=41424 */
-#define ftrace_return_address(n) 0UL
-#else
-#define ftrace_return_address(n) __builtin_return_address(n)
-#endif
+static __always_inline unsigned long return_address(unsigned int n)
+{
+ struct stack_frame *sf;
+
+ if (!n)
+ return (unsigned long)__builtin_return_address(0);
+
+ sf = (struct stack_frame *)current_frame_address();
+ do {
+ sf = (struct stack_frame *)sf->back_chain;
+ if (!sf)
+ return 0;
+ } while (--n);
+ return sf->gprs[8];
+}
+#define ftrace_return_address(n) return_address(n)
void ftrace_caller(void);
@@ -29,6 +39,7 @@ struct dyn_arch_ftrace { };
struct module;
struct dyn_ftrace;
+struct ftrace_ops;
bool ftrace_need_init_nop(void);
#define ftrace_need_init_nop ftrace_need_init_nop
@@ -40,62 +51,44 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
}
+#define ftrace_get_symaddr(fentry_ip) ((unsigned long)(fentry_ip))
-struct ftrace_regs {
- struct pt_regs regs;
-};
+#include <linux/ftrace_regs.h>
static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
{
- struct pt_regs *regs = &fregs->regs;
+ struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;
if (test_pt_regs_flag(regs, PIF_FTRACE_FULL_REGS))
return regs;
return NULL;
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-struct fgraph_ret_regs {
- unsigned long gpr2;
- unsigned long fp;
-};
-
-static __always_inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
-{
- return ret_regs->gpr2;
-}
-
-static __always_inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
+static __always_inline void
+ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
+ unsigned long ip)
{
- return ret_regs->fp;
+ arch_ftrace_regs(fregs)->regs.psw.addr = ip;
}
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#undef ftrace_regs_get_frame_pointer
static __always_inline unsigned long
-ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs)
+ftrace_regs_get_frame_pointer(struct ftrace_regs *fregs)
{
- return fregs->regs.psw.addr;
+ return ftrace_regs_get_stack_pointer(fregs);
}
-static __always_inline void
-ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
- unsigned long ip)
+static __always_inline unsigned long
+ftrace_regs_get_return_address(const struct ftrace_regs *fregs)
{
- fregs->regs.psw.addr = ip;
+ return arch_ftrace_regs(fregs)->regs.gprs[14];
}
-#define ftrace_regs_get_argument(fregs, n) \
- regs_get_kernel_argument(&(fregs)->regs, n)
-#define ftrace_regs_get_stack_pointer(fregs) \
- kernel_stack_pointer(&(fregs)->regs)
-#define ftrace_regs_return_value(fregs) \
- regs_return_value(&(fregs)->regs)
-#define ftrace_regs_set_return_value(fregs, ret) \
- regs_set_return_value(&(fregs)->regs, ret)
-#define ftrace_override_function_with_return(fregs) \
- override_function_with_return(&(fregs)->regs)
-#define ftrace_regs_query_register_offset(name) \
- regs_query_register_offset(name)
+#define arch_ftrace_fill_perf_regs(fregs, _regs) do { \
+ (_regs)->psw.mask = 0; \
+ (_regs)->psw.addr = arch_ftrace_regs(fregs)->regs.psw.addr; \
+ (_regs)->gprs[15] = arch_ftrace_regs(fregs)->regs.gprs[15]; \
+ } while (0)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
@@ -107,7 +100,7 @@ ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
*/
static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr)
{
- struct pt_regs *regs = &fregs->regs;
+ struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;
regs->orig_gpr2 = addr;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
@@ -137,6 +130,10 @@ static inline bool arch_syscall_match_sym_name(const char *sym,
return !strcmp(sym + 7, name) || !strcmp(sym + 8, name);
}
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
+#define ftrace_graph_func ftrace_graph_func
+
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_FUNCTION_TRACER
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index eaeaeb3ff0be..f5781794356b 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -2,80 +2,95 @@
#ifndef _ASM_S390_FUTEX_H
#define _ASM_S390_FUTEX_H
+#include <linux/instrumented.h>
#include <linux/uaccess.h>
#include <linux/futex.h>
#include <asm/asm-extable.h>
#include <asm/mmu_context.h>
#include <asm/errno.h>
-#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
- asm volatile( \
- " sacf 256\n" \
- "0: l %1,0(%6)\n" \
- "1:"insn \
- "2: cs %1,%2,0(%6)\n" \
- "3: jl 1b\n" \
- " lhi %0,0\n" \
- "4: sacf 768\n" \
- EX_TABLE(0b,4b) EX_TABLE(1b,4b) \
- EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
- : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
- "=m" (*uaddr) \
- : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
- "m" (*uaddr) : "cc");
+#define FUTEX_OP_FUNC(name, insn) \
+static uaccess_kmsan_or_inline int \
+__futex_atomic_##name(int oparg, int *old, u32 __user *uaddr) \
+{ \
+ int rc, new; \
+ \
+ instrument_copy_from_user_before(old, uaddr, sizeof(*old)); \
+ asm_inline volatile( \
+ " sacf 256\n" \
+ "0: l %[old],%[uaddr]\n" \
+ "1:"insn \
+ "2: cs %[old],%[new],%[uaddr]\n" \
+ "3: jl 1b\n" \
+ " lhi %[rc],0\n" \
+ "4: sacf 768\n" \
+ EX_TABLE_UA_FAULT(0b, 4b, %[rc]) \
+ EX_TABLE_UA_FAULT(1b, 4b, %[rc]) \
+ EX_TABLE_UA_FAULT(2b, 4b, %[rc]) \
+ EX_TABLE_UA_FAULT(3b, 4b, %[rc]) \
+ : [rc] "=d" (rc), [old] "=&d" (*old), \
+ [new] "=&d" (new), [uaddr] "+Q" (*uaddr) \
+ : [oparg] "d" (oparg) \
+ : "cc"); \
+ if (!rc) \
+ instrument_copy_from_user_after(old, uaddr, sizeof(*old), 0); \
+ return rc; \
+}
+
+FUTEX_OP_FUNC(set, "lr %[new],%[oparg]\n")
+FUTEX_OP_FUNC(add, "lr %[new],%[old]\n ar %[new],%[oparg]\n")
+FUTEX_OP_FUNC(or, "lr %[new],%[old]\n or %[new],%[oparg]\n")
+FUTEX_OP_FUNC(and, "lr %[new],%[old]\n nr %[new],%[oparg]\n")
+FUTEX_OP_FUNC(xor, "lr %[new],%[old]\n xr %[new],%[oparg]\n")
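
For readers unfamiliar with the inline assembly, a rough C-level sketch of what each generated __futex_atomic_<op>() does, shown here for the add case. This is illustrative only: the function name is made up, and it ignores the sacf-based secondary-space access and the exception-table fixups, using plain user accessors and the cmpxchg helper defined later in this header instead.

	static inline int __futex_atomic_add_sketch(int oparg, int *old, u32 __user *uaddr)
	{
		u32 cur, prev;

		if (get_user(cur, uaddr))
			return -EFAULT;
		for (;;) {
			if (futex_atomic_cmpxchg_inatomic(&prev, uaddr, cur, cur + oparg))
				return -EFAULT;
			if (prev == cur)
				break;
			cur = prev;	/* another writer won the race; retry */
		}
		*old = cur;
		return 0;
	}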
-static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
- u32 __user *uaddr)
+static inline
+int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int oldval = 0, newval, ret;
+ int old, rc;
switch (op) {
case FUTEX_OP_SET:
- __futex_atomic_op("lr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
+ rc = __futex_atomic_set(oparg, &old, uaddr);
break;
case FUTEX_OP_ADD:
- __futex_atomic_op("lr %2,%1\nar %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
+ rc = __futex_atomic_add(oparg, &old, uaddr);
break;
case FUTEX_OP_OR:
- __futex_atomic_op("lr %2,%1\nor %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
+ rc = __futex_atomic_or(oparg, &old, uaddr);
break;
case FUTEX_OP_ANDN:
- __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
+ rc = __futex_atomic_and(~oparg, &old, uaddr);
break;
case FUTEX_OP_XOR:
- __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
+ rc = __futex_atomic_xor(oparg, &old, uaddr);
break;
default:
- ret = -ENOSYS;
+ rc = -ENOSYS;
}
-
- if (!ret)
- *oval = oldval;
-
- return ret;
+ if (!rc)
+ *oval = old;
+ return rc;
}
-static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+static uaccess_kmsan_or_inline
+int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval)
{
- int ret;
+ int rc;
- asm volatile(
- " sacf 256\n"
- "0: cs %1,%4,0(%5)\n"
- "1: la %0,0\n"
- "2: sacf 768\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
- : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
- : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+ instrument_copy_from_user_before(uval, uaddr, sizeof(*uval));
+ asm_inline volatile(
+ " sacf 256\n"
+ "0: cs %[old],%[new],%[uaddr]\n"
+ "1: lhi %[rc],0\n"
+ "2: sacf 768\n"
+ EX_TABLE_UA_FAULT(0b, 2b, %[rc])
+ EX_TABLE_UA_FAULT(1b, 2b, %[rc])
+ : [rc] "=d" (rc), [old] "+d" (oldval), [uaddr] "+Q" (*uaddr)
+ : [new] "d" (newval)
: "cc", "memory");
*uval = oldval;
- return ret;
+ instrument_copy_from_user_after(uval, uaddr, sizeof(*uval), 0);
+ return rc;
}
#endif /* _ASM_S390_FUTEX_H */
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index 5cc46e0dde62..9f2814d0e1e9 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -17,13 +17,12 @@
#define GMAP_NOTIFY_MPROT 0x1
/* Status bits only for huge segment entries */
-#define _SEGMENT_ENTRY_GMAP_IN 0x8000 /* invalidation notify bit */
-#define _SEGMENT_ENTRY_GMAP_UC 0x4000 /* dirty (migration) */
+#define _SEGMENT_ENTRY_GMAP_IN 0x0800 /* invalidation notify bit */
+#define _SEGMENT_ENTRY_GMAP_UC 0x0002 /* dirty (migration) */
/**
* struct gmap_struct - guest address space
* @list: list head for the mm->context gmap list
- * @crst_list: list of all crst tables used in the guest address space
* @mm: pointer to the parent mm_struct
* @guest_to_host: radix tree with guest to host address translation
* @host_to_guest: radix tree with pointer to segment table entries
@@ -35,7 +34,6 @@
* @guest_handle: protected virtual machine handle for the ultravisor
* @host_to_rmap: radix tree with gmap_rmap lists
* @children: list of shadow gmap structures
- * @pt_list: list of all page tables used in the shadow guest address space
* @shadow_lock: spinlock to protect the shadow gmap list
* @parent: pointer to the parent gmap for shadow guest address spaces
* @orig_asce: ASCE for which the shadow page table has been created
@@ -45,7 +43,6 @@
*/
struct gmap {
struct list_head list;
- struct list_head crst_list;
struct mm_struct *mm;
struct radix_tree_root guest_to_host;
struct radix_tree_root host_to_guest;
@@ -61,7 +58,6 @@ struct gmap {
/* Additional data for shadow guest address spaces */
struct radix_tree_root host_to_rmap;
struct list_head children;
- struct list_head pt_list;
spinlock_t shadow_lock;
struct gmap *parent;
unsigned long orig_asce;
@@ -106,26 +102,21 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit);
void gmap_remove(struct gmap *gmap);
struct gmap *gmap_get(struct gmap *gmap);
void gmap_put(struct gmap *gmap);
+void gmap_free(struct gmap *gmap);
+struct gmap *gmap_alloc(unsigned long limit);
-void gmap_enable(struct gmap *gmap);
-void gmap_disable(struct gmap *gmap);
-struct gmap *gmap_get_enabled(void);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
-unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
-int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr);
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val);
-struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
- int edat_level);
-int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level);
+void gmap_unshadow(struct gmap *sg);
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
int fake);
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
@@ -134,24 +125,21 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
int fake);
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
int fake);
-int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
- unsigned long *pgt, int *dat_protection, int *fake);
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
void gmap_register_pte_notifier(struct gmap_notifier *);
void gmap_unregister_pte_notifier(struct gmap_notifier *);
-int gmap_mprotect_notify(struct gmap *, unsigned long start,
- unsigned long len, int prot);
+int gmap_protect_one(struct gmap *gmap, unsigned long gaddr, int prot, unsigned long bits);
void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
unsigned long gaddr, unsigned long vmaddr);
-int gmap_mark_unmergeable(void);
-void s390_unlist_old_asce(struct gmap *gmap);
+int s390_disable_cow_sharing(void);
int s390_replace_asce(struct gmap *gmap);
void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
unsigned long end, bool interruptible);
+unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level);
/**
* s390_uv_destroy_range - Destroy a range of pages in the given mm.
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index 58668ffb5488..a5b45388c91f 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -13,9 +13,9 @@
#include <asm/lowcore.h>
-#define local_softirq_pending() (S390_lowcore.softirq_pending)
-#define set_softirq_pending(x) (S390_lowcore.softirq_pending = (x))
-#define or_softirq_pending(x) (S390_lowcore.softirq_pending |= (x))
+#define local_softirq_pending() (get_lowcore()->softirq_pending)
+#define set_softirq_pending(x) (get_lowcore()->softirq_pending = (x))
+#define or_softirq_pending(x) (get_lowcore()->softirq_pending |= (x))
#define __ARCH_IRQ_STAT
#define __ARCH_IRQ_EXIT_IRQS_DISABLED
diff --git a/arch/s390/include/asm/hiperdispatch.h b/arch/s390/include/asm/hiperdispatch.h
new file mode 100644
index 000000000000..27e23aa27a24
--- /dev/null
+++ b/arch/s390/include/asm/hiperdispatch.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2024
+ */
+
+#ifndef _ASM_HIPERDISPATCH_H
+#define _ASM_HIPERDISPATCH_H
+
+void hd_reset_state(void);
+void hd_add_core(int cpu);
+void hd_disable_hiperdispatch(void);
+int hd_enable_hiperdispatch(void);
+
+#endif /* _ASM_HIPERDISPATCH_H */
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index deb198a61039..663e87220e89 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -10,41 +10,39 @@
#define _ASM_S390_HUGETLB_H
#include <linux/pgtable.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
#include <asm/page.h>
-#define hugetlb_free_pgd_range free_pgd_range
#define hugepages_supported() (MACHINE_HAS_EDAT1)
+#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned long sz);
void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte);
-pte_t huge_ptep_get(pte_t *ptep);
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep);
+ pte_t *ptep, pte_t pte);
-/*
- * If the arch doesn't supply something else, assume that hugepage
- * size aligned regions are ok without further preparation.
- */
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr, unsigned long len)
-{
- struct hstate *h = hstate_file(file);
+#define __HAVE_ARCH_HUGE_PTEP_GET
+pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
- if (len & ~huge_page_mask(h))
- return -EINVAL;
- if (addr & ~huge_page_mask(h))
- return -EINVAL;
- return 0;
+pte_t __huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
+
+#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep,
+ unsigned long sz)
+{
+ return __huge_ptep_get_and_clear(mm, addr, ptep);
}
-static inline void arch_clear_hugepage_flags(struct page *page)
+static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_arch_1, &page->flags);
+ clear_bit(PG_arch_1, &folio->flags);
}
-#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
+#define __HAVE_ARCH_HUGE_PTE_CLEAR
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz)
{
@@ -54,94 +52,54 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
set_pte(ptep, __pte(_SEGMENT_ENTRY_EMPTY));
}
+#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
- return huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
+ return __huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
}
+#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
- int changed = !pte_same(huge_ptep_get(ptep), pte);
+ int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte);
+
if (changed) {
- huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ __huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
return changed;
}
+#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
- __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
-}
+ pte_t pte = __huge_ptep_get_and_clear(mm, addr, ptep);
-static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
-{
- return mk_pte(page, pgprot);
-}
-
-static inline int huge_pte_none(pte_t pte)
-{
- return pte_none(pte);
-}
-
-static inline int huge_pte_none_mostly(pte_t pte)
-{
- return huge_pte_none(pte);
-}
-
-static inline int huge_pte_write(pte_t pte)
-{
- return pte_write(pte);
-}
-
-static inline int huge_pte_dirty(pte_t pte)
-{
- return pte_dirty(pte);
-}
-
-static inline pte_t huge_pte_mkwrite(pte_t pte)
-{
- return pte_mkwrite_novma(pte);
-}
-
-static inline pte_t huge_pte_mkdirty(pte_t pte)
-{
- return pte_mkdirty(pte);
-}
-
-static inline pte_t huge_pte_wrprotect(pte_t pte)
-{
- return pte_wrprotect(pte);
-}
-
-static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
-{
- return pte_modify(pte, newprot);
+ __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}
+#define __HAVE_ARCH_HUGE_PTE_MKUFFD_WP
static inline pte_t huge_pte_mkuffd_wp(pte_t pte)
{
return pte;
}
+#define __HAVE_ARCH_HUGE_PTE_CLEAR_UFFD_WP
static inline pte_t huge_pte_clear_uffd_wp(pte_t pte)
{
return pte;
}
+#define __HAVE_ARCH_HUGE_PTE_UFFD_WP
static inline int huge_pte_uffd_wp(pte_t pte)
{
return 0;
}
-static inline bool gigantic_page_runtime_supported(void)
-{
- return true;
-}
+#include <asm-generic/hugetlb.h>
#endif /* _ASM_S390_HUGETLB_H */
diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h
index 59fcc3c72edf..ac68c657b28c 100644
--- a/arch/s390/include/asm/idals.h
+++ b/arch/s390/include/asm/idals.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
+/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
@@ -17,32 +17,37 @@
#include <linux/err.h>
#include <linux/types.h>
#include <linux/slab.h>
-#include <asm/cio.h>
#include <linux/uaccess.h>
+#include <asm/dma-types.h>
+#include <asm/cio.h>
-#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
-#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)
+#define IDA_SIZE_SHIFT 12
+#define IDA_BLOCK_SIZE (1UL << IDA_SIZE_SHIFT)
-#define IDA_2K_SIZE_LOG 11
-#define IDA_2K_BLOCK_SIZE (1L << IDA_2K_SIZE_LOG)
+#define IDA_2K_SIZE_SHIFT 11
+#define IDA_2K_BLOCK_SIZE (1UL << IDA_2K_SIZE_SHIFT)
/*
* Test if an address/length pair needs an idal list.
*/
-static inline int
-idal_is_needed(void *vaddr, unsigned int length)
+static inline bool idal_is_needed(void *vaddr, unsigned int length)
{
- return ((__pa(vaddr) + length - 1) >> 31) != 0;
-}
+ dma64_t paddr = virt_to_dma64(vaddr);
+ return (((__force unsigned long)(paddr) + length - 1) >> 31) != 0;
+}
/*
* Return the number of idal words needed for an address/length pair.
*/
static inline unsigned int idal_nr_words(void *vaddr, unsigned int length)
{
- return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length +
- (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
+ unsigned int cidaw;
+
+ cidaw = (unsigned long)vaddr & (IDA_BLOCK_SIZE - 1);
+ cidaw += length + IDA_BLOCK_SIZE - 1;
+ cidaw >>= IDA_SIZE_SHIFT;
+ return cidaw;
}
/*
@@ -50,26 +55,27 @@ static inline unsigned int idal_nr_words(void *vaddr, unsigned int length)
*/
static inline unsigned int idal_2k_nr_words(void *vaddr, unsigned int length)
{
- return ((__pa(vaddr) & (IDA_2K_BLOCK_SIZE - 1)) + length +
- (IDA_2K_BLOCK_SIZE - 1)) >> IDA_2K_SIZE_LOG;
+ unsigned int cidaw;
+
+ cidaw = (unsigned long)vaddr & (IDA_2K_BLOCK_SIZE - 1);
+ cidaw += length + IDA_2K_BLOCK_SIZE - 1;
+ cidaw >>= IDA_2K_SIZE_SHIFT;
+ return cidaw;
}
/*
* Create the list of idal words for an address/length pair.
*/
-static inline unsigned long *idal_create_words(unsigned long *idaws,
- void *vaddr, unsigned int length)
+static inline dma64_t *idal_create_words(dma64_t *idaws, void *vaddr, unsigned int length)
{
- unsigned long paddr;
+ dma64_t paddr = virt_to_dma64(vaddr);
unsigned int cidaw;
- paddr = __pa(vaddr);
- cidaw = ((paddr & (IDA_BLOCK_SIZE-1)) + length +
- (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
*idaws++ = paddr;
- paddr &= -IDA_BLOCK_SIZE;
+ cidaw = idal_nr_words(vaddr, length);
+ paddr = dma64_and(paddr, -IDA_BLOCK_SIZE);
while (--cidaw > 0) {
- paddr += IDA_BLOCK_SIZE;
+ paddr = dma64_add(paddr, IDA_BLOCK_SIZE);
*idaws++ = paddr;
}
return idaws;
@@ -79,36 +85,33 @@ static inline unsigned long *idal_create_words(unsigned long *idaws,
* Sets the address of the data in CCW.
* If necessary it allocates an IDAL and sets the appropriate flags.
*/
-static inline int
-set_normalized_cda(struct ccw1 * ccw, void *vaddr)
+static inline int set_normalized_cda(struct ccw1 *ccw, void *vaddr)
{
unsigned int nridaws;
- unsigned long *idal;
+ dma64_t *idal;
if (ccw->flags & CCW_FLAG_IDA)
return -EINVAL;
nridaws = idal_nr_words(vaddr, ccw->count);
if (nridaws > 0) {
- idal = kmalloc(nridaws * sizeof(unsigned long),
- GFP_ATOMIC | GFP_DMA );
- if (idal == NULL)
+ idal = kcalloc(nridaws, sizeof(*idal), GFP_ATOMIC | GFP_DMA);
+ if (!idal)
return -ENOMEM;
idal_create_words(idal, vaddr, ccw->count);
ccw->flags |= CCW_FLAG_IDA;
vaddr = idal;
}
- ccw->cda = (__u32)(unsigned long) vaddr;
+ ccw->cda = virt_to_dma32(vaddr);
return 0;
}
/*
* Releases any allocated IDAL related to the CCW.
*/
-static inline void
-clear_normalized_cda(struct ccw1 * ccw)
+static inline void clear_normalized_cda(struct ccw1 *ccw)
{
if (ccw->flags & CCW_FLAG_IDA) {
- kfree((void *)(unsigned long) ccw->cda);
+ kfree(dma32_to_virt(ccw->cda));
ccw->flags &= ~CCW_FLAG_IDA;
}
ccw->cda = 0;
@@ -120,125 +123,138 @@ clear_normalized_cda(struct ccw1 * ccw)
struct idal_buffer {
size_t size;
size_t page_order;
- void *data[];
+ dma64_t data[];
};
/*
* Allocate an idal buffer
*/
-static inline struct idal_buffer *
-idal_buffer_alloc(size_t size, int page_order)
+static inline struct idal_buffer *idal_buffer_alloc(size_t size, int page_order)
{
- struct idal_buffer *ib;
int nr_chunks, nr_ptrs, i;
+ struct idal_buffer *ib;
+ void *vaddr;
- nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
- nr_chunks = (4096 << page_order) >> IDA_SIZE_LOG;
+ nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_SHIFT;
+ nr_chunks = (PAGE_SIZE << page_order) >> IDA_SIZE_SHIFT;
ib = kmalloc(struct_size(ib, data, nr_ptrs), GFP_DMA | GFP_KERNEL);
- if (ib == NULL)
+ if (!ib)
return ERR_PTR(-ENOMEM);
ib->size = size;
ib->page_order = page_order;
for (i = 0; i < nr_ptrs; i++) {
- if ((i & (nr_chunks - 1)) != 0) {
- ib->data[i] = ib->data[i-1] + IDA_BLOCK_SIZE;
- continue;
- }
- ib->data[i] = (void *)
- __get_free_pages(GFP_KERNEL, page_order);
- if (ib->data[i] != NULL)
+ if (i & (nr_chunks - 1)) {
+ ib->data[i] = dma64_add(ib->data[i - 1], IDA_BLOCK_SIZE);
continue;
- // Not enough memory
- while (i >= nr_chunks) {
- i -= nr_chunks;
- free_pages((unsigned long) ib->data[i],
- ib->page_order);
}
- kfree(ib);
- return ERR_PTR(-ENOMEM);
+ vaddr = (void *)__get_free_pages(GFP_KERNEL, page_order);
+ if (!vaddr)
+ goto error;
+ ib->data[i] = virt_to_dma64(vaddr);
}
return ib;
+error:
+ while (i >= nr_chunks) {
+ i -= nr_chunks;
+ vaddr = dma64_to_virt(ib->data[i]);
+ free_pages((unsigned long)vaddr, ib->page_order);
+ }
+ kfree(ib);
+ return ERR_PTR(-ENOMEM);
}
/*
* Free an idal buffer.
*/
-static inline void
-idal_buffer_free(struct idal_buffer *ib)
+static inline void idal_buffer_free(struct idal_buffer *ib)
{
int nr_chunks, nr_ptrs, i;
+ void *vaddr;
- nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
- nr_chunks = (4096 << ib->page_order) >> IDA_SIZE_LOG;
- for (i = 0; i < nr_ptrs; i += nr_chunks)
- free_pages((unsigned long) ib->data[i], ib->page_order);
+ nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_SHIFT;
+ nr_chunks = (PAGE_SIZE << ib->page_order) >> IDA_SIZE_SHIFT;
+ for (i = 0; i < nr_ptrs; i += nr_chunks) {
+ vaddr = dma64_to_virt(ib->data[i]);
+ free_pages((unsigned long)vaddr, ib->page_order);
+ }
kfree(ib);
}
/*
* Test if a idal list is really needed.
*/
-static inline int
-__idal_buffer_is_needed(struct idal_buffer *ib)
+static inline bool __idal_buffer_is_needed(struct idal_buffer *ib)
{
- return ib->size > (4096ul << ib->page_order) ||
- idal_is_needed(ib->data[0], ib->size);
+ if (ib->size > (PAGE_SIZE << ib->page_order))
+ return true;
+ return idal_is_needed(dma64_to_virt(ib->data[0]), ib->size);
}
/*
* Set channel data address to idal buffer.
*/
-static inline void
-idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
+static inline void idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
{
+ void *vaddr;
+
if (__idal_buffer_is_needed(ib)) {
- // setup idals;
- ccw->cda = (u32)(addr_t) ib->data;
+ /* Setup idals */
+ ccw->cda = virt_to_dma32(ib->data);
ccw->flags |= CCW_FLAG_IDA;
- } else
- // we do not need idals - use direct addressing
- ccw->cda = (u32)(addr_t) ib->data[0];
+ } else {
+ /*
+ * No idals needed - use direct addressing. Convert from
+ * dma64_t to virt and then to dma32_t only because of type
+ * checking. The physical address is known to be below 2GB.
+ */
+ vaddr = dma64_to_virt(ib->data[0]);
+ ccw->cda = virt_to_dma32(vaddr);
+ }
ccw->count = ib->size;
}
/*
* Copy count bytes from an idal buffer to user memory
*/
-static inline size_t
-idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)
+static inline size_t idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)
{
size_t left;
+ void *vaddr;
int i;
BUG_ON(count > ib->size);
for (i = 0; count > IDA_BLOCK_SIZE; i++) {
- left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE);
+ vaddr = dma64_to_virt(ib->data[i]);
+ left = copy_to_user(to, vaddr, IDA_BLOCK_SIZE);
if (left)
return left + count - IDA_BLOCK_SIZE;
- to = (void __user *) to + IDA_BLOCK_SIZE;
+ to = (void __user *)to + IDA_BLOCK_SIZE;
count -= IDA_BLOCK_SIZE;
}
- return copy_to_user(to, ib->data[i], count);
+ vaddr = dma64_to_virt(ib->data[i]);
+ return copy_to_user(to, vaddr, count);
}
/*
* Copy count bytes from user memory to an idal buffer
*/
-static inline size_t
-idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count)
+static inline size_t idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count)
{
size_t left;
+ void *vaddr;
int i;
BUG_ON(count > ib->size);
for (i = 0; count > IDA_BLOCK_SIZE; i++) {
- left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE);
+ vaddr = dma64_to_virt(ib->data[i]);
+ left = copy_from_user(vaddr, from, IDA_BLOCK_SIZE);
if (left)
return left + count - IDA_BLOCK_SIZE;
- from = (void __user *) from + IDA_BLOCK_SIZE;
+ from = (void __user *)from + IDA_BLOCK_SIZE;
count -= IDA_BLOCK_SIZE;
}
- return copy_from_user(ib->data[i], from, count);
+ vaddr = dma64_to_virt(ib->data[i]);
+ return copy_from_user(vaddr, from, count);
}
#endif
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 4453ad7c11ac..fc9933a743d6 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -16,8 +16,10 @@
#include <asm/pci_io.h>
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
+#define kc_xlate_dev_mem_ptr xlate_dev_mem_ptr
void *xlate_dev_mem_ptr(phys_addr_t phys);
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
+#define kc_unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
#define IO_SPACE_LIMIT 0
@@ -73,6 +75,21 @@ static inline void ioport_unmap(void __iomem *p)
#define __raw_writel zpci_write_u32
#define __raw_writeq zpci_write_u64
+/* combine single writes by using store-block insn */
+static inline void __iowrite32_copy(void __iomem *to, const void *from,
+ size_t count)
+{
+ zpci_memcpy_toio(to, from, count * 4);
+}
+#define __iowrite32_copy __iowrite32_copy
+
+static inline void __iowrite64_copy(void __iomem *to, const void *from,
+ size_t count)
+{
+ zpci_memcpy_toio(to, from, count * 8);
+}
+#define __iowrite64_copy __iowrite64_copy
+
#endif /* CONFIG_PCI */
#include <asm-generic/io.h>
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 54b42817f70a..d9e705f4a697 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -47,6 +47,7 @@ enum interruption_class {
IRQEXT_CMS,
IRQEXT_CMC,
IRQEXT_FTP,
+ IRQEXT_WTI,
IRQIO_CIO,
IRQIO_DAS,
IRQIO_C15,
@@ -99,6 +100,7 @@ int unregister_external_irq(u16 code, ext_int_handler_t handler);
enum irq_subclass {
IRQ_SUBCLASS_MEASUREMENT_ALERT = 5,
IRQ_SUBCLASS_SERVICE_SIGNAL = 9,
+ IRQ_SUBCLASS_WARNING_TRACK = 33,
};
#define CR0_IRQ_SUBCLASS_MASK \
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 02427b205c11..bcab456dfb80 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -37,12 +37,18 @@ static __always_inline void __arch_local_irq_ssm(unsigned long flags)
asm volatile("ssm %0" : : "Q" (flags) : "memory");
}
-static __always_inline unsigned long arch_local_save_flags(void)
+#ifdef CONFIG_KMSAN
+#define arch_local_irq_attributes noinline notrace __no_sanitize_memory __maybe_unused
+#else
+#define arch_local_irq_attributes __always_inline
+#endif
+
+static arch_local_irq_attributes unsigned long arch_local_save_flags(void)
{
return __arch_local_irq_stnsm(0xff);
}
-static __always_inline unsigned long arch_local_irq_save(void)
+static arch_local_irq_attributes unsigned long arch_local_irq_save(void)
{
return __arch_local_irq_stnsm(0xfc);
}
@@ -52,7 +58,12 @@ static __always_inline void arch_local_irq_disable(void)
arch_local_irq_save();
}
-static __always_inline void arch_local_irq_enable(void)
+static arch_local_irq_attributes void arch_local_irq_enable_external(void)
+{
+ __arch_local_irq_stosm(0x01);
+}
+
+static arch_local_irq_attributes void arch_local_irq_enable(void)
{
__arch_local_irq_stosm(0x03);
}
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index 1bd08eb56d5f..9084b750350d 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -94,6 +94,9 @@ void arch_kexec_protect_crashkres(void);
void arch_kexec_unprotect_crashkres(void);
#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
+
+bool is_kdump_kernel(void);
+#define is_kdump_kernel is_kdump_kernel
#endif
#ifdef CONFIG_KEXEC_FILE
diff --git a/arch/s390/include/asm/kmsan.h b/arch/s390/include/asm/kmsan.h
new file mode 100644
index 000000000000..f73e181d09ae
--- /dev/null
+++ b/arch/s390/include/asm/kmsan.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_KMSAN_H
+#define _ASM_S390_KMSAN_H
+
+#include <asm/lowcore.h>
+#include <asm/page.h>
+#include <linux/kmsan.h>
+#include <linux/mmzone.h>
+#include <linux/stddef.h>
+
+#ifndef MODULE
+
+static inline bool is_lowcore_addr(void *addr)
+{
+ return addr >= (void *)get_lowcore() &&
+ addr < (void *)(get_lowcore() + 1);
+}
+
+static inline void *arch_kmsan_get_meta_or_null(void *addr, bool is_origin)
+{
+ if (is_lowcore_addr(addr)) {
+ /*
+ * Different lowcores accessed via S390_lowcore are described
+ * by the same struct page. Resolve the prefix manually in
+ * order to get a distinct struct page.
+ */
+ addr += (void *)lowcore_ptr[raw_smp_processor_id()] -
+ (void *)get_lowcore();
+ if (KMSAN_WARN_ON(is_lowcore_addr(addr)))
+ return NULL;
+ return kmsan_get_metadata(addr, is_origin);
+ }
+ return NULL;
+}
+
+static inline bool kmsan_virt_addr_valid(void *addr)
+{
+ bool ret;
+
+ /*
+ * pfn_valid() relies on RCU, and may call into the scheduler on exiting
+ * the critical section. However, this would result in recursion with
+ * KMSAN. Therefore, disable preemption here, and re-enable preemption
+ * below while suppressing reschedules to avoid recursion.
+ *
+ * Note that this occasionally sacrifices scheduling guarantees.
+ * However, a kernel compiled with KMSAN has already given up on any
+ * performance guarantees due to being heavily instrumented.
+ */
+ preempt_disable();
+ ret = virt_addr_valid(addr);
+ preempt_enable_no_resched();
+
+ return ret;
+}
+
+#endif /* !MODULE */
+
+#endif /* _ASM_S390_KMSAN_H */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 52664105a473..9a367866cab0 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -15,7 +15,6 @@
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm_types.h>
-#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/seqlock.h>
#include <linux/module.h>
@@ -23,7 +22,7 @@
#include <linux/mmu_notifier.h>
#include <asm/debug.h>
#include <asm/cpu.h>
-#include <asm/fpu/api.h>
+#include <asm/fpu.h>
#include <asm/isc.h>
#include <asm/guarded_storage.h>
@@ -31,6 +30,8 @@
#define KVM_S390_ESCA_CPU_SLOTS 248
#define KVM_MAX_VCPUS 255
+#define KVM_INTERNAL_MEM_SLOTS 1
+
/*
* These seem to be used for allocating ->chip in the routing table, which we
* don't use. 1 is as small as we can get to reduce the needed memory. If we
@@ -95,11 +96,16 @@ union ipte_control {
};
};
+/*
+ * Utility is defined as two bytes but having it four bytes wide
+ * generates more efficient code. Since the following bytes are
+ * reserved this makes no functional difference.
+ */
union sca_utility {
- __u16 val;
+ __u32 val;
struct {
- __u16 mtcr : 1;
- __u16 reserved : 15;
+ __u32 mtcr : 1;
+ __u32 : 31;
};
};
@@ -108,7 +114,7 @@ struct bsca_block {
__u64 reserved[5];
__u64 mcn;
union sca_utility utility;
- __u8 reserved2[6];
+ __u8 reserved2[4];
struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
};
@@ -116,7 +122,7 @@ struct esca_block {
union ipte_control ipte_control;
__u64 reserved1[6];
union sca_utility utility;
- __u8 reserved2[6];
+ __u8 reserved2[4];
__u64 mcn[4];
__u64 reserved3[20];
struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
@@ -357,6 +363,7 @@ struct kvm_s390_sie_block {
#define ECD_MEF 0x08000000
#define ECD_ETOKENF 0x02000000
#define ECD_ECC 0x00200000
+#define ECD_HMAC 0x00004000
__u32 ecd; /* 0x01c8 */
__u8 reserved1cc[18]; /* 0x01cc */
__u64 pp; /* 0x01de */
@@ -427,6 +434,7 @@ struct kvm_vcpu_stat {
u64 instruction_io_other;
u64 instruction_lpsw;
u64 instruction_lpswe;
+ u64 instruction_lpswey;
u64 instruction_pfmf;
u64 instruction_ptff;
u64 instruction_sck;
@@ -527,6 +535,9 @@ struct kvm_vcpu_stat {
#define PGM_REGION_FIRST_TRANS 0x39
#define PGM_REGION_SECOND_TRANS 0x3a
#define PGM_REGION_THIRD_TRANS 0x3b
+#define PGM_SECURE_STORAGE_ACCESS 0x3d
+#define PGM_NON_SECURE_STORAGE_ACCESS 0x3e
+#define PGM_SECURE_STORAGE_VIOLATION 0x3f
#define PGM_MONITOR 0x40
#define PGM_PER 0x80
#define PGM_CRYPTO_OPERATION 0x119
@@ -743,13 +754,10 @@ struct kvm_vcpu_arch {
struct kvm_s390_sie_block *vsie_block;
unsigned int host_acrs[NUM_ACRS];
struct gs_cb *host_gscb;
- struct fpu host_fpregs;
struct kvm_s390_local_interrupt local_int;
struct hrtimer ckc_timer;
struct kvm_s390_pgm_info pgm;
struct gmap *gmap;
- /* backup location for the currently enabled gmap when scheduled out */
- struct gmap *enabled_gmap;
struct kvm_guestdbg_info_arch guestdbg;
unsigned long pfault_token;
unsigned long pfault_select;
@@ -765,6 +773,8 @@ struct kvm_vcpu_arch {
__u64 cputm_start;
bool gs_enabled;
bool skey_enabled;
+ /* Indicator if the access registers have been loaded from guest */
+ bool acrs_loaded;
struct kvm_s390_pv_vcpu pv;
union diag318_info diag318_info;
};
@@ -923,12 +933,14 @@ struct sie_page2 {
u8 reserved928[0x1000 - 0x928]; /* 0x0928 */
};
+struct vsie_page;
+
struct kvm_s390_vsie {
struct mutex mutex;
struct radix_tree_root addr_to_page;
int page_count;
int next;
- struct page *pages[KVM_MAX_VCPUS];
+ struct vsie_page *pages[KVM_MAX_VCPUS];
};
struct kvm_s390_gisa_iam {
@@ -1028,11 +1040,12 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm);
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
unsigned long *aqm, unsigned long *adm);
-int __sie64a(phys_addr_t sie_block_phys, struct kvm_s390_sie_block *sie_block, u64 *rsa);
+int __sie64a(phys_addr_t sie_block_phys, struct kvm_s390_sie_block *sie_block, u64 *rsa,
+ unsigned long gasce);
-static inline int sie64a(struct kvm_s390_sie_block *sie_block, u64 *rsa)
+static inline int sie64a(struct kvm_s390_sie_block *sie_block, u64 *rsa, unsigned long gasce)
{
- return __sie64a(virt_to_phys(sie_block), sie_block, rsa);
+ return __sie64a(virt_to_phys(sie_block), sie_block, rsa, gasce);
}
extern char sie_exit;
@@ -1044,7 +1057,6 @@ extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
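sie64a() above gains a fourth argument that forwards the guest ASCE to __sie64a(). A minimal call-site sketch, assuming only the declarations shown here (function and parameter names are illustrative):

static int enter_sie(struct kvm_s390_sie_block *sie_block, u64 *rsa,
		     unsigned long guest_asce)
{
	/* the new fourth argument carries the guest ASCE into __sie64a() */
	return sie64a(sie_block, rsa, guest_asce);
}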
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 5dc1b6345006..42a092fa1029 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -14,10 +14,15 @@
#include <asm/ctlreg.h>
#include <asm/cpu.h>
#include <asm/types.h>
+#include <asm/alternative.h>
#define LC_ORDER 1
#define LC_PAGES 2
+#define LOWCORE_ALT_ADDRESS _AC(0x70000, UL)
+
+#ifndef __ASSEMBLY__
+
struct pgm_tdb {
u64 data[32];
};
@@ -93,12 +98,11 @@ struct lowcore {
psw_t io_new_psw; /* 0x01f0 */
/* Save areas. */
- __u64 save_area_sync[8]; /* 0x0200 */
- __u64 save_area_async[8]; /* 0x0240 */
+ __u64 save_area[8]; /* 0x0200 */
+ __u8 pad_0x0240[0x0280-0x0240]; /* 0x0240 */
__u64 save_area_restart[1]; /* 0x0280 */
- /* CPU flags. */
- __u64 cpu_flags; /* 0x0288 */
+ __u64 pcpu; /* 0x0288 */
/* Return psws. */
psw_t return_psw; /* 0x0290 */
@@ -157,12 +161,11 @@ struct lowcore {
__s32 preempt_count; /* 0x03a8 */
__u32 spinlock_lockval; /* 0x03ac */
__u32 spinlock_index; /* 0x03b0 */
- __u32 fpu_flags; /* 0x03b4 */
+ __u8 pad_0x03b4[0x03b8-0x03b4]; /* 0x03b4 */
__u64 percpu_offset; /* 0x03b8 */
__u8 pad_0x03c0[0x03c8-0x03c0]; /* 0x03c0 */
__u64 machine_flags; /* 0x03c8 */
- __u64 gmap; /* 0x03d0 */
- __u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */
+ __u8 pad_0x03d0[0x0400-0x03d0]; /* 0x03d0 */
__u32 return_lpswe; /* 0x0400 */
__u32 return_mcck_lpswe; /* 0x0404 */
@@ -213,7 +216,17 @@ struct lowcore {
__u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */
} __packed __aligned(8192);
-#define S390_lowcore (*((struct lowcore *) 0))
+static __always_inline struct lowcore *get_lowcore(void)
+{
+ struct lowcore *lc;
+
+ if (__is_defined(__DECOMPRESSOR))
+ return NULL;
+ asm(ALTERNATIVE("llilh %[lc],0", "llilh %[lc],%[alt]", ALT_LOWCORE)
+ : [lc] "=d" (lc)
+ : [alt] "i" (LOWCORE_ALT_ADDRESS >> 16));
+ return lc;
+}
extern struct lowcore *lowcore_ptr[];
@@ -222,4 +235,19 @@ static inline void set_prefix(__u32 address)
asm volatile("spx %0" : : "Q" (address) : "memory");
}
+#else /* __ASSEMBLY__ */
+
+.macro GET_LC reg
+ ALTERNATIVE "llilh \reg,0", \
+ __stringify(llilh \reg, LOWCORE_ALT_ADDRESS >> 16), \
+ ALT_LOWCORE
+.endm
+
+.macro STMG_LC start, end, savearea
+ ALTERNATIVE "stmg \start, \end, \savearea", \
+ __stringify(stmg \start, \end, LOWCORE_ALT_ADDRESS + \savearea), \
+ ALT_LOWCORE
+.endm
+
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_LOWCORE_H */
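The get_lowcore() accessor above replaces the fixed S390_lowcore mapping at address 0 and, via the ALTERNATIVE, can redirect to LOWCORE_ALT_ADDRESS. A minimal usage sketch, assuming only what this header declares (later hunks in mmu_context.h and percpu.h show the real call-site conversion; the helper name below is illustrative):

/* Illustrative only: lowcore fields are now reached through the accessor. */
static inline unsigned long my_cpu_percpu_offset(void)
{
	return get_lowcore()->percpu_offset;
}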
diff --git a/arch/s390/include/asm/march.h b/arch/s390/include/asm/march.h
new file mode 100644
index 000000000000..fd9eef3be44c
--- /dev/null
+++ b/arch/s390/include/asm/march.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_S390_MARCH_H
+#define __ASM_S390_MARCH_H
+
+#include <linux/kconfig.h>
+
+#define MARCH_HAS_Z10_FEATURES 1
+
+#ifndef __DECOMPRESSOR
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#define MARCH_HAS_Z196_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+#define MARCH_HAS_ZEC12_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
+#define MARCH_HAS_Z13_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_Z14_FEATURES
+#define MARCH_HAS_Z14_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_Z15_FEATURES
+#define MARCH_HAS_Z15_FEATURES 1
+#endif
+
+#ifdef CONFIG_HAVE_MARCH_Z16_FEATURES
+#define MARCH_HAS_Z16_FEATURES 1
+#endif
+
+#endif /* __DECOMPRESSOR */
+
+#endif /* __ASM_S390_MARCH_H */
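The MARCH_HAS_* macros above replace direct tests of CONFIG_HAVE_MARCH_* so that machine-level features are automatically hidden from the decompressor. A usage sketch (the percpu.h hunk further down uses exactly this pattern):

#include <asm/march.h>

#ifdef MARCH_HAS_Z196_FEATURES
/* z196-only code path; never compiled into the decompressor, because the
 * MARCH_HAS_* macros are left undefined when __DECOMPRESSOR is set */
#endif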
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index bb1b4bef1878..4c2dc7abc285 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -32,6 +32,11 @@ typedef struct {
unsigned int uses_skeys:1;
/* The mmu context uses CMM. */
unsigned int uses_cmm:1;
+ /*
+ * The mmu context allows COW-sharing of memory pages (KSM, zeropage).
+ * Note that COW-sharing during fork() is currently always allowed.
+ */
+ unsigned int allow_cow_sharing:1;
/* The gmaps associated with this context are allowed to use huge pages. */
unsigned int allow_gmap_hpage_1m:1;
} mm_context_t;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 929af18b0908..d56eb0a1f37b 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -35,6 +35,7 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.has_pgste = 0;
mm->context.uses_skeys = 0;
mm->context.uses_cmm = 0;
+ mm->context.allow_cow_sharing = 1;
mm->context.allow_gmap_hpage_1m = 0;
#endif
switch (mm->context.asce_limit) {
@@ -75,9 +76,9 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *
int cpu = smp_processor_id();
if (next == &init_mm)
- S390_lowcore.user_asce = s390_invalid_asce;
+ get_lowcore()->user_asce = s390_invalid_asce;
else
- S390_lowcore.user_asce.val = next->context.asce;
+ get_lowcore()->user_asce.val = next->context.asce;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
/* Clear previous user-ASCE from CR7 */
local_ctl_load(7, &s390_invalid_asce);
@@ -110,7 +111,7 @@ static inline void finish_arch_post_lock_switch(void)
__tlb_flush_mm_lazy(mm);
preempt_enable();
}
- local_ctl_load(7, &S390_lowcore.user_asce);
+ local_ctl_load(7, &get_lowcore()->user_asce);
}
#define activate_mm activate_mm
@@ -119,7 +120,7 @@ static inline void activate_mm(struct mm_struct *prev,
{
switch_mm(prev, next, current);
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
- local_ctl_load(7, &S390_lowcore.user_asce);
+ local_ctl_load(7, &get_lowcore()->user_asce);
}
#include <asm-generic/mmu_context.h>
diff --git a/arch/s390/include/asm/mmzone.h b/arch/s390/include/asm/mmzone.h
deleted file mode 100644
index 73e3e7c6976c..000000000000
--- a/arch/s390/include/asm/mmzone.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * NUMA support for s390
- *
- * Copyright IBM Corp. 2015
- */
-
-#ifndef _ASM_S390_MMZONE_H
-#define _ASM_S390_MMZONE_H
-
-#ifdef CONFIG_NUMA
-
-extern struct pglist_data *node_data[];
-#define NODE_DATA(nid) (node_data[nid])
-
-#endif /* CONFIG_NUMA */
-#endif /* _ASM_S390_MMZONE_H */
diff --git a/arch/s390/include/asm/module.h b/arch/s390/include/asm/module.h
index 9f1eea15872c..916ab59e458a 100644
--- a/arch/s390/include/asm/module.h
+++ b/arch/s390/include/asm/module.h
@@ -38,4 +38,18 @@ struct mod_arch_specific {
#endif /* CONFIG_FUNCTION_TRACER */
};
+static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
+{
+ const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ const Elf_Shdr *s, *se;
+
+ for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+ if (strcmp(name, secstrs + s->sh_name) == 0)
+ return s;
+ }
+ return NULL;
+}
+
#endif /* _ASM_S390_MODULE_H */
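find_section() above is a generic ELF section lookup over a module's section headers. A hypothetical caller, with a made-up section name, assuming only the helper itself:

/* Illustrative only: check whether a module image carries a given section. */
static bool module_has_section(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs)
{
	return find_section(hdr, sechdrs, ".example.section") != NULL;
}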
diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
index 82725cf783c7..192835a3e24d 100644
--- a/arch/s390/include/asm/nospec-branch.h
+++ b/arch/s390/include/asm/nospec-branch.h
@@ -5,8 +5,17 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#include <asm/facility.h>
extern int nospec_disable;
+extern int nobp;
+
+static inline bool nobp_enabled(void)
+{
+ if (__is_defined(__DECOMPRESSOR))
+ return false;
+ return nobp && test_facility(82);
+}
void nospec_init_branches(void);
void nospec_auto_detect(void);
@@ -17,6 +26,26 @@ static inline bool nospec_uses_trampoline(void)
return __is_defined(CC_USING_EXPOLINE) && !nospec_disable;
}
+#ifdef CONFIG_EXPOLINE_EXTERN
+
+void __s390_indirect_jump_r1(void);
+void __s390_indirect_jump_r2(void);
+void __s390_indirect_jump_r3(void);
+void __s390_indirect_jump_r4(void);
+void __s390_indirect_jump_r5(void);
+void __s390_indirect_jump_r6(void);
+void __s390_indirect_jump_r7(void);
+void __s390_indirect_jump_r8(void);
+void __s390_indirect_jump_r9(void);
+void __s390_indirect_jump_r10(void);
+void __s390_indirect_jump_r11(void);
+void __s390_indirect_jump_r12(void);
+void __s390_indirect_jump_r13(void);
+void __s390_indirect_jump_r14(void);
+void __s390_indirect_jump_r15(void);
+
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_EXPOLINE_H */
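nobp_enabled() above combines the command-line switch with a test for facility 82 and is always false in the decompressor. A sketch of how a caller might combine it with nospec_uses_trampoline() (the function name and strings are illustrative, not taken from the patch):

static inline const char *spectre_v2_mitigation_name(void)
{
	if (nospec_uses_trampoline())
		return "execute trampolines";
	if (nobp_enabled())
		return "limited branch prediction";
	return "none";
}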
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
index 7a946c42ad13..cb15dd25bf21 100644
--- a/arch/s390/include/asm/nospec-insn.h
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -16,24 +16,25 @@
*/
.macro __THUNK_PROLOG_NAME name
#ifdef CONFIG_EXPOLINE_EXTERN
- .pushsection .text,"ax",@progbits
- __ALIGN
+ SYM_CODE_START(\name)
#else
.pushsection .text.\name,"axG",@progbits,\name,comdat
-#endif
.globl \name
.hidden \name
.type \name,@function
\name:
CFI_STARTPROC
+#endif
.endm
.macro __THUNK_EPILOG_NAME name
- CFI_ENDPROC
#ifdef CONFIG_EXPOLINE_EXTERN
- .size \name, .-\name
-#endif
+ SYM_CODE_END(\name)
+ EXPORT_SYMBOL(\name)
+#else
+ CFI_ENDPROC
.popsection
+#endif
.endm
.macro __THUNK_PROLOG_BR r1
diff --git a/arch/s390/include/asm/os_info.h b/arch/s390/include/asm/os_info.h
index a4d2e103f116..3ee9e8f5ceae 100644
--- a/arch/s390/include/asm/os_info.h
+++ b/arch/s390/include/asm/os_info.h
@@ -17,11 +17,25 @@
#define OS_INFO_VMCOREINFO 0
#define OS_INFO_REIPL_BLOCK 1
#define OS_INFO_FLAGS_ENTRY 2
+#define OS_INFO_RESERVED 3
+#define OS_INFO_IDENTITY_BASE 4
+#define OS_INFO_KASLR_OFFSET 5
+#define OS_INFO_KASLR_OFF_PHYS 6
+#define OS_INFO_VMEMMAP 7
+#define OS_INFO_AMODE31_START 8
+#define OS_INFO_AMODE31_END 9
+#define OS_INFO_IMAGE_START 10
+#define OS_INFO_IMAGE_END 11
+#define OS_INFO_IMAGE_PHYS 12
+#define OS_INFO_MAX 13
#define OS_INFO_FLAG_REIPL_CLEAR (1UL << 0)
struct os_info_entry {
- u64 addr;
+ union {
+ u64 addr;
+ u64 val;
+ };
u64 size;
u32 csum;
} __packed;
@@ -33,17 +47,24 @@ struct os_info {
u16 version_minor;
u64 crashkernel_addr;
u64 crashkernel_size;
- struct os_info_entry entry[3];
- u8 reserved[4004];
+ struct os_info_entry entry[OS_INFO_MAX];
+ u8 reserved[3804];
} __packed;
void os_info_init(void);
-void os_info_entry_add(int nr, void *ptr, u64 len);
+void os_info_entry_add_data(int nr, void *ptr, u64 len);
+void os_info_entry_add_val(int nr, u64 val);
void os_info_crashkernel_add(unsigned long base, unsigned long size);
u32 os_info_csum(struct os_info *os_info);
#ifdef CONFIG_CRASH_DUMP
void *os_info_old_entry(int nr, unsigned long *size);
+static inline unsigned long os_info_old_value(int nr)
+{
+ unsigned long size;
+
+ return (unsigned long)os_info_old_entry(nr, &size);
+}
#else
static inline void *os_info_old_entry(int nr, unsigned long *size)
{
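The os_info area above grows from 3 to OS_INFO_MAX entries and can now carry plain values next to address/size pairs. A minimal sketch of the new value interface, assuming only the declarations shown (os_info_old_value() is only available with CONFIG_CRASH_DUMP; the wrapper names are illustrative):

/* Illustrative only: record a scalar in the os_info area ... */
static void save_kaslr_offset(unsigned long kaslr_offset)
{
	os_info_entry_add_val(OS_INFO_KASLR_OFFSET, kaslr_offset);
}

/* ... and read it back from the previous kernel in the kdump kernel. */
static unsigned long old_kaslr_offset(void)
{
	return os_info_old_value(OS_INFO_KASLR_OFFSET);
}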
diff --git a/arch/s390/include/asm/page-states.h b/arch/s390/include/asm/page-states.h
index 08fcbd628120..794fdb21500a 100644
--- a/arch/s390/include/asm/page-states.h
+++ b/arch/s390/include/asm/page-states.h
@@ -7,7 +7,6 @@
#ifndef PAGE_STATES_H
#define PAGE_STATES_H
-#include <asm/sections.h>
#include <asm/page.h>
#define ESSA_GET_STATE 0
@@ -21,7 +20,7 @@
#define ESSA_MAX ESSA_SET_STABLE_NODAT
-extern int __bootdata_preserved(cmma_flag);
+extern int cmma_flag;
static __always_inline unsigned long essa(unsigned long paddr, unsigned char cmd)
{
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 73b9c3bf377f..1ff145f7b52b 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -10,15 +10,10 @@
#include <linux/const.h>
#include <asm/types.h>
+#include <asm/asm.h>
-#define _PAGE_SHIFT 12
-#define _PAGE_SIZE (_AC(1, UL) << _PAGE_SHIFT)
-#define _PAGE_MASK (~(_PAGE_SIZE - 1))
+#include <vdso/page.h>
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT _PAGE_SHIFT
-#define PAGE_SIZE _PAGE_SIZE
-#define PAGE_MASK _PAGE_MASK
#define PAGE_DEFAULT_ACC _AC(0, UL)
/* storage-protection override */
#define PAGE_SPO_ACC 9
@@ -74,7 +69,7 @@ static inline void copy_page(void *to, void *from)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
- vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
+ vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
/*
* These are used to make use of C type-checking..
@@ -148,11 +143,12 @@ static inline int page_reset_referenced(unsigned long addr)
int cc;
asm volatile(
- " rrbe 0,%1\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc) : "a" (addr) : "cc");
- return cc;
+ " rrbe 0,%[addr]\n"
+ CC_IPM(cc)
+ : CC_OUT(cc, cc)
+ : [addr] "a" (addr)
+ : CC_CLOBBER);
+ return CC_TRANSFORM(cc);
}
/* Bits in the storage key */
@@ -162,6 +158,7 @@ static inline int page_reset_referenced(unsigned long addr)
#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */
struct page;
+struct folio;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
@@ -173,22 +170,83 @@ static inline int devmem_is_allowed(unsigned long pfn)
#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE
-#if IS_ENABLED(CONFIG_PGSTE)
-int arch_make_page_accessible(struct page *page);
-#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
+int arch_make_folio_accessible(struct folio *folio);
+#define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
+
+struct vm_layout {
+ unsigned long kaslr_offset;
+ unsigned long kaslr_offset_phys;
+ unsigned long identity_base;
+ unsigned long identity_size;
+};
+
+extern struct vm_layout vm_layout;
+
+#define __kaslr_offset vm_layout.kaslr_offset
+#define __kaslr_offset_phys vm_layout.kaslr_offset_phys
+#ifdef CONFIG_RANDOMIZE_IDENTITY_BASE
+#define __identity_base vm_layout.identity_base
+#else
+#define __identity_base 0UL
#endif
+#define ident_map_size vm_layout.identity_size
+
+static inline unsigned long kaslr_offset(void)
+{
+ return __kaslr_offset;
+}
+
+extern int __kaslr_enabled;
+static inline int kaslr_enabled(void)
+{
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return __kaslr_enabled;
+ return 0;
+}
+
+#define __PAGE_OFFSET __identity_base
+#define PAGE_OFFSET __PAGE_OFFSET
-#define __PAGE_OFFSET 0x0UL
-#define PAGE_OFFSET 0x0UL
+#ifdef __DECOMPRESSOR
-#define __pa(x) ((unsigned long)(x))
+#define __pa_nodebug(x) ((unsigned long)(x))
+#define __pa(x) __pa_nodebug(x)
+#define __pa32(x) __pa(x)
#define __va(x) ((void *)(unsigned long)(x))
+#else /* __DECOMPRESSOR */
+
+static inline unsigned long __pa_nodebug(unsigned long x)
+{
+ if (x < __kaslr_offset)
+ return x - __identity_base;
+ return x - __kaslr_offset + __kaslr_offset_phys;
+}
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+
+unsigned long __phys_addr(unsigned long x, bool is_31bit);
+
+#else /* CONFIG_DEBUG_VIRTUAL */
+
+static inline unsigned long __phys_addr(unsigned long x, bool is_31bit)
+{
+ return __pa_nodebug(x);
+}
+
+#endif /* CONFIG_DEBUG_VIRTUAL */
+
+#define __pa(x) __phys_addr((unsigned long)(x), false)
+#define __pa32(x) __phys_addr((unsigned long)(x), true)
+#define __va(x) ((void *)((unsigned long)(x) + __identity_base))
+
+#endif /* __DECOMPRESSOR */
+
#define phys_to_pfn(phys) ((phys) >> PAGE_SHIFT)
#define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
-#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
-#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
+#define phys_to_folio(phys) page_folio(phys_to_page(phys))
+#define folio_to_phys(folio) pfn_to_phys(folio_pfn(folio))
static inline void *pfn_to_virt(unsigned long pfn)
{
@@ -205,7 +263,7 @@ static inline unsigned long virt_to_pfn(const void *kaddr)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
-#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
+#define virt_addr_valid(kaddr) pfn_valid(phys_to_pfn(__pa_nodebug((unsigned long)(kaddr))))
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
@@ -214,4 +272,12 @@ static inline unsigned long virt_to_pfn(const void *kaddr)
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
+#define AMODE31_SIZE (3 * PAGE_SIZE)
+
+#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
+#define __NO_KASLR_START_KERNEL CONFIG_KERNEL_IMAGE_BASE
+#define __NO_KASLR_END_KERNEL (__NO_KASLR_START_KERNEL + KERNEL_IMAGE_SIZE)
+
+#define TEXT_OFFSET 0x100000
+
#endif /* _S390_PAGE_H */
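With the vm_layout introduced above, a virtual address below __kaslr_offset belongs to the identity mapping and everything at or above it to the relocated kernel image, which is what __pa_nodebug() and __va() implement. A small sketch of that split, assuming only the macros defined in this header (the helper name is illustrative):

/*
 * Illustrative only:
 *   identity map : virt = phys + __identity_base
 *   kernel image : virt = phys - __kaslr_offset_phys + __kaslr_offset
 */
static inline bool vaddr_in_kernel_image(unsigned long vaddr)
{
	return vaddr >= __kaslr_offset;	/* otherwise: identity-mapped */
}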
diff --git a/arch/s390/include/asm/pai.h b/arch/s390/include/asm/pai.h
index 7d1888e3dee6..ebeabd0aaa51 100644
--- a/arch/s390/include/asm/pai.h
+++ b/arch/s390/include/asm/pai.h
@@ -11,12 +11,13 @@
#include <linux/jump_label.h>
#include <asm/lowcore.h>
#include <asm/ptrace.h>
+#include <asm/asm.h>
struct qpaci_info_block {
u64 header;
struct {
u64 : 8;
- u64 num_cc : 8; /* # of supported crypto counters */
+ u64 num_cc : 8; /* # of supported crypto counters */
u64 : 9;
u64 num_nnpa : 7; /* # of supported NNPA counters */
u64 : 32;
@@ -33,12 +34,11 @@ static inline int qpaci(struct qpaci_info_block *info)
" lgr 0,%[size]\n"
" .insn s,0xb28f0000,%[info]\n"
" lgr %[size],0\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [info] "=Q" (*info), [size] "+&d" (size)
+ CC_IPM(cc)
+ : CC_OUT(cc, cc), [info] "=Q" (*info), [size] "+&d" (size)
:
- : "0", "cc", "memory");
- return cc ? (size + 1) * sizeof(u64) : 0;
+ : CC_CLOBBER_LIST("0", "memory"));
+ return CC_TRANSFORM(cc) ? (size + 1) * sizeof(u64) : 0;
}
#define PAI_CRYPTO_BASE 0x1000 /* First event number */
@@ -55,11 +55,11 @@ static __always_inline void pai_kernel_enter(struct pt_regs *regs)
return;
if (!static_branch_unlikely(&pai_key))
return;
- if (!S390_lowcore.ccd)
+ if (!get_lowcore()->ccd)
return;
if (!user_mode(regs))
return;
- WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd | PAI_CRYPTO_KERNEL_OFFSET);
+ WRITE_ONCE(get_lowcore()->ccd, get_lowcore()->ccd | PAI_CRYPTO_KERNEL_OFFSET);
}
static __always_inline void pai_kernel_exit(struct pt_regs *regs)
@@ -68,17 +68,15 @@ static __always_inline void pai_kernel_exit(struct pt_regs *regs)
return;
if (!static_branch_unlikely(&pai_key))
return;
- if (!S390_lowcore.ccd)
+ if (!get_lowcore()->ccd)
return;
if (!user_mode(regs))
return;
- WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd & ~PAI_CRYPTO_KERNEL_OFFSET);
+ WRITE_ONCE(get_lowcore()->ccd, get_lowcore()->ccd & ~PAI_CRYPTO_KERNEL_OFFSET);
}
-enum paievt_mode {
- PAI_MODE_NONE,
- PAI_MODE_SAMPLING,
- PAI_MODE_COUNTING,
-};
+#define PAI_SAVE_AREA(x) ((x)->hw.event_base)
+#define PAI_CPU_MASK(x) ((x)->hw.addr_filters)
+#define PAI_SWLIST(x) (&(x)->hw.tp_list)
#endif
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index e91cd6bbc330..474e1f8d1d3c 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -96,7 +96,6 @@ struct zpci_bar_struct {
u8 size; /* order 2 exponent */
};
-struct s390_domain;
struct kvm_zdev;
#define ZPCI_FUNCTIONS_PER_BUS 256
@@ -107,9 +106,10 @@ struct zpci_bus {
struct list_head resources;
struct list_head bus_next;
struct resource bus_resource;
- int pchid;
+ int topo; /* TID if topo_is_tid, PCHID otherwise */
int domain_nr;
- bool multifunction;
+ u8 multifunction : 1;
+ u8 topo_is_tid : 1;
enum pci_bus_speed max_bus_speed;
};
@@ -122,6 +122,7 @@ struct zpci_dev {
struct rcu_head rcu;
struct hotplug_slot hotplug_slot;
+ struct mutex state_lock; /* protect state changes */
enum zpci_state state;
u32 fid; /* function ID, used by sclp */
u32 fh; /* function handle, used by insn's */
@@ -129,9 +130,12 @@ struct zpci_dev {
u16 vfn; /* virtual function number */
u16 pchid; /* physical channel ID */
u16 maxstbl; /* Maximum store block size */
+ u16 rid; /* RID as supplied by firmware */
+ u16 tid; /* Topology for which RID is valid */
u8 pfgid; /* function group ID */
u8 pft; /* pci function type */
u8 port;
+ u8 fidparm;
u8 dtsm; /* Supported DT mask */
u8 rid_available : 1;
u8 has_hp_slot : 1;
@@ -139,10 +143,10 @@ struct zpci_dev {
u8 is_physfn : 1;
u8 util_str_avail : 1;
u8 irqs_registered : 1;
- u8 reserved : 2;
+ u8 tid_avail : 1;
+ u8 reserved : 1;
unsigned int devfn; /* DEVFN part of the RID */
- struct mutex lock;
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
u32 uid; /* user defined id */
u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
@@ -170,6 +174,7 @@ struct zpci_dev {
u64 dma_mask; /* DMA address space mask */
/* Function measurement block */
+ struct mutex fmb_lock;
struct zpci_fmb *fmb;
u16 fmb_update; /* update interval */
u16 fmb_length;
@@ -180,9 +185,10 @@ struct zpci_dev {
struct dentry *debugfs_dev;
/* IOMMU and passthrough */
- struct s390_domain *s390_domain; /* s390 IOMMU domain data */
+ struct iommu_domain *s390_domain; /* attached IOMMU domain */
struct kvm_zdev *kzdev;
struct mutex kzdev_lock;
+ spinlock_t dom_lock; /* protect s390_domain change */
};
static inline bool zdev_enabled(struct zpci_dev *zdev)
@@ -190,7 +196,14 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
return (zdev->fh & (1UL << 31)) ? true : false;
}
-extern const struct attribute_group *zpci_attr_groups[];
+extern const struct attribute_group zpci_attr_group;
+extern const struct attribute_group pfip_attr_group;
+extern const struct attribute_group zpci_ident_attr_group;
+
+#define ARCH_PCI_DEV_GROUPS &zpci_attr_group, \
+ &pfip_attr_group, \
+ &zpci_ident_attr_group,
+
extern unsigned int s390_pci_force_floating __initdata;
extern unsigned int s390_pci_no_rid;
@@ -202,12 +215,14 @@ extern struct airq_iv *zpci_aif_sbv;
----------------------------------------------------------------------------- */
/* Base stuff */
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
+int zpci_add_device(struct zpci_dev *zdev);
int zpci_enable_device(struct zpci_dev *);
int zpci_disable_device(struct zpci_dev *);
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh);
int zpci_deconfigure_device(struct zpci_dev *zdev);
void zpci_device_reserved(struct zpci_dev *zdev);
bool zpci_is_device_configured(struct zpci_dev *zdev);
+int zpci_scan_devices(void);
int zpci_hot_reset_device(struct zpci_dev *zdev);
int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64, u8 *);
@@ -217,7 +232,7 @@ void zpci_update_fh(struct zpci_dev *zdev, u32 fh);
/* CLP */
int clp_setup_writeback_mio(void);
-int clp_scan_pci_devices(void);
+int clp_scan_pci_devices(struct list_head *scan_list);
int clp_query_pci_fn(struct zpci_dev *zdev);
int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as);
int clp_disable_fh(struct zpci_dev *zdev, u32 *fh);
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index f0c677ddd270..3fff2f7095c8 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -110,7 +110,8 @@ struct clp_req_query_pci {
struct clp_rsp_query_pci {
struct clp_rsp_hdr hdr;
u16 vfn; /* virtual fn number */
- u16 : 3;
+ u16 : 2;
+ u16 tid_avail : 1;
u16 rid_avail : 1;
u16 is_physfn : 1;
u16 reserved1 : 1;
@@ -122,16 +123,18 @@ struct clp_rsp_query_pci {
u16 pchid;
__le32 bar[PCI_STD_NUM_BARS];
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
- u16 : 12;
- u16 port : 4;
+ u8 fidparm;
+ u8 reserved3 : 4;
+ u8 port : 4;
u8 fmb_len;
u8 pft; /* pci function type */
u64 sdma; /* start dma as */
u64 edma; /* end dma as */
#define ZPCI_RID_MASK_DEVFN 0x00ff
u16 rid; /* BUS/DEVFN PCI address */
- u16 reserved0;
- u32 reserved[10];
+ u32 reserved0;
+ u16 tid;
+ u32 reserved[9];
u32 uid; /* user defined id */
u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
u32 reserved2[16];
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index 2686bee800e3..43a5ea4ee20f 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -143,7 +143,7 @@ static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max)
static inline int zpci_memcpy_fromio(void *dst,
const volatile void __iomem *src,
- unsigned long n)
+ size_t n)
{
int size, rc = 0;
@@ -162,7 +162,7 @@ static inline int zpci_memcpy_fromio(void *dst,
}
static inline int zpci_memcpy_toio(volatile void __iomem *dst,
- const void *src, unsigned long n)
+ const void *src, size_t n)
{
int size, rc = 0;
@@ -187,7 +187,7 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
}
static inline int zpci_memset_io(volatile void __iomem *dst,
- unsigned char val, size_t count)
+ int val, size_t count)
{
u8 *src = kmalloc(count, GFP_KERNEL);
int rc;
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 264095dd84bc..84f6b8357b45 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -4,12 +4,13 @@
#include <linux/preempt.h>
#include <asm/cmpxchg.h>
+#include <asm/march.h>
/*
* s390 uses its own implementation for per cpu data, the offset of
* the cpu local data area is cached in the cpu's lowcore memory.
*/
-#define __my_cpu_offset S390_lowcore.percpu_offset
+#define __my_cpu_offset get_lowcore()->percpu_offset
/*
* For 64 bit module code, the module may be more than 4G above the
@@ -50,7 +51,7 @@
#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
-#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifndef MARCH_HAS_Z196_FEATURES
#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
@@ -61,7 +62,7 @@
#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#else /* MARCH_HAS_Z196_FEATURES */
#define arch_this_cpu_add(pcp, val, op1, op2, szcast) \
{ \
@@ -129,7 +130,7 @@
#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lao")
#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, "laog")
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#endif /* MARCH_HAS_Z196_FEATURES */
#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
({ \
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 9917e2717b2b..e53894cedf08 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -37,9 +37,9 @@ extern ssize_t cpumf_events_sysfs_show(struct device *dev,
/* Perf callbacks */
struct pt_regs;
-extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
-extern unsigned long perf_misc_flags(struct pt_regs *regs);
-#define perf_misc_flags(regs) perf_misc_flags(regs)
+extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_arch_misc_flags(struct pt_regs *regs);
+#define perf_arch_misc_flags(regs) perf_arch_misc_flags(regs)
#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
/* Perf pt_regs extension for sample-data-entry indicators */
@@ -48,31 +48,8 @@ struct perf_sf_sde_regs {
unsigned long reserved:63; /* reserved */
};
-/* Perf PMU definitions for the counter facility */
-#define PERF_CPUM_CF_MAX_CTR 0xffffUL /* Max ctr for ECCTR */
-
-/* Perf PMU definitions for the sampling facility */
-#define PERF_CPUM_SF_MAX_CTR 2
-#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */
-#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */
-#define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */
-#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */
-#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
-#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \
- PERF_CPUM_SF_DIAG_MODE)
-#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */
-
-#define REG_NONE 0
-#define REG_OVERFLOW 1
-#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config)
-#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc)
-#define TEAR_REG(hwc) ((hwc)->last_tag)
-#define SAMPL_RATE(hwc) ((hwc)->event_base)
-#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
-#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
-#define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
-
#define perf_arch_fetch_caller_regs(regs, __ip) do { \
+ (regs)->psw.mask = 0; \
(regs)->psw.addr = (__ip); \
(regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \
offsetof(struct stack_frame, back_chain); \
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 502d655fe6ae..b19b6ed2ab53 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -23,9 +23,9 @@ unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);
unsigned long *page_table_alloc(struct mm_struct *);
-struct page *page_table_alloc_pgste(struct mm_struct *mm);
+struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
-void page_table_free_pgste(struct page *page);
+void page_table_free_pgste(struct ptdesc *ptdesc);
extern int page_table_allocate_pgste;
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
@@ -53,29 +53,42 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
{
unsigned long *table = crst_table_alloc(mm);
- if (table)
- crst_table_init(table, _REGION2_ENTRY_EMPTY);
+ if (!table)
+ return NULL;
+ crst_table_init(table, _REGION2_ENTRY_EMPTY);
+ pagetable_p4d_ctor(virt_to_ptdesc(table));
+
return (p4d_t *) table;
}
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
- if (!mm_p4d_folded(mm))
- crst_table_free(mm, (unsigned long *) p4d);
+ if (mm_p4d_folded(mm))
+ return;
+
+ pagetable_dtor(virt_to_ptdesc(p4d));
+ crst_table_free(mm, (unsigned long *) p4d);
}
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
unsigned long *table = crst_table_alloc(mm);
- if (table)
- crst_table_init(table, _REGION3_ENTRY_EMPTY);
+
+ if (!table)
+ return NULL;
+ crst_table_init(table, _REGION3_ENTRY_EMPTY);
+ pagetable_pud_ctor(virt_to_ptdesc(table));
+
return (pud_t *) table;
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
- if (!mm_pud_folded(mm))
- crst_table_free(mm, (unsigned long *) pud);
+ if (mm_pud_folded(mm))
+ return;
+
+ pagetable_dtor(virt_to_ptdesc(pud));
+ crst_table_free(mm, (unsigned long *) pud);
}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
@@ -96,7 +109,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
if (mm_pmd_folded(mm))
return;
- pagetable_pmd_dtor(virt_to_ptdesc(pmd));
+ pagetable_dtor(virt_to_ptdesc(pmd));
crst_table_free(mm, (unsigned long *) pmd);
}
@@ -117,11 +130,18 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- return (pgd_t *) crst_table_alloc(mm);
+ unsigned long *table = crst_table_alloc(mm);
+
+ if (!table)
+ return NULL;
+ pagetable_pgd_ctor(virt_to_ptdesc(table));
+
+ return (pgd_t *) table;
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
+ pagetable_dtor(virt_to_ptdesc(pgd));
crst_table_free(mm, (unsigned long *) pgd);
}
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 1299b56e43f6..3ca5af4cfe43 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -17,7 +17,6 @@
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
-#include <asm/sections.h>
#include <asm/ctlreg.h>
#include <asm/bug.h>
#include <asm/page.h>
@@ -35,7 +34,7 @@ enum {
PG_DIRECT_MAP_MAX
};
-extern atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
+extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
static inline void update_page_count(int level, long count)
{
@@ -85,14 +84,14 @@ extern unsigned long zero_page_mask;
* happen without trampolines and in addition the placement within a
* 2GB frame is branch prediction unit friendly.
*/
-extern unsigned long __bootdata_preserved(VMALLOC_START);
-extern unsigned long __bootdata_preserved(VMALLOC_END);
+extern unsigned long VMALLOC_START;
+extern unsigned long VMALLOC_END;
#define VMALLOC_DEFAULT_SIZE ((512UL << 30) - MODULES_LEN)
-extern struct page *__bootdata_preserved(vmemmap);
-extern unsigned long __bootdata_preserved(vmemmap_size);
+extern struct page *vmemmap;
+extern unsigned long vmemmap_size;
-extern unsigned long __bootdata_preserved(MODULES_VADDR);
-extern unsigned long __bootdata_preserved(MODULES_END);
+extern unsigned long MODULES_VADDR;
+extern unsigned long MODULES_END;
#define MODULES_VADDR MODULES_VADDR
#define MODULES_END MODULES_END
#define MODULES_LEN (1UL << 31)
@@ -107,6 +106,26 @@ static inline int is_module_addr(void *addr)
return 1;
}
+#ifdef CONFIG_KMSAN
+#define KMSAN_VMALLOC_SIZE (VMALLOC_END - VMALLOC_START)
+#define KMSAN_VMALLOC_SHADOW_START VMALLOC_END
+#define KMSAN_VMALLOC_SHADOW_END (KMSAN_VMALLOC_SHADOW_START + KMSAN_VMALLOC_SIZE)
+#define KMSAN_VMALLOC_ORIGIN_START KMSAN_VMALLOC_SHADOW_END
+#define KMSAN_VMALLOC_ORIGIN_END (KMSAN_VMALLOC_ORIGIN_START + KMSAN_VMALLOC_SIZE)
+#define KMSAN_MODULES_SHADOW_START KMSAN_VMALLOC_ORIGIN_END
+#define KMSAN_MODULES_SHADOW_END (KMSAN_MODULES_SHADOW_START + MODULES_LEN)
+#define KMSAN_MODULES_ORIGIN_START KMSAN_MODULES_SHADOW_END
+#define KMSAN_MODULES_ORIGIN_END (KMSAN_MODULES_ORIGIN_START + MODULES_LEN)
+#endif
+
+#ifdef CONFIG_RANDOMIZE_BASE
+#define KASLR_LEN (1UL << 31)
+#else
+#define KASLR_LEN 0UL
+#endif
+
+void setup_protection_map(void);
+
/*
* A 64 bit pagetable entry of S390 has following format:
* | PFRA |0IPC| OS |
@@ -259,28 +278,40 @@ static inline int is_module_addr(void *addr)
#define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
-#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
+#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH | \
+ _REGION3_ENTRY_PRESENT)
#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
+#define _REGION3_ENTRY_HARDWARE_BITS 0xfffffffffffff6ffUL
+#define _REGION3_ENTRY_HARDWARE_BITS_LARGE 0xffffffff8001073cUL
#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_DIRTY 0x2000 /* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG 0x1000 /* SW region young bit */
+#define _REGION3_ENTRY_COMM 0x0010 /* Common-Region, marks swap entry */
#define _REGION3_ENTRY_LARGE 0x0400 /* RTTE-format control, large page */
-#define _REGION3_ENTRY_READ 0x0002 /* SW region read bit */
-#define _REGION3_ENTRY_WRITE 0x0001 /* SW region write bit */
+#define _REGION3_ENTRY_WRITE 0x8000 /* SW region write bit */
+#define _REGION3_ENTRY_READ 0x4000 /* SW region read bit */
#ifdef CONFIG_MEM_SOFT_DIRTY
-#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
+#define _REGION3_ENTRY_SOFT_DIRTY 0x0002 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif
#define _REGION_ENTRY_BITS 0xfffffffffffff22fUL
+/*
+ * SW region present bit. For non-leaf region-third-table entries, bits 62-63
+ * indicate the TABLE LENGTH and both must be set to 1. But such entries
+ * would always be considered as present, so it is safe to use bit 63 as
+ * PRESENT bit for PUD.
+ */
+#define _REGION3_ENTRY_PRESENT 0x0001
+
/* Bits in the segment table entry */
-#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
-#define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe30UL
-#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff00730UL
+#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe3fUL
+#define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe3cUL
+#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff1073cUL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* page table origin */
#define _SEGMENT_ENTRY_PROTECT 0x200 /* segment protection bit */
@@ -288,21 +319,29 @@ static inline int is_module_addr(void *addr)
#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c /* segment table type mask */
-#define _SEGMENT_ENTRY (0)
+#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PRESENT)
#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
#define _SEGMENT_ENTRY_DIRTY 0x2000 /* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */
+
+#define _SEGMENT_ENTRY_COMM 0x0010 /* Common-Segment, marks swap entry */
#define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */
-#define _SEGMENT_ENTRY_WRITE 0x0002 /* SW segment write bit */
-#define _SEGMENT_ENTRY_READ 0x0001 /* SW segment read bit */
+#define _SEGMENT_ENTRY_WRITE 0x8000 /* SW segment write bit */
+#define _SEGMENT_ENTRY_READ 0x4000 /* SW segment read bit */
#ifdef CONFIG_MEM_SOFT_DIRTY
-#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
+#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0002 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif
+#define _SEGMENT_ENTRY_PRESENT 0x0001 /* SW segment present bit */
+
+/* Common bits in region and segment table entries, for swap entries */
+#define _RST_ENTRY_COMM 0x0010 /* Common-Region/Segment, marks swap entry */
+#define _RST_ENTRY_INVALID 0x0020 /* invalid region/segment table entry */
+
#define _CRST_ENTRIES 2048 /* number of region/segment table entries */
#define _PAGE_ENTRIES 256 /* number of page table entries */
@@ -318,7 +357,7 @@ static inline int is_module_addr(void *addr)
#define _REGION2_INDEX (0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX (0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX (0x7ffUL << _SEGMENT_SHIFT)
-#define _PAGE_INDEX (0xffUL << _PAGE_SHIFT)
+#define _PAGE_INDEX (0xffUL << PAGE_SHIFT)
#define _REGION1_SIZE (1UL << _REGION1_SHIFT)
#define _REGION2_SIZE (1UL << _REGION2_SHIFT)
@@ -381,9 +420,10 @@ static inline int is_module_addr(void *addr)
#define PGSTE_HC_BIT 0x0020000000000000UL
#define PGSTE_GR_BIT 0x0004000000000000UL
#define PGSTE_GC_BIT 0x0002000000000000UL
-#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
-#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
-#define PGSTE_VSIE_BIT 0x0000200000000000UL /* ref'd in a shadow table */
+#define PGSTE_ST2_MASK 0x0000ffff00000000UL
+#define PGSTE_UC_BIT 0x0000000000008000UL /* user dirty (migration) */
+#define PGSTE_IN_BIT 0x0000000000004000UL /* IPTE notify bit */
+#define PGSTE_VSIE_BIT 0x0000000000002000UL /* ref'd in a shadow table */
/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO 0x0000000080000000UL
@@ -405,90 +445,107 @@ static inline int is_module_addr(void *addr)
/*
* Page protection definitions.
*/
-#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
-#define PAGE_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | \
+#define __PAGE_NONE (_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
+#define __PAGE_RO (_PAGE_PRESENT | _PAGE_READ | \
_PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
-#define PAGE_RX __pgprot(_PAGE_PRESENT | _PAGE_READ | \
+#define __PAGE_RX (_PAGE_PRESENT | _PAGE_READ | \
_PAGE_INVALID | _PAGE_PROTECT)
-#define PAGE_RW __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+#define __PAGE_RW (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
-#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+#define __PAGE_RWX (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_INVALID | _PAGE_PROTECT)
-
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+#define __PAGE_SHARED (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
-#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+#define __PAGE_KERNEL (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
-#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
+#define __PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
_PAGE_PROTECT | _PAGE_NOEXEC)
-#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
- _PAGE_YOUNG | _PAGE_DIRTY)
-/*
- * On s390 the page table entry has an invalid bit and a read-only bit.
- * Read permission implies execute permission and write permission
- * implies read permission.
- */
- /*xwr*/
+extern unsigned long page_noexec_mask;
+
+#define __pgprot_page_mask(x) __pgprot((x) & page_noexec_mask)
+
+#define PAGE_NONE __pgprot_page_mask(__PAGE_NONE)
+#define PAGE_RO __pgprot_page_mask(__PAGE_RO)
+#define PAGE_RX __pgprot_page_mask(__PAGE_RX)
+#define PAGE_RW __pgprot_page_mask(__PAGE_RW)
+#define PAGE_RWX __pgprot_page_mask(__PAGE_RWX)
+#define PAGE_SHARED __pgprot_page_mask(__PAGE_SHARED)
+#define PAGE_KERNEL __pgprot_page_mask(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO __pgprot_page_mask(__PAGE_KERNEL_RO)
/*
* Segment entry (large page) protection definitions.
*/
-#define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \
+#define __SEGMENT_NONE (_SEGMENT_ENTRY_PRESENT | \
+ _SEGMENT_ENTRY_INVALID | \
_SEGMENT_ENTRY_PROTECT)
-#define SEGMENT_RO __pgprot(_SEGMENT_ENTRY_PROTECT | \
+#define __SEGMENT_RO (_SEGMENT_ENTRY_PRESENT | \
+ _SEGMENT_ENTRY_PROTECT | \
_SEGMENT_ENTRY_READ | \
_SEGMENT_ENTRY_NOEXEC)
-#define SEGMENT_RX __pgprot(_SEGMENT_ENTRY_PROTECT | \
+#define __SEGMENT_RX (_SEGMENT_ENTRY_PRESENT | \
+ _SEGMENT_ENTRY_PROTECT | \
_SEGMENT_ENTRY_READ)
-#define SEGMENT_RW __pgprot(_SEGMENT_ENTRY_READ | \
+#define __SEGMENT_RW (_SEGMENT_ENTRY_PRESENT | \
+ _SEGMENT_ENTRY_READ | \
_SEGMENT_ENTRY_WRITE | \
_SEGMENT_ENTRY_NOEXEC)
-#define SEGMENT_RWX __pgprot(_SEGMENT_ENTRY_READ | \
+#define __SEGMENT_RWX (_SEGMENT_ENTRY_PRESENT | \
+ _SEGMENT_ENTRY_READ | \
_SEGMENT_ENTRY_WRITE)
-#define SEGMENT_KERNEL __pgprot(_SEGMENT_ENTRY | \
+#define __SEGMENT_KERNEL (_SEGMENT_ENTRY | \
_SEGMENT_ENTRY_LARGE | \
_SEGMENT_ENTRY_READ | \
_SEGMENT_ENTRY_WRITE | \
_SEGMENT_ENTRY_YOUNG | \
_SEGMENT_ENTRY_DIRTY | \
_SEGMENT_ENTRY_NOEXEC)
-#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
+#define __SEGMENT_KERNEL_RO (_SEGMENT_ENTRY | \
_SEGMENT_ENTRY_LARGE | \
_SEGMENT_ENTRY_READ | \
_SEGMENT_ENTRY_YOUNG | \
_SEGMENT_ENTRY_PROTECT | \
_SEGMENT_ENTRY_NOEXEC)
-#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \
- _SEGMENT_ENTRY_LARGE | \
- _SEGMENT_ENTRY_READ | \
- _SEGMENT_ENTRY_WRITE | \
- _SEGMENT_ENTRY_YOUNG | \
- _SEGMENT_ENTRY_DIRTY)
+
+extern unsigned long segment_noexec_mask;
+
+#define __pgprot_segment_mask(x) __pgprot((x) & segment_noexec_mask)
+
+#define SEGMENT_NONE __pgprot_segment_mask(__SEGMENT_NONE)
+#define SEGMENT_RO __pgprot_segment_mask(__SEGMENT_RO)
+#define SEGMENT_RX __pgprot_segment_mask(__SEGMENT_RX)
+#define SEGMENT_RW __pgprot_segment_mask(__SEGMENT_RW)
+#define SEGMENT_RWX __pgprot_segment_mask(__SEGMENT_RWX)
+#define SEGMENT_KERNEL __pgprot_segment_mask(__SEGMENT_KERNEL)
+#define SEGMENT_KERNEL_RO __pgprot_segment_mask(__SEGMENT_KERNEL_RO)
/*
* Region3 entry (large page) protection definitions.
*/
-#define REGION3_KERNEL __pgprot(_REGION_ENTRY_TYPE_R3 | \
- _REGION3_ENTRY_LARGE | \
- _REGION3_ENTRY_READ | \
- _REGION3_ENTRY_WRITE | \
- _REGION3_ENTRY_YOUNG | \
+#define __REGION3_KERNEL (_REGION_ENTRY_TYPE_R3 | \
+ _REGION3_ENTRY_PRESENT | \
+ _REGION3_ENTRY_LARGE | \
+ _REGION3_ENTRY_READ | \
+ _REGION3_ENTRY_WRITE | \
+ _REGION3_ENTRY_YOUNG | \
_REGION3_ENTRY_DIRTY | \
_REGION_ENTRY_NOEXEC)
-#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
- _REGION3_ENTRY_LARGE | \
- _REGION3_ENTRY_READ | \
- _REGION3_ENTRY_YOUNG | \
- _REGION_ENTRY_PROTECT | \
- _REGION_ENTRY_NOEXEC)
-#define REGION3_KERNEL_EXEC __pgprot(_REGION_ENTRY_TYPE_R3 | \
- _REGION3_ENTRY_LARGE | \
- _REGION3_ENTRY_READ | \
- _REGION3_ENTRY_WRITE | \
- _REGION3_ENTRY_YOUNG | \
- _REGION3_ENTRY_DIRTY)
+#define __REGION3_KERNEL_RO (_REGION_ENTRY_TYPE_R3 | \
+ _REGION3_ENTRY_PRESENT | \
+ _REGION3_ENTRY_LARGE | \
+ _REGION3_ENTRY_READ | \
+ _REGION3_ENTRY_YOUNG | \
+ _REGION_ENTRY_PROTECT | \
+ _REGION_ENTRY_NOEXEC)
+
+extern unsigned long region_noexec_mask;
+
+#define __pgprot_region_mask(x) __pgprot((x) & region_noexec_mask)
+
+#define REGION3_KERNEL __pgprot_region_mask(__REGION3_KERNEL)
+#define REGION3_KERNEL_RO __pgprot_region_mask(__REGION3_KERNEL_RO)
static inline bool mm_p4d_folded(struct mm_struct *mm)
{
@@ -566,10 +623,20 @@ static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
}
/*
- * In the case that a guest uses storage keys
- * faults should no longer be backed by zero pages
+ * As soon as the guest uses storage keys or enables PV, we deduplicate all
+ * mapped shared zeropages and prevent new shared zeropages from getting
+ * mapped.
*/
-#define mm_forbids_zeropage mm_has_pgste
+#define mm_forbids_zeropage mm_forbids_zeropage
+static inline int mm_forbids_zeropage(struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+ if (!mm->context.allow_cow_sharing)
+ return 1;
+#endif
+ return 0;
+}
+
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
@@ -591,7 +658,15 @@ static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
: "cc");
}
-static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
+/**
+ * cspg() - Compare and Swap and Purge (CSPG)
+ * @ptr: Pointer to the value to be exchanged
+ * @old: The expected old value
+ * @new: The new value
+ *
+ * Return: True if compare and swap was successful, otherwise false.
+ */
+static inline bool cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
union register_pair r1 = { .even = old, .odd = new, };
unsigned long address = (unsigned long)ptr | 1;
@@ -601,6 +676,7 @@ static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new
: [r1] "+&d" (r1.pair), "+m" (*ptr)
: [address] "d" (address)
: "cc");
+ return old == r1.even;
}
#define CRDTE_DTT_PAGE 0x00UL
@@ -609,7 +685,18 @@ static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new
#define CRDTE_DTT_REGION2 0x18UL
#define CRDTE_DTT_REGION1 0x1cUL
-static inline void crdte(unsigned long old, unsigned long new,
+/**
+ * crdte() - Compare and Replace DAT Table Entry
+ * @old: The expected old value
+ * @new: The new value
+ * @table: Pointer to the value to be exchanged
+ * @dtt: Table type of the table to be exchanged
+ * @address: The address mapped by the entry to be replaced
+ * @asce: The ASCE of this entry
+ *
+ * Return: True if compare and replace was successful, otherwise false.
+ */
+static inline bool crdte(unsigned long old, unsigned long new,
unsigned long *table, unsigned long dtt,
unsigned long address, unsigned long asce)
{
@@ -620,6 +707,7 @@ static inline void crdte(unsigned long old, unsigned long new,
: [r1] "+&d" (r1.pair)
: [r2] "d" (r2.pair), [asce] "a" (asce)
: "memory", "cc");
+ return old == r1.even;
}
/*
@@ -695,7 +783,7 @@ static inline int pud_present(pud_t pud)
{
if (pud_folded(pud))
return 1;
- return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
+ return (pud_val(pud) & _REGION3_ENTRY_PRESENT) != 0;
}
static inline int pud_none(pud_t pud)
@@ -705,23 +793,28 @@ static inline int pud_none(pud_t pud)
return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}
-#define pud_leaf pud_large
-static inline int pud_large(pud_t pud)
+#define pud_leaf pud_leaf
+static inline bool pud_leaf(pud_t pud)
{
if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
return 0;
- return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
+ return (pud_present(pud) && (pud_val(pud) & _REGION3_ENTRY_LARGE) != 0);
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_PRESENT) != 0;
}
-#define pmd_leaf pmd_large
-static inline int pmd_large(pmd_t pmd)
+#define pmd_leaf pmd_leaf
+static inline bool pmd_leaf(pmd_t pmd)
{
- return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
+ return (pmd_present(pmd) && (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0);
}
static inline int pmd_bad(pmd_t pmd)
{
- if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
+ if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_leaf(pmd))
return 1;
return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}
@@ -730,7 +823,7 @@ static inline int pud_bad(pud_t pud)
{
unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
- if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
+ if (type > _REGION_ENTRY_TYPE_R3 || pud_leaf(pud))
return 1;
if (type < _REGION_ENTRY_TYPE_R3)
return 0;
@@ -748,11 +841,6 @@ static inline int p4d_bad(p4d_t p4d)
return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}
-static inline int pmd_present(pmd_t pmd)
-{
- return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
-}
-
static inline int pmd_none(pmd_t pmd)
{
return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
@@ -820,8 +908,8 @@ static inline int pte_protnone(pte_t pte)
static inline int pmd_protnone(pmd_t pmd)
{
- /* pmd_large(pmd) implies pmd_present(pmd) */
- return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
+ /* pmd_leaf(pmd) implies pmd_present(pmd) */
+ return pmd_leaf(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif
@@ -904,6 +992,7 @@ static inline int pte_unused(pte_t pte)
* young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
* must not be set.
*/
+#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
@@ -1149,7 +1238,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
/* At this point the reference through the mapping is still present */
if (mm_is_protected(mm) && pte_present(res))
- uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
+ uv_convert_from_secure_pte(res);
return res;
}
@@ -1167,7 +1256,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
/* At this point the reference through the mapping is still present */
if (mm_is_protected(vma->vm_mm) && pte_present(res))
- uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
+ uv_convert_from_secure_pte(res);
return res;
}
@@ -1199,14 +1288,14 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
* The notifier should have destroyed all protected vCPUs at this
* point, so the destroy should be successful.
*/
- if (full && !uv_destroy_owned_page(pte_val(res) & PAGE_MASK))
+ if (full && !uv_destroy_pte(res))
return res;
/*
* If something went wrong and the page could not be destroyed, or
* if this is not a mm teardown, the slower export is used as
* fallback instead.
*/
- uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
+ uv_convert_from_secure_pte(res);
return res;
}
@@ -1316,6 +1405,8 @@ pgprot_t pgprot_writecombine(pgprot_t prot);
#define pgprot_writethrough pgprot_writethrough
pgprot_t pgprot_writethrough(pgprot_t prot);
+#define PFN_PTE_SHIFT PAGE_SHIFT
+
/*
* Set multiple PTEs to consecutive pages with a single call. All PTEs
* are within the same folio, PMD and VMA.
@@ -1355,8 +1446,6 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
pte_t __pte;
__pte = __pte(physpage | pgprot_val(pgprot));
- if (!MACHINE_HAS_NX)
- __pte = clear_pte_bit(__pte, __pgprot(_PAGE_NOEXEC));
return pte_mkyoung(__pte);
}
@@ -1383,7 +1472,7 @@ static inline unsigned long pmd_deref(pmd_t pmd)
unsigned long origin_mask;
origin_mask = _SEGMENT_ENTRY_ORIGIN;
- if (pmd_large(pmd))
+ if (pmd_leaf(pmd))
origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
return (unsigned long)__va(pmd_val(pmd) & origin_mask);
}
@@ -1398,11 +1487,12 @@ static inline unsigned long pud_deref(pud_t pud)
unsigned long origin_mask;
origin_mask = _REGION_ENTRY_ORIGIN;
- if (pud_large(pud))
+ if (pud_leaf(pud))
origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
return (unsigned long)__va(pud_val(pud) & origin_mask);
}
+#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
return __pa(pud_deref(pud)) >> PAGE_SHIFT;
@@ -1723,8 +1813,6 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t entry)
{
- if (!MACHINE_HAS_NX)
- entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
set_pmd(pmdp, entry);
}
@@ -1766,8 +1854,10 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
- pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
+ pmd_t pmd;
+ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+ pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}
@@ -1794,7 +1884,7 @@ static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
static inline int pmd_trans_huge(pmd_t pmd)
{
- return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
+ return pmd_leaf(pmd);
}
#define has_transparent_hugepage has_transparent_hugepage
@@ -1854,6 +1944,53 @@ static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+/*
+ * 64 bit swap entry format for REGION3 and SEGMENT table entries (RSTE)
+ * Bits 59 and 63 are used to indicate the swap entry. Bit 58 marks the rste
+ * as invalid.
+ * A swap entry is indicated by bit pattern (rste & 0x011) == 0x010
+ * |                       offset                       |Xtype |11TT|S0|
+ * |0000000000111111111122222222223333333333444444444455|555555|5566|66|
+ * |0123456789012345678901234567890123456789012345678901|234567|8901|23|
+ *
+ * Bits 0-51 store the offset.
+ * Bits 53-57 store the type.
+ * Bit 62 (S) is used for softdirty tracking.
+ * Bits 60-61 (TT) indicate the table type: 0x01 for REGION3 and 0x00 for SEGMENT.
+ * Bit 52 (X) is unused.
+ */
+
+#define __SWP_OFFSET_MASK_RSTE ((1UL << 52) - 1)
+#define __SWP_OFFSET_SHIFT_RSTE 12
+#define __SWP_TYPE_MASK_RSTE ((1UL << 5) - 1)
+#define __SWP_TYPE_SHIFT_RSTE 6
+
+/*
+ * TT bits set to 0x00 == SEGMENT. For REGION3 entries, caller must add R3
+ * bits 0x01. See also __set_huge_pte_at().
+ */
+static inline unsigned long mk_swap_rste(unsigned long type, unsigned long offset)
+{
+ unsigned long rste;
+
+ rste = _RST_ENTRY_INVALID | _RST_ENTRY_COMM;
+ rste |= (offset & __SWP_OFFSET_MASK_RSTE) << __SWP_OFFSET_SHIFT_RSTE;
+ rste |= (type & __SWP_TYPE_MASK_RSTE) << __SWP_TYPE_SHIFT_RSTE;
+ return rste;
+}
+
+static inline unsigned long __swp_type_rste(swp_entry_t entry)
+{
+ return (entry.val >> __SWP_TYPE_SHIFT_RSTE) & __SWP_TYPE_MASK_RSTE;
+}
+
+static inline unsigned long __swp_offset_rste(swp_entry_t entry)
+{
+ return (entry.val >> __SWP_OFFSET_SHIFT_RSTE) & __SWP_OFFSET_MASK_RSTE;
+}
+
+#define __rste_to_swp_entry(rste) ((swp_entry_t) { rste })
+
extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc);
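/*
 * A minimal round-trip sketch for the RSTE swap helpers above: encode a
 * (type, offset) pair, wrap it in the generic swp_entry_t, and read both
 * fields back. The concrete bit positions follow the format comment above.
 */
static inline void rste_swap_example(void)
{
	unsigned long rste = mk_swap_rste(3, 0x1234);		/* type 3, offset 0x1234 */
	swp_entry_t entry = __rste_to_swp_entry(rste);
	unsigned long type = __swp_type_rste(entry);		/* -> 3      (bits 53-57) */
	unsigned long offset = __swp_offset_rste(entry);	/* -> 0x1234 (bits 0-51)  */
}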
@@ -1871,4 +2008,18 @@ extern void s390_reset_cmma(struct mm_struct *mm);
#define pmd_pgtable(pmd) \
((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
+static inline unsigned long gmap_pgste_get_pgt_addr(unsigned long *pgt)
+{
+ unsigned long *pgstes, res;
+
+ pgstes = pgt + _PAGE_ENTRIES;
+
+ res = (pgstes[0] & PGSTE_ST2_MASK) << 16;
+ res |= pgstes[1] & PGSTE_ST2_MASK;
+ res |= (pgstes[2] & PGSTE_ST2_MASK) >> 16;
+ res |= (pgstes[3] & PGSTE_ST2_MASK) >> 32;
+
+ return res;
+}
+
#endif /* _S390_PAGE_H */
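/*
 * Worked illustration of gmap_pgste_get_pgt_addr() above, assuming
 * PGSTE_ST2_MASK selects bits 32-47 of a PGSTE (0x0000ffff00000000UL; its
 * definition is outside this hunk). Each of the first four PGSTEs, which are
 * stored after the page table entries, contributes 16 bits of the result:
 *
 *   pgstes[0] ST2 field << 16  ->  most-significant 16 bits of res
 *   pgstes[1] ST2 field        ->  next 16 bits
 *   pgstes[2] ST2 field >> 16  ->  next 16 bits
 *   pgstes[3] ST2 field >> 32  ->  least-significant 16 bits
 */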
diff --git a/arch/s390/include/asm/physmem_info.h b/arch/s390/include/asm/physmem_info.h
index 9e41a74fce9a..7ef3bbec98b0 100644
--- a/arch/s390/include/asm/physmem_info.h
+++ b/arch/s390/include/asm/physmem_info.h
@@ -9,6 +9,7 @@ enum physmem_info_source {
MEM_DETECT_NONE = 0,
MEM_DETECT_SCLP_STOR_INFO,
MEM_DETECT_DIAG260,
+ MEM_DETECT_DIAG500_STOR_LIMIT,
MEM_DETECT_SCLP_READ_INFO,
MEM_DETECT_BIN_SEARCH
};
@@ -25,7 +26,7 @@ enum reserved_range_type {
RR_AMODE31,
RR_IPLREPORT,
RR_CERT_COMP_LIST,
- RR_MEM_DETECT_EXTENDED,
+ RR_MEM_DETECT_EXT,
RR_VMEM,
RR_MAX
};
@@ -107,6 +108,8 @@ static inline const char *get_physmem_info_source(void)
return "sclp storage info";
case MEM_DETECT_DIAG260:
return "diag260";
+ case MEM_DETECT_DIAG500_STOR_LIMIT:
+ return "diag500 storage limit";
case MEM_DETECT_SCLP_READ_INFO:
return "sclp read info";
case MEM_DETECT_BIN_SEARCH:
@@ -125,7 +128,7 @@ static inline const char *get_rr_type_name(enum reserved_range_type t)
RR_TYPE_NAME(AMODE31);
RR_TYPE_NAME(IPLREPORT);
RR_TYPE_NAME(CERT_COMP_LIST);
- RR_TYPE_NAME(MEM_DETECT_EXTENDED);
+ RR_TYPE_NAME(MEM_DETECT_EXT);
RR_TYPE_NAME(VMEM);
default:
return "UNKNOWN";
@@ -169,4 +172,7 @@ static inline unsigned long get_physmem_reserved(enum reserved_range_type type,
return *size;
}
+#define AMODE31_START (physmem_info.reserved[RR_AMODE31].start)
+#define AMODE31_END (physmem_info.reserved[RR_AMODE31].end)
+
#endif
diff --git a/arch/s390/include/asm/pkey.h b/arch/s390/include/asm/pkey.h
index 47d80a7451a6..5dca1a46a9f6 100644
--- a/arch/s390/include/asm/pkey.h
+++ b/arch/s390/include/asm/pkey.h
@@ -22,7 +22,7 @@
* @param protkey pointer to buffer receiving the protected key
* @return 0 on success, negative errno value on failure
*/
-int pkey_keyblob2pkey(const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+int pkey_key2protkey(const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
#endif /* _KAPI_PKEY_H */
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index bf15da0fedbc..6ccd033acfe5 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -5,46 +5,62 @@
#include <asm/current.h>
#include <linux/thread_info.h>
#include <asm/atomic_ops.h>
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#include <asm/cmpxchg.h>
+#include <asm/march.h>
/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED 0x80000000
+
+/*
+ * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
+ * that a decrement hitting 0 means we can and should reschedule.
+ */
#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
-static inline int preempt_count(void)
+/*
+ * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
+ * that think a non-zero value indicates we cannot preempt.
+ */
+static __always_inline int preempt_count(void)
{
- return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
+ return READ_ONCE(get_lowcore()->preempt_count) & ~PREEMPT_NEED_RESCHED;
}
-static inline void preempt_count_set(int pc)
+static __always_inline void preempt_count_set(int pc)
{
int old, new;
+ old = READ_ONCE(get_lowcore()->preempt_count);
do {
- old = READ_ONCE(S390_lowcore.preempt_count);
- new = (old & PREEMPT_NEED_RESCHED) |
- (pc & ~PREEMPT_NEED_RESCHED);
- } while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
- old, new) != old);
+ new = (old & PREEMPT_NEED_RESCHED) | (pc & ~PREEMPT_NEED_RESCHED);
+ } while (!arch_try_cmpxchg(&get_lowcore()->preempt_count, &old, new));
}
-static inline void set_preempt_need_resched(void)
+/*
+ * We fold the NEED_RESCHED bit into the preempt count such that
+ * preempt_enable() can decrement and test for needing to reschedule with a
+ * short instruction sequence.
+ *
+ * We invert the actual bit, so that when the decrement hits 0 we know we both
+ * need to resched (the bit is cleared) and can resched (no preempt count).
+ */
+
+static __always_inline void set_preempt_need_resched(void)
{
- __atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+ __atomic_and(~PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count);
}
-static inline void clear_preempt_need_resched(void)
+static __always_inline void clear_preempt_need_resched(void)
{
- __atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+ __atomic_or(PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count);
}
-static inline bool test_preempt_need_resched(void)
+static __always_inline bool test_preempt_need_resched(void)
{
- return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
+ return !(READ_ONCE(get_lowcore()->preempt_count) & PREEMPT_NEED_RESCHED);
}
-static inline void __preempt_count_add(int val)
+static __always_inline void __preempt_count_add(int val)
{
/*
* With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
@@ -52,88 +68,59 @@ static inline void __preempt_count_add(int val)
*/
if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
- __atomic_add_const(val, &S390_lowcore.preempt_count);
+ __atomic_add_const(val, &get_lowcore()->preempt_count);
return;
}
}
- __atomic_add(val, &S390_lowcore.preempt_count);
+ __atomic_add(val, &get_lowcore()->preempt_count);
}
-static inline void __preempt_count_sub(int val)
+static __always_inline void __preempt_count_sub(int val)
{
__preempt_count_add(-val);
}
-static inline bool __preempt_count_dec_and_test(void)
+/*
+ * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule,
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
+static __always_inline bool __preempt_count_dec_and_test(void)
{
- return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
+ return __atomic_add_const_and_test(-1, &get_lowcore()->preempt_count);
}
-static inline bool should_resched(int preempt_offset)
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(int preempt_offset)
{
- return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
- preempt_offset);
+ return unlikely(READ_ONCE(get_lowcore()->preempt_count) == preempt_offset);
}
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define PREEMPT_ENABLED (0)
-
-static inline int preempt_count(void)
-{
- return READ_ONCE(S390_lowcore.preempt_count);
-}
-
-static inline void preempt_count_set(int pc)
-{
- S390_lowcore.preempt_count = pc;
-}
-
-static inline void set_preempt_need_resched(void)
-{
-}
-
-static inline void clear_preempt_need_resched(void)
-{
-}
+#define init_task_preempt_count(p) do { } while (0)
+/* Deferred to CPU bringup time */
+#define init_idle_preempt_count(p, cpu) do { } while (0)
-static inline bool test_preempt_need_resched(void)
-{
- return false;
-}
+#ifdef CONFIG_PREEMPTION
-static inline void __preempt_count_add(int val)
-{
- S390_lowcore.preempt_count += val;
-}
+void preempt_schedule(void);
+void preempt_schedule_notrace(void);
-static inline void __preempt_count_sub(int val)
-{
- S390_lowcore.preempt_count -= val;
-}
+#ifdef CONFIG_PREEMPT_DYNAMIC
-static inline bool __preempt_count_dec_and_test(void)
-{
- return !--S390_lowcore.preempt_count && tif_need_resched();
-}
+void dynamic_preempt_schedule(void);
+void dynamic_preempt_schedule_notrace(void);
+#define __preempt_schedule() dynamic_preempt_schedule()
+#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()
-static inline bool should_resched(int preempt_offset)
-{
- return unlikely(preempt_count() == preempt_offset &&
- tif_need_resched());
-}
+#else /* CONFIG_PREEMPT_DYNAMIC */
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#define __preempt_schedule() preempt_schedule()
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
-#define init_task_preempt_count(p) do { } while (0)
-/* Deferred to CPU bringup time */
-#define init_idle_preempt_count(p, cpu) do { } while (0)
+#endif /* CONFIG_PREEMPT_DYNAMIC */
-#ifdef CONFIG_PREEMPTION
-extern void preempt_schedule(void);
-#define __preempt_schedule() preempt_schedule()
-extern void preempt_schedule_notrace(void);
-#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPTION */
#endif /* __ASM_PREEMPT_H */
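/*
 * A minimal sketch of the inverted-bit folding described in the comments
 * above. The enabled state starts at PREEMPT_ENABLED = PREEMPT_NEED_RESCHED
 * (0x80000000), and a *cleared* bit means "reschedule needed", so the raw
 * lowcore word only reaches zero when the preempt count is zero and a
 * reschedule is pending:
 *
 *   raw lowcore word   preempt_count()   resched needed?   --count == 0 ?
 *   0x80000001         1                 no                no  (-> 0x80000000)
 *   0x00000001         1                 yes               yes (-> 0, call __preempt_schedule())
 *
 * which is why preempt_enable() can use a single decrement-and-test.
 */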
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index c0b6e74d899a..4f8d5592c298 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -15,13 +15,11 @@
#include <linux/bits.h>
#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
-#define CIF_FPU 3 /* restore FPU registers */
#define CIF_ENABLED_WAIT 5 /* in enabled wait state */
#define CIF_MCCK_GUEST 6 /* machine check happening in guest */
#define CIF_DEDICATED_CPU 7 /* this CPU is dedicated */
#define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY)
-#define _CIF_FPU BIT(CIF_FPU)
#define _CIF_ENABLED_WAIT BIT(CIF_ENABLED_WAIT)
#define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST)
#define _CIF_DEDICATED_CPU BIT(CIF_DEDICATED_CPU)
@@ -33,30 +31,48 @@
#include <linux/cpumask.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
+#include <asm/fpu-types.h>
#include <asm/cpu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/runtime_instr.h>
-#include <asm/fpu/types.h>
-#include <asm/fpu/internal.h>
#include <asm/irqflags.h>
+#include <asm/alternative.h>
+#include <asm/fault.h>
+
+struct pcpu {
+ unsigned long ec_mask; /* bit mask for ec_xxx functions */
+ unsigned long ec_clk; /* sigp timestamp for ec_xxx */
+ unsigned long flags; /* per CPU flags */
+ unsigned long capacity; /* cpu capacity for scheduler */
+ signed char state; /* physical cpu state */
+ signed char polarization; /* physical polarization */
+ u16 address; /* physical cpu address */
+};
+
+DECLARE_PER_CPU(struct pcpu, pcpu_devices);
typedef long (*sys_call_ptr_t)(struct pt_regs *regs);
+static __always_inline struct pcpu *this_pcpu(void)
+{
+ return (struct pcpu *)(get_lowcore()->pcpu);
+}
+
static __always_inline void set_cpu_flag(int flag)
{
- S390_lowcore.cpu_flags |= (1UL << flag);
+ this_pcpu()->flags |= (1UL << flag);
}
static __always_inline void clear_cpu_flag(int flag)
{
- S390_lowcore.cpu_flags &= ~(1UL << flag);
+ this_pcpu()->flags &= ~(1UL << flag);
}
static __always_inline bool test_cpu_flag(int flag)
{
- return S390_lowcore.cpu_flags & (1UL << flag);
+ return this_pcpu()->flags & (1UL << flag);
}
static __always_inline bool test_and_set_cpu_flag(int flag)
@@ -81,9 +97,7 @@ static __always_inline bool test_and_clear_cpu_flag(int flag)
*/
static __always_inline bool test_cpu_flag_of(int flag, int cpu)
{
- struct lowcore *lc = lowcore_ptr[cpu];
-
- return lc->cpu_flags & (1UL << flag);
+ return per_cpu(pcpu_devices, cpu).flags & (1UL << flag);
}
#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
@@ -93,12 +107,21 @@ static inline void get_cpu_id(struct cpuid *ptr)
asm volatile("stidp %0" : "=Q" (*ptr));
}
+static __always_inline unsigned long get_cpu_timer(void)
+{
+ unsigned long timer;
+
+ asm volatile("stpt %[timer]" : [timer] "=Q" (timer));
+ return timer;
+}
+
void s390_adjust_jiffies(void);
void s390_update_cpu_mhz(void);
void cpu_detect_mhz_feature(void);
extern const struct seq_operations cpuinfo_op;
extern void execve_tail(void);
+unsigned long vdso_text_size(void);
unsigned long vdso_size(void);
/*
@@ -140,8 +163,7 @@ static __always_inline void __stackleak_poison(unsigned long erase_low,
" la %[addr],256(%[addr])\n"
" brctg %[tmp],0b\n"
"1: stg %[poison],0(%[addr])\n"
- " larl %[tmp],3f\n"
- " ex %[count],0(%[tmp])\n"
+ " exrl %[count],3f\n"
" j 4f\n"
"2: stg %[poison],0(%[addr])\n"
" j 4f\n"
@@ -165,10 +187,10 @@ struct thread_struct {
unsigned long hardirq_timer; /* task cputime in hardirq context */
unsigned long softirq_timer; /* task cputime in softirq context */
const sys_call_ptr_t *sys_call_table; /* system call table address */
- unsigned long gmap_addr; /* address of last gmap fault. */
- unsigned int gmap_write_flag; /* gmap fault write indication */
+ union teid gmap_teid; /* address and flags of last gmap fault */
unsigned int gmap_int_code; /* int code of last gmap fault */
- unsigned int gmap_pfault; /* signal of a pending guest pfault */
+ int ufpu_flags; /* user fpu flags */
+ int kfpu_flags; /* kernel fpu flags */
/* Per-thread information related to debugging */
struct per_regs per_user; /* User specified PER registers */
@@ -184,7 +206,8 @@ struct thread_struct {
struct gs_cb *gs_cb; /* Current guarded storage cb */
struct gs_cb *gs_bc_cb; /* Broadcast guarded storage cb */
struct pgm_tdb trap_tdb; /* Transaction abort diagnose block */
- struct fpu fpu; /* FP and VX register save area */
+ struct fpu ufpu; /* User FP and VX register save area */
+ struct fpu kfpu; /* Kernel FP and VX register save area */
};
/* Flag to disable transactions. */
@@ -203,7 +226,6 @@ typedef struct thread_struct thread_struct;
#define INIT_THREAD { \
.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
- .fpu.regs = (void *) init_task.thread.fpu.fprs, \
.last_break = 1, \
}
@@ -258,7 +280,7 @@ static __always_inline unsigned long __current_stack_pointer(void)
static __always_inline bool on_thread_stack(void)
{
- unsigned long ksp = S390_lowcore.kernel_stack;
+ unsigned long ksp = get_lowcore()->kernel_stack;
return !((ksp ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
}
@@ -303,8 +325,8 @@ static inline void __load_psw(psw_t psw)
*/
static __always_inline void __load_psw_mask(unsigned long mask)
{
+ psw_t psw __uninitialized;
unsigned long addr;
- psw_t psw;
psw.mask = mask;
@@ -392,6 +414,11 @@ static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
return arch_irqs_disabled_flags(regs->psw.mask);
}
+static __always_inline void bpon(void)
+{
+ asm volatile(ALTERNATIVE("nop", ".insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)));
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_PROCESSOR_H */
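/*
 * A minimal sketch of the CIF flag helpers after their move from the lowcore
 * cpu_flags field to the per-CPU struct pcpu: the flag word now lives in
 * this_pcpu()->flags, while callers stay unchanged.
 */
static inline void cif_flag_example(void)
{
	set_cpu_flag(CIF_NOHZ_DELAY);		/* sets bit 2 of this_pcpu()->flags */
	if (arch_needs_cpu())			/* tests the same per-CPU flag word */
		clear_cpu_flag(CIF_NOHZ_DELAY);
}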
diff --git a/arch/s390/include/asm/ptdump.h b/arch/s390/include/asm/ptdump.h
deleted file mode 100644
index f960b2896606..000000000000
--- a/arch/s390/include/asm/ptdump.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _ASM_S390_PTDUMP_H
-#define _ASM_S390_PTDUMP_H
-
-void ptdump_check_wx(void);
-
-static inline void debug_checkwx(void)
-{
- if (IS_ENABLED(CONFIG_DEBUG_WX))
- ptdump_check_wx();
-}
-
-#endif /* _ASM_S390_PTDUMP_H */
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index d28bf8fb2799..788bc4467445 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -203,6 +203,10 @@ static inline int test_and_clear_pt_regs_flag(struct pt_regs *regs, int flag)
return ret;
}
+struct task_struct;
+
+void update_cr_regs(struct task_struct *task);
+
/*
* These are defined as per linux/ptrace.h, which see.
*/
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 2f983e0b95e0..69c4ead0c332 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -9,8 +9,9 @@
#define __QDIO_H__
#include <linux/interrupt.h>
-#include <asm/cio.h>
+#include <asm/dma-types.h>
#include <asm/ccwdev.h>
+#include <asm/cio.h>
/* only use 4 queues to save some cachelines */
#define QDIO_MAX_QUEUES_PER_IRQ 4
@@ -34,9 +35,9 @@
* @dkey: access key for SLSB
*/
struct qdesfmt0 {
- u64 sliba;
- u64 sla;
- u64 slsba;
+ dma64_t sliba;
+ dma64_t sla;
+ dma64_t slsba;
u32 : 32;
u32 akey : 4;
u32 bkey : 4;
@@ -74,7 +75,7 @@ struct qdr {
/* private: */
u32 res[9];
/* public: */
- u64 qiba;
+ dma64_t qiba;
u32 : 32;
u32 qkey : 4;
u32 : 28;
@@ -146,7 +147,7 @@ struct qaob {
u8 flags;
u16 cbtbs;
u8 sb_count;
- u64 sba[QDIO_MAX_ELEMENTS_PER_BUFFER];
+ dma64_t sba[QDIO_MAX_ELEMENTS_PER_BUFFER];
u16 dcount[QDIO_MAX_ELEMENTS_PER_BUFFER];
u64 user0;
u64 res4[2];
@@ -208,7 +209,7 @@ struct qdio_buffer_element {
u8 scount;
u8 sflags;
u32 length;
- u64 addr;
+ dma64_t addr;
} __attribute__ ((packed, aligned(16)));
/**
@@ -224,7 +225,7 @@ struct qdio_buffer {
* @sbal: absolute SBAL address
*/
struct sl_element {
- u64 sbal;
+ dma64_t sbal;
} __attribute__ ((packed));
/**
diff --git a/arch/s390/include/asm/runtime-const.h b/arch/s390/include/asm/runtime-const.h
new file mode 100644
index 000000000000..17878b1d048c
--- /dev/null
+++ b/arch/s390/include/asm/runtime-const.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_RUNTIME_CONST_H
+#define _ASM_S390_RUNTIME_CONST_H
+
+#include <linux/uaccess.h>
+
+#define runtime_const_ptr(sym) \
+({ \
+ typeof(sym) __ret; \
+ \
+ asm_inline( \
+ "0: iihf %[__ret],%[c1]\n" \
+ " iilf %[__ret],%[c2]\n" \
+ ".pushsection runtime_ptr_" #sym ",\"a\"\n" \
+ ".long 0b - .\n" \
+ ".popsection" \
+ : [__ret] "=d" (__ret) \
+ : [c1] "i" (0x01234567UL), \
+ [c2] "i" (0x89abcdefUL)); \
+ __ret; \
+})
+
+#define runtime_const_shift_right_32(val, sym) \
+({ \
+ unsigned int __ret = (val); \
+ \
+ asm_inline( \
+ "0: srl %[__ret],12\n" \
+ ".pushsection runtime_shift_" #sym ",\"a\"\n" \
+ ".long 0b - .\n" \
+ ".popsection" \
+ : [__ret] "+d" (__ret)); \
+ __ret; \
+})
+
+#define runtime_const_init(type, sym) do { \
+ extern s32 __start_runtime_##type##_##sym[]; \
+ extern s32 __stop_runtime_##type##_##sym[]; \
+ \
+ runtime_const_fixup(__runtime_fixup_##type, \
+ (unsigned long)(sym), \
+ __start_runtime_##type##_##sym, \
+ __stop_runtime_##type##_##sym); \
+} while (0)
+
+/* 32-bit immediate for iihf and iilf is in the I2 field */
+static inline void __runtime_fixup_32(u32 *p, unsigned int val)
+{
+ s390_kernel_write(p, &val, sizeof(val));
+}
+
+static inline void __runtime_fixup_ptr(void *where, unsigned long val)
+{
+ __runtime_fixup_32(where + 2, val >> 32);
+ __runtime_fixup_32(where + 8, val);
+}
+
+/* Immediate value is lower 12 bits of D2 field of srl */
+static inline void __runtime_fixup_shift(void *where, unsigned long val)
+{
+ u32 insn = *(u32 *)where;
+
+ insn &= 0xfffff000;
+ insn |= (val & 63);
+ s390_kernel_write(where, &insn, sizeof(insn));
+}
+
+static inline void runtime_const_fixup(void (*fn)(void *, unsigned long),
+ unsigned long val, s32 *start, s32 *end)
+{
+ while (start < end) {
+ fn(*start + (void *)start, val);
+ start++;
+ }
+}
+
+#endif /* _ASM_S390_RUNTIME_CONST_H */
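/*
 * A minimal usage sketch of the runtime_const machinery above, modelled on
 * its generic in-tree user (the dcache hash table). "example_table" and
 * "example_shift" are hypothetical symbols, and the runtime_ptr_<sym> and
 * runtime_shift_<sym> sections must additionally be collected by the linker
 * script so that runtime_const_init() can find the recorded instruction sites.
 */
static unsigned long *example_table;	/* hypothetical */
static unsigned int example_shift;	/* hypothetical */

static inline unsigned long *example_hash(unsigned int hash)
{
	/* emits iihf/iilf and srl with placeholder immediates */
	return runtime_const_ptr(example_table) +
	       runtime_const_shift_right_32(hash, example_shift);
}

static void example_hash_init(void)
{
	/* patch every recorded site with the final values via s390_kernel_write() */
	runtime_const_init(ptr, example_table);
	runtime_const_init(shift, example_shift);
}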
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 5742d23bba13..18f37dff03c9 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -16,6 +16,11 @@
/* 24 + 16 * SCLP_MAX_CORES */
#define EXT_SCCB_READ_CPU (3 * PAGE_SIZE)
+#define SCLP_ERRNOTIFY_AQ_RESET 0
+#define SCLP_ERRNOTIFY_AQ_REPAIR 1
+#define SCLP_ERRNOTIFY_AQ_INFO_LOG 2
+#define SCLP_ERRNOTIFY_AQ_OPTICS_DATA 3
+
#ifndef __ASSEMBLY__
#include <linux/uio.h>
#include <asm/chpid.h>
@@ -72,6 +77,7 @@ struct sclp_info {
unsigned char has_core_type : 1;
unsigned char has_sprp : 1;
unsigned char has_hvs : 1;
+ unsigned char has_wti : 1;
unsigned char has_esca : 1;
unsigned char has_sief2 : 1;
unsigned char has_64bscao : 1;
@@ -84,9 +90,12 @@ struct sclp_info {
unsigned char has_ibs : 1;
unsigned char has_skey : 1;
unsigned char has_kss : 1;
+ unsigned char has_diag204_bif : 1;
unsigned char has_gisaf : 1;
+ unsigned char has_diag310 : 1;
unsigned char has_diag318 : 1;
unsigned char has_diag320 : 1;
+ unsigned char has_diag324 : 1;
unsigned char has_sipl : 1;
unsigned char has_sipl_eckd : 1;
unsigned char has_dirq : 1;
@@ -109,6 +118,34 @@ struct sclp_info {
};
extern struct sclp_info sclp;
+struct sccb_header {
+ u16 length;
+ u8 function_code;
+ u8 control_mask[3];
+ u16 response_code;
+} __packed;
+
+struct evbuf_header {
+ u16 length;
+ u8 type;
+ u8 flags;
+ u16 _reserved;
+} __packed;
+
+struct err_notify_evbuf {
+ struct evbuf_header header;
+ u8 action;
+ u8 atype;
+ u32 fh;
+ u32 fid;
+ u8 data[];
+} __packed;
+
+struct err_notify_sccb {
+ struct sccb_header header;
+ struct err_notify_evbuf evbuf;
+} __packed;
+
struct zpci_report_error_header {
u8 version; /* Interface version byte */
u8 action; /* Action qualifier byte
@@ -135,6 +172,7 @@ void sclp_early_printk(const char *s);
void __sclp_early_printk(const char *s, unsigned int len);
void sclp_emergency_printk(const char *s);
+int sclp_init(void);
int sclp_early_get_memsize(unsigned long *mem);
int sclp_early_get_hsa_size(unsigned long *hsa_size);
int _sclp_get_core_info(struct sclp_core_info *info);
diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h
index 322bdcd4b616..56003e26cdbf 100644
--- a/arch/s390/include/asm/scsw.h
+++ b/arch/s390/include/asm/scsw.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <asm/css_chars.h>
+#include <asm/dma-types.h>
#include <asm/cio.h>
/**
@@ -53,7 +54,7 @@ struct cmd_scsw {
__u32 fctl : 3;
__u32 actl : 7;
__u32 stctl : 5;
- __u32 cpa;
+ dma32_t cpa;
__u32 dstat : 8;
__u32 cstat : 8;
__u32 count : 16;
@@ -93,7 +94,7 @@ struct tm_scsw {
u32 fctl:3;
u32 actl:7;
u32 stctl:5;
- u32 tcw;
+ dma32_t tcw;
u32 dstat:8;
u32 cstat:8;
u32 fcxs:8;
@@ -125,7 +126,7 @@ struct eadm_scsw {
u32 fctl:3;
u32 actl:7;
u32 stctl:5;
- u32 aob;
+ dma32_t aob;
u32 dstat:8;
u32 cstat:8;
u32:16;
diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h
index 06fbabe2f66c..94092f4ae764 100644
--- a/arch/s390/include/asm/set_memory.h
+++ b/arch/s390/include/asm/set_memory.h
@@ -62,5 +62,7 @@ __SET_MEMORY_FUNC(set_memory_4k, SET_MEMORY_4K)
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
+bool kernel_page_present(struct page *page);
#endif
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 03bcaa8effb2..70b920b32827 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -34,6 +34,7 @@
#define MACHINE_FLAG_SCC BIT(17)
#define MACHINE_FLAG_PCI_MIO BIT(18)
#define MACHINE_FLAG_RDP BIT(19)
+#define MACHINE_FLAG_SEQ_INSN BIT(20)
#define LPP_MAGIC BIT(31)
#define LPP_PID_MASK _AC(0xffffffff, UL)
@@ -77,24 +78,25 @@ extern unsigned long max_mappable;
/* The Write Back bit position in the physaddr is given by the SLPC PCI */
extern unsigned long mio_wb_bit_mask;
-#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
-#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
-#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)
-
-#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
-#define MACHINE_HAS_ESOP (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP)
-#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
-#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
-#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
-#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
-#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
-#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
-#define MACHINE_HAS_TLB_GUEST (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_GUEST)
-#define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX)
-#define MACHINE_HAS_GS (S390_lowcore.machine_flags & MACHINE_FLAG_GS)
-#define MACHINE_HAS_SCC (S390_lowcore.machine_flags & MACHINE_FLAG_SCC)
-#define MACHINE_HAS_PCI_MIO (S390_lowcore.machine_flags & MACHINE_FLAG_PCI_MIO)
-#define MACHINE_HAS_RDP (S390_lowcore.machine_flags & MACHINE_FLAG_RDP)
+#define MACHINE_IS_VM (get_lowcore()->machine_flags & MACHINE_FLAG_VM)
+#define MACHINE_IS_KVM (get_lowcore()->machine_flags & MACHINE_FLAG_KVM)
+#define MACHINE_IS_LPAR (get_lowcore()->machine_flags & MACHINE_FLAG_LPAR)
+
+#define MACHINE_HAS_DIAG9C (get_lowcore()->machine_flags & MACHINE_FLAG_DIAG9C)
+#define MACHINE_HAS_ESOP (get_lowcore()->machine_flags & MACHINE_FLAG_ESOP)
+#define MACHINE_HAS_IDTE (get_lowcore()->machine_flags & MACHINE_FLAG_IDTE)
+#define MACHINE_HAS_EDAT1 (get_lowcore()->machine_flags & MACHINE_FLAG_EDAT1)
+#define MACHINE_HAS_EDAT2 (get_lowcore()->machine_flags & MACHINE_FLAG_EDAT2)
+#define MACHINE_HAS_TOPOLOGY (get_lowcore()->machine_flags & MACHINE_FLAG_TOPOLOGY)
+#define MACHINE_HAS_TE (get_lowcore()->machine_flags & MACHINE_FLAG_TE)
+#define MACHINE_HAS_TLB_LC (get_lowcore()->machine_flags & MACHINE_FLAG_TLB_LC)
+#define MACHINE_HAS_TLB_GUEST (get_lowcore()->machine_flags & MACHINE_FLAG_TLB_GUEST)
+#define MACHINE_HAS_NX (get_lowcore()->machine_flags & MACHINE_FLAG_NX)
+#define MACHINE_HAS_GS (get_lowcore()->machine_flags & MACHINE_FLAG_GS)
+#define MACHINE_HAS_SCC (get_lowcore()->machine_flags & MACHINE_FLAG_SCC)
+#define MACHINE_HAS_PCI_MIO (get_lowcore()->machine_flags & MACHINE_FLAG_PCI_MIO)
+#define MACHINE_HAS_RDP (get_lowcore()->machine_flags & MACHINE_FLAG_RDP)
+#define MACHINE_HAS_SEQ_INSN (get_lowcore()->machine_flags & MACHINE_FLAG_SEQ_INSN)
/*
* Console mode. Override with conmode=
@@ -115,6 +117,8 @@ extern unsigned int console_irq;
#define SET_CONSOLE_VT220 do { console_mode = 4; } while (0)
#define SET_CONSOLE_HVC do { console_mode = 5; } while (0)
+void register_early_console(void);
+
#ifdef CONFIG_VMCP
void vmcp_cma_reserve(void);
#else
@@ -127,20 +131,6 @@ extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);
-extern unsigned long __kaslr_offset;
-static inline unsigned long kaslr_offset(void)
-{
- return __kaslr_offset;
-}
-
-extern int __kaslr_enabled;
-static inline int kaslr_enabled(void)
-{
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- return __kaslr_enabled;
- return 0;
-}
-
struct oldmem_data {
unsigned long start;
unsigned long size;
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index edee63da08e7..472943b77066 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -38,6 +38,8 @@
#ifndef __ASSEMBLY__
+#include <asm/asm.h>
+
static inline int ____pcpu_sigp(u16 addr, u8 order, unsigned long parm,
u32 *status)
{
@@ -46,13 +48,12 @@ static inline int ____pcpu_sigp(u16 addr, u8 order, unsigned long parm,
asm volatile(
" sigp %[r1],%[addr],0(%[order])\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=&d" (cc), [r1] "+&d" (r1.pair)
+ CC_IPM(cc)
+ : CC_OUT(cc, cc), [r1] "+d" (r1.pair)
: [addr] "d" (addr), [order] "a" (order)
- : "cc");
+ : CC_CLOBBER);
*status = r1.even;
- return cc;
+ return CC_TRANSFORM(cc);
}
static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 6e5b1b4b19a9..7feca96c48c6 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -11,7 +11,8 @@
#include <asm/lowcore.h>
#include <asm/processor.h>
-#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
+#define raw_smp_processor_id() (get_lowcore()->cpu_nr)
+#define arch_scale_cpu_capacity smp_cpu_get_capacity
extern struct mutex smp_cpu_state_mutex;
extern unsigned int smp_cpu_mt_shift;
@@ -24,7 +25,6 @@ extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-extern void smp_call_online_cpu(void (*func)(void *), void *);
extern void smp_call_ipl_cpu(void (*func)(void *), void *);
extern void smp_emergency_stop(void);
@@ -35,6 +35,9 @@ extern void smp_save_dump_secondary_cpus(void);
extern void smp_yield_cpu(int cpu);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
+extern void smp_cpu_set_capacity(int cpu, unsigned long val);
+extern void smp_set_core_capacity(int cpu, unsigned long val);
+extern unsigned long smp_cpu_get_capacity(int cpu);
extern int smp_cpu_get_cpu_address(int cpu);
extern void smp_fill_possible_mask(void);
extern void smp_detect_cpus(void);
@@ -59,7 +62,7 @@ static inline void smp_cpus_done(unsigned int max_cpus)
{
}
-extern int smp_rescan_cpus(void);
+extern int smp_rescan_cpus(bool early);
extern void __noreturn cpu_die(void);
extern void __cpu_die(unsigned int cpu);
extern int __cpu_disable(void);
diff --git a/arch/s390/include/asm/softirq_stack.h b/arch/s390/include/asm/softirq_stack.h
index 1ac5115d3115..42d61296bbad 100644
--- a/arch/s390/include/asm/softirq_stack.h
+++ b/arch/s390/include/asm/softirq_stack.h
@@ -8,7 +8,7 @@
#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
static inline void do_softirq_own_stack(void)
{
- call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq);
+ call_on_stack(0, get_lowcore()->async_stack, void, __do_softirq);
}
#endif
#endif /* __ASM_S390_SOFTIRQ_STACK_H */
diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h
index c549893602ea..668dfc5de538 100644
--- a/arch/s390/include/asm/sparsemem.h
+++ b/arch/s390/include/asm/sparsemem.h
@@ -2,7 +2,23 @@
#ifndef _ASM_S390_SPARSEMEM_H
#define _ASM_S390_SPARSEMEM_H
-#define SECTION_SIZE_BITS 28
+#define SECTION_SIZE_BITS 27
#define MAX_PHYSMEM_BITS CONFIG_MAX_PHYSMEM_BITS
+#ifdef CONFIG_NUMA
+
+static inline int memory_add_physaddr_to_nid(u64 addr)
+{
+ return 0;
+}
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+
+static inline int phys_to_target_node(u64 start)
+{
+ return 0;
+}
+#define phys_to_target_node phys_to_target_node
+
+#endif /* CONFIG_NUMA */
+
#endif /* _ASM_S390_SPARSEMEM_H */
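/*
 * Worked out: SECTION_SIZE_BITS of 27 means each sparsemem section spans
 * 2^27 bytes = 128 MiB, half of the previous 2^28 = 256 MiB, so physical
 * memory is now described at a finer per-section granularity.
 */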
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 37127cd7749e..f87dd0a84855 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -16,7 +16,7 @@
#include <asm/processor.h>
#include <asm/alternative.h>
-#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
+#define SPINLOCK_LOCKVAL (get_lowcore()->spinlock_lockval)
extern int spin_retry;
@@ -57,8 +57,10 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lp)
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
+ int old = 0;
+
barrier();
- return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
+ return likely(arch_try_cmpxchg(&lp->lock, &old, SPINLOCK_LOCKVAL));
}
static inline void arch_spin_lock(arch_spinlock_t *lp)
@@ -79,10 +81,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
typecheck(int, lp->lock);
kcsan_release();
asm_inline volatile(
- ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */
- " sth %1,%0\n"
- : "=R" (((unsigned short *) &lp->lock)[1])
- : "d" (0) : "cc", "memory");
+ ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", ALT_FACILITY(49)) /* NIAI 7 */
+ " mvhhi %[lock],0\n"
+ : [lock] "=Q" (((unsigned short *)&lp->lock)[1])
+ :
+ : "memory");
}
/*
@@ -118,7 +121,9 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
static inline void arch_write_lock(arch_rwlock_t *rw)
{
- if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
+ int old = 0;
+
+ if (!arch_try_cmpxchg(&rw->cnts, &old, 0x30000))
arch_write_lock_wait(rw);
}
@@ -133,8 +138,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
int old;
old = READ_ONCE(rw->cnts);
- return (!(old & 0xffff0000) &&
- __atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
+ return (!(old & 0xffff0000) && arch_try_cmpxchg(&rw->cnts, &old, old + 1));
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -142,7 +146,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
int old;
old = READ_ONCE(rw->cnts);
- return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
+ return !old && arch_try_cmpxchg(&rw->cnts, &old, 0x30000);
}
#endif /* __ASM_SPINLOCK_H */
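/*
 * A minimal sketch of the arch_try_cmpxchg() pattern the lock code above was
 * converted to: unlike the removed __atomic_cmpxchg_bool(), it takes a
 * pointer to the expected value and updates it with the current value on
 * failure, so retry loops do not have to re-read the location themselves.
 */
static inline bool example_inc_if_below(int *counter, int limit)
{
	int old = READ_ONCE(*counter);

	do {
		if (old >= limit)
			return false;
	} while (!arch_try_cmpxchg(counter, &old, old + 1));
	return true;
}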
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index b69695e39957..3653ff57d6d9 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -3,7 +3,7 @@
#define __ASM_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
-# error "please don't include this file directly"
+# error "Please do not include this file directly."
#endif
typedef struct {
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 31ec4f545e03..1d5ca13dc90f 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -2,9 +2,9 @@
#ifndef _ASM_S390_STACKTRACE_H
#define _ASM_S390_STACKTRACE_H
+#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
-#include <asm/switch_to.h>
struct stack_frame_user {
unsigned long back_chain;
@@ -13,6 +13,17 @@ struct stack_frame_user {
unsigned long empty2[4];
};
+struct stack_frame_vdso_wrapper {
+ struct stack_frame_user sf;
+ unsigned long return_address;
+};
+
+struct perf_callchain_entry_ctx;
+
+void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
+ struct perf_callchain_entry_ctx *entry,
+ const struct pt_regs *regs, bool perf);
+
enum stack_type {
STACK_TYPE_UNKNOWN,
STACK_TYPE_TASK,
@@ -54,6 +65,7 @@ struct stack_frame {
unsigned long sie_reason;
unsigned long sie_flags;
unsigned long sie_control_block_phys;
+ unsigned long sie_guest_asce;
};
};
unsigned long gprs[10];
diff --git a/arch/s390/include/asm/stp.h b/arch/s390/include/asm/stp.h
index 4d74d7e33340..827cb208de86 100644
--- a/arch/s390/include/asm/stp.h
+++ b/arch/s390/include/asm/stp.h
@@ -94,5 +94,6 @@ struct stp_stzi {
int stp_sync_check(void);
int stp_island_check(void);
void stp_queue_work(void);
+bool stp_enabled(void);
#endif /* __S390_STP_H */
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 351685de53d2..2ab868cbae6c 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -15,15 +15,12 @@
#define __HAVE_ARCH_MEMCPY /* gcc builtin & arch function */
#define __HAVE_ARCH_MEMMOVE /* gcc builtin & arch function */
#define __HAVE_ARCH_MEMSET /* gcc builtin & arch function */
-#define __HAVE_ARCH_MEMSET16 /* arch function */
-#define __HAVE_ARCH_MEMSET32 /* arch function */
-#define __HAVE_ARCH_MEMSET64 /* arch function */
void *memcpy(void *dest, const void *src, size_t n);
void *memset(void *s, int c, size_t n);
void *memmove(void *dest, const void *src, size_t n);
-#ifndef CONFIG_KASAN
+#if !defined(CONFIG_KASAN) && !defined(CONFIG_KMSAN)
#define __HAVE_ARCH_MEMCHR /* inline & arch function */
#define __HAVE_ARCH_MEMCMP /* arch function */
#define __HAVE_ARCH_MEMSCAN /* inline & arch function */
@@ -36,6 +33,9 @@ void *memmove(void *dest, const void *src, size_t n);
#define __HAVE_ARCH_STRNCPY /* arch function */
#define __HAVE_ARCH_STRNLEN /* inline & arch function */
#define __HAVE_ARCH_STRSTR /* arch function */
+#define __HAVE_ARCH_MEMSET16 /* arch function */
+#define __HAVE_ARCH_MEMSET32 /* arch function */
+#define __HAVE_ARCH_MEMSET64 /* arch function */
/* Prototypes for non-inlined arch strings functions. */
int memcmp(const void *s1, const void *s2, size_t n);
@@ -44,7 +44,7 @@ size_t strlcat(char *dest, const char *src, size_t n);
char *strncat(char *dest, const char *src, size_t n);
char *strncpy(char *dest, const char *src, size_t n);
char *strstr(const char *s1, const char *s2);
-#endif /* !CONFIG_KASAN */
+#endif /* !defined(CONFIG_KASAN) && !defined(CONFIG_KMSAN) */
#undef __HAVE_ARCH_STRCHR
#undef __HAVE_ARCH_STRNCHR
@@ -74,20 +74,30 @@ void *__memset16(uint16_t *s, uint16_t v, size_t count);
void *__memset32(uint32_t *s, uint32_t v, size_t count);
void *__memset64(uint64_t *s, uint64_t v, size_t count);
+#ifdef __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t count)
{
return __memset16(s, v, count * sizeof(v));
}
+#endif
+#ifdef __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t count)
{
return __memset32(s, v, count * sizeof(v));
}
+#endif
+#ifdef __HAVE_ARCH_MEMSET64
+#ifdef IN_BOOT_STRING_C
+void *memset64(uint64_t *s, uint64_t v, size_t count);
+#else
static inline void *memset64(uint64_t *s, uint64_t v, size_t count)
{
return __memset64(s, v, count * sizeof(v));
}
+#endif
+#endif
#if !defined(IN_ARCH_STRING_C) && (!defined(CONFIG_FORTIFY_SOURCE) || defined(__NO_FORTIFY))
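/*
 * A minimal sketch of the element-count semantics of the wrappers above: the
 * count argument is in elements, not bytes, so the inline wrapper multiplies
 * by sizeof(v) before calling the byte-based __memset32().
 */
static inline void memset32_example(uint32_t *buf)
{
	memset32(buf, 0xdeadbeef, 4);	/* stores 4 * 4 = 16 bytes */
}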
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
deleted file mode 100644
index c61b2cc1a8a8..000000000000
--- a/arch/s390/include/asm/switch_to.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright IBM Corp. 1999, 2009
- *
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#ifndef __ASM_SWITCH_TO_H
-#define __ASM_SWITCH_TO_H
-
-#include <linux/thread_info.h>
-#include <asm/fpu/api.h>
-#include <asm/ptrace.h>
-#include <asm/guarded_storage.h>
-
-extern struct task_struct *__switch_to(void *, void *);
-extern void update_cr_regs(struct task_struct *task);
-
-static inline void save_access_regs(unsigned int *acrs)
-{
- typedef struct { int _[NUM_ACRS]; } acrstype;
-
- asm volatile("stam 0,15,%0" : "=Q" (*(acrstype *)acrs));
-}
-
-static inline void restore_access_regs(unsigned int *acrs)
-{
- typedef struct { int _[NUM_ACRS]; } acrstype;
-
- asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
-}
-
-#define switch_to(prev, next, last) do { \
- /* save_fpu_regs() sets the CIF_FPU flag, which enforces \
- * a restore of the floating point / vector registers as \
- * soon as the next task returns to user space \
- */ \
- save_fpu_regs(); \
- save_access_regs(&prev->thread.acrs[0]); \
- save_ri_cb(prev->thread.ri_cb); \
- save_gs_cb(prev->thread.gs_cb); \
- update_cr_regs(next); \
- restore_access_regs(&next->thread.acrs[0]); \
- restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
- restore_gs_cb(next->thread.gs_cb); \
- prev = __switch_to(prev, next); \
-} while (0)
-
-#endif /* __ASM_SWITCH_TO_H */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index a674c7d25da5..c33f7144d1b9 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -16,7 +16,7 @@
/*
* General size of kernel stacks
*/
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN) || defined(CONFIG_KMSAN)
#define THREAD_SIZE_ORDER 4
#else
#define THREAD_SIZE_ORDER 2
@@ -40,6 +40,7 @@ struct thread_info {
unsigned long flags; /* low level flags */
unsigned long syscall_work; /* SYSCALL_WORK_ flags */
unsigned int cpu; /* current CPU */
+ unsigned char sie; /* running in SIE context */
};
/*
@@ -60,44 +61,45 @@ void arch_setup_new_exec(void);
/*
* thread information flags bit numbers
*/
-/* _TIF_WORK bits */
#define TIF_NOTIFY_RESUME 0 /* callback before returning to user */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
-#define TIF_UPROBE 3 /* breakpointed or single-stepping */
-#define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */
+#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling needed */
+#define TIF_UPROBE 4 /* breakpointed or single-stepping */
#define TIF_PATCH_PENDING 5 /* pending live patching update */
#define TIF_PGSTE 6 /* New mm's will use 4K page tables */
#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
+#define TIF_GUARDED_STORAGE 8 /* load guarded storage control block */
#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
#define TIF_PER_TRAP 10 /* Need to handle PER trap on exit to usermode */
-
#define TIF_31BIT 16 /* 32bit process */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
#define TIF_SINGLE_STEP 19 /* This task is single stepped */
#define TIF_BLOCK_STEP 20 /* This task is block stepped */
#define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */
-
-/* _TIF_TRACE bits */
#define TIF_SYSCALL_TRACE 24 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 25 /* syscall auditing active */
#define TIF_SECCOMP 26 /* secure computing */
#define TIF_SYSCALL_TRACEPOINT 27 /* syscall tracepoint instrumentation */
#define _TIF_NOTIFY_RESUME BIT(TIF_NOTIFY_RESUME)
-#define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL)
#define _TIF_SIGPENDING BIT(TIF_SIGPENDING)
#define _TIF_NEED_RESCHED BIT(TIF_NEED_RESCHED)
+#define _TIF_NEED_RESCHED_LAZY BIT(TIF_NEED_RESCHED_LAZY)
#define _TIF_UPROBE BIT(TIF_UPROBE)
-#define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE)
#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
+#define _TIF_PGSTE BIT(TIF_PGSTE)
+#define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL)
+#define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE)
#define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST)
#define _TIF_PER_TRAP BIT(TIF_PER_TRAP)
-
#define _TIF_31BIT BIT(TIF_31BIT)
+#define _TIF_MEMDIE BIT(TIF_MEMDIE)
+#define _TIF_RESTORE_SIGMASK BIT(TIF_RESTORE_SIGMASK)
#define _TIF_SINGLE_STEP BIT(TIF_SINGLE_STEP)
-
+#define _TIF_BLOCK_STEP BIT(TIF_BLOCK_STEP)
+#define _TIF_UPROBE_SINGLESTEP BIT(TIF_UPROBE_SINGLESTEP)
#define _TIF_SYSCALL_TRACE BIT(TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT BIT(TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP BIT(TIF_SECCOMP)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 4d646659a5f5..a9460bd6555b 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -13,6 +13,7 @@
#include <linux/preempt.h>
#include <linux/time64.h>
#include <asm/lowcore.h>
+#include <asm/asm.h>
/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
@@ -44,11 +45,12 @@ static inline int set_tod_clock(__u64 time)
int cc;
asm volatile(
- " sck %1\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc) : "Q" (time) : "cc");
- return cc;
+ " sck %[time]\n"
+ CC_IPM(cc)
+ : CC_OUT(cc, cc)
+ : [time] "Q" (time)
+ : CC_CLOBBER);
+ return CC_TRANSFORM(cc);
}
static inline int store_tod_clock_ext_cc(union tod_clock *clk)
@@ -56,11 +58,12 @@ static inline int store_tod_clock_ext_cc(union tod_clock *clk)
int cc;
asm volatile(
- " stcke %1\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (cc), "=Q" (*clk) : : "cc");
- return cc;
+ " stcke %[clk]\n"
+ CC_IPM(cc)
+ : CC_OUT(cc, cc), [clk] "=Q" (*clk)
+ :
+ : CC_CLOBBER);
+ return CC_TRANSFORM(cc);
}
static __always_inline void store_tod_clock_ext(union tod_clock *tod)
@@ -93,6 +96,7 @@ extern unsigned char ptff_function_mask[16];
#define PTFF_QAF 0x00 /* query available functions */
#define PTFF_QTO 0x01 /* query tod offset */
#define PTFF_QSI 0x02 /* query steering information */
+#define PTFF_QPT 0x03 /* query physical clock */
#define PTFF_QUI 0x04 /* query UTC information */
#define PTFF_ATO 0x40 /* adjust tod offset */
#define PTFF_STO 0x41 /* set tod offset */
@@ -149,28 +153,27 @@ struct ptff_qui {
" lgr 0,%[reg0]\n" \
" lgr 1,%[reg1]\n" \
" ptff\n" \
- " ipm %[rc]\n" \
- " srl %[rc],28\n" \
- : [rc] "=&d" (rc), "+m" (*(struct addrtype *)reg1) \
+ CC_IPM(rc) \
+ : CC_OUT(rc, rc), "+m" (*(struct addrtype *)reg1) \
: [reg0] "d" (reg0), [reg1] "d" (reg1) \
- : "cc", "0", "1"); \
- rc; \
+ : CC_CLOBBER_LIST("0", "1")); \
+ CC_TRANSFORM(rc); \
})
static inline unsigned long local_tick_disable(void)
{
unsigned long old;
- old = S390_lowcore.clock_comparator;
- S390_lowcore.clock_comparator = clock_comparator_max;
- set_clock_comparator(S390_lowcore.clock_comparator);
+ old = get_lowcore()->clock_comparator;
+ get_lowcore()->clock_comparator = clock_comparator_max;
+ set_clock_comparator(get_lowcore()->clock_comparator);
return old;
}
static inline void local_tick_enable(unsigned long comp)
{
- S390_lowcore.clock_comparator = comp;
- set_clock_comparator(S390_lowcore.clock_comparator);
+ get_lowcore()->clock_comparator = comp;
+ set_clock_comparator(get_lowcore()->clock_comparator);
}
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
@@ -250,6 +253,11 @@ static __always_inline unsigned long tod_to_ns(unsigned long todval)
return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}
+static __always_inline u128 eitod_to_ns(u128 todval)
+{
+ return (todval * 125) >> 9;
+}
+
/**
* tod_after - compare two 64 bit TOD values
* @a: first 64 bit TOD timestamp
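/*
 * Both conversion helpers above compute ns = todval * 125 / 512: one TOD
 * clock unit is 2^-12 microseconds, i.e. 125/512 ns. tod_to_ns() splits the
 * multiplication to stay within 64 bits, while eitod_to_ns() can multiply
 * directly in 128 bits. Worked example for 4096 TOD units (one microsecond):
 *
 *   (4096 >> 9) * 125 + (((4096 & 0x1ff) * 125) >> 9) = 8 * 125 + 0 = 1000 ns
 */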
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index d1455a601adc..72655fd2d867 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -22,11 +22,11 @@
* Pages used for the page tables are a different story. FIXME: more
*/
-void __tlb_remove_table(void *_table);
static inline void tlb_flush(struct mmu_gather *tlb);
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
- struct encoded_page *page,
- int page_size);
+ struct page *page, bool delay_rmap, int page_size);
+static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb,
+ struct page *page, unsigned int nr_pages, bool delay_rmap);
#define tlb_flush tlb_flush
#define pte_free_tlb pte_free_tlb
@@ -42,14 +42,29 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
* tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
* has already been freed, so just do free_page_and_swap_cache.
*
- * s390 doesn't delay rmap removal, so there is nothing encoded in
- * the page pointer.
+ * s390 doesn't delay rmap removal.
*/
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
- struct encoded_page *page,
- int page_size)
+ struct page *page, bool delay_rmap, int page_size)
{
- free_page_and_swap_cache(encoded_page_ptr(page));
+ VM_WARN_ON_ONCE(delay_rmap);
+
+ free_page_and_swap_cache(page);
+ return false;
+}
+
+static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb,
+ struct page *page, unsigned int nr_pages, bool delay_rmap)
+{
+ struct encoded_page *encoded_pages[] = {
+ encode_page(page, ENCODED_PAGE_BIT_NR_PAGES_NEXT),
+ encode_nr_pages(nr_pages),
+ };
+
+ VM_WARN_ON_ONCE(delay_rmap);
+ VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1));
+
+ free_pages_and_swap_cache(encoded_pages, ARRAY_SIZE(encoded_pages));
return false;
}
@@ -71,7 +86,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
tlb->cleared_pmds = 1;
if (mm_alloc_pgste(tlb->mm))
gmap_unlink(tlb->mm, (unsigned long *)pte, address);
- tlb_remove_ptdesc(tlb, pte);
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pte));
}
/*
@@ -86,12 +101,11 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
{
if (mm_pmd_folded(tlb->mm))
return;
- pagetable_pmd_dtor(virt_to_ptdesc(pmd));
__tlb_adjust_range(tlb, address, PAGE_SIZE);
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
tlb->cleared_puds = 1;
- tlb_remove_ptdesc(tlb, pmd);
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
}
/*
@@ -109,7 +123,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
__tlb_adjust_range(tlb, address, PAGE_SIZE);
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
- tlb_remove_ptdesc(tlb, p4d);
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
}
/*
@@ -124,11 +138,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
{
if (mm_pud_folded(tlb->mm))
return;
+ __tlb_adjust_range(tlb, address, PAGE_SIZE);
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
tlb->cleared_p4ds = 1;
- tlb_remove_ptdesc(tlb, pud);
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
}
-
#endif /* _S390_TLB_H */
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index a6e2cd89b609..9dfd46dd03c6 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -46,11 +46,6 @@ static inline void __tlb_flush_mm(struct mm_struct *mm)
{
unsigned long gmap_asce;
- /*
- * If the machine has IDTE we prefer to do a per mm flush
- * on all cpus instead of doing a local flush if the mm
- * only ran on the local cpu.
- */
preempt_disable();
atomic_inc(&mm->context.flush_count);
/* Reset TLB flush mask */
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 3a0ac0c7a9a3..cef06bffad80 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -67,6 +67,9 @@ static inline void topology_expect_change(void) { }
#define POLARIZATION_VM (2)
#define POLARIZATION_VH (3)
+#define CPU_CAPACITY_HIGH SCHED_CAPACITY_SCALE
+#define CPU_CAPACITY_LOW (SCHED_CAPACITY_SCALE >> 3)
+
#define SD_BOOK_INIT SD_CPU_INIT
#ifdef CONFIG_NUMA
diff --git a/arch/s390/include/asm/trace/hiperdispatch.h b/arch/s390/include/asm/trace/hiperdispatch.h
new file mode 100644
index 000000000000..46462ee645b0
--- /dev/null
+++ b/arch/s390/include/asm/trace/hiperdispatch.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Tracepoint header for hiperdispatch
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM s390
+
+#if !defined(_TRACE_S390_HIPERDISPATCH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_S390_HIPERDISPATCH_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH asm/trace
+#define TRACE_INCLUDE_FILE hiperdispatch
+
+TRACE_EVENT(s390_hd_work_fn,
+ TP_PROTO(int steal_time_percentage,
+ int entitled_core_count,
+ int highcap_core_count),
+ TP_ARGS(steal_time_percentage,
+ entitled_core_count,
+ highcap_core_count),
+ TP_STRUCT__entry(__field(int, steal_time_percentage)
+ __field(int, entitled_core_count)
+ __field(int, highcap_core_count)),
+ TP_fast_assign(__entry->steal_time_percentage = steal_time_percentage;
+ __entry->entitled_core_count = entitled_core_count;
+ __entry->highcap_core_count = highcap_core_count;),
+ TP_printk("steal: %d entitled_core_count: %d highcap_core_count: %d",
+ __entry->steal_time_percentage,
+ __entry->entitled_core_count,
+ __entry->highcap_core_count)
+);
+
+TRACE_EVENT(s390_hd_rebuild_domains,
+ TP_PROTO(int current_highcap_core_count,
+ int new_highcap_core_count),
+ TP_ARGS(current_highcap_core_count,
+ new_highcap_core_count),
+ TP_STRUCT__entry(__field(int, current_highcap_core_count)
+ __field(int, new_highcap_core_count)),
+ TP_fast_assign(__entry->current_highcap_core_count = current_highcap_core_count;
+ __entry->new_highcap_core_count = new_highcap_core_count),
+ TP_printk("change highcap_core_count: %u -> %u",
+ __entry->current_highcap_core_count,
+ __entry->new_highcap_core_count)
+);
+
+#endif /* _TRACE_S390_HIPERDISPATCH_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 81ae8a98e7ec..f5920163ee97 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -18,19 +18,121 @@
#include <asm/extable.h>
#include <asm/facility.h>
#include <asm-generic/access_ok.h>
+#include <linux/instrumented.h>
void debug_user_asce(int exit);
-unsigned long __must_check
-raw_copy_from_user(void *to, const void __user *from, unsigned long n);
+union oac {
+ unsigned int val;
+ struct {
+ struct {
+ unsigned short key : 4;
+ unsigned short : 4;
+ unsigned short as : 2;
+ unsigned short : 4;
+ unsigned short k : 1;
+ unsigned short a : 1;
+ } oac1;
+ struct {
+ unsigned short key : 4;
+ unsigned short : 4;
+ unsigned short as : 2;
+ unsigned short : 4;
+ unsigned short k : 1;
+ unsigned short a : 1;
+ } oac2;
+ };
+};
-unsigned long __must_check
-raw_copy_to_user(void __user *to, const void *from, unsigned long n);
+static __always_inline __must_check unsigned long
+raw_copy_from_user_key(void *to, const void __user *from, unsigned long size, unsigned long key)
+{
+ unsigned long rem;
+ union oac spec = {
+ .oac2.key = key,
+ .oac2.as = PSW_BITS_AS_SECONDARY,
+ .oac2.k = 1,
+ .oac2.a = 1,
+ };
-#ifndef CONFIG_KASAN
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-#endif
+ asm_inline volatile(
+ " lr %%r0,%[spec]\n"
+ "0: mvcos 0(%[to]),0(%[from]),%[size]\n"
+ "1: jz 5f\n"
+ " algr %[size],%[val]\n"
+ " slgr %[from],%[val]\n"
+ " slgr %[to],%[val]\n"
+ " j 0b\n"
+ "2: la %[rem],4095(%[from])\n" /* rem = from + 4095 */
+ " nr %[rem],%[val]\n" /* rem = (from + 4095) & -4096 */
+ " slgr %[rem],%[from]\n"
+ " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
+ " jnh 6f\n"
+ "3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
+ "4: slgr %[size],%[rem]\n"
+ " j 6f\n"
+ "5: lghi %[size],0\n"
+ "6:\n"
+ EX_TABLE(0b, 2b)
+ EX_TABLE(1b, 2b)
+ EX_TABLE(3b, 6b)
+ EX_TABLE(4b, 6b)
+ : [size] "+&a" (size), [from] "+&a" (from), [to] "+&a" (to), [rem] "=&a" (rem)
+ : [val] "a" (-4096UL), [spec] "d" (spec.val)
+ : "cc", "memory", "0");
+ return size;
+}
+
+static __always_inline __must_check unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ return raw_copy_from_user_key(to, from, n, 0);
+}
+
+static __always_inline __must_check unsigned long
+raw_copy_to_user_key(void __user *to, const void *from, unsigned long size, unsigned long key)
+{
+ unsigned long rem;
+ union oac spec = {
+ .oac1.key = key,
+ .oac1.as = PSW_BITS_AS_SECONDARY,
+ .oac1.k = 1,
+ .oac1.a = 1,
+ };
+
+ asm_inline volatile(
+ " lr %%r0,%[spec]\n"
+ "0: mvcos 0(%[to]),0(%[from]),%[size]\n"
+ "1: jz 5f\n"
+ " algr %[size],%[val]\n"
+ " slgr %[to],%[val]\n"
+ " slgr %[from],%[val]\n"
+ " j 0b\n"
+ "2: la %[rem],4095(%[to])\n" /* rem = to + 4095 */
+ " nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */
+ " slgr %[rem],%[to]\n"
+ " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
+ " jnh 6f\n"
+ "3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
+ "4: slgr %[size],%[rem]\n"
+ " j 6f\n"
+ "5: lghi %[size],0\n"
+ "6:\n"
+ EX_TABLE(0b, 2b)
+ EX_TABLE(1b, 2b)
+ EX_TABLE(3b, 6b)
+ EX_TABLE(4b, 6b)
+ : [size] "+&a" (size), [to] "+&a" (to), [from] "+&a" (from), [rem] "=&a" (rem)
+ : [val] "a" (-4096UL), [spec] "d" (spec.val)
+ : "cc", "memory", "0");
+ return size;
+}
+
+static __always_inline __must_check unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ return raw_copy_to_user_key(to, from, n, 0);
+}
unsigned long __must_check
_copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key);
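/*
 * A minimal sketch of the key-checked copy variants: the same MVCOS-based
 * copy as above is performed, but with the supplied storage access key, so
 * key-controlled protection is enforced (key 0, as used by
 * raw_copy_from_user(), matches everything). copy_from_user_key() itself is
 * declared in the part of this header elided between the hunks.
 */
static inline int example_read_with_key(void *dst, const void __user *src,
					unsigned long len, unsigned long access_key)
{
	unsigned long rem = copy_from_user_key(dst, src, len, access_key);

	return rem ? -EFAULT : 0;	/* rem = number of bytes not copied */
}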
@@ -54,163 +156,119 @@ copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned lo
return n;
}
-union oac {
- unsigned int val;
- struct {
- struct {
- unsigned short key : 4;
- unsigned short : 4;
- unsigned short as : 2;
- unsigned short : 4;
- unsigned short k : 1;
- unsigned short a : 1;
- } oac1;
- struct {
- unsigned short key : 4;
- unsigned short : 4;
- unsigned short as : 2;
- unsigned short : 4;
- unsigned short k : 1;
- unsigned short a : 1;
- } oac2;
- };
-};
-
int __noreturn __put_user_bad(void);
-#define __put_user_asm(to, from, size) \
-({ \
- union oac __oac_spec = { \
- .oac1.as = PSW_BITS_AS_SECONDARY, \
- .oac1.a = 1, \
- }; \
- int __rc; \
- \
- asm volatile( \
- " lr 0,%[spec]\n" \
- "0: mvcos %[_to],%[_from],%[_size]\n" \
- "1: xr %[rc],%[rc]\n" \
- "2:\n" \
- EX_TABLE_UA_STORE(0b, 2b, %[rc]) \
- EX_TABLE_UA_STORE(1b, 2b, %[rc]) \
- : [rc] "=&d" (__rc), [_to] "+Q" (*(to)) \
- : [_size] "d" (size), [_from] "Q" (*(from)), \
- [spec] "d" (__oac_spec.val) \
- : "cc", "0"); \
- __rc; \
-})
-
-static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
-{
- int rc;
+#ifdef CONFIG_KMSAN
+#define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory
+#else
+#define uaccess_kmsan_or_inline __always_inline
+#endif
- switch (size) {
- case 1:
- rc = __put_user_asm((unsigned char __user *)ptr,
- (unsigned char *)x,
- size);
- break;
- case 2:
- rc = __put_user_asm((unsigned short __user *)ptr,
- (unsigned short *)x,
- size);
- break;
- case 4:
- rc = __put_user_asm((unsigned int __user *)ptr,
- (unsigned int *)x,
- size);
- break;
- case 8:
- rc = __put_user_asm((unsigned long __user *)ptr,
- (unsigned long *)x,
- size);
- break;
- default:
- __put_user_bad();
- break;
- }
- return rc;
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
+#define DEFINE_PUT_USER_NOINSTR(type) \
+static uaccess_kmsan_or_inline int \
+__put_user_##type##_noinstr(unsigned type __user *to, \
+ unsigned type *from, \
+ unsigned long size) \
+{ \
+ asm goto( \
+ " llilh %%r0,%[spec]\n" \
+ "0: mvcos %[to],%[from],%[size]\n" \
+ "1: nopr %%r7\n" \
+ EX_TABLE(0b, %l[Efault]) \
+ EX_TABLE(1b, %l[Efault]) \
+ : [to] "+Q" (*to) \
+ : [size] "d" (size), [from] "Q" (*from), \
+ [spec] "I" (0x81) \
+ : "cc", "0" \
+ : Efault \
+ ); \
+ return 0; \
+Efault: \
+ return -EFAULT; \
}
-int __noreturn __get_user_bad(void);
+#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
-#define __get_user_asm(to, from, size) \
-({ \
- union oac __oac_spec = { \
- .oac2.as = PSW_BITS_AS_SECONDARY, \
- .oac2.a = 1, \
- }; \
- int __rc; \
+#define DEFINE_PUT_USER_NOINSTR(type) \
+static uaccess_kmsan_or_inline int \
+__put_user_##type##_noinstr(unsigned type __user *to, \
+ unsigned type *from, \
+ unsigned long size) \
+{ \
+ int rc; \
\
asm volatile( \
- " lr 0,%[spec]\n" \
- "0: mvcos 0(%[_to]),%[_from],%[_size]\n" \
- "1: xr %[rc],%[rc]\n" \
+ " llilh %%r0,%[spec]\n" \
+ "0: mvcos %[to],%[from],%[size]\n" \
+ "1: lhi %[rc],0\n" \
"2:\n" \
- EX_TABLE_UA_LOAD_MEM(0b, 2b, %[rc], %[_to], %[_ksize]) \
- EX_TABLE_UA_LOAD_MEM(1b, 2b, %[rc], %[_to], %[_ksize]) \
- : [rc] "=&d" (__rc), "=Q" (*(to)) \
- : [_size] "d" (size), [_from] "Q" (*(from)), \
- [spec] "d" (__oac_spec.val), [_to] "a" (to), \
- [_ksize] "K" (size) \
+ EX_TABLE_UA_FAULT(0b, 2b, %[rc]) \
+ EX_TABLE_UA_FAULT(1b, 2b, %[rc]) \
+ : [rc] "=d" (rc), [to] "+Q" (*to) \
+ : [size] "d" (size), [from] "Q" (*from), \
+ [spec] "I" (0x81) \
: "cc", "0"); \
- __rc; \
-})
+ return rc; \
+}
-static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
-{
- int rc;
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
- switch (size) {
- case 1:
- rc = __get_user_asm((unsigned char *)x,
- (unsigned char __user *)ptr,
- size);
- break;
- case 2:
- rc = __get_user_asm((unsigned short *)x,
- (unsigned short __user *)ptr,
- size);
- break;
- case 4:
- rc = __get_user_asm((unsigned int *)x,
- (unsigned int __user *)ptr,
- size);
- break;
- case 8:
- rc = __get_user_asm((unsigned long *)x,
- (unsigned long __user *)ptr,
- size);
- break;
- default:
- __get_user_bad();
- break;
- }
- return rc;
+DEFINE_PUT_USER_NOINSTR(char);
+DEFINE_PUT_USER_NOINSTR(short);
+DEFINE_PUT_USER_NOINSTR(int);
+DEFINE_PUT_USER_NOINSTR(long);
+
+#define DEFINE_PUT_USER(type) \
+static __always_inline int \
+__put_user_##type(unsigned type __user *to, unsigned type *from, \
+ unsigned long size) \
+{ \
+ int rc; \
+ \
+ rc = __put_user_##type##_noinstr(to, from, size); \
+ instrument_put_user(*from, to, size); \
+ return rc; \
}
-/*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- */
+DEFINE_PUT_USER(char);
+DEFINE_PUT_USER(short);
+DEFINE_PUT_USER(int);
+DEFINE_PUT_USER(long);
+
#define __put_user(x, ptr) \
({ \
__typeof__(*(ptr)) __x = (x); \
- int __pu_err = -EFAULT; \
+ int __prc; \
\
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: \
+ __prc = __put_user_char((unsigned char __user *)(ptr), \
+ (unsigned char *)&__x, \
+ sizeof(*(ptr))); \
+ break; \
case 2: \
+ __prc = __put_user_short((unsigned short __user *)(ptr),\
+ (unsigned short *)&__x, \
+ sizeof(*(ptr))); \
+ break; \
case 4: \
+ __prc = __put_user_int((unsigned int __user *)(ptr), \
+ (unsigned int *)&__x, \
+ sizeof(*(ptr))); \
+ break; \
case 8: \
- __pu_err = __put_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ __prc = __put_user_long((unsigned long __user *)(ptr), \
+ (unsigned long *)&__x, \
+ sizeof(*(ptr))); \
break; \
default: \
- __put_user_bad(); \
+ __prc = __put_user_bad(); \
break; \
} \
- __builtin_expect(__pu_err, 0); \
+ __builtin_expect(__prc, 0); \
})
#define put_user(x, ptr) \
@@ -219,45 +277,129 @@ static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsign
__put_user(x, ptr); \
})
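
The size-based dispatch in __put_user() can be exercised with an ordinary put_user() call. A minimal sketch, assuming a kernel build context; demo_put_status() is hypothetical.

	#include <linux/uaccess.h>

	/* Hypothetical ioctl helper: hand a 32-bit status word back to user space. */
	static int demo_put_status(u32 status, u32 __user *uptr)
	{
		/* sizeof(*uptr) == 4, so __put_user() picks __put_user_int(). */
		return put_user(status, uptr);	/* 0 on success, -EFAULT on fault */
	}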
+int __noreturn __get_user_bad(void);
+
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
+#define DEFINE_GET_USER_NOINSTR(type) \
+static uaccess_kmsan_or_inline int \
+__get_user_##type##_noinstr(unsigned type *to, \
+ const unsigned type __user *from, \
+ unsigned long size) \
+{ \
+ asm goto( \
+ " lhi %%r0,%[spec]\n" \
+ "0: mvcos %[to],%[from],%[size]\n" \
+ "1: nopr %%r7\n" \
+ EX_TABLE(0b, %l[Efault]) \
+ EX_TABLE(1b, %l[Efault]) \
+ : [to] "=Q" (*to) \
+ : [size] "d" (size), [from] "Q" (*from), \
+ [spec] "I" (0x81) \
+ : "cc", "0" \
+ : Efault \
+ ); \
+ return 0; \
+Efault: \
+ *to = 0; \
+ return -EFAULT; \
+}
+
+#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
+#define DEFINE_GET_USER_NOINSTR(type) \
+static uaccess_kmsan_or_inline int \
+__get_user_##type##_noinstr(unsigned type *to, \
+ const unsigned type __user *from, \
+ unsigned long size) \
+{ \
+ int rc; \
+ \
+ asm volatile( \
+ " lhi %%r0,%[spec]\n" \
+ "0: mvcos %[to],%[from],%[size]\n" \
+ "1: lhi %[rc],0\n" \
+ "2:\n" \
+ EX_TABLE_UA_FAULT(0b, 2b, %[rc]) \
+ EX_TABLE_UA_FAULT(1b, 2b, %[rc]) \
+ : [rc] "=d" (rc), [to] "=Q" (*to) \
+ : [size] "d" (size), [from] "Q" (*from), \
+ [spec] "I" (0x81) \
+ : "cc", "0"); \
+ if (likely(!rc)) \
+ return 0; \
+ *to = 0; \
+ return rc; \
+}
+
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
+DEFINE_GET_USER_NOINSTR(char);
+DEFINE_GET_USER_NOINSTR(short);
+DEFINE_GET_USER_NOINSTR(int);
+DEFINE_GET_USER_NOINSTR(long);
+
+#define DEFINE_GET_USER(type) \
+static __always_inline int \
+__get_user_##type(unsigned type *to, const unsigned type __user *from, \
+ unsigned long size) \
+{ \
+ int rc; \
+ \
+ rc = __get_user_##type##_noinstr(to, from, size); \
+ instrument_get_user(*to); \
+ return rc; \
+}
+
+DEFINE_GET_USER(char);
+DEFINE_GET_USER(short);
+DEFINE_GET_USER(int);
+DEFINE_GET_USER(long);
+
#define __get_user(x, ptr) \
({ \
- int __gu_err = -EFAULT; \
+ const __user void *____guptr = (ptr); \
+ int __grc; \
\
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: { \
+ const unsigned char __user *__guptr = ____guptr; \
unsigned char __x; \
\
- __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ __grc = __get_user_char(&__x, __guptr, sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *)&__x; \
break; \
}; \
case 2: { \
+ const unsigned short __user *__guptr = ____guptr; \
unsigned short __x; \
\
- __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ __grc = __get_user_short(&__x, __guptr, sizeof(*(ptr)));\
(x) = *(__force __typeof__(*(ptr)) *)&__x; \
break; \
}; \
case 4: { \
+ const unsigned int __user *__guptr = ____guptr; \
unsigned int __x; \
\
- __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ __grc = __get_user_int(&__x, __guptr, sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *)&__x; \
break; \
}; \
case 8: { \
+ const unsigned long __user *__guptr = ____guptr; \
unsigned long __x; \
\
- __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \
+ __grc = __get_user_long(&__x, __guptr, sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *)&__x; \
break; \
}; \
default: \
- __get_user_bad(); \
+ __grc = __get_user_bad(); \
break; \
} \
- __builtin_expect(__gu_err, 0); \
+ __builtin_expect(__grc, 0); \
})
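
The read side mirrors this: __get_user() selects the __get_user_<type>() helper by access size and zeroes the destination on a fault. A minimal sketch; demo_get_len() is hypothetical.

	#include <linux/uaccess.h>

	/* Hypothetical ioctl helper: fetch a user-supplied length field. */
	static int demo_get_len(const unsigned long __user *uptr, unsigned long *len)
	{
		/* An 8-byte access, so __get_user() ends up in __get_user_long(). */
		return get_user(*len, uptr);	/* 0 on success, -EFAULT on fault */
	}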
#define get_user(x, ptr) \
@@ -284,111 +426,80 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
return __clear_user(to, n);
}
-void *s390_kernel_write(void *dst, const void *src, size_t size);
+void *__s390_kernel_write(void *dst, const void *src, size_t size);
-int __noreturn __put_kernel_bad(void);
+static inline void *s390_kernel_write(void *dst, const void *src, size_t size)
+{
+ if (__is_defined(__DECOMPRESSOR))
+ return memcpy(dst, src, size);
+ return __s390_kernel_write(dst, src, size);
+}
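
s390_kernel_write() lets callers store into write-protected kernel memory; in the decompressor it degrades to a plain memcpy() because no write protection is active yet. A minimal sketch; demo_patch_insn() is hypothetical.

	/* Hypothetical caller: overwrite one (possibly read-only) kernel instruction word. */
	static void demo_patch_insn(u32 *insn, u32 new_insn)
	{
		s390_kernel_write(insn, &new_insn, sizeof(new_insn));
	}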
-#define __put_kernel_asm(val, to, insn) \
-({ \
- int __rc; \
- \
- asm volatile( \
- "0: " insn " %[_val],%[_to]\n" \
- "1: xr %[rc],%[rc]\n" \
- "2:\n" \
- EX_TABLE_UA_STORE(0b, 2b, %[rc]) \
- EX_TABLE_UA_STORE(1b, 2b, %[rc]) \
- : [rc] "=d" (__rc), [_to] "+Q" (*(to)) \
- : [_val] "d" (val) \
- : "cc"); \
- __rc; \
-})
+void __noreturn __mvc_kernel_nofault_bad(void);
-#define __put_kernel_nofault(dst, src, type, err_label) \
+#if defined(CONFIG_CC_HAS_ASM_GOTO_OUTPUT) && defined(CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS)
+
+#define __mvc_kernel_nofault(dst, src, type, err_label) \
do { \
- unsigned long __x = (unsigned long)(*((type *)(src))); \
- int __pk_err; \
- \
switch (sizeof(type)) { \
case 1: \
- __pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \
- break; \
case 2: \
- __pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \
- break; \
case 4: \
- __pk_err = __put_kernel_asm(__x, (type *)(dst), "st"); \
- break; \
case 8: \
- __pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \
+ asm goto( \
+ "0: mvc %O[_dst](%[_len],%R[_dst]),%[_src]\n" \
+ "1: nopr %%r7\n" \
+ EX_TABLE(0b, %l[err_label]) \
+ EX_TABLE(1b, %l[err_label]) \
+ : [_dst] "=Q" (*(type *)dst) \
+ : [_src] "Q" (*(type *)(src)), \
+ [_len] "I" (sizeof(type)) \
+ : \
+ : err_label); \
break; \
default: \
- __pk_err = __put_kernel_bad(); \
+ __mvc_kernel_nofault_bad(); \
break; \
} \
- if (unlikely(__pk_err)) \
- goto err_label; \
} while (0)
-int __noreturn __get_kernel_bad(void);
+#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
-#define __get_kernel_asm(val, from, insn) \
-({ \
- int __rc; \
- \
- asm volatile( \
- "0: " insn " %[_val],%[_from]\n" \
- "1: xr %[rc],%[rc]\n" \
- "2:\n" \
- EX_TABLE_UA_LOAD_REG(0b, 2b, %[rc], %[_val]) \
- EX_TABLE_UA_LOAD_REG(1b, 2b, %[rc], %[_val]) \
- : [rc] "=d" (__rc), [_val] "=d" (val) \
- : [_from] "Q" (*(from)) \
- : "cc"); \
- __rc; \
-})
-
-#define __get_kernel_nofault(dst, src, type, err_label) \
+#define __mvc_kernel_nofault(dst, src, type, err_label) \
do { \
- int __gk_err; \
+ type *(__dst) = (type *)(dst); \
+ int __rc; \
\
switch (sizeof(type)) { \
- case 1: { \
- unsigned char __x; \
- \
- __gk_err = __get_kernel_asm(__x, (type *)(src), "ic"); \
- *((type *)(dst)) = (type)__x; \
- break; \
- }; \
- case 2: { \
- unsigned short __x; \
- \
- __gk_err = __get_kernel_asm(__x, (type *)(src), "lh"); \
- *((type *)(dst)) = (type)__x; \
- break; \
- }; \
- case 4: { \
- unsigned int __x; \
- \
- __gk_err = __get_kernel_asm(__x, (type *)(src), "l"); \
- *((type *)(dst)) = (type)__x; \
- break; \
- }; \
- case 8: { \
- unsigned long __x; \
- \
- __gk_err = __get_kernel_asm(__x, (type *)(src), "lg"); \
- *((type *)(dst)) = (type)__x; \
+ case 1: \
+ case 2: \
+ case 4: \
+ case 8: \
+ asm_inline volatile( \
+ "0: mvc 0(%[_len],%[_dst]),%[_src]\n" \
+ "1: lhi %[_rc],0\n" \
+ "2:\n" \
+ EX_TABLE_UA_FAULT(0b, 2b, %[_rc]) \
+ EX_TABLE_UA_FAULT(1b, 2b, %[_rc]) \
+ : [_rc] "=d" (__rc), \
+ "=m" (*__dst) \
+ : [_src] "Q" (*(type *)(src)), \
+ [_dst] "a" (__dst), \
+ [_len] "I" (sizeof(type))); \
+ if (__rc) \
+ goto err_label; \
break; \
- }; \
default: \
- __gk_err = __get_kernel_bad(); \
+ __mvc_kernel_nofault_bad(); \
break; \
} \
- if (unlikely(__gk_err)) \
- goto err_label; \
} while (0)
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
+
+#define __get_kernel_nofault __mvc_kernel_nofault
+#define __put_kernel_nofault __mvc_kernel_nofault
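
Both generic accessors now map to the same MVC-based helper: a fault in the MVC lands in the exception table and branches to err_label instead of oopsing. A minimal sketch of a consumer via the generic copy_from_kernel_nofault() wrapper; demo_peek_kernel() is hypothetical.

	#include <linux/uaccess.h>

	/* Hypothetical probe: read a kernel long without risking an oops. */
	static long demo_peek_kernel(const void *addr, unsigned long *val)
	{
		/* Expands to __get_kernel_nofault(), i.e. __mvc_kernel_nofault() here. */
		return copy_from_kernel_nofault(val, addr, sizeof(*val));
	}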
+
void __cmpxchg_user_key_called_with_bad_pointer(void);
#define CMPXCHG_USER_KEY_MAX_LOOPS 128
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 4260bc5ce7f8..70fc671397da 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -35,6 +35,5 @@
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
#endif /* _ASM_S390_UNISTD_H_ */
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index 0e7bd3873907..46fb0ef6f984 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -2,7 +2,7 @@
/*
* Ultravisor Interfaces
*
- * Copyright IBM Corp. 2019, 2022
+ * Copyright IBM Corp. 2019, 2024
*
* Author(s):
* Vasily Gorbik <gor@linux.ibm.com>
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/gmap.h>
+#include <asm/asm.h>
#define UVC_CC_OK 0
#define UVC_CC_ERROR 1
@@ -28,9 +29,11 @@
#define UVC_RC_INV_STATE 0x0003
#define UVC_RC_INV_LEN 0x0005
#define UVC_RC_NO_RESUME 0x0007
+#define UVC_RC_MORE_DATA 0x0100
#define UVC_RC_NEED_DESTROY 0x8000
#define UVC_CMD_QUI 0x0001
+#define UVC_CMD_QUERY_KEYS 0x0002
#define UVC_CMD_INIT_UV 0x000f
#define UVC_CMD_CREATE_SEC_CONF 0x0100
#define UVC_CMD_DESTROY_SEC_CONF 0x0101
@@ -61,6 +64,7 @@
#define UVC_CMD_ADD_SECRET 0x1031
#define UVC_CMD_LIST_SECRETS 0x1033
#define UVC_CMD_LOCK_SECRETS 0x1034
+#define UVC_CMD_RETR_SECRET 0x1035
/* Bits in installed uv calls */
enum uv_cmds_inst {
@@ -94,6 +98,8 @@ enum uv_cmds_inst {
BIT_UVC_CMD_ADD_SECRET = 29,
BIT_UVC_CMD_LIST_SECRETS = 30,
BIT_UVC_CMD_LOCK_SECRETS = 31,
+ BIT_UVC_CMD_RETR_SECRET = 33,
+ BIT_UVC_CMD_QUERY_KEYS = 34,
};
enum uv_feat_ind {
@@ -140,11 +146,27 @@ struct uv_cb_qui {
u64 reservedf0; /* 0x00f0 */
u64 supp_add_secret_req_ver; /* 0x00f8 */
u64 supp_add_secret_pcf; /* 0x0100 */
- u64 supp_secret_types; /* 0x0180 */
- u16 max_secrets; /* 0x0110 */
- u8 reserved112[0x120 - 0x112]; /* 0x0112 */
+ u64 supp_secret_types; /* 0x0108 */
+ u16 max_assoc_secrets; /* 0x0110 */
+ u16 max_retr_secrets; /* 0x0112 */
+ u8 reserved114[0x120 - 0x114]; /* 0x0114 */
} __packed __aligned(8);
+struct uv_key_hash {
+ u64 dword[4];
+} __packed __aligned(8);
+
+#define UVC_QUERY_KEYS_IDX_HK 0
+#define UVC_QUERY_KEYS_IDX_BACK_HK 1
+
+/* Query Ultravisor Keys */
+struct uv_cb_query_keys {
+ struct uv_cb_header header; /* 0x0000 */
+ u64 reserved08[3]; /* 0x0008 */
+ struct uv_key_hash key_hashes[15]; /* 0x0020 */
+} __packed __aligned(8);
+static_assert(sizeof(struct uv_cb_query_keys) == 0x200);
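
A minimal sketch of issuing the new Query Keys UVC; demo_query_host_key() and its error handling are hypothetical, the control block layout is taken from the structure above.

	/* Hypothetical sketch: fetch the hash of the current host key. */
	static int demo_query_host_key(struct uv_key_hash *hash)
	{
		struct uv_cb_query_keys uvcb = {
			.header.len = sizeof(uvcb),
			.header.cmd = UVC_CMD_QUERY_KEYS,
		};

		if (uv_call(0, (u64)&uvcb))
			return -EIO;
		*hash = uvcb.key_hashes[UVC_QUERY_KEYS_IDX_HK];
		return 0;
	}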
+
/* Initialize Ultravisor */
struct uv_cb_init {
struct uv_cb_header header;
@@ -317,7 +339,6 @@ struct uv_cb_dump_complete {
* A common UV call struct for pv guests that contains a single address
* Examples:
* Add Secret
- * List Secrets
*/
struct uv_cb_guest_addr {
struct uv_cb_header header;
@@ -326,18 +347,102 @@ struct uv_cb_guest_addr {
u64 reserved28[4];
} __packed __aligned(8);
+#define UVC_RC_RETR_SECR_BUF_SMALL 0x0109
+#define UVC_RC_RETR_SECR_STORE_EMPTY 0x010f
+#define UVC_RC_RETR_SECR_INV_IDX 0x0110
+#define UVC_RC_RETR_SECR_INV_SECRET 0x0111
+
+struct uv_cb_retr_secr {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u16 secret_idx;
+ u16 reserved1a;
+ u32 buf_size;
+ u64 buf_addr;
+ u64 reserved28[4];
+} __packed __aligned(8);
+
+struct uv_cb_list_secrets {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u8 reserved18[6];
+ u16 start_idx;
+ u64 list_addr;
+ u64 reserved28[4];
+} __packed __aligned(8);
+
+enum uv_secret_types {
+ UV_SECRET_INVAL = 0x0,
+ UV_SECRET_NULL = 0x1,
+ UV_SECRET_ASSOCIATION = 0x2,
+ UV_SECRET_PLAIN = 0x3,
+ UV_SECRET_AES_128 = 0x4,
+ UV_SECRET_AES_192 = 0x5,
+ UV_SECRET_AES_256 = 0x6,
+ UV_SECRET_AES_XTS_128 = 0x7,
+ UV_SECRET_AES_XTS_256 = 0x8,
+ UV_SECRET_HMAC_SHA_256 = 0x9,
+ UV_SECRET_HMAC_SHA_512 = 0xa,
+ /* 0x0b - 0x10 reserved */
+ UV_SECRET_ECDSA_P256 = 0x11,
+ UV_SECRET_ECDSA_P384 = 0x12,
+ UV_SECRET_ECDSA_P521 = 0x13,
+ UV_SECRET_ECDSA_ED25519 = 0x14,
+ UV_SECRET_ECDSA_ED448 = 0x15,
+};
+
+/**
+ * uv_secret_list_item_hdr - UV secret metadata.
+ * @index: Index of the secret in the secret list.
+ * @type: Type of the secret. See `enum uv_secret_types`.
+ * @length: Length of the stored secret.
+ */
+struct uv_secret_list_item_hdr {
+ u16 index;
+ u16 type;
+ u32 length;
+} __packed __aligned(8);
+
+#define UV_SECRET_ID_LEN 32
+/**
+ * uv_secret_list_item - UV secret entry.
+ * @hdr: The metadata of this secret.
+ * @id: The ID of this secret, not the secret itself.
+ */
+struct uv_secret_list_item {
+ struct uv_secret_list_item_hdr hdr;
+ u64 reserverd08;
+ u8 id[UV_SECRET_ID_LEN];
+} __packed __aligned(8);
+
+/**
+ * uv_secret_list - UV secret-metadata list.
+ * @num_secr_stored: Number of secrets stored in this list.
+ * @total_num_secrets: Number of secrets stored in the UV for this guest.
+ * @next_secret_idx: Index of the next secret if more secrets are available, zero otherwise.
+ * @secrets: Up to 85 UV-secret metadata entries.
+ */
+struct uv_secret_list {
+ u16 num_secr_stored;
+ u16 total_num_secrets;
+ u16 next_secret_idx;
+ u16 reserved_06;
+ u64 reserved_08;
+ struct uv_secret_list_item secrets[85];
+} __packed __aligned(8);
+static_assert(sizeof(struct uv_secret_list) == PAGE_SIZE);
+
static inline int __uv_call(unsigned long r1, unsigned long r2)
{
int cc;
asm volatile(
- " .insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc)
+ " .insn rrf,0xb9a40000,%[r1],%[r2],0,0\n"
+ CC_IPM(cc)
+ : CC_OUT(cc, cc)
: [r1] "a" (r1), [r2] "a" (r2)
- : "memory", "cc");
- return cc;
+ : CC_CLOBBER_LIST("memory"));
+ return CC_TRANSFORM(cc);
}
static inline int uv_call(unsigned long r1, unsigned long r2)
@@ -382,6 +487,48 @@ static inline int uv_cmd_nodata(u64 handle, u16 cmd, u16 *rc, u16 *rrc)
return cc ? -EINVAL : 0;
}
+/**
+ * uv_list_secrets() - Do a List Secrets UVC.
+ *
+ * @buf: Buffer to write list into; size of one page.
+ * @start_idx: The smallest index that should be included in the list.
+ *		For the first invocation use 0.
+ * @rc: Pointer to store the return code or NULL.
+ * @rrc: Pointer to store the return reason code or NULL.
+ *
+ * This function calls the List Secrets UVC. The result is written into `buf`,
+ * which needs to be at least one page of writable memory.
+ * `buf` consists of:
+ * * %struct uv_secret_list_hdr
+ * * %struct uv_secret_list_item (multiple)
+ *
+ * For `start_idx` use _0_ for the first call. If more secrets are available
+ * than fit into the page, `rc` is `UVC_RC_MORE_DATA`.
+ * In this case use `uv_secret_list_hdr.next_secret_idx` for `start_idx`.
+ *
+ * Context: might sleep.
+ *
+ * Return: The UVC condition code.
+ */
+static inline int uv_list_secrets(struct uv_secret_list *buf, u16 start_idx,
+ u16 *rc, u16 *rrc)
+{
+ struct uv_cb_list_secrets uvcb = {
+ .header.len = sizeof(uvcb),
+ .header.cmd = UVC_CMD_LIST_SECRETS,
+ .start_idx = start_idx,
+ .list_addr = (u64)buf,
+ };
+ int cc = uv_call_sched(0, (u64)&uvcb);
+
+ if (rc)
+ *rc = uvcb.header.rc;
+ if (rrc)
+ *rrc = uvcb.header.rrc;
+
+ return cc;
+}
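
A minimal sketch of the paging protocol described above, assuming a caller that wants the metadata of one secret identified by its ID; demo_find_secret() is hypothetical and ignores the rrc for brevity (UVC_RC_EXECUTED is the usual success code from this header).

	/* Hypothetical: walk all list pages until the given secret ID is found. */
	static int demo_find_secret(const u8 id[UV_SECRET_ID_LEN],
				    struct uv_secret_list *buf,
				    struct uv_secret_list_item_hdr *hdr)
	{
		u16 start_idx = 0;
		u16 rc;
		int i;

		do {
			uv_list_secrets(buf, start_idx, &rc, NULL);
			if (rc != UVC_RC_EXECUTED && rc != UVC_RC_MORE_DATA)
				return -EIO;
			for (i = 0; i < buf->num_secr_stored; i++) {
				if (!memcmp(buf->secrets[i].id, id, UV_SECRET_ID_LEN)) {
					*hdr = buf->secrets[i].hdr;
					return 0;
				}
			}
			start_idx = buf->next_secret_idx;
		} while (rc == UVC_RC_MORE_DATA);
		return -ENOENT;
	}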
+
struct uv_info {
unsigned long inst_calls_list[4];
unsigned long uv_base_stor_len;
@@ -402,7 +549,8 @@ struct uv_info {
unsigned long supp_add_secret_req_ver;
unsigned long supp_add_secret_pcf;
unsigned long supp_secret_types;
- unsigned short max_secrets;
+ unsigned short max_assoc_secrets;
+ unsigned short max_retr_secrets;
};
extern struct uv_info uv_info;
@@ -414,7 +562,6 @@ static inline bool uv_has_feature(u8 feature_bit)
return test_bit_inv(feature_bit, &uv_info.uv_feature_indications);
}
-#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
extern int prot_virt_guest;
static inline int is_prot_virt_guest(void)
@@ -442,7 +589,10 @@ static inline int share(unsigned long addr, u16 cmd)
if (!uv_call(0, (u64)&uvcb))
return 0;
- return -EINVAL;
+ pr_err("%s UVC failed (rc: 0x%x, rrc: 0x%x), possible hypervisor bug.\n",
+ uvcb.header.cmd == UVC_CMD_SET_SHARED_ACCESS ? "Share" : "Unshare",
+ uvcb.header.rc, uvcb.header.rrc);
+ panic("System security cannot be guaranteed unless the system panics now.\n");
}
/*
@@ -466,13 +616,10 @@ static inline int uv_remove_shared(unsigned long addr)
return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
}
-#else
-#define is_prot_virt_guest() 0
-static inline int uv_set_shared(unsigned long addr) { return 0; }
-static inline int uv_remove_shared(unsigned long addr) { return 0; }
-#endif
+int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN],
+ struct uv_secret_list_item_hdr *secret);
+int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size);
-#if IS_ENABLED(CONFIG_KVM)
extern int prot_virt_host;
static inline int is_prot_virt_host(void)
@@ -481,37 +628,13 @@ static inline int is_prot_virt_host(void)
}
int uv_pin_shared(unsigned long paddr);
-int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
-int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
-int uv_destroy_owned_page(unsigned long paddr);
+int uv_destroy_folio(struct folio *folio);
+int uv_destroy_pte(pte_t pte);
+int uv_convert_from_secure_pte(pte_t pte);
+int make_hva_secure(struct mm_struct *mm, unsigned long hva, struct uv_cb_header *uvcb);
int uv_convert_from_secure(unsigned long paddr);
-int uv_convert_owned_from_secure(unsigned long paddr);
-int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
+int uv_convert_from_secure_folio(struct folio *folio);
void setup_uv(void);
-#else
-#define is_prot_virt_host() 0
-static inline void setup_uv(void) {}
-
-static inline int uv_pin_shared(unsigned long paddr)
-{
- return 0;
-}
-
-static inline int uv_destroy_owned_page(unsigned long paddr)
-{
- return 0;
-}
-
-static inline int uv_convert_from_secure(unsigned long paddr)
-{
- return 0;
-}
-
-static inline int uv_convert_owned_from_secure(unsigned long paddr)
-{
- return 0;
-}
-#endif
#endif /* _ASM_S390_UV_H */
diff --git a/arch/s390/include/asm/vdso-symbols.h b/arch/s390/include/asm/vdso-symbols.h
new file mode 100644
index 000000000000..0df17574d788
--- /dev/null
+++ b/arch/s390/include/asm/vdso-symbols.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __S390_VDSO_SYMBOLS_H__
+#define __S390_VDSO_SYMBOLS_H__
+
+#include <generated/vdso64-offsets.h>
+#ifdef CONFIG_COMPAT
+#include <generated/vdso32-offsets.h>
+#endif
+
+#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name))
+#ifdef CONFIG_COMPAT
+#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name))
+#else
+#define VDSO32_SYMBOL(tsk, name) (-1UL)
+#endif
+
+#endif /* __S390_VDSO_SYMBOLS_H__ */
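
A minimal sketch of how these lookups are meant to be used, assuming the generated offsets contain a sigreturn trampoline entry as on current kernels; demo_sigreturn_trampoline() is hypothetical.

	#include <linux/sched.h>
	#include <asm/vdso-symbols.h>

	/* Hypothetical helper: address of the vDSO sigreturn trampoline for a task. */
	static unsigned long demo_sigreturn_trampoline(struct task_struct *tsk)
	{
		return VDSO64_SYMBOL(tsk, sigreturn);	/* vdso_base + vdso64_offset_sigreturn */
	}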
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 53165aa7813a..92c73e4d97a9 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -6,27 +6,12 @@
#ifndef __ASSEMBLY__
-#include <generated/vdso64-offsets.h>
-#ifdef CONFIG_COMPAT
-#include <generated/vdso32-offsets.h>
-#endif
-
-#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name))
-#ifdef CONFIG_COMPAT
-#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name))
-#else
-#define VDSO32_SYMBOL(tsk, name) (-1UL)
-#endif
-
extern struct vdso_data *vdso_data;
int vdso_getcpu_init(void);
#endif /* __ASSEMBLY__ */
-/* Default link address for the vDSO */
-#define VDSO_LBASE 0
-
#define __VVAR_PAGES 2
#define VDSO_VERSION_STRING LINUX_2.6.29
diff --git a/arch/s390/include/asm/vdso/data.h b/arch/s390/include/asm/vdso/data.h
deleted file mode 100644
index 73ee89142666..000000000000
--- a/arch/s390/include/asm/vdso/data.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __S390_ASM_VDSO_DATA_H
-#define __S390_ASM_VDSO_DATA_H
-
-#include <linux/types.h>
-#include <vdso/datapage.h>
-
-struct arch_vdso_data {
- __s64 tod_steering_delta;
- __u64 tod_steering_end;
-};
-
-#endif /* __S390_ASM_VDSO_DATA_H */
diff --git a/arch/s390/include/asm/vdso/getrandom.h b/arch/s390/include/asm/vdso/getrandom.h
new file mode 100644
index 000000000000..36355af7160b
--- /dev/null
+++ b/arch/s390/include/asm/vdso/getrandom.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_VDSO_GETRANDOM_H
+#define __ASM_VDSO_GETRANDOM_H
+
+#ifndef __ASSEMBLY__
+
+#include <vdso/datapage.h>
+#include <asm/vdso/vsyscall.h>
+#include <asm/syscall.h>
+#include <asm/unistd.h>
+#include <asm/page.h>
+
+/**
+ * getrandom_syscall - Invoke the getrandom() syscall.
+ * @buffer: Destination buffer to fill with random bytes.
+ * @len: Size of @buffer in bytes.
+ * @flags: Zero or more GRND_* flags.
+ * Returns: The number of random bytes written to @buffer, or a negative value indicating an error.
+ */
+static __always_inline ssize_t getrandom_syscall(void *buffer, size_t len, unsigned int flags)
+{
+ return syscall3(__NR_getrandom, (long)buffer, (long)len, (long)flags);
+}
+
+static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void)
+{
+ /*
+ * The RNG data is in the real VVAR data page, but if a task belongs to a time namespace
+ * then VVAR_DATA_PAGE_OFFSET points to the namespace-specific VVAR page and VVAR_TIMENS_
+ * PAGE_OFFSET points to the real VVAR page.
+ */
+ if (IS_ENABLED(CONFIG_TIME_NS) && _vdso_data->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ return (void *)&_vdso_rng_data + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
+ return &_vdso_rng_data;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETRANDOM_H */
diff --git a/arch/s390/include/asm/vdso/gettimeofday.h b/arch/s390/include/asm/vdso/gettimeofday.h
index db84942eb78f..7937765ccfa5 100644
--- a/arch/s390/include/asm/vdso/gettimeofday.h
+++ b/arch/s390/include/asm/vdso/gettimeofday.h
@@ -6,16 +6,13 @@
#define VDSO_HAS_CLOCK_GETRES 1
+#define VDSO_DELTA_NOMASK 1
+
#include <asm/syscall.h>
#include <asm/timex.h>
#include <asm/unistd.h>
#include <linux/compiler.h>
-#define vdso_calc_delta __arch_vdso_calc_delta
-static __always_inline u64 __arch_vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
-{
- return (cycles - last) * mult;
-}
static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
diff --git a/arch/s390/include/asm/vdso/time_data.h b/arch/s390/include/asm/vdso/time_data.h
new file mode 100644
index 000000000000..8a08752422e6
--- /dev/null
+++ b/arch/s390/include/asm/vdso/time_data.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __S390_ASM_VDSO_TIME_DATA_H
+#define __S390_ASM_VDSO_TIME_DATA_H
+
+#include <linux/types.h>
+
+struct arch_vdso_time_data {
+ __s64 tod_steering_delta;
+ __u64 tod_steering_end;
+};
+
+#endif /* __S390_ASM_VDSO_TIME_DATA_H */
diff --git a/arch/s390/include/asm/vdso/vsyscall.h b/arch/s390/include/asm/vdso/vsyscall.h
index 6c67c08cefdd..3eb576ecd3bd 100644
--- a/arch/s390/include/asm/vdso/vsyscall.h
+++ b/arch/s390/include/asm/vdso/vsyscall.h
@@ -2,15 +2,19 @@
#ifndef __ASM_VDSO_VSYSCALL_H
#define __ASM_VDSO_VSYSCALL_H
+#define __VDSO_RND_DATA_OFFSET 768
+
#ifndef __ASSEMBLY__
#include <linux/hrtimer.h>
-#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>
-/*
- * Update the vDSO data page to keep in sync with kernel timekeeping.
- */
+
+enum vvar_pages {
+ VVAR_DATA_PAGE_OFFSET,
+ VVAR_TIMENS_PAGE_OFFSET,
+ VVAR_NR_PAGES
+};
static __always_inline struct vdso_data *__s390_get_k_vdso_data(void)
{
@@ -18,6 +22,12 @@ static __always_inline struct vdso_data *__s390_get_k_vdso_data(void)
}
#define __arch_get_k_vdso_data __s390_get_k_vdso_data
+static __always_inline struct vdso_rng_data *__s390_get_k_vdso_rnd_data(void)
+{
+ return (void *)vdso_data + __VDSO_RND_DATA_OFFSET;
+}
+#define __arch_get_k_vdso_rng_data __s390_get_k_vdso_rnd_data
+
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
diff --git a/arch/s390/include/asm/vtime.h b/arch/s390/include/asm/vtime.h
index fe17e448c0c5..9d25fb35a042 100644
--- a/arch/s390/include/asm/vtime.h
+++ b/arch/s390/include/asm/vtime.h
@@ -2,20 +2,22 @@
#ifndef _S390_VTIME_H
#define _S390_VTIME_H
-#define __ARCH_HAS_VTIME_TASK_SWITCH
-
static inline void update_timer_sys(void)
{
- S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer;
- S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.sys_enter_timer;
- S390_lowcore.last_update_timer = S390_lowcore.sys_enter_timer;
+ struct lowcore *lc = get_lowcore();
+
+ lc->system_timer += lc->last_update_timer - lc->exit_timer;
+ lc->user_timer += lc->exit_timer - lc->sys_enter_timer;
+ lc->last_update_timer = lc->sys_enter_timer;
}
static inline void update_timer_mcck(void)
{
- S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer;
- S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.mcck_enter_timer;
- S390_lowcore.last_update_timer = S390_lowcore.mcck_enter_timer;
+ struct lowcore *lc = get_lowcore();
+
+ lc->system_timer += lc->last_update_timer - lc->exit_timer;
+ lc->user_timer += lc->exit_timer - lc->mcck_enter_timer;
+ lc->last_update_timer = lc->mcck_enter_timer;
}
#endif /* _S390_VTIME_H */
diff --git a/arch/s390/include/asm/vx-insn.h b/arch/s390/include/asm/vx-insn.h
deleted file mode 100644
index 8c188f1c6d27..000000000000
--- a/arch/s390/include/asm/vx-insn.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for Vector Instructions
- *
- * This wrapper header file allows to use the vector instruction macros in
- * both assembler files as well as in inline assemblies in C files.
- */
-
-#ifndef __ASM_S390_VX_INSN_H
-#define __ASM_S390_VX_INSN_H
-
-#include <asm/vx-insn-asm.h>
-
-#ifndef __ASSEMBLY__
-
-asm(".include \"asm/vx-insn-asm.h\"\n");
-
-#endif /* __ASSEMBLY__ */
-#endif /* __ASM_S390_VX_INSN_H */
diff --git a/arch/s390/include/asm/word-at-a-time.h b/arch/s390/include/asm/word-at-a-time.h
index 2579f1694b82..203acd6e431b 100644
--- a/arch/s390/include/asm/word-at-a-time.h
+++ b/arch/s390/include/asm/word-at-a-time.h
@@ -2,7 +2,8 @@
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H
-#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/wordpart.h>
#include <asm/asm-extable.h>
#include <asm/bitsperlong.h>
diff --git a/arch/s390/include/uapi/asm/dasd.h b/arch/s390/include/uapi/asm/dasd.h
index b11d98800458..7c364b33c84d 100644
--- a/arch/s390/include/uapi/asm/dasd.h
+++ b/arch/s390/include/uapi/asm/dasd.h
@@ -294,7 +294,7 @@ struct dasd_snid_ioctl_data {
/********************************************************************************
* SECTION: Definition of IOCTLs
*
- * Here ist how the ioctl-nr should be used:
+ * Here is how the ioctl-nr should be used:
* 0 - 31 DASD driver itself
* 32 - 239 still open
* 240 - 255 reserved for EMC
diff --git a/arch/s390/include/uapi/asm/diag.h b/arch/s390/include/uapi/asm/diag.h
new file mode 100644
index 000000000000..b7e6ccb4ff6e
--- /dev/null
+++ b/arch/s390/include/uapi/asm/diag.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Diag ioctls and its associated structures definitions.
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#ifndef __S390_UAPI_ASM_DIAG_H
+#define __S390_UAPI_ASM_DIAG_H
+
+#include <linux/types.h>
+
+#define DIAG_MAGIC_STR 'D'
+
+struct diag324_pib {
+ __u64 address;
+ __u64 sequence;
+};
+
+struct diag310_memtop {
+ __u64 address;
+ __u64 nesting_lvl;
+};
+
+/* Diag ioctl definitions */
+#define DIAG324_GET_PIBBUF _IOWR(DIAG_MAGIC_STR, 0x77, struct diag324_pib)
+#define DIAG324_GET_PIBLEN _IOR(DIAG_MAGIC_STR, 0x78, size_t)
+#define DIAG310_GET_STRIDE _IOR(DIAG_MAGIC_STR, 0x79, size_t)
+#define DIAG310_GET_MEMTOPLEN _IOWR(DIAG_MAGIC_STR, 0x7a, size_t)
+#define DIAG310_GET_MEMTOPBUF _IOWR(DIAG_MAGIC_STR, 0x7b, struct diag310_memtop)
+
+#endif /* __S390_UAPI_ASM_DIAG_H */
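
A hedged userspace sketch of probing these ioctls; the device node name /dev/diag and the error handling are assumptions, only the ioctl numbers come from the header above.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <asm/diag.h>

	int main(void)
	{
		size_t piblen = 0, stride = 0;
		int fd = open("/dev/diag", O_RDONLY);	/* assumed device node */

		if (fd < 0)
			return 1;
		if (!ioctl(fd, DIAG324_GET_PIBLEN, &piblen))
			printf("diag 0x324 PIB length: %zu\n", piblen);
		if (!ioctl(fd, DIAG310_GET_STRIDE, &stride))
			printf("diag 0x310 stride: %zu\n", stride);
		close(fd);
		return 0;
	}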
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index abe926d43cbe..60345dd2cba2 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -12,7 +12,320 @@
#include <linux/types.h>
#define __KVM_S390
-#define __KVM_HAVE_GUEST_DEBUG
+
+struct kvm_s390_skeys {
+ __u64 start_gfn;
+ __u64 count;
+ __u64 skeydata_addr;
+ __u32 flags;
+ __u32 reserved[9];
+};
+
+#define KVM_S390_CMMA_PEEK (1 << 0)
+
+/**
+ * kvm_s390_cmma_log - Used for CMMA migration.
+ *
+ * Used both for input and output.
+ *
+ * @start_gfn: Guest page number to start from.
+ * @count: Size of the result buffer.
+ * @flags: Control operation mode via KVM_S390_CMMA_* flags
+ * @remaining: Used with KVM_S390_GET_CMMA_BITS. Indicates how many dirty
+ * pages are still remaining.
+ * @mask: Used with KVM_S390_SET_CMMA_BITS. Bitmap of bits to actually set
+ * in the PGSTE.
+ * @values: Pointer to the values buffer.
+ *
+ * Used in KVM_S390_{G,S}ET_CMMA_BITS ioctls.
+ */
+struct kvm_s390_cmma_log {
+ __u64 start_gfn;
+ __u32 count;
+ __u32 flags;
+ union {
+ __u64 remaining;
+ __u64 mask;
+ };
+ __u64 values;
+};
+
+#define KVM_S390_RESET_POR 1
+#define KVM_S390_RESET_CLEAR 2
+#define KVM_S390_RESET_SUBSYSTEM 4
+#define KVM_S390_RESET_CPU_INIT 8
+#define KVM_S390_RESET_IPL 16
+
+/* for KVM_S390_MEM_OP */
+struct kvm_s390_mem_op {
+ /* in */
+ __u64 gaddr; /* the guest address */
+ __u64 flags; /* flags */
+ __u32 size; /* amount of bytes */
+ __u32 op; /* type of operation */
+ __u64 buf; /* buffer in userspace */
+ union {
+ struct {
+ __u8 ar; /* the access register number */
+ __u8 key; /* access key, ignored if flag unset */
+ __u8 pad1[6]; /* ignored */
+ __u64 old_addr; /* ignored if cmpxchg flag unset */
+ };
+ __u32 sida_offset; /* offset into the sida */
+ __u8 reserved[32]; /* ignored */
+ };
+};
+/* types for kvm_s390_mem_op->op */
+#define KVM_S390_MEMOP_LOGICAL_READ 0
+#define KVM_S390_MEMOP_LOGICAL_WRITE 1
+#define KVM_S390_MEMOP_SIDA_READ 2
+#define KVM_S390_MEMOP_SIDA_WRITE 3
+#define KVM_S390_MEMOP_ABSOLUTE_READ 4
+#define KVM_S390_MEMOP_ABSOLUTE_WRITE 5
+#define KVM_S390_MEMOP_ABSOLUTE_CMPXCHG 6
+
+/* flags for kvm_s390_mem_op->flags */
+#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0)
+#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1)
+#define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2)
+
+/* flags specifying extension support via KVM_CAP_S390_MEM_OP_EXTENSION */
+#define KVM_S390_MEMOP_EXTENSION_CAP_BASE (1 << 0)
+#define KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG (1 << 1)
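
A hedged userspace sketch of a logical guest read through the KVM_S390_MEM_OP ioctl (part of the generic KVM UAPI); vcpu_fd and demo_read_guest_byte() are assumptions.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Hypothetical helper: read one byte from a guest logical address. */
	static int demo_read_guest_byte(int vcpu_fd, __u64 gaddr, __u8 *out)
	{
		struct kvm_s390_mem_op op;

		memset(&op, 0, sizeof(op));
		op.gaddr = gaddr;			/* guest logical address */
		op.size = 1;				/* one byte */
		op.op = KVM_S390_MEMOP_LOGICAL_READ;
		op.buf = (__u64)(unsigned long)out;	/* userspace buffer */
		op.ar = 0;				/* access register 0 */
		return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
	}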
+
+struct kvm_s390_psw {
+ __u64 mask;
+ __u64 addr;
+};
+
+/* valid values for type in kvm_s390_interrupt */
+#define KVM_S390_SIGP_STOP 0xfffe0000u
+#define KVM_S390_PROGRAM_INT 0xfffe0001u
+#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u
+#define KVM_S390_RESTART 0xfffe0003u
+#define KVM_S390_INT_PFAULT_INIT 0xfffe0004u
+#define KVM_S390_INT_PFAULT_DONE 0xfffe0005u
+#define KVM_S390_MCHK 0xfffe1000u
+#define KVM_S390_INT_CLOCK_COMP 0xffff1004u
+#define KVM_S390_INT_CPU_TIMER 0xffff1005u
+#define KVM_S390_INT_VIRTIO 0xffff2603u
+#define KVM_S390_INT_SERVICE 0xffff2401u
+#define KVM_S390_INT_EMERGENCY 0xffff1201u
+#define KVM_S390_INT_EXTERNAL_CALL 0xffff1202u
+/* Anything below 0xfffe0000u is taken by INT_IO */
+#define KVM_S390_INT_IO(ai,cssid,ssid,schid) \
+ (((schid)) | \
+ ((ssid) << 16) | \
+ ((cssid) << 18) | \
+ ((ai) << 26))
+#define KVM_S390_INT_IO_MIN 0x00000000u
+#define KVM_S390_INT_IO_MAX 0xfffdffffu
+#define KVM_S390_INT_IO_AI_MASK 0x04000000u
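
A small illustration of the encoding above; the helper name is made up, the bit layout comes from KVM_S390_INT_IO().

	/* Hypothetical helper: interrupt type for a classic (non-adapter) I/O interrupt. */
	static inline __u32 demo_io_int_type(__u32 cssid, __u32 ssid, __u32 schid)
	{
		return KVM_S390_INT_IO(0, cssid, ssid, schid);	/* ai = 0 */
	}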
+
+
+struct kvm_s390_interrupt {
+ __u32 type;
+ __u32 parm;
+ __u64 parm64;
+};
+
+struct kvm_s390_io_info {
+ __u16 subchannel_id;
+ __u16 subchannel_nr;
+ __u32 io_int_parm;
+ __u32 io_int_word;
+};
+
+struct kvm_s390_ext_info {
+ __u32 ext_params;
+ __u32 pad;
+ __u64 ext_params2;
+};
+
+struct kvm_s390_pgm_info {
+ __u64 trans_exc_code;
+ __u64 mon_code;
+ __u64 per_address;
+ __u32 data_exc_code;
+ __u16 code;
+ __u16 mon_class_nr;
+ __u8 per_code;
+ __u8 per_atmid;
+ __u8 exc_access_id;
+ __u8 per_access_id;
+ __u8 op_access_id;
+#define KVM_S390_PGM_FLAGS_ILC_VALID 0x01
+#define KVM_S390_PGM_FLAGS_ILC_0 0x02
+#define KVM_S390_PGM_FLAGS_ILC_1 0x04
+#define KVM_S390_PGM_FLAGS_ILC_MASK 0x06
+#define KVM_S390_PGM_FLAGS_NO_REWIND 0x08
+ __u8 flags;
+ __u8 pad[2];
+};
+
+struct kvm_s390_prefix_info {
+ __u32 address;
+};
+
+struct kvm_s390_extcall_info {
+ __u16 code;
+};
+
+struct kvm_s390_emerg_info {
+ __u16 code;
+};
+
+#define KVM_S390_STOP_FLAG_STORE_STATUS 0x01
+struct kvm_s390_stop_info {
+ __u32 flags;
+};
+
+struct kvm_s390_mchk_info {
+ __u64 cr14;
+ __u64 mcic;
+ __u64 failing_storage_address;
+ __u32 ext_damage_code;
+ __u32 pad;
+ __u8 fixed_logout[16];
+};
+
+struct kvm_s390_irq {
+ __u64 type;
+ union {
+ struct kvm_s390_io_info io;
+ struct kvm_s390_ext_info ext;
+ struct kvm_s390_pgm_info pgm;
+ struct kvm_s390_emerg_info emerg;
+ struct kvm_s390_extcall_info extcall;
+ struct kvm_s390_prefix_info prefix;
+ struct kvm_s390_stop_info stop;
+ struct kvm_s390_mchk_info mchk;
+ char reserved[64];
+ } u;
+};
+
+struct kvm_s390_irq_state {
+ __u64 buf;
+ __u32 flags; /* will stay unused for compatibility reasons */
+ __u32 len;
+ __u32 reserved[4]; /* will stay unused for compatibility reasons */
+};
+
+struct kvm_s390_ucas_mapping {
+ __u64 user_addr;
+ __u64 vcpu_addr;
+ __u64 length;
+};
+
+struct kvm_s390_pv_sec_parm {
+ __u64 origin;
+ __u64 length;
+};
+
+struct kvm_s390_pv_unp {
+ __u64 addr;
+ __u64 size;
+ __u64 tweak;
+};
+
+enum pv_cmd_dmp_id {
+ KVM_PV_DUMP_INIT,
+ KVM_PV_DUMP_CONFIG_STOR_STATE,
+ KVM_PV_DUMP_COMPLETE,
+ KVM_PV_DUMP_CPU,
+};
+
+struct kvm_s390_pv_dmp {
+ __u64 subcmd;
+ __u64 buff_addr;
+ __u64 buff_len;
+ __u64 gaddr; /* For dump storage state */
+ __u64 reserved[4];
+};
+
+enum pv_cmd_info_id {
+ KVM_PV_INFO_VM,
+ KVM_PV_INFO_DUMP,
+};
+
+struct kvm_s390_pv_info_dump {
+ __u64 dump_cpu_buffer_len;
+ __u64 dump_config_mem_buffer_per_1m;
+ __u64 dump_config_finalize_len;
+};
+
+struct kvm_s390_pv_info_vm {
+ __u64 inst_calls_list[4];
+ __u64 max_cpus;
+ __u64 max_guests;
+ __u64 max_guest_addr;
+ __u64 feature_indication;
+};
+
+struct kvm_s390_pv_info_header {
+ __u32 id;
+ __u32 len_max;
+ __u32 len_written;
+ __u32 reserved;
+};
+
+struct kvm_s390_pv_info {
+ struct kvm_s390_pv_info_header header;
+ union {
+ struct kvm_s390_pv_info_dump dump;
+ struct kvm_s390_pv_info_vm vm;
+ };
+};
+
+enum pv_cmd_id {
+ KVM_PV_ENABLE,
+ KVM_PV_DISABLE,
+ KVM_PV_SET_SEC_PARMS,
+ KVM_PV_UNPACK,
+ KVM_PV_VERIFY,
+ KVM_PV_PREP_RESET,
+ KVM_PV_UNSHARE_ALL,
+ KVM_PV_INFO,
+ KVM_PV_DUMP,
+ KVM_PV_ASYNC_CLEANUP_PREPARE,
+ KVM_PV_ASYNC_CLEANUP_PERFORM,
+};
+
+struct kvm_pv_cmd {
+ __u32 cmd; /* Command to be executed */
+ __u16 rc; /* Ultravisor return code */
+ __u16 rrc; /* Ultravisor return reason code */
+ __u64 data; /* Data or address */
+ __u32 flags; /* flags for future extensions. Must be 0 for now */
+ __u32 reserved[3];
+};
+
+struct kvm_s390_zpci_op {
+ /* in */
+ __u32 fh; /* target device */
+ __u8 op; /* operation to perform */
+ __u8 pad[3];
+ union {
+ /* for KVM_S390_ZPCIOP_REG_AEN */
+ struct {
+ __u64 ibv; /* Guest addr of interrupt bit vector */
+ __u64 sb; /* Guest addr of summary bit */
+ __u32 flags;
+ __u32 noi; /* Number of interrupts */
+ __u8 isc; /* Guest interrupt subclass */
+ __u8 sbo; /* Offset of guest summary bit vector */
+ __u16 pad;
+ } reg_aen;
+ __u64 reserved[8];
+ } u;
+};
+
+/* types for kvm_s390_zpci_op->op */
+#define KVM_S390_ZPCIOP_REG_AEN 0
+#define KVM_S390_ZPCIOP_DEREG_AEN 1
+
+/* flags for kvm_s390_zpci_op->u.reg_aen.flags */
+#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0)
/* Device control API: s390-specific devices */
#define KVM_DEV_FLIC_GET_ALL_IRQS 1
@@ -156,7 +469,8 @@ struct kvm_s390_vm_cpu_subfunc {
__u8 kdsa[16]; /* with MSA9 */
__u8 sortl[32]; /* with STFLE.150 */
__u8 dfltcc[32]; /* with STFLE.151 */
- __u8 reserved[1728];
+ __u8 pfcr[16]; /* with STFLE.201 */
+ __u8 reserved[1712];
};
#define KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST 6
diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h
index 5ad76471e73f..ca42e941675d 100644
--- a/arch/s390/include/uapi/asm/pkey.h
+++ b/arch/s390/include/uapi/asm/pkey.h
@@ -41,23 +41,29 @@
#define PKEY_KEYTYPE_ECC_P521 7
#define PKEY_KEYTYPE_ECC_ED25519 8
#define PKEY_KEYTYPE_ECC_ED448 9
+#define PKEY_KEYTYPE_AES_XTS_128 10
+#define PKEY_KEYTYPE_AES_XTS_256 11
+#define PKEY_KEYTYPE_HMAC_512 12
+#define PKEY_KEYTYPE_HMAC_1024 13
/* the newer ioctls use a pkey_key_type enum for type information */
enum pkey_key_type {
- PKEY_TYPE_CCA_DATA = (__u32) 1,
- PKEY_TYPE_CCA_CIPHER = (__u32) 2,
- PKEY_TYPE_EP11 = (__u32) 3,
- PKEY_TYPE_CCA_ECC = (__u32) 0x1f,
- PKEY_TYPE_EP11_AES = (__u32) 6,
- PKEY_TYPE_EP11_ECC = (__u32) 7,
+ PKEY_TYPE_CCA_DATA = (__u32)1,
+ PKEY_TYPE_CCA_CIPHER = (__u32)2,
+ PKEY_TYPE_EP11 = (__u32)3,
+ PKEY_TYPE_CCA_ECC = (__u32)0x1f,
+ PKEY_TYPE_EP11_AES = (__u32)6,
+ PKEY_TYPE_EP11_ECC = (__u32)7,
+ PKEY_TYPE_PROTKEY = (__u32)8,
+ PKEY_TYPE_UVSECRET = (__u32)9,
};
/* the newer ioctls use a pkey_key_size enum for key size information */
enum pkey_key_size {
- PKEY_SIZE_AES_128 = (__u32) 128,
- PKEY_SIZE_AES_192 = (__u32) 192,
- PKEY_SIZE_AES_256 = (__u32) 256,
- PKEY_SIZE_UNKNOWN = (__u32) 0xFFFFFFFF,
+ PKEY_SIZE_AES_128 = (__u32)128,
+ PKEY_SIZE_AES_192 = (__u32)192,
+ PKEY_SIZE_AES_256 = (__u32)256,
+ PKEY_SIZE_UNKNOWN = (__u32)0xFFFFFFFF,
};
/* some of the newer ioctls use these flags */
@@ -120,6 +126,7 @@ struct pkey_genseck {
__u32 keytype; /* in: key type to generate */
struct pkey_seckey seckey; /* out: the secure key blob */
};
+
#define PKEY_GENSECK _IOWR(PKEY_IOCTL_MAGIC, 0x01, struct pkey_genseck)
/*
@@ -132,6 +139,7 @@ struct pkey_clr2seck {
struct pkey_clrkey clrkey; /* in: the clear key value */
struct pkey_seckey seckey; /* out: the secure key blob */
};
+
#define PKEY_CLR2SECK _IOWR(PKEY_IOCTL_MAGIC, 0x02, struct pkey_clr2seck)
/*
@@ -143,6 +151,7 @@ struct pkey_sec2protk {
struct pkey_seckey seckey; /* in: the secure key blob */
struct pkey_protkey protkey; /* out: the protected key */
};
+
#define PKEY_SEC2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x03, struct pkey_sec2protk)
/*
@@ -153,6 +162,7 @@ struct pkey_clr2protk {
struct pkey_clrkey clrkey; /* in: the clear key value */
struct pkey_protkey protkey; /* out: the protected key */
};
+
#define PKEY_CLR2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x04, struct pkey_clr2protk)
/*
@@ -164,6 +174,7 @@ struct pkey_findcard {
__u16 cardnr; /* out: card number */
__u16 domain; /* out: domain number */
};
+
#define PKEY_FINDCARD _IOWR(PKEY_IOCTL_MAGIC, 0x05, struct pkey_findcard)
/*
@@ -173,6 +184,7 @@ struct pkey_skey2pkey {
struct pkey_seckey seckey; /* in: the secure key blob */
struct pkey_protkey protkey; /* out: the protected key */
};
+
#define PKEY_SKEY2PKEY _IOWR(PKEY_IOCTL_MAGIC, 0x06, struct pkey_skey2pkey)
/*
@@ -190,6 +202,7 @@ struct pkey_verifykey {
__u16 keysize; /* out: key size in bits */
__u32 attributes; /* out: attribute bits */
};
+
#define PKEY_VERIFYKEY _IOWR(PKEY_IOCTL_MAGIC, 0x07, struct pkey_verifykey)
#define PKEY_VERIFY_ATTR_AES 0x00000001 /* key is an AES key */
#define PKEY_VERIFY_ATTR_OLD_MKVP 0x00000100 /* key has old MKVP value */
@@ -221,6 +234,7 @@ struct pkey_kblob2pkey {
__u32 keylen; /* in: the key blob length */
struct pkey_protkey protkey; /* out: the protected key */
};
+
#define PKEY_KBLOB2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x0A, struct pkey_kblob2pkey)
/*
@@ -253,6 +267,7 @@ struct pkey_genseck2 {
__u32 keylen; /* in: available key blob buffer size */
/* out: actual key blob size */
};
+
#define PKEY_GENSECK2 _IOWR(PKEY_IOCTL_MAGIC, 0x11, struct pkey_genseck2)
/*
@@ -287,6 +302,7 @@ struct pkey_clr2seck2 {
__u32 keylen; /* in: available key blob buffer size */
/* out: actual key blob size */
};
+
#define PKEY_CLR2SECK2 _IOWR(PKEY_IOCTL_MAGIC, 0x12, struct pkey_clr2seck2)
/*
@@ -324,6 +340,7 @@ struct pkey_verifykey2 {
enum pkey_key_size size; /* out: the key size */
__u32 flags; /* out: additional key info flags */
};
+
#define PKEY_VERIFYKEY2 _IOWR(PKEY_IOCTL_MAGIC, 0x17, struct pkey_verifykey2)
/*
@@ -346,6 +363,7 @@ struct pkey_kblob2pkey2 {
__u32 apqn_entries; /* in: # of apqn target list entries */
struct pkey_protkey protkey; /* out: the protected key */
};
+
#define PKEY_KBLOB2PROTK2 _IOWR(PKEY_IOCTL_MAGIC, 0x1A, struct pkey_kblob2pkey2)
/*
@@ -382,6 +400,7 @@ struct pkey_apqns4key {
__u32 apqn_entries; /* in: max # of apqn entries in the list */
/* out: # apqns stored into the list */
};
+
#define PKEY_APQNS4K _IOWR(PKEY_IOCTL_MAGIC, 0x1B, struct pkey_apqns4key)
/*
@@ -421,6 +440,7 @@ struct pkey_apqns4keytype {
__u32 apqn_entries; /* in: max # of apqn entries in the list */
/* out: # apqns stored into the list */
};
+
#define PKEY_APQNS4KT _IOWR(PKEY_IOCTL_MAGIC, 0x1C, struct pkey_apqns4keytype)
/*
@@ -447,6 +467,7 @@ struct pkey_kblob2pkey3 {
__u32 pkeylen; /* in/out: size of pkey buffer/actual len of pkey */
__u8 __user *pkey; /* in: pkey blob buffer space ptr */
};
+
#define PKEY_KBLOB2PROTK3 _IOWR(PKEY_IOCTL_MAGIC, 0x1D, struct pkey_kblob2pkey3)
#endif /* _UAPI_PKEY_H */
diff --git a/arch/s390/include/uapi/asm/uvdevice.h b/arch/s390/include/uapi/asm/uvdevice.h
index b9c2f14a6af3..4947f26ad9fb 100644
--- a/arch/s390/include/uapi/asm/uvdevice.h
+++ b/arch/s390/include/uapi/asm/uvdevice.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
- * Copyright IBM Corp. 2022
+ * Copyright IBM Corp. 2022, 2024
* Author(s): Steffen Eiden <seiden@linux.ibm.com>
*/
#ifndef __S390_ASM_UVDEVICE_H
@@ -52,7 +52,7 @@ struct uvio_uvdev_info {
__u64 supp_uvio_cmds;
/*
* If bit `n` is set, the Ultravisor(UV) supports the UV-call
- * corresponding to the IOCTL with nr `n` in the calling contextx (host
+ * corresponding to the IOCTL with nr `n` in the calling context (host
* or guest). The value is only valid if the corresponding bit in
* @supp_uvio_cmds is set as well.
*/
@@ -71,6 +71,7 @@ struct uvio_uvdev_info {
#define UVIO_ATT_ADDITIONAL_MAX_LEN 0x8000
#define UVIO_ADD_SECRET_MAX_LEN 0x100000
#define UVIO_LIST_SECRETS_LEN 0x1000
+#define UVIO_RETR_SECRET_MAX_LEN 0x2000
#define UVIO_DEVICE_NAME "uv"
#define UVIO_TYPE_UVC 'u'
@@ -81,22 +82,25 @@ enum UVIO_IOCTL_NR {
UVIO_IOCTL_ADD_SECRET_NR,
UVIO_IOCTL_LIST_SECRETS_NR,
UVIO_IOCTL_LOCK_SECRETS_NR,
+ UVIO_IOCTL_RETR_SECRET_NR,
/* must be the last entry */
UVIO_IOCTL_NUM_IOCTLS
};
-#define UVIO_IOCTL(nr) _IOWR(UVIO_TYPE_UVC, nr, struct uvio_ioctl_cb)
-#define UVIO_IOCTL_UVDEV_INFO UVIO_IOCTL(UVIO_IOCTL_UVDEV_INFO_NR)
-#define UVIO_IOCTL_ATT UVIO_IOCTL(UVIO_IOCTL_ATT_NR)
-#define UVIO_IOCTL_ADD_SECRET UVIO_IOCTL(UVIO_IOCTL_ADD_SECRET_NR)
-#define UVIO_IOCTL_LIST_SECRETS UVIO_IOCTL(UVIO_IOCTL_LIST_SECRETS_NR)
-#define UVIO_IOCTL_LOCK_SECRETS UVIO_IOCTL(UVIO_IOCTL_LOCK_SECRETS_NR)
+#define UVIO_IOCTL(nr) _IOWR(UVIO_TYPE_UVC, nr, struct uvio_ioctl_cb)
+#define UVIO_IOCTL_UVDEV_INFO UVIO_IOCTL(UVIO_IOCTL_UVDEV_INFO_NR)
+#define UVIO_IOCTL_ATT UVIO_IOCTL(UVIO_IOCTL_ATT_NR)
+#define UVIO_IOCTL_ADD_SECRET UVIO_IOCTL(UVIO_IOCTL_ADD_SECRET_NR)
+#define UVIO_IOCTL_LIST_SECRETS UVIO_IOCTL(UVIO_IOCTL_LIST_SECRETS_NR)
+#define UVIO_IOCTL_LOCK_SECRETS UVIO_IOCTL(UVIO_IOCTL_LOCK_SECRETS_NR)
+#define UVIO_IOCTL_RETR_SECRET UVIO_IOCTL(UVIO_IOCTL_RETR_SECRET_NR)
-#define UVIO_SUPP_CALL(nr) (1ULL << (nr))
-#define UVIO_SUPP_UDEV_INFO UVIO_SUPP_CALL(UVIO_IOCTL_UDEV_INFO_NR)
-#define UVIO_SUPP_ATT UVIO_SUPP_CALL(UVIO_IOCTL_ATT_NR)
-#define UVIO_SUPP_ADD_SECRET UVIO_SUPP_CALL(UVIO_IOCTL_ADD_SECRET_NR)
-#define UVIO_SUPP_LIST_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LIST_SECRETS_NR)
-#define UVIO_SUPP_LOCK_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LOCK_SECRETS_NR)
+#define UVIO_SUPP_CALL(nr) (1ULL << (nr))
+#define UVIO_SUPP_UDEV_INFO UVIO_SUPP_CALL(UVIO_IOCTL_UDEV_INFO_NR)
+#define UVIO_SUPP_ATT UVIO_SUPP_CALL(UVIO_IOCTL_ATT_NR)
+#define UVIO_SUPP_ADD_SECRET UVIO_SUPP_CALL(UVIO_IOCTL_ADD_SECRET_NR)
+#define UVIO_SUPP_LIST_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LIST_SECRETS_NR)
+#define UVIO_SUPP_LOCK_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LOCK_SECRETS_NR)
+#define UVIO_SUPP_RETR_SECRET UVIO_SUPP_CALL(UVIO_IOCTL_RETR_SECRET_NR)
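
A hedged sketch of checking these bits; it assumes a struct uvio_uvdev_info that was already filled via UVIO_IOCTL_UVDEV_INFO, with supp_uv_cmds as the second bitmask described in the comment above.

	/* Hypothetical check: is the Retrieve Secret ioctl usable in this context? */
	static int demo_retr_secret_supported(const struct uvio_uvdev_info *info)
	{
		return (info->supp_uvio_cmds & UVIO_SUPP_RETR_SECRET) &&
		       (info->supp_uv_cmds & UVIO_SUPP_RETR_SECRET);
	}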
#endif /* __S390_ASM_UVDEVICE_H */