Diffstat (limited to 'arch/s390/include')
148 files changed, 4583 insertions, 3287 deletions
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 4b904110d27c..297bf7157968 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -7,3 +7,4 @@ generated-y += unistd_nr.h
 generic-y += asm-offsets.h
 generic-y += kvm_types.h
 generic-y += mcs_spinlock.h
+generic-y += mmzone.h
diff --git a/arch/s390/include/asm/abs_lowcore.h b/arch/s390/include/asm/abs_lowcore.h
index 6f264b79e377..317c07c09ae4 100644
--- a/arch/s390/include/asm/abs_lowcore.h
+++ b/arch/s390/include/asm/abs_lowcore.h
@@ -2,6 +2,7 @@
 #ifndef _ASM_S390_ABS_LOWCORE_H
 #define _ASM_S390_ABS_LOWCORE_H
 
+#include <linux/smp.h>
 #include <asm/lowcore.h>
 
 #define ABS_LOWCORE_MAP_SIZE (NR_CPUS * sizeof(struct lowcore))
diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h
deleted file mode 100644
index 608f6287ca9c..000000000000
--- a/arch/s390/include/asm/alternative-asm.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_S390_ALTERNATIVE_ASM_H
-#define _ASM_S390_ALTERNATIVE_ASM_H
-
-#ifdef __ASSEMBLY__
-
-/*
- * Issue one struct alt_instr descriptor entry (need to put it into
- * the section .altinstructions, see below). This entry contains
- * enough information for the alternatives patching code to patch an
- * instruction. See apply_alternatives().
- */
-.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
-    .long \orig_start - .
-    .long \alt_start - .
-    .word \feature
-    .byte \orig_end - \orig_start
-    .org . - ( \orig_end - \orig_start ) & 1
-    .org . - ( \orig_end - \orig_start ) + ( \alt_end - \alt_start )
-    .org . - ( \alt_end - \alt_start ) + ( \orig_end - \orig_start )
-.endm
-
-/*
- * Define an alternative between two instructions. If @feature is
- * present, early code in apply_alternatives() replaces @oldinstr with
- * @newinstr.
- */
-.macro ALTERNATIVE oldinstr, newinstr, feature
-    .pushsection .altinstr_replacement,"ax"
-770:    \newinstr
-771:    .popsection
-772:    \oldinstr
-773:    .pushsection .altinstructions,"a"
-    alt_entry 772b, 773b, 770b, 771b, \feature
-    .popsection
-.endm
-
-/*
- * Define an alternative between two instructions. If @feature is
- * present, early code in apply_alternatives() replaces @oldinstr with
- * @newinstr.
- */
-.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
-    .pushsection .altinstr_replacement,"ax"
-770:    \newinstr1
-771:    \newinstr2
-772:    .popsection
-773:    \oldinstr
-774:    .pushsection .altinstructions,"a"
-    alt_entry 773b, 774b, 770b, 771b,\feature1
-    alt_entry 773b, 774b, 771b, 772b,\feature2
-    .popsection
-.endm
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_S390_ALTERNATIVE_ASM_H */
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
index dd93b92c3ab6..1c56480def9e 100644
--- a/arch/s390/include/asm/alternative.h
+++ b/arch/s390/include/asm/alternative.h
@@ -2,7 +2,56 @@
 #ifndef _ASM_S390_ALTERNATIVE_H
 #define _ASM_S390_ALTERNATIVE_H
 
-#ifndef __ASSEMBLY__
+/*
+ * Each alternative comes with a 32 bit feature field:
+ *	union {
+ *		u32 feature;
+ *		struct {
+ *			u32 ctx	 : 4;
+ *			u32 type : 8;
+ *			u32 data : 20;
+ *		};
+ *	}
+ *
+ * @ctx is a bitfield, where only one bit must be set. Each bit defines
+ * in which context an alternative is supposed to be applied to the
+ * kernel image:
+ *
+ * - from the decompressor before the kernel itself is executed
+ * - from early kernel code from within the kernel
+ *
+ * @type is a number which defines the type and with that the type
+ * specific alternative patching.
+ *
+ * @data is additional type specific information which defines if an
+ * alternative should be applied.
+ */
+
+#define ALT_CTX_EARLY		1
+#define ALT_CTX_LATE		2
+#define ALT_CTX_ALL		(ALT_CTX_EARLY | ALT_CTX_LATE)
+
+#define ALT_TYPE_FACILITY	0
+#define ALT_TYPE_FEATURE	1
+#define ALT_TYPE_SPEC		2
+
+#define ALT_DATA_SHIFT		0
+#define ALT_TYPE_SHIFT		20
+#define ALT_CTX_SHIFT		28
+
+#define ALT_FACILITY(facility)	(ALT_CTX_EARLY << ALT_CTX_SHIFT		| \
+				 ALT_TYPE_FACILITY << ALT_TYPE_SHIFT	| \
+				 (facility) << ALT_DATA_SHIFT)
+
+#define ALT_FEATURE(feature)	(ALT_CTX_EARLY << ALT_CTX_SHIFT		| \
+				 ALT_TYPE_FEATURE << ALT_TYPE_SHIFT	| \
+				 (feature) << ALT_DATA_SHIFT)
+
+#define ALT_SPEC(facility)	(ALT_CTX_LATE << ALT_CTX_SHIFT		| \
+				 ALT_TYPE_SPEC << ALT_TYPE_SHIFT	| \
+				 (facility) << ALT_DATA_SHIFT)
+
+#ifndef __ASSEMBLER__
 
 #include <linux/types.h>
 #include <linux/stddef.h>
@@ -11,12 +60,30 @@
 struct alt_instr {
 	s32 instr_offset;	/* original instruction */
 	s32 repl_offset;	/* offset to replacement instruction */
-	u16 facility;		/* facility bit set for replacement */
+	union {
+		u32 feature;	/* feature required for replacement */
+		struct {
+			u32 ctx	 : 4;  /* context */
+			u32 type : 8;  /* type of alternative */
+			u32 data : 20; /* patching information */
+		};
+	};
 	u8  instrlen;		/* length of original instruction */
 } __packed;
 
-void apply_alternative_instructions(void);
-void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+
+void __apply_alternatives(struct alt_instr *start, struct alt_instr *end, unsigned int ctx);
+
+static inline void apply_alternative_instructions(void)
+{
+	__apply_alternatives(__alt_instructions, __alt_instructions_end, ALT_CTX_LATE);
+}
+
+static inline void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
+{
+	__apply_alternatives(start, end, ALT_CTX_ALL);
+}
 
 /*
  * +---------------------------------+
@@ -48,10 +115,10 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 #define OLDINSTR(oldinstr) \
 	"661:\n\t" oldinstr "\n662:\n"
 
-#define ALTINSTR_ENTRY(facility, num) \
+#define ALTINSTR_ENTRY(feature, num) \
 	"\t.long 661b - .\n"			/* old instruction */ \
 	"\t.long " b_altinstr(num)"b - .\n"	/* alt instruction */ \
-	"\t.word " __stringify(facility) "\n"	/* facility bit */ \
+	"\t.long " __stringify(feature) "\n"	/* feature */ \
 	"\t.byte " oldinstr_len "\n"		/* instruction len */ \
 	"\t.org . - (" oldinstr_len ") & 1\n" \
 	"\t.org . - (" oldinstr_len ") + (" altinstr_len(num) ")\n" \
@@ -61,24 +128,24 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 	b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n"
 
 /* alternative assembly primitive: */
-#define ALTERNATIVE(oldinstr, altinstr, facility) \
+#define ALTERNATIVE(oldinstr, altinstr, feature) \
 	".pushsection .altinstr_replacement, \"ax\"\n" \
 	ALTINSTR_REPLACEMENT(altinstr, 1) \
 	".popsection\n" \
 	OLDINSTR(oldinstr) \
 	".pushsection .altinstructions,\"a\"\n" \
-	ALTINSTR_ENTRY(facility, 1) \
+	ALTINSTR_ENTRY(feature, 1) \
 	".popsection\n"
 
-#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
+#define ALTERNATIVE_2(oldinstr, altinstr1, feature1, altinstr2, feature2)\
 	".pushsection .altinstr_replacement, \"ax\"\n" \
 	ALTINSTR_REPLACEMENT(altinstr1, 1) \
 	ALTINSTR_REPLACEMENT(altinstr2, 2) \
 	".popsection\n" \
 	OLDINSTR(oldinstr) \
 	".pushsection .altinstructions,\"a\"\n" \
-	ALTINSTR_ENTRY(facility1, 1) \
-	ALTINSTR_ENTRY(facility2, 2) \
+	ALTINSTR_ENTRY(feature1, 1) \
+	ALTINSTR_ENTRY(feature2, 2) \
 	".popsection\n"
 
 /*
@@ -93,12 +160,12 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
  * For non barrier like inlines please define new variants
  * without volatile and memory clobber.
  */
-#define alternative(oldinstr, altinstr, facility) \
-	asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
+#define alternative(oldinstr, altinstr, feature) \
+	asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, feature) : : : "memory")
 
-#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
-	asm_inline volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
-					  altinstr2, facility2) ::: "memory")
+#define alternative_2(oldinstr, altinstr1, feature1, altinstr2, feature2) \
+	asm_inline volatile(ALTERNATIVE_2(oldinstr, altinstr1, feature1, \
+					  altinstr2, feature2) ::: "memory")
 
 /* Alternative inline assembly with input. */
 #define alternative_input(oldinstr, newinstr, feature, input...) \
@@ -106,8 +173,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 	: : input)
 
 /* Like alternative_input, but with a single output argument */
-#define alternative_io(oldinstr, altinstr, facility, output, input...) \
-	asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, facility) \
+#define alternative_io(oldinstr, altinstr, feature, output, input...) \
+	asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, feature) \
 		: output : input)
 
 /* Use this macro if more than one output parameter is needed. */
@@ -116,6 +183,56 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 /* Use this macro if clobbers are needed without inputs. */
 #define ASM_NO_INPUT_CLOBBER(clobber...) : clobber
 
-#endif /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
+
+/*
+ * Issue one struct alt_instr descriptor entry (need to put it into
+ * the section .altinstructions, see below). This entry contains
+ * enough information for the alternatives patching code to patch an
+ * instruction. See apply_alternatives().
+ */
+.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
+	.long \orig_start - .
+	.long \alt_start - .
+	.long \feature
+	.byte \orig_end - \orig_start
+	.org . - ( \orig_end - \orig_start ) & 1
+	.org . - ( \orig_end - \orig_start ) + ( \alt_end - \alt_start )
+	.org . - ( \alt_end - \alt_start ) + ( \orig_end - \orig_start )
+.endm
+
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr.
+ */
+.macro ALTERNATIVE oldinstr, newinstr, feature
+	.pushsection .altinstr_replacement,"ax"
+770:	\newinstr
+771:	.popsection
+772:	\oldinstr
+773:	.pushsection .altinstructions,"a"
+	alt_entry 772b, 773b, 770b, 771b, \feature
+	.popsection
+.endm
+
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr.
+ */
+.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
+	.pushsection .altinstr_replacement,"ax"
+770:	\newinstr1
+771:	\newinstr2
+772:	.popsection
+773:	\oldinstr
+774:	.pushsection .altinstructions,"a"
+	alt_entry 773b, 774b, 770b, 771b,\feature1
+	alt_entry 773b, 774b, 771b, 772b,\feature2
+	.popsection
+.endm
+
+#endif /* __ASSEMBLER__ */
 
 #endif /* _ASM_S390_ALTERNATIVE_H */
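
Note: a minimal standalone sketch (not kernel code) of how the 32 bit feature word documented above is packed and decoded; the constants mirror the ALT_* defines in this diff, and facility bit 193 is a made-up example value.

#include <stdint.h>
#include <stdio.h>

#define ALT_CTX_EARLY		1
#define ALT_TYPE_FACILITY	0
#define ALT_DATA_SHIFT		0
#define ALT_TYPE_SHIFT		20
#define ALT_CTX_SHIFT		28

static uint32_t alt_facility(uint32_t facility)	/* models ALT_FACILITY() */
{
	return ALT_CTX_EARLY << ALT_CTX_SHIFT |
	       ALT_TYPE_FACILITY << ALT_TYPE_SHIFT |
	       facility << ALT_DATA_SHIFT;
}

int main(void)
{
	uint32_t feature = alt_facility(193);	/* hypothetical facility bit */

	/* Decode the same way the struct alt_instr bitfield does. */
	printf("ctx=%u type=%u data=%u\n",
	       feature >> 28, (feature >> 20) & 0xff, feature & 0xfffff);
	return 0;	/* prints: ctx=1 type=0 data=193 */
}
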
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index 395b02d6a133..b24459f692fa 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -38,16 +38,30 @@ typedef unsigned int ap_qid_t;
  * The ap queue status word is returned by all three AP functions
  * (PQAP, NQAP and DQAP). There's a set of flags in the first
  * byte, followed by a 1 byte response code.
+ *
+ * For convenience the 'value' field is a 32 bit access of the
+ * whole status and the 'status_bits' and 'rc' fields comprise
+ * the leftmost 8 status bits and the response_code.
  */
 struct ap_queue_status {
-	unsigned int queue_empty	: 1;
-	unsigned int replies_waiting	: 1;
-	unsigned int queue_full		: 1;
-	unsigned int			: 3;
-	unsigned int async		: 1;
-	unsigned int irq_enabled	: 1;
-	unsigned int response_code	: 8;
-	unsigned int			: 16;
+	union {
+		unsigned int value : 32;
+		struct {
+			unsigned int status_bits : 8;
+			unsigned int rc		 : 8;
+			unsigned int		 : 16;
+		};
+		struct {
+			unsigned int queue_empty	: 1;
+			unsigned int replies_waiting	: 1;
+			unsigned int queue_full		: 1;
+			unsigned int			: 3;
+			unsigned int async		: 1;
+			unsigned int irq_enabled	: 1;
+			unsigned int response_code	: 8;
+			unsigned int			: 16;
+		};
+	};
 };
 
 /*
@@ -103,7 +117,7 @@ struct ap_tapq_hwinfo {
 	unsigned int accel : 1;	/* A */
 	unsigned int ep11  : 1;	/* X */
 	unsigned int apxa  : 1;	/* APXA */
-	unsigned int	   : 1;
+	unsigned int slcf  : 1;	/* Cmd filtering avail. */
 	unsigned int class : 8;
 	unsigned int bs	   : 2;	/* SE bind/assoc */
 	unsigned int	   : 14;
@@ -143,7 +157,7 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid,
 		"	lghi	2,0\n"			/* 0 into gr2 */
 		"	.insn	rre,0xb2af0000,0,0\n"	/* PQAP(TAPQ) */
 		"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
-		"	lgr	%[reg2],2\n"		/* gr2 into reg2 */
+		"	lgr	%[reg2],2"		/* gr2 into reg2 */
 		: [reg1] "=&d" (reg1.value), [reg2] "=&d" (reg2)
 		: [qid] "d" (qid)
 		: "cc", "0", "1", "2");
@@ -186,7 +200,7 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid, int fbit)
 	asm volatile(
 		"	lgr	0,%[reg0]\n"		/* qid arg into gr0 */
 		"	.insn	rre,0xb2af0000,0,0\n"	/* PQAP(RAPQ) */
-		"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
+		"	lgr	%[reg1],1"		/* gr1 (status) into reg1 */
 		: [reg1] "=&d" (reg1.value)
 		: [reg0] "d" (reg0)
 		: "cc", "0", "1");
@@ -211,7 +225,7 @@ static inline struct ap_queue_status ap_zapq(ap_qid_t qid, int fbit)
 	asm volatile(
 		"	lgr	0,%[reg0]\n"		/* qid arg into gr0 */
 		"	.insn	rre,0xb2af0000,0,0\n"	/* PQAP(ZAPQ) */
-		"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
+		"	lgr	%[reg1],1"		/* gr1 (status) into reg1 */
 		: [reg1] "=&d" (reg1.value)
 		: [reg0] "d" (reg0)
 		: "cc", "0", "1");
@@ -315,7 +329,7 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
 		"	lgr	1,%[reg1]\n"		/* irq ctrl into gr1 */
 		"	lgr	2,%[reg2]\n"		/* ni addr into gr2 */
 		"	.insn	rre,0xb2af0000,0,0\n"	/* PQAP(AQIC) */
-		"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
+		"	lgr	%[reg1],1"		/* gr1 (status) into reg1 */
 		: [reg1] "+&d" (reg1.value)
 		: [reg0] "d" (reg0), [reg2] "d" (reg2)
 		: "cc", "memory", "0", "1", "2");
@@ -363,7 +377,7 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
 		"	lgr	1,%[reg1]\n"		/* qact in info into gr1 */
 		"	.insn	rre,0xb2af0000,0,0\n"	/* PQAP(QACT) */
 		"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
-		"	lgr	%[reg2],2\n"		/* qact out info into reg2 */
+		"	lgr	%[reg2],2"		/* qact out info into reg2 */
 		: [reg1] "+&d" (reg1.value), [reg2] "=&d" (reg2)
 		: [reg0] "d" (reg0)
 		: "cc", "0", "1", "2");
@@ -388,7 +402,7 @@ static inline struct ap_queue_status ap_bapq(ap_qid_t qid)
 	asm volatile(
 		"	lgr	0,%[reg0]\n"		/* qid arg into gr0 */
 		"	.insn	rre,0xb2af0000,0,0\n"	/* PQAP(BAPQ) */
-		"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
+		"	lgr	%[reg1],1"		/* gr1 (status) into reg1 */
 		: [reg1] "=&d" (reg1.value)
 		: [reg0] "d" (reg0)
 		: "cc", "0", "1");
@@ -416,7 +430,7 @@ static inline struct ap_queue_status ap_aapq(ap_qid_t qid, unsigned int sec_idx)
 		"	lgr	0,%[reg0]\n"		/* qid arg into gr0 */
 		"	lgr	2,%[reg2]\n"		/* secret index into gr2 */
 		"	.insn	rre,0xb2af0000,0,0\n"	/* PQAP(AAPQ) */
-		"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
+		"	lgr	%[reg1],1"		/* gr1 (status) into reg1 */
 		: [reg1] "=&d" (reg1.value)
 		: [reg0] "d" (reg0), [reg2] "d" (reg2)
 		: "cc", "0", "1", "2");
@@ -453,7 +467,7 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
 		"	lgr	0,%[reg0]\n"	/* qid param in gr0 */
 		"0:	.insn	rre,0xb2ad0000,%[nqap_r1],%[nqap_r2]\n"
 		"	brc	2,0b\n"		/* handle partial completion */
-		"	lgr	%[reg1],1\n"	/* gr1 (status) into reg1 */
+		"	lgr	%[reg1],1"	/* gr1 (status) into reg1 */
 		: [reg0] "+&d" (reg0), [reg1] "=&d" (reg1.value),
 		  [nqap_r2] "+&d" (nqap_r2.pair)
 		: [nqap_r1] "d" (nqap_r1.pair)
@@ -518,7 +532,7 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
 		"	brc	6,0b\n"		/* handle partial complete */
 		"2:	lgr	%[reg0],0\n"	/* gr0 (qid + info) into reg0 */
 		"	lgr	%[reg1],1\n"	/* gr1 (status) into reg1 */
-		"	lgr	%[reg2],2\n"	/* gr2 (res length) into reg2 */
+		"	lgr	%[reg2],2"	/* gr2 (res length) into reg2 */
 		: [reg0] "+&d" (reg0), [reg1] "=&d" (reg1.value),
 		  [reg2] "=&d" (reg2), [rp1] "+&d" (rp1.pair),
 		  [rp2] "+&d" (rp2.pair)
diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h
index a92ebbc7aa7a..99b2902c10fd 100644
--- a/arch/s390/include/asm/appldata.h
+++ b/arch/s390/include/asm/appldata.h
@@ -9,6 +9,7 @@
 #define _ASM_S390_APPLDATA_H
 
 #include <linux/io.h>
+#include <asm/machine.h>
 #include <asm/diag.h>
 
 #define APPLDATA_START_INTERVAL_REC	0x80
@@ -48,7 +49,7 @@ static inline int appldata_asm(struct appldata_parameter_list *parm_list,
 {
 	int ry;
 
-	if (!MACHINE_IS_VM)
+	if (!machine_is_vm())
 		return -EOPNOTSUPP;
 	parm_list->diag = 0xdc;
 	parm_list->function = fn;
diff --git a/arch/s390/include/asm/arch-stackprotector.h b/arch/s390/include/asm/arch-stackprotector.h
new file mode 100644
index 000000000000..953627259e91
--- /dev/null
+++ b/arch/s390/include/asm/arch-stackprotector.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_S390_ARCH_STACKPROTECTOR_H
+#define _ASM_S390_ARCH_STACKPROTECTOR_H
+
+extern unsigned long __stack_chk_guard;
+extern int stack_protector_debug;
+
+void __stack_protector_apply_early(unsigned long kernel_start);
+int __stack_protector_apply(unsigned long *start, unsigned long *end, unsigned long kernel_start);
+
+static inline void stack_protector_apply_early(unsigned long kernel_start)
+{
+	if (IS_ENABLED(CONFIG_STACKPROTECTOR))
+		__stack_protector_apply_early(kernel_start);
+}
+
+static inline int stack_protector_apply(unsigned long *start, unsigned long *end)
+{
+	if (IS_ENABLED(CONFIG_STACKPROTECTOR))
+		return __stack_protector_apply(start, end, 0);
+	return 0;
+}
+
+#endif /* _ASM_S390_ARCH_STACKPROTECTOR_H */
diff --git a/arch/s390/include/asm/arch_hweight.h b/arch/s390/include/asm/arch_hweight.h
new file mode 100644
index 000000000000..aca08b0acbc1
--- /dev/null
+++ b/arch/s390/include/asm/arch_hweight.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_S390_ARCH_HWEIGHT_H
+#define _ASM_S390_ARCH_HWEIGHT_H
+
+#include <linux/types.h>
+#include <asm/march.h>
+
+static __always_inline unsigned long popcnt_z196(unsigned long w)
+{
+	unsigned long cnt;
+
+	asm volatile(".insn rrf,0xb9e10000,%[cnt],%[w],0,0"
+		     : [cnt] "=d" (cnt)
+		     : [w] "d" (w)
+		     : "cc");
+	return cnt;
+}
+
+static __always_inline unsigned long popcnt_z15(unsigned long w)
+{
+	unsigned long cnt;
+
+	asm volatile(".insn rrf,0xb9e10000,%[cnt],%[w],8,0"
+		     : [cnt] "=d" (cnt)
+		     : [w] "d" (w)
+		     : "cc");
+	return cnt;
+}
+
+static __always_inline unsigned long __arch_hweight64(__u64 w)
+{
+	if (__is_defined(MARCH_HAS_Z15_FEATURES))
+		return popcnt_z15(w);
+	if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
+		w = popcnt_z196(w);
+		w += w >> 32;
+		w += w >> 16;
+		w += w >> 8;
+		return w & 0xff;
+	}
+	return __sw_hweight64(w);
+}
+
+static __always_inline unsigned int __arch_hweight32(unsigned int w)
+{
+	if (__is_defined(MARCH_HAS_Z15_FEATURES))
+		return popcnt_z15(w);
+	if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
+		w = popcnt_z196(w);
+		w += w >> 16;
+		w += w >> 8;
+		return w & 0xff;
+	}
+	return __sw_hweight32(w);
+}
+
+static __always_inline unsigned int __arch_hweight16(unsigned int w)
+{
+	if (__is_defined(MARCH_HAS_Z15_FEATURES))
+		return popcnt_z15((unsigned short)w);
+	if (__is_defined(MARCH_HAS_Z196_FEATURES)) {
+		w = popcnt_z196(w);
+		w += w >> 8;
+		return w & 0xff;
+	}
+	return __sw_hweight16(w);
+}
+
+static __always_inline unsigned int __arch_hweight8(unsigned int w)
+{
+	if (__is_defined(MARCH_HAS_Z196_FEATURES))
+		return popcnt_z196((unsigned char)w);
+	return __sw_hweight8(w);
+}
+
+#endif /* _ASM_S390_ARCH_HWEIGHT_H */
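
Note: a portable sketch of why __arch_hweight64() folds with shifts. Without the z15 variant, the popcnt instruction produces a bit count per byte rather than a single total, so the shift-and-add sequence sums the eight byte counts into the low byte. The model below uses __builtin_popcount purely for illustration.

#include <stdint.h>

static uint64_t popcnt_per_byte(uint64_t w)	/* models popcnt_z196() */
{
	uint64_t r = 0;

	for (int i = 0; i < 8; i++)
		r |= (uint64_t)__builtin_popcount((w >> (8 * i)) & 0xff) << (8 * i);
	return r;
}

static unsigned int hweight64_folded(uint64_t w)
{
	w = popcnt_per_byte(w);
	w += w >> 32;	/* add byte counts pairwise ... */
	w += w >> 16;
	w += w >> 8;	/* ... until the low byte holds the total */
	return w & 0xff;
}
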
diff --git a/arch/s390/include/asm/asce.h b/arch/s390/include/asm/asce.h
new file mode 100644
index 000000000000..f6dfaaba735a
--- /dev/null
+++ b/arch/s390/include/asm/asce.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_S390_ASCE_H
+#define _ASM_S390_ASCE_H
+
+#include <linux/thread_info.h>
+#include <linux/irqflags.h>
+#include <asm/lowcore.h>
+#include <asm/ctlreg.h>
+
+static inline bool enable_sacf_uaccess(void)
+{
+	unsigned long flags;
+
+	if (test_thread_flag(TIF_ASCE_PRIMARY))
+		return true;
+	local_irq_save(flags);
+	local_ctl_load(1, &get_lowcore()->kernel_asce);
+	set_thread_flag(TIF_ASCE_PRIMARY);
+	local_irq_restore(flags);
+	return false;
+}
+
+static inline void disable_sacf_uaccess(bool previous)
+{
+	unsigned long flags;
+
+	if (previous)
+		return;
+	local_irq_save(flags);
+	local_ctl_load(1, &get_lowcore()->user_asce);
+	clear_thread_flag(TIF_ASCE_PRIMARY);
+	local_irq_restore(flags);
+}
+
+#endif /* _ASM_S390_ASCE_H */
diff --git a/arch/s390/include/asm/asm-const.h b/arch/s390/include/asm/asm-const.h
index 11f615eb0066..1cfffad9eea0 100644
--- a/arch/s390/include/asm/asm-const.h
+++ b/arch/s390/include/asm/asm-const.h
@@ -2,7 +2,7 @@
 #ifndef _ASM_S390_ASM_CONST_H
 #define _ASM_S390_ASM_CONST_H
 
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
 #  define stringify_in_c(...)	__VA_ARGS__
 #else
 /* This version of stringify will deal with commas... */
diff --git a/arch/s390/include/asm/asm-extable.h b/arch/s390/include/asm/asm-extable.h
index 4a6b0a8b6412..d23ea0c94e4e 100644
--- a/arch/s390/include/asm/asm-extable.h
+++ b/arch/s390/include/asm/asm-extable.h
@@ -9,11 +9,13 @@
 #define EX_TYPE_NONE		0
 #define EX_TYPE_FIXUP		1
 #define EX_TYPE_BPF		2
-#define EX_TYPE_UA_STORE	3
-#define EX_TYPE_UA_LOAD_MEM	4
+#define EX_TYPE_UA_FAULT	3
 #define EX_TYPE_UA_LOAD_REG	5
 #define EX_TYPE_UA_LOAD_REGPAIR	6
 #define EX_TYPE_ZEROPAD		7
+#define EX_TYPE_FPC		8
+#define EX_TYPE_UA_MVCOS_TO	9
+#define EX_TYPE_UA_MVCOS_FROM	10
 
 #define EX_DATA_REG_ERR_SHIFT	0
 #define EX_DATA_REG_ERR		GENMASK(3, 0)
@@ -69,11 +71,8 @@
 #define EX_TABLE_AMODE31(_fault, _target) \
 	__EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP, __stringify(%%r0), __stringify(%%r0), 0)
 
-#define EX_TABLE_UA_STORE(_fault, _target, _regerr) \
-	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0)
-
-#define EX_TABLE_UA_LOAD_MEM(_fault, _target, _regerr, _regmem, _len) \
-	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len)
+#define EX_TABLE_UA_FAULT(_fault, _target, _regerr) \
+	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_FAULT, _regerr, _regerr, 0)
 
 #define EX_TABLE_UA_LOAD_REG(_fault, _target, _regerr, _regzero) \
 	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0)
@@ -84,4 +83,13 @@
 #define EX_TABLE_ZEROPAD(_fault, _target, _regdata, _regaddr) \
 	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_ZEROPAD, _regdata, _regaddr, 0)
 
+#define EX_TABLE_FPC(_fault, _target) \
+	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FPC, __stringify(%%r0), __stringify(%%r0), 0)
+
+#define EX_TABLE_UA_MVCOS_TO(_fault, _target) \
+	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_MVCOS_TO, __stringify(%%r0), __stringify(%%r0), 0)
+
+#define EX_TABLE_UA_MVCOS_FROM(_fault, _target) \
+	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_MVCOS_FROM, __stringify(%%r0), __stringify(%%r0), 0)
+
 #endif /* __ASM_EXTABLE_H */
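
Note: a usage sketch of the sacf pair above. enable_sacf_uaccess() returns whether TIF_ASCE_PRIMARY was already set, so nested sections restore the previous state correctly; do_primary_space_access() is a hypothetical placeholder for the actual protected access.

static int with_sacf_section(void *dst, const void *src, unsigned long n)
{
	bool sacf = enable_sacf_uaccess();	/* true if already enabled */
	int rc;

	rc = do_primary_space_access(dst, src, n);	/* hypothetical */
	disable_sacf_uaccess(sacf);	/* no-op when nested */
	return rc;
}
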
"cc", __VA_ARGS__ + +#endif + +#endif /* _ASM_S390_ASM_H */ diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 0c4cad7d5a5b..b36dd6a1d652 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -17,13 +17,13 @@ static __always_inline int arch_atomic_read(const atomic_t *v) { - return __atomic_read(v); + return __atomic_read(&v->counter); } #define arch_atomic_read arch_atomic_read static __always_inline void arch_atomic_set(atomic_t *v, int i) { - __atomic_set(v, i); + __atomic_set(&v->counter, i); } #define arch_atomic_set arch_atomic_set @@ -45,6 +45,36 @@ static __always_inline void arch_atomic_add(int i, atomic_t *v) } #define arch_atomic_add arch_atomic_add +static __always_inline void arch_atomic_inc(atomic_t *v) +{ + __atomic_add_const(1, &v->counter); +} +#define arch_atomic_inc arch_atomic_inc + +static __always_inline void arch_atomic_dec(atomic_t *v) +{ + __atomic_add_const(-1, &v->counter); +} +#define arch_atomic_dec arch_atomic_dec + +static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v) +{ + return __atomic_add_and_test_barrier(-i, &v->counter); +} +#define arch_atomic_sub_and_test arch_atomic_sub_and_test + +static __always_inline bool arch_atomic_dec_and_test(atomic_t *v) +{ + return __atomic_add_const_and_test_barrier(-1, &v->counter); +} +#define arch_atomic_dec_and_test arch_atomic_dec_and_test + +static __always_inline bool arch_atomic_inc_and_test(atomic_t *v) +{ + return __atomic_add_const_and_test_barrier(1, &v->counter); +} +#define arch_atomic_inc_and_test arch_atomic_inc_and_test + #define arch_atomic_sub(_i, _v) arch_atomic_add(-(int)(_i), _v) #define arch_atomic_sub_return(_i, _v) arch_atomic_add_return(-(int)(_i), _v) #define arch_atomic_fetch_sub(_i, _v) arch_atomic_fetch_add(-(int)(_i), _v) @@ -72,25 +102,35 @@ ATOMIC_OPS(xor) #define arch_atomic_fetch_or arch_atomic_fetch_or #define arch_atomic_fetch_xor arch_atomic_fetch_xor -#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new)) +static __always_inline int arch_atomic_xchg(atomic_t *v, int new) +{ + return arch_xchg(&v->counter, new); +} +#define arch_atomic_xchg arch_atomic_xchg static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new) { - return __atomic_cmpxchg(&v->counter, old, new); + return arch_cmpxchg(&v->counter, old, new); } #define arch_atomic_cmpxchg arch_atomic_cmpxchg +static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + return arch_try_cmpxchg(&v->counter, old, new); +} +#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg + #define ATOMIC64_INIT(i) { (i) } static __always_inline s64 arch_atomic64_read(const atomic64_t *v) { - return __atomic64_read(v); + return __atomic64_read((long *)&v->counter); } #define arch_atomic64_read arch_atomic64_read static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i) { - __atomic64_set(v, i); + __atomic64_set((long *)&v->counter, i); } #define arch_atomic64_set arch_atomic64_set @@ -112,14 +152,54 @@ static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v) } #define arch_atomic64_add arch_atomic64_add -#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new)) +static __always_inline void arch_atomic64_inc(atomic64_t *v) +{ + __atomic64_add_const(1, (long *)&v->counter); +} +#define arch_atomic64_inc arch_atomic64_inc + +static __always_inline void arch_atomic64_dec(atomic64_t *v) +{ + __atomic64_add_const(-1, (long *)&v->counter); +} +#define arch_atomic64_dec 
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 0c4cad7d5a5b..b36dd6a1d652 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -17,13 +17,13 @@
 
 static __always_inline int arch_atomic_read(const atomic_t *v)
 {
-	return __atomic_read(v);
+	return __atomic_read(&v->counter);
 }
 #define arch_atomic_read arch_atomic_read
 
 static __always_inline void arch_atomic_set(atomic_t *v, int i)
 {
-	__atomic_set(v, i);
+	__atomic_set(&v->counter, i);
 }
 #define arch_atomic_set arch_atomic_set
 
@@ -45,6 +45,36 @@ static __always_inline void arch_atomic_add(int i, atomic_t *v)
 }
 #define arch_atomic_add arch_atomic_add
 
+static __always_inline void arch_atomic_inc(atomic_t *v)
+{
+	__atomic_add_const(1, &v->counter);
+}
+#define arch_atomic_inc arch_atomic_inc
+
+static __always_inline void arch_atomic_dec(atomic_t *v)
+{
+	__atomic_add_const(-1, &v->counter);
+}
+#define arch_atomic_dec arch_atomic_dec
+
+static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
+{
+	return __atomic_add_and_test_barrier(-i, &v->counter);
+}
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
+
+static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
+{
+	return __atomic_add_const_and_test_barrier(-1, &v->counter);
+}
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
+
+static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
+{
+	return __atomic_add_const_and_test_barrier(1, &v->counter);
+}
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
+
 #define arch_atomic_sub(_i, _v)		arch_atomic_add(-(int)(_i), _v)
 #define arch_atomic_sub_return(_i, _v)	arch_atomic_add_return(-(int)(_i), _v)
 #define arch_atomic_fetch_sub(_i, _v)	arch_atomic_fetch_add(-(int)(_i), _v)
@@ -72,25 +102,35 @@ ATOMIC_OPS(xor)
 #define arch_atomic_fetch_or	arch_atomic_fetch_or
 #define arch_atomic_fetch_xor	arch_atomic_fetch_xor
 
-#define arch_atomic_xchg(v, new)	(arch_xchg(&((v)->counter), new))
+static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
+{
+	return arch_xchg(&v->counter, new);
+}
+#define arch_atomic_xchg arch_atomic_xchg
 
 static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
-	return __atomic_cmpxchg(&v->counter, old, new);
+	return arch_cmpxchg(&v->counter, old, new);
 }
 #define arch_atomic_cmpxchg arch_atomic_cmpxchg
 
+static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+	return arch_try_cmpxchg(&v->counter, old, new);
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+
 #define ATOMIC64_INIT(i)  { (i) }
 
 static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
 {
-	return __atomic64_read(v);
+	return __atomic64_read((long *)&v->counter);
 }
 #define arch_atomic64_read arch_atomic64_read
 
 static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
-	__atomic64_set(v, i);
+	__atomic64_set((long *)&v->counter, i);
 }
 #define arch_atomic64_set arch_atomic64_set
 
@@ -112,14 +152,54 @@ static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
 }
 #define arch_atomic64_add arch_atomic64_add
 
-#define arch_atomic64_xchg(v, new)	(arch_xchg(&((v)->counter), new))
+static __always_inline void arch_atomic64_inc(atomic64_t *v)
+{
+	__atomic64_add_const(1, (long *)&v->counter);
+}
+#define arch_atomic64_inc arch_atomic64_inc
+
+static __always_inline void arch_atomic64_dec(atomic64_t *v)
+{
+	__atomic64_add_const(-1, (long *)&v->counter);
+}
+#define arch_atomic64_dec arch_atomic64_dec
+
+static __always_inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+	return __atomic64_add_and_test_barrier(-i, (long *)&v->counter);
+}
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
+
+static __always_inline bool arch_atomic64_dec_and_test(atomic64_t *v)
+{
+	return __atomic64_add_const_and_test_barrier(-1, (long *)&v->counter);
+}
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
+
+static __always_inline bool arch_atomic64_inc_and_test(atomic64_t *v)
+{
+	return __atomic64_add_const_and_test_barrier(1, (long *)&v->counter);
+}
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
+
+static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
+{
+	return arch_xchg(&v->counter, new);
+}
+#define arch_atomic64_xchg arch_atomic64_xchg
 
 static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
-	return __atomic64_cmpxchg((long *)&v->counter, old, new);
+	return arch_cmpxchg(&v->counter, old, new);
 }
 #define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
 
+static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+	return arch_try_cmpxchg(&v->counter, old, new);
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+
 #define ATOMIC64_OPS(op)						\
 static __always_inline void arch_atomic64_##op(s64 i, atomic64_t *v)	\
 {									\
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index 7fa5f96a553a..845b77864412 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -8,41 +8,57 @@
 #ifndef __ARCH_S390_ATOMIC_OPS__
 #define __ARCH_S390_ATOMIC_OPS__
 
-static __always_inline int __atomic_read(const atomic_t *v)
+#include <linux/limits.h>
+#include <asm/march.h>
+#include <asm/asm.h>
+
+static __always_inline int __atomic_read(const int *ptr)
 {
-	int c;
+	int val;
 
 	asm volatile(
-		"	l	%0,%1\n"
-		: "=d" (c) : "R" (v->counter));
-	return c;
+		"	l	%[val],%[ptr]"
+		: [val] "=d" (val) : [ptr] "R" (*ptr));
+	return val;
 }
 
-static __always_inline void __atomic_set(atomic_t *v, int i)
+static __always_inline void __atomic_set(int *ptr, int val)
 {
-	asm volatile(
-		"	st	%1,%0\n"
-		: "=R" (v->counter) : "d" (i));
+	if (__builtin_constant_p(val) && val >= S16_MIN && val <= S16_MAX) {
+		asm volatile(
+			"	mvhi	%[ptr],%[val]"
+			: [ptr] "=Q" (*ptr) : [val] "K" (val));
+	} else {
+		asm volatile(
+			"	st	%[val],%[ptr]"
+			: [ptr] "=R" (*ptr) : [val] "d" (val));
+	}
 }
 
-static __always_inline s64 __atomic64_read(const atomic64_t *v)
+static __always_inline long __atomic64_read(const long *ptr)
 {
-	s64 c;
+	long val;
 
	asm volatile(
-		"	lg	%0,%1\n"
-		: "=d" (c) : "RT" (v->counter));
-	return c;
+		"	lg	%[val],%[ptr]"
+		: [val] "=d" (val) : [ptr] "RT" (*ptr));
+	return val;
 }
 
-static __always_inline void __atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void __atomic64_set(long *ptr, long val)
 {
-	asm volatile(
-		"	stg	%1,%0\n"
-		: "=RT" (v->counter) : "d" (i));
+	if (__builtin_constant_p(val) && val >= S16_MIN && val <= S16_MAX) {
+		asm volatile(
+			"	mvghi	%[ptr],%[val]"
+			: [ptr] "=Q" (*ptr) : [val] "K" (val));
+	} else {
+		asm volatile(
+			"	stg	%[val],%[ptr]"
+			: [ptr] "=RT" (*ptr) : [val] "d" (val));
+	}
 }
 
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifdef MARCH_HAS_Z196_FEATURES
 
 #define __ATOMIC_OP(op_name, op_type, op_string, op_barrier)		\
 static __always_inline op_type op_name(op_type val, op_type *ptr)	\
@@ -50,7 +66,7 @@ static __always_inline op_type op_name(op_type val, op_type *ptr)	\
 	op_type old;							\
 									\
 	asm volatile(							\
-		op_string "	%[old],%[val],%[ptr]\n"			\
+		op_string "	%[old],%[val],%[ptr]"			\
 		op_barrier						\
 		: [old] "=d" (old), [ptr] "+QS" (*ptr)			\
 		: [val] "d" (val) : "cc", "memory");			\
@@ -58,8 +74,8 @@ static __always_inline op_type op_name(op_type val, op_type *ptr)	\
 }									\
 
 #define __ATOMIC_OPS(op_name, op_type, op_string)			\
-	__ATOMIC_OP(op_name, op_type, op_string, "\n")			\
-	__ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+	__ATOMIC_OP(op_name, op_type, op_string, "")			\
+	__ATOMIC_OP(op_name##_barrier, op_type, op_string, "\nbcr 14,0")
 
 __ATOMIC_OPS(__atomic_add, int, "laa")
 __ATOMIC_OPS(__atomic_and, int, "lan")
@@ -78,14 +94,14 @@ __ATOMIC_OPS(__atomic64_xor, long, "laxg")
 static __always_inline void op_name(op_type val, op_type *ptr)		\
 {									\
 	asm volatile(							\
-		op_string "	%[ptr],%[val]\n"			\
+		op_string "	%[ptr],%[val]"				\
 		op_barrier						\
 		: [ptr] "+QS" (*ptr) : [val] "i" (val) : "cc", "memory");\
 }
 
 #define __ATOMIC_CONST_OPS(op_name, op_type, op_string)			\
-	__ATOMIC_CONST_OP(op_name, op_type, op_string, "\n")		\
-	__ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+	__ATOMIC_CONST_OP(op_name, op_type, op_string, "")		\
+	__ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "\nbcr 14,0")
 
 __ATOMIC_CONST_OPS(__atomic_add_const, int, "asi")
 __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
@@ -93,7 +109,7 @@ __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
 #undef __ATOMIC_CONST_OPS
 #undef __ATOMIC_CONST_OP
 
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#else /* MARCH_HAS_Z196_FEATURES */
 
 #define __ATOMIC_OP(op_name, op_string)					\
 static __always_inline int op_name(int val, int *ptr)			\
@@ -147,55 +163,83 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
 
 #undef __ATOMIC64_OPS
 
-#define __atomic_add_const(val, ptr)		__atomic_add(val, ptr)
-#define __atomic_add_const_barrier(val, ptr)	__atomic_add(val, ptr)
-#define __atomic64_add_const(val, ptr)		__atomic64_add(val, ptr)
-#define __atomic64_add_const_barrier(val, ptr)	__atomic64_add(val, ptr)
+#define __atomic_add_const(val, ptr)		((void)__atomic_add(val, ptr))
+#define __atomic_add_const_barrier(val, ptr)	((void)__atomic_add(val, ptr))
+#define __atomic64_add_const(val, ptr)		((void)__atomic64_add(val, ptr))
+#define __atomic64_add_const_barrier(val, ptr)	((void)__atomic64_add(val, ptr))
 
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+#endif /* MARCH_HAS_Z196_FEATURES */
 
-static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new)
-{
-	asm volatile(
-		"	cs	%[old],%[new],%[ptr]"
-		: [old] "+d" (old), [ptr] "+Q" (*ptr)
-		: [new] "d" (new)
-		: "cc", "memory");
-	return old;
-}
+#if defined(MARCH_HAS_Z196_FEATURES) && defined(__HAVE_ASM_FLAG_OUTPUTS__)
 
-static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
-{
-	int old_expected = old;
+#define __ATOMIC_TEST_OP(op_name, op_type, op_string, op_barrier)	\
+static __always_inline bool op_name(op_type val, op_type *ptr)		\
+{									\
+	op_type tmp;							\
+	int cc;								\
+									\
+	asm volatile(							\
+		op_string "	%[tmp],%[val],%[ptr]"			\
+		op_barrier						\
+		: "=@cc" (cc), [tmp] "=d" (tmp), [ptr] "+QS" (*ptr)	\
+		: [val] "d" (val)					\
+		: "memory");						\
+	return (cc == 0) || (cc == 2);					\
+}									\
 
-	asm volatile(
-		"	cs	%[old],%[new],%[ptr]"
-		: [old] "+d" (old), [ptr] "+Q" (*ptr)
-		: [new] "d" (new)
-		: "cc", "memory");
-	return old == old_expected;
-}
+#define __ATOMIC_TEST_OPS(op_name, op_type, op_string)			\
+	__ATOMIC_TEST_OP(op_name, op_type, op_string, "")		\
+	__ATOMIC_TEST_OP(op_name##_barrier, op_type, op_string, "\nbcr 14,0")
 
-static __always_inline long __atomic64_cmpxchg(long *ptr, long old, long new)
-{
-	asm volatile(
-		"	csg	%[old],%[new],%[ptr]"
-		: [old] "+d" (old), [ptr] "+QS" (*ptr)
-		: [new] "d" (new)
-		: "cc", "memory");
-	return old;
+__ATOMIC_TEST_OPS(__atomic_add_and_test, int, "laal")
+__ATOMIC_TEST_OPS(__atomic64_add_and_test, long, "laalg")
+
+#undef __ATOMIC_TEST_OPS
+#undef __ATOMIC_TEST_OP
+
+#define __ATOMIC_CONST_TEST_OP(op_name, op_type, op_string, op_barrier)	\
+static __always_inline bool op_name(op_type val, op_type *ptr)		\
+{									\
+	int cc;								\
+									\
+	asm volatile(							\
+		op_string "	%[ptr],%[val]"				\
+		op_barrier						\
+		: "=@cc" (cc), [ptr] "+QS" (*ptr)			\
+		: [val] "i" (val)					\
+		: "memory");						\
+	return (cc == 0) || (cc == 2);					\
 }
 
-static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
-{
-	long old_expected = old;
+#define __ATOMIC_CONST_TEST_OPS(op_name, op_type, op_string)		\
+	__ATOMIC_CONST_TEST_OP(op_name, op_type, op_string, "")	\
+	__ATOMIC_CONST_TEST_OP(op_name##_barrier, op_type, op_string, "\nbcr 14,0")
 
-	asm volatile(
-		"	csg	%[old],%[new],%[ptr]"
-		: [old] "+d" (old), [ptr] "+QS" (*ptr)
-		: [new] "d" (new)
-		: "cc", "memory");
-	return old == old_expected;
+__ATOMIC_CONST_TEST_OPS(__atomic_add_const_and_test, int, "alsi")
+__ATOMIC_CONST_TEST_OPS(__atomic64_add_const_and_test, long, "algsi")
+
+#undef __ATOMIC_CONST_TEST_OPS
+#undef __ATOMIC_CONST_TEST_OP
+
+#else /* defined(MARCH_HAS_Z196_FEATURES) && defined(__HAVE_ASM_FLAG_OUTPUTS__) */
+
+#define __ATOMIC_TEST_OP(op_name, op_func, op_type)			\
+static __always_inline bool op_name(op_type val, op_type *ptr)		\
+{									\
+	return op_func(val, ptr) == -val;				\
 }
 
+__ATOMIC_TEST_OP(__atomic_add_and_test,			__atomic_add,		int)
+__ATOMIC_TEST_OP(__atomic_add_and_test_barrier,		__atomic_add_barrier,	int)
+__ATOMIC_TEST_OP(__atomic_add_const_and_test,		__atomic_add,		int)
+__ATOMIC_TEST_OP(__atomic_add_const_and_test_barrier,	__atomic_add_barrier,	int)
+__ATOMIC_TEST_OP(__atomic64_add_and_test,		__atomic64_add,		long)
+__ATOMIC_TEST_OP(__atomic64_add_and_test_barrier,	__atomic64_add_barrier,	long)
+__ATOMIC_TEST_OP(__atomic64_add_const_and_test,		__atomic64_add,		long)
+__ATOMIC_TEST_OP(__atomic64_add_const_and_test_barrier,	__atomic64_add_barrier,	long)
+
+#undef __ATOMIC_TEST_OP
+
+#endif /* defined(MARCH_HAS_Z196_FEATURES) && defined(__HAVE_ASM_FLAG_OUTPUTS__) */
+
 #endif /* __ARCH_S390_ATOMIC_OPS__ */
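
Note: a usage sketch of the new *_and_test helpers. The (cc == 0) || (cc == 2) test above works because the add-logical condition codes 0 and 2 both mean "result is zero" (without and with carry). release_object() below is a hypothetical cleanup function.

static void put_ref(atomic_t *refcnt)
{
	if (arch_atomic_dec_and_test(refcnt))	/* new value reached zero? */
		release_object();		/* hypothetical */
}
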
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 82de2a7c4160..f3184073e754 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -8,17 +8,19 @@
 #ifndef __ASM_BARRIER_H
 #define __ASM_BARRIER_H
 
+#include <asm/march.h>
+
 /*
  * Force strict CPU ordering.
  * And yes, this is required on UP too when we're talking
  * to devices.
  */
 
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+#ifdef MARCH_HAS_Z196_FEATURES
 /* Fast-BCR without checkpoint synchronization */
-#define __ASM_BCR_SERIALIZE "bcr 14,0\n"
+#define __ASM_BCR_SERIALIZE "bcr 14,0"
 #else
-#define __ASM_BCR_SERIALIZE "bcr 15,0\n"
+#define __ASM_BCR_SERIALIZE "bcr 15,0"
 #endif
 
 static __always_inline void bcr_serialize(void)
@@ -67,12 +69,12 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 
 	if (__builtin_constant_p(size) && size > 0) {
 		asm("	clgr	%2,%1\n"
-		    "	slbgr	%0,%0\n"
+		    "	slbgr	%0,%0"
 		    :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
 		return mask;
 	}
 	asm("	clgr	%1,%2\n"
-	    "	slbgr	%0,%0\n"
+	    "	slbgr	%0,%0"
	    :"=d" (mask) : "d" (size), "d" (index) :"cc");
 	return ~mask;
 }
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index c467dffa8c12..5f10074665b0 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -36,184 +36,45 @@
 #include <linux/typecheck.h>
 #include <linux/compiler.h>
 #include <linux/types.h>
-#include <asm/atomic_ops.h>
-#include <asm/barrier.h>
-
-#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
-
-static inline unsigned long *
-__bitops_word(unsigned long nr, const volatile unsigned long *ptr)
-{
-	unsigned long addr;
-
-	addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
-	return (unsigned long *)addr;
-}
-
-static inline unsigned long __bitops_mask(unsigned long nr)
-{
-	return 1UL << (nr & (BITS_PER_LONG - 1));
-}
-
-static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
-{
-	unsigned long *addr = __bitops_word(nr, ptr);
-	unsigned long mask = __bitops_mask(nr);
-
-	__atomic64_or(mask, (long *)addr);
-}
-
-static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
-{
-	unsigned long *addr = __bitops_word(nr, ptr);
-	unsigned long mask = __bitops_mask(nr);
-
-	__atomic64_and(~mask, (long *)addr);
-}
-
-static __always_inline void arch_change_bit(unsigned long nr,
-					    volatile unsigned long *ptr)
-{
-	unsigned long *addr = __bitops_word(nr, ptr);
-	unsigned long mask = __bitops_mask(nr);
-
-	__atomic64_xor(mask, (long *)addr);
-}
-
-static inline bool arch_test_and_set_bit(unsigned long nr,
-					 volatile unsigned long *ptr)
-{
-	unsigned long *addr = __bitops_word(nr, ptr);
-	unsigned long mask = __bitops_mask(nr);
-	unsigned long old;
-
-	old = __atomic64_or_barrier(mask, (long *)addr);
-	return old & mask;
-}
-
-static inline bool arch_test_and_clear_bit(unsigned long nr,
-					   volatile unsigned long *ptr)
-{
-	unsigned long *addr = __bitops_word(nr, ptr);
-	unsigned long mask = __bitops_mask(nr);
-	unsigned long old;
-
-	old = __atomic64_and_barrier(~mask, (long *)addr);
-	return old & mask;
-}
-
-static inline bool arch_test_and_change_bit(unsigned long nr,
-					    volatile unsigned long *ptr)
-{
-	unsigned long *addr = __bitops_word(nr, ptr);
-	unsigned long mask = __bitops_mask(nr);
-	unsigned long old;
-
-	old = __atomic64_xor_barrier(mask, (long *)addr);
-	return old & mask;
-}
-
-static __always_inline void
-arch___set_bit(unsigned long nr, volatile unsigned long *addr)
-{
-	unsigned long *p = __bitops_word(nr, addr);
-	unsigned long mask = __bitops_mask(nr);
-
-	*p |= mask;
-}
-
-static __always_inline void
-arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
-{
-	unsigned long *p = __bitops_word(nr, addr);
-	unsigned long mask = __bitops_mask(nr);
-
-	*p &= ~mask;
-}
-
-static __always_inline void
-arch___change_bit(unsigned long nr, volatile unsigned long *addr)
-{
-	unsigned long *p = __bitops_word(nr, addr);
-	unsigned long mask = __bitops_mask(nr);
-
-	*p ^= mask;
-}
-
-static __always_inline bool
-arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
-{
-	unsigned long *p = __bitops_word(nr, addr);
-	unsigned long mask = __bitops_mask(nr);
-	unsigned long old;
-
-	old = *p;
-	*p |= mask;
-	return old & mask;
-}
-
-static __always_inline bool
-arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
-{
-	unsigned long *p = __bitops_word(nr, addr);
-	unsigned long mask = __bitops_mask(nr);
-	unsigned long old;
-
-	old = *p;
-	*p &= ~mask;
-	return old & mask;
-}
-
-static __always_inline bool
-arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
-{
-	unsigned long *p = __bitops_word(nr, addr);
-	unsigned long mask = __bitops_mask(nr);
-	unsigned long old;
-
-	old = *p;
-	*p ^= mask;
-	return old & mask;
-}
-
-#define arch_test_bit generic_test_bit
-#define arch_test_bit_acquire generic_test_bit_acquire
-
-static inline bool arch_test_and_set_bit_lock(unsigned long nr,
-					      volatile unsigned long *ptr)
-{
-	if (arch_test_bit(nr, ptr))
-		return true;
-	return arch_test_and_set_bit(nr, ptr);
-}
-
-static inline void arch_clear_bit_unlock(unsigned long nr,
-					 volatile unsigned long *ptr)
-{
-	smp_mb__before_atomic();
-	arch_clear_bit(nr, ptr);
-}
-
-static inline void arch___clear_bit_unlock(unsigned long nr,
-					   volatile unsigned long *ptr)
-{
-	smp_mb();
-	arch___clear_bit(nr, ptr);
-}
-
-static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
-						    volatile unsigned long *ptr)
-{
-	unsigned long old;
-
-	old = __atomic64_xor_barrier(mask, (long *)ptr);
-	return old & BIT(7);
+#include <asm/asm.h>
+
+#define arch___set_bit			generic___set_bit
+#define arch___clear_bit		generic___clear_bit
+#define arch___change_bit		generic___change_bit
+#define arch___test_and_set_bit		generic___test_and_set_bit
+#define arch___test_and_clear_bit	generic___test_and_clear_bit
+#define arch___test_and_change_bit	generic___test_and_change_bit
+#define arch_test_bit_acquire		generic_test_bit_acquire
+
+static __always_inline bool arch_test_bit(unsigned long nr, const volatile unsigned long *ptr)
+{
+#ifdef __HAVE_ASM_FLAG_OUTPUTS__
+	const volatile unsigned char *addr;
+	unsigned long mask;
+	int cc;
+
+	/*
+	 * With CONFIG_PROFILE_ALL_BRANCHES enabled gcc fails to
+	 * handle __builtin_constant_p() in some cases.
+	 */
+	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && __builtin_constant_p(nr)) {
+		addr = (const volatile unsigned char *)ptr;
+		addr += (nr ^ (BITS_PER_LONG - BITS_PER_BYTE)) / BITS_PER_BYTE;
+		mask = 1UL << (nr & (BITS_PER_BYTE - 1));
+		asm volatile(
+			"	tm	%[addr],%[mask]"
+			: "=@cc" (cc)
+			: [addr] "Q" (*addr), [mask] "I" (mask)
+			);
+		return cc == 3;
+	}
+#endif
+	return generic_test_bit(nr, ptr);
 }
-#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte
 
-#include <asm-generic/bitops/instrumented-atomic.h>
-#include <asm-generic/bitops/instrumented-non-atomic.h>
-#include <asm-generic/bitops/instrumented-lock.h>
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+#include <asm-generic/bitops/lock.h>
 
 /*
  * Functions which use MSB0 bit numbering.
@@ -261,6 +122,8 @@ static inline bool test_bit_inv(unsigned long nr,
 	return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
+#ifndef CONFIG_CC_HAS_BUILTIN_FFS
+
 /**
  * __flogr - find leftmost one
  * @word - The word to search
@@ -269,11 +132,12 @@ static inline bool test_bit_inv(unsigned long nr,
  * where the most significant bit has bit number 0.
  * If no bit is set this function returns 64.
 */
-static inline unsigned char __flogr(unsigned long word)
+static __always_inline __attribute_const__ unsigned long __flogr(unsigned long word)
 {
-	if (__builtin_constant_p(word)) {
-		unsigned long bit = 0;
+	unsigned long bit;
 
+	if (__builtin_constant_p(word)) {
+		bit = 0;
 		if (!word)
 			return 64;
 		if (!(word & 0xffffffff00000000UL)) {
@@ -302,85 +166,49 @@ static inline unsigned char __flogr(unsigned long word)
 		}
 		return bit;
 	} else {
-		union register_pair rp;
+		union register_pair rp __uninitialized;
 
 		rp.even = word;
-		asm volatile(
-			"	flogr	%[rp],%[rp]\n"
-			: [rp] "+d" (rp.pair) : : "cc");
-		return rp.even;
+		asm("flogr	%[rp],%[rp]"
+		    : [rp] "+d" (rp.pair) : : "cc");
+		bit = rp.even;
+		/*
+		 * The result of the flogr instruction is a value in the range
+		 * of 0..64. Let the compiler know that the AND operation can
+		 * be optimized away.
+		 */
+		__assume(bit <= 64);
+		return bit & 127;
 	}
 }
 
 /**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline unsigned long __ffs(unsigned long word)
-{
-	return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
-}
-
-/**
  * ffs - find first bit set
 * @word: the word to search
 *
 * This is defined the same way as the libc and
 * compiler builtin ffs routines (man ffs).
 */
-static inline int ffs(int word)
+static __always_inline __flatten __attribute_const__ int ffs(int word)
 {
-	unsigned long mask = 2 * BITS_PER_LONG - 1;
 	unsigned int val = (unsigned int)word;
 
-	return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
+	return BITS_PER_LONG - __flogr(-val & val);
 }
 
-/**
- * __fls - find last (most-significant) set bit in a long word
- * @word: the word to search
- *
- * Undefined if no set bit exists, so code should check against 0 first.
- */
-static inline unsigned long __fls(unsigned long word)
-{
-	return __flogr(word) ^ (BITS_PER_LONG - 1);
-}
+#else /* CONFIG_CC_HAS_BUILTIN_FFS */
 
-/**
- * fls64 - find last set bit in a 64-bit word
- * @word: the word to search
- *
- * This is defined in a similar way as the libc and compiler builtin
- * ffsll, but returns the position of the most significant set bit.
- *
- * fls64(value) returns 0 if value is 0 or the position of the last
- * set bit if value is nonzero. The last (most significant) bit is
- * at position 64.
- */
-static inline int fls64(unsigned long word)
-{
-	unsigned long mask = 2 * BITS_PER_LONG - 1;
-
-	return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
-}
+#include <asm-generic/bitops/builtin-ffs.h>
 
-/**
- * fls - find last (most-significant) bit set
- * @word: the word to search
- *
 * This is defined the same way as ffs.
- * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
- */
-static inline int fls(unsigned int word)
-{
-	return fls64(word);
-}
+#endif /* CONFIG_CC_HAS_BUILTIN_FFS */
 
+#include <asm-generic/bitops/builtin-__ffs.h>
 #include <asm-generic/bitops/ffz.h>
-#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/builtin-__fls.h>
+#include <asm-generic/bitops/builtin-fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm/arch_hweight.h>
+#include <asm-generic/bitops/const_hweight.h>
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/le.h>
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
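
Note: a standalone check of the new ffs() arithmetic. flogr numbers bits from the MSB (bit 0 is the most significant), and -val & val isolates the lowest set bit, so BITS_PER_LONG - __flogr(-val & val) yields the 1-based LSB index that ffs() requires. The model below stands in for the flogr instruction with __builtin_clzll.

#include <assert.h>
#include <stdint.h>

static unsigned int flogr_model(uint64_t w)	/* MSB is bit 0 */
{
	return w ? __builtin_clzll(w) : 64;
}

static int ffs_model(int word)
{
	uint64_t val = (unsigned int)word;

	return 64 - flogr_model(-val & val);
}

int main(void)
{
	assert(ffs_model(0) == 0);	/* no bit set */
	assert(ffs_model(1) == 1);	/* bit 0 set -> 1 */
	assert(ffs_model(0x50) == 5);	/* lowest set bit is bit 4 */
	return 0;
}
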
diff --git a/arch/s390/include/asm/boot_data.h b/arch/s390/include/asm/boot_data.h
index f7eed27b3220..f55f8227058e 100644
--- a/arch/s390/include/asm/boot_data.h
+++ b/arch/s390/include/asm/boot_data.h
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_S390_BOOT_DATA_H
 
+#include <linux/string.h>
 #include <asm/setup.h>
 #include <asm/ipl.h>
 
@@ -15,4 +16,54 @@ extern unsigned long ipl_cert_list_size;
 extern unsigned long early_ipl_comp_list_addr;
 extern unsigned long early_ipl_comp_list_size;
 
+extern char boot_rb[PAGE_SIZE * 2];
+extern bool boot_earlyprintk;
+extern size_t boot_rb_off;
+extern char bootdebug_filter[128];
+extern bool bootdebug;
+
+#define boot_rb_foreach(cb)						\
+	do {								\
+		size_t off = boot_rb_off + strlen(boot_rb + boot_rb_off) + 1; \
+		size_t len;						\
+		for (; off < sizeof(boot_rb) && (len = strlen(boot_rb + off)); off += len + 1) \
+			cb(boot_rb + off);				\
+		for (off = 0; off < boot_rb_off && (len = strlen(boot_rb + off)); off += len + 1) \
+			cb(boot_rb + off);				\
+	} while (0)
+
+/*
+ * bootdebug_filter is a comma separated list of strings,
+ * where each string can be a prefix of the message.
+ */
+static inline bool bootdebug_filter_match(const char *buf)
+{
+	char *p = bootdebug_filter, *s;
+	char *end;
+
+	if (!*p)
+		return true;
+
+	end = p + strlen(p);
+	while (p < end) {
+		p = skip_spaces(p);
+		s = memscan(p, ',', end - p);
+		if (!strncmp(p, buf, s - p))
+			return true;
+		p = s + 1;
+	}
+	return false;
+}
+
+static inline const char *skip_timestamp(const char *buf)
+{
+#ifdef CONFIG_PRINTK_TIME
+	const char *p = memchr(buf, ']', strlen(buf));
+
+	if (p && p[1] == ' ')
+		return p + 2;
+#endif
+	return buf;
+}
+
 #endif /* _ASM_S390_BOOT_DATA_H */
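
Note: a standalone model of the bootdebug_filter_match() prefix rule above, using memchr with an explicit fallback where the kernel version relies on memscan returning the end pointer. With a filter of "cmma,kaslr", a message starting with either string passes.

#include <stdbool.h>
#include <string.h>

static bool filter_match(const char *filter, const char *msg)
{
	const char *p = filter, *end = filter + strlen(filter);

	if (!*p)
		return true;	/* empty filter matches everything */
	while (p < end) {
		const char *s = memchr(p, ',', end - p);

		if (!s)
			s = end;	/* last entry in the list */
		if (!strncmp(p, msg, s - p))
			return true;
		p = s + 1;
	}
	return false;
}
/* filter_match("cmma,kaslr", "cmma: ...") -> true */
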
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index c500d45fb465..acb4b13d98c5 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -2,69 +2,55 @@
 #ifndef _ASM_S390_BUG_H
 #define _ASM_S390_BUG_H
 
-#include <linux/compiler.h>
-
-#ifdef CONFIG_BUG
-
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-
-#define __EMIT_BUG(x) do {					\
-	asm_inline volatile(					\
-		"0:	mc	0,0\n"				\
-		".section .rodata.str,\"aMS\",@progbits,1\n"	\
-		"1:	.asciz	\""__FILE__"\"\n"		\
-		".previous\n"					\
-		".section __bug_table,\"aw\"\n"			\
-		"2:	.long	0b-.\n"				\
-		"	.long	1b-.\n"				\
-		"	.short	%0,%1\n"			\
-		"	.org	2b+%2\n"			\
-		".previous\n"					\
-		: : "i" (__LINE__),				\
-		    "i" (x),					\
-		    "i" (sizeof(struct bug_entry)));		\
-} while (0)
-
-#else /* CONFIG_DEBUG_BUGVERBOSE */
-
-#define __EMIT_BUG(x) do {				\
-	asm_inline volatile(				\
-		"0:	mc	0,0\n"			\
-		".section __bug_table,\"aw\"\n"		\
-		"1:	.long	0b-.\n"			\
-		"	.short	%0\n"			\
-		"	.org	1b+%1\n"		\
-		".previous\n"				\
-		: : "i" (x),				\
-		    "i" (sizeof(struct bug_entry)));	\
+#include <linux/stringify.h>
+
+#ifndef CONFIG_DEBUG_BUGVERBOSE
+#define _BUGVERBOSE_LOCATION(file, line)
+#else
+#define __BUGVERBOSE_LOCATION(file, line)			\
+		.pushsection .rodata.str, "aMS", @progbits, 1;	\
+	10002:	.ascii file "\0";				\
+		.popsection;					\
+								\
+		.long 10002b - .;				\
+		.short line;
+#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
+#endif
+
+#ifndef CONFIG_GENERIC_BUG
+#define __BUG_ENTRY(cond_str, flags)
+#else
+#define __BUG_ENTRY(cond_str, flags)				\
+		.pushsection __bug_table, "aw";			\
+		.align	4;					\
+	10000:	.long	10001f - .;				\
+		_BUGVERBOSE_LOCATION(WARN_CONDITION_STR(cond_str) __FILE__, __LINE__)	\
+		.short	flags;					\
+		.popsection;					\
+	10001:
+#endif
+
+#define ASM_BUG_FLAGS(cond_str, flags)				\
+	__BUG_ENTRY(cond_str, flags)				\
+	mc	0,0
+
+#define ASM_BUG()	ASM_BUG_FLAGS("", 0)
+
+#define __BUG_FLAGS(cond_str, flags)				\
+	asm_inline volatile(__stringify(ASM_BUG_FLAGS(cond_str, flags)));
+
+#define __WARN_FLAGS(cond_str, flags)				\
+do {								\
+	__BUG_FLAGS(cond_str, BUGFLAG_WARNING|(flags));		\
 } while (0)
 
-#endif /* CONFIG_DEBUG_BUGVERBOSE */
-
-#define BUG() do {					\
-	__EMIT_BUG(0);					\
-	unreachable();					\
+#define BUG()							\
+do {								\
+	__BUG_FLAGS("", 0);					\
+	unreachable();						\
 } while (0)
 
-#define __WARN_FLAGS(flags) do {			\
-	__EMIT_BUG(BUGFLAG_WARNING|(flags));		\
-} while (0)
-
-#define WARN_ON(x) ({					\
-	int __ret_warn_on = !!(x);			\
-	if (__builtin_constant_p(__ret_warn_on)) {	\
-		if (__ret_warn_on)			\
-			__WARN();			\
-	} else {					\
-		if (unlikely(__ret_warn_on))		\
-			__WARN();			\
-	}						\
-	unlikely(__ret_warn_on);			\
-})
-
 #define HAVE_ARCH_BUG
-#define HAVE_ARCH_WARN_ON
-#endif /* CONFIG_BUG */
 
 #include <asm-generic/bug.h>
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 436365ff6c19..e3afcece375e 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -210,7 +210,7 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
 #define get_ccwdev_lock(x) (x)->ccwlock
 
 #define to_ccwdev(n) container_of(n, struct ccw_device, dev)
-#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
+#define to_ccwdrv(n) container_of_const(n, struct ccw_driver, driver)
 
 extern struct ccw_device *ccw_device_create_console(struct ccw_driver *);
 extern void ccw_device_destroy_console(struct ccw_device *);
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index b89159591ca0..7e83dc2d3b06 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -13,6 +13,7 @@
 #define _S390_CHECKSUM_H
 
 #include <linux/instrumented.h>
+#include <linux/kmsan-checks.h>
 #include <linux/in6.h>
 
 static inline __wsum cksm(const void *buff, int len, __wsum sum)
@@ -23,9 +24,10 @@ static inline __wsum cksm(const void *buff, int len, __wsum sum)
 	};
 
 	instrument_read(buff, len);
-	asm volatile("\n"
+	kmsan_check_memory(buff, len);
+	asm volatile(
 		"0:	cksm	%[sum],%[rp]\n"
-		"	jo	0b\n"
+		"	jo	0b"
 		: [sum] "+&d" (sum), [rp] "+&d" (rp.pair) : : "cc", "memory");
 	return sum;
 }
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index b6b619f340a5..0a82ae2300b6 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -18,6 +18,8 @@
 
 #include <asm/scsw.h>
 
+#define CCW_MAX_BYTE_COUNT	65535
+
 /**
  * struct ccw1 - channel command word
  * @cmd_code: command code
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index aae0315374de..008357996262 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -11,191 +11,237 @@
 #include <linux/mmdebug.h>
 #include <linux/types.h>
 #include <linux/bug.h>
+#include <asm/asm.h>
 
-void __xchg_called_with_bad_pointer(void);
+void __cmpxchg_called_with_bad_pointer(void);
+
+static __always_inline u32 __cs_asm(u64 ptr, u32 old, u32 new)
+{
+	asm volatile(
+		"	cs	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+Q" (*(u32 *)ptr)
+		: [new] "d" (new)
+		: "memory", "cc");
+	return old;
+}
+
+static __always_inline u64 __csg_asm(u64 ptr, u64 old, u64 new)
+{
+	asm volatile(
+		"	csg	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+QS" (*(u64 *)ptr)
+		: [new] "d" (new)
+		: "memory", "cc");
+	return old;
+}
 
-static __always_inline unsigned long
-__arch_xchg(unsigned long x, unsigned long address, int size)
+static inline u8 __arch_cmpxchg1(u64 ptr, u8 old, u8 new)
 {
-	unsigned long old;
-	int shift;
+	union {
+		u8 b[4];
+		u32 w;
+	} old32, new32;
+	u32 prev;
+	int i;
 
+	i = ptr & 3;
+	ptr &= ~0x3;
+	prev = READ_ONCE(*(u32 *)ptr);
+	do {
+		old32.w = prev;
+		if (old32.b[i] != old)
+			return old32.b[i];
+		new32.w = old32.w;
+		new32.b[i] = new;
+		prev = __cs_asm(ptr, old32.w, new32.w);
+	} while (prev != old32.w);
+	return old;
+}
+
+static inline u16 __arch_cmpxchg2(u64 ptr, u16 old, u16 new)
+{
+	union {
+		u16 b[2];
+		u32 w;
+	} old32, new32;
+	u32 prev;
+	int i;
+
+	i = (ptr & 3) >> 1;
+	ptr &= ~0x3;
+	prev = READ_ONCE(*(u32 *)ptr);
+	do {
+		old32.w = prev;
+		if (old32.b[i] != old)
+			return old32.b[i];
+		new32.w = old32.w;
+		new32.b[i] = new;
+		prev = __cs_asm(ptr, old32.w, new32.w);
+	} while (prev != old32.w);
+	return old;
+}
+
+static __always_inline u64 __arch_cmpxchg(u64 ptr, u64 old, u64 new, int size)
+{
 	switch (size) {
-	case 1:
-		shift = (3 ^ (address & 3)) << 3;
-		address ^= address & 3;
-		asm volatile(
-			"	l	%0,%1\n"
-			"0:	lr	0,%0\n"
-			"	nr	0,%3\n"
-			"	or	0,%2\n"
-			"	cs	%0,0,%1\n"
-			"	jl	0b\n"
-			: "=&d" (old), "+Q" (*(int *) address)
-			: "d" ((x & 0xff) << shift), "d" (~(0xff << shift))
-			: "memory", "cc", "0");
-		return old >> shift;
-	case 2:
-		shift = (2 ^ (address & 2)) << 3;
-		address ^= address & 2;
-		asm volatile(
-			"	l	%0,%1\n"
-			"0:	lr	0,%0\n"
-			"	nr	0,%3\n"
-			"	or	0,%2\n"
-			"	cs	%0,0,%1\n"
-			"	jl	0b\n"
-			: "=&d" (old), "+Q" (*(int *) address)
-			: "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift))
-			: "memory", "cc", "0");
-		return old >> shift;
-	case 4:
-		asm volatile(
-			"	l	%0,%1\n"
-			"0:	cs	%0,%2,%1\n"
-			"	jl	0b\n"
-			: "=&d" (old), "+Q" (*(int *) address)
-			: "d" (x)
-			: "memory", "cc");
-		return old;
-	case 8:
-		asm volatile(
-			"	lg	%0,%1\n"
-			"0:	csg	%0,%2,%1\n"
-			"	jl	0b\n"
-			: "=&d" (old), "+QS" (*(long *) address)
-			: "d" (x)
-			: "memory", "cc");
-		return old;
+	case 1:	 return __arch_cmpxchg1(ptr, old & 0xff, new & 0xff);
+	case 2:	 return __arch_cmpxchg2(ptr, old & 0xffff, new & 0xffff);
+	case 4:	 return __cs_asm(ptr, old & 0xffffffff, new & 0xffffffff);
+	case 8:	 return __csg_asm(ptr, old, new);
+	default: __cmpxchg_called_with_bad_pointer();
 	}
-	__xchg_called_with_bad_pointer();
-	return x;
+	return old;
 }
 
-#define arch_xchg(ptr, x)						\
+#define arch_cmpxchg(ptr, o, n)						\
+({									\
+	(__typeof__(*(ptr)))__arch_cmpxchg((unsigned long)(ptr),	\
+					   (unsigned long)(o),		\
+					   (unsigned long)(n),		\
+					   sizeof(*(ptr)));		\
+})
+
+#define arch_cmpxchg64		arch_cmpxchg
+#define arch_cmpxchg_local	arch_cmpxchg
+#define arch_cmpxchg64_local	arch_cmpxchg
+
+#ifdef __HAVE_ASM_FLAG_OUTPUTS__
+
+#define arch_try_cmpxchg(ptr, oldp, new)				\
 ({									\
-	__typeof__(*(ptr)) __ret;					\
+	__typeof__(ptr) __oldp = (__typeof__(ptr))(oldp);		\
+	__typeof__(*(ptr)) __old = *__oldp;				\
+	__typeof__(*(ptr)) __new = (new);				\
+	__typeof__(*(ptr)) __prev;					\
+	int __cc;							\
 									\
-	__ret = (__typeof__(*(ptr)))					\
-		__arch_xchg((unsigned long)(x), (unsigned long)(ptr),	\
-			    sizeof(*(ptr)));				\
-	__ret;								\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+	case 2: {							\
+		__prev = arch_cmpxchg((ptr), (__old), (__new));		\
+		__cc = (__prev != __old);				\
+		if (unlikely(__cc))					\
+			*__oldp = __prev;				\
+		break;							\
+	}								\
+	case 4: {							\
+		asm volatile(						\
+			"	cs	%[__old],%[__new],%[__ptr]"	\
+			: [__old] "+d" (*__oldp),			\
+			  [__ptr] "+Q" (*(ptr)),			\
+			  "=@cc" (__cc)					\
+			: [__new] "d" (__new)				\
+			: "memory");					\
+		break;							\
+	}								\
+	case 8: {							\
+		asm volatile(						\
+			"	csg	%[__old],%[__new],%[__ptr]"	\
+			: [__old] "+d" (*__oldp),			\
+			  [__ptr] "+QS" (*(ptr)),			\
+			  "=@cc" (__cc)					\
+			: [__new] "d" (__new)				\
+			: "memory");					\
+		break;							\
+	}								\
+	default:							\
+		__cmpxchg_called_with_bad_pointer();			\
+	}								\
+	likely(__cc == 0);						\
 })
 
-void __cmpxchg_called_with_bad_pointer(void);
+#else /* __HAVE_ASM_FLAG_OUTPUTS__ */
 
-static __always_inline unsigned long __cmpxchg(unsigned long address,
-					       unsigned long old,
-					       unsigned long new, int size)
+#define arch_try_cmpxchg(ptr, oldp, new)				\
+({									\
+	__typeof__((ptr)) __oldp = (__typeof__(ptr))(oldp);		\
+	__typeof__(*(ptr)) __old = *__oldp;				\
+	__typeof__(*(ptr)) __new = (new);				\
+	__typeof__(*(ptr)) __prev;					\
+									\
+	__prev = arch_cmpxchg((ptr), (__old), (__new));			\
+	if (unlikely(__prev != __old))					\
+		*__oldp = __prev;					\
+	likely(__prev == __old);					\
+})
+
+#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */
+
+#define arch_try_cmpxchg64		arch_try_cmpxchg
+#define arch_try_cmpxchg_local		arch_try_cmpxchg
+#define arch_try_cmpxchg64_local	arch_try_cmpxchg
+
+void __xchg_called_with_bad_pointer(void);
+
+static inline u8 __arch_xchg1(u64 ptr, u8 x)
+{
+	int shift = (3 ^ (ptr & 3)) << 3;
+	u32 mask, old, new;
+
+	ptr &= ~0x3;
+	mask = ~(0xff << shift);
+	old = READ_ONCE(*(u32 *)ptr);
+	do {
+		new = old & mask;
+		new |= x << shift;
+	} while (!arch_try_cmpxchg((u32 *)ptr, &old, new));
+	return old >> shift;
+}
+
+static inline u16 __arch_xchg2(u64 ptr, u16 x)
+{
+	int shift = (2 ^ (ptr & 2)) << 3;
+	u32 mask, old, new;
+
+	ptr &= ~0x3;
+	mask = ~(0xffff << shift);
+	old = READ_ONCE(*(u32 *)ptr);
+	do {
+		new = old & mask;
+		new |= x << shift;
+	} while (!arch_try_cmpxchg((u32 *)ptr, &old, new));
+	return old >> shift;
+}
+
+static __always_inline u64 __arch_xchg(u64 ptr, u64 x, int size)
 {
 	switch (size) {
-	case 1: {
-		unsigned int prev, shift, mask;
-
-		shift = (3 ^ (address & 3)) << 3;
-		address ^= address & 3;
-		old = (old & 0xff) << shift;
-		new = (new & 0xff) << shift;
-		mask = ~(0xff << shift);
-		asm volatile(
-			"	l	%[prev],%[address]\n"
-			"	nr	%[prev],%[mask]\n"
-			"	xilf	%[mask],0xffffffff\n"
-			"	or	%[new],%[prev]\n"
-			"	or	%[prev],%[tmp]\n"
-			"0:	lr	%[tmp],%[prev]\n"
-			"	cs	%[prev],%[new],%[address]\n"
-			"	jnl	1f\n"
-			"	xr	%[tmp],%[prev]\n"
-			"	xr	%[new],%[tmp]\n"
-			"	nr	%[tmp],%[mask]\n"
-			"	jz	0b\n"
-			"1:"
-			: [prev] "=&d" (prev),
-			  [address] "+Q" (*(int *)address),
-			  [tmp] "+&d" (old),
-			  [new] "+&d" (new),
-			  [mask] "+&d" (mask)
-			:: "memory", "cc");
-		return prev >> shift;
-	}
-	case 2: {
-		unsigned int prev, shift, mask;
-
-		shift = (2 ^ (address & 2)) << 3;
-		address ^= address & 2;
-		old = (old & 0xffff) << shift;
-		new = (new & 0xffff) << shift;
-		mask = ~(0xffff << shift);
-		asm volatile(
-			"	l	%[prev],%[address]\n"
-			"	nr	%[prev],%[mask]\n"
-			"	xilf	%[mask],0xffffffff\n"
-			"	or	%[new],%[prev]\n"
-			"	or	%[prev],%[tmp]\n"
-			"0:	lr	%[tmp],%[prev]\n"
-			"	cs	%[prev],%[new],%[address]\n"
-			"	jnl	1f\n"
-			"	xr	%[tmp],%[prev]\n"
-			"	xr	%[new],%[tmp]\n"
-			"	nr	%[tmp],%[mask]\n"
-			"	jz	0b\n"
-			"1:"
-			: [prev] "=&d" (prev),
-			  [address] "+Q" (*(int *)address),
-			  [tmp] "+&d" (old),
-			  [new] "+&d" (new),
-			  [mask] "+&d" (mask)
-			:: "memory", "cc");
-		return prev >> shift;
-	}
+	case 1:
+		return __arch_xchg1(ptr, x & 0xff);
+	case 2:
+		return __arch_xchg2(ptr, x & 0xffff);
 	case 4: {
-		unsigned int prev = old;
-
-		asm volatile(
-			"	cs	%[prev],%[new],%[address]\n"
-			: [prev] "+&d" (prev),
-			  [address] "+Q" (*(int *)address)
-			: [new] "d" (new)
-			: "memory", "cc");
-		return prev;
+		u32 old = READ_ONCE(*(u32 *)ptr);
+
+		do {
+		} while (!arch_try_cmpxchg((u32 *)ptr, &old, x & 0xffffffff));
+		return old;
 	}
 	case 8: {
-		unsigned long prev = old;
-
-		asm volatile(
-			"	csg	%[prev],%[new],%[address]\n"
-			: [prev] "+&d" (prev),
-			  [address] "+QS" (*(long *)address)
-			: [new] "d" (new)
-			: "memory", "cc");
-		return prev;
+		u64 old = READ_ONCE(*(u64 *)ptr);
+
+		do {
+		} while (!arch_try_cmpxchg((u64 *)ptr, &old, x));
+		return old;
 	}
 	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
+	__xchg_called_with_bad_pointer();
+	return x;
 }
 
-#define arch_cmpxchg(ptr, o, n)						\
+#define arch_xchg(ptr, x)						\
 ({									\
-	__typeof__(*(ptr)) __ret;					\
-									\
-	__ret = (__typeof__(*(ptr)))					\
-		__cmpxchg((unsigned long)(ptr), (unsigned long)(o),	\
-			  (unsigned long)(n), sizeof(*(ptr)));		\
-	__ret;								\
+	(__typeof__(*(ptr)))__arch_xchg((unsigned long)(ptr),		\
+					(unsigned long)(x),		\
+					sizeof(*(ptr)));		\
 })
 
-#define arch_cmpxchg64		arch_cmpxchg
-#define arch_cmpxchg_local	arch_cmpxchg
-#define arch_cmpxchg64_local	arch_cmpxchg
-
 #define system_has_cmpxchg128()		1
 
 static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new)
 {
 	asm volatile(
-		"	cdsg	%[old],%[new],%[ptr]\n"
+		"	cdsg	%[old],%[new],%[ptr]"
 		: [old] "+d" (old), [ptr] "+QS" (*ptr)
 		: [new] "d" (new)
 		: "memory", "cc");
@@ -203,5 +249,25 @@ static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 n
 }
 #define arch_cmpxchg128		arch_cmpxchg128
+#define arch_cmpxchg128_local	arch_cmpxchg128
+
+#ifdef __HAVE_ASM_FLAG_OUTPUTS__
+
+static __always_inline bool arch_try_cmpxchg128(volatile u128 *ptr, u128 *oldp, u128 new)
+{
+	int cc;
+
+	asm volatile(
+		"	cdsg	%[old],%[new],%[ptr]"
+		: [old] "+d" (*oldp), [ptr] "+QS" (*ptr), "=@cc" (cc)
+		: [new] "d" (new)
+		: "memory");
+	return likely(cc == 0);
+}
+
+#define arch_try_cmpxchg128		arch_try_cmpxchg128
+#define arch_try_cmpxchg128_local	arch_try_cmpxchg128
+
+#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */
 
 #endif /* __ASM_CMPXCHG_H */
prev >> shift; - } + case 1: + return __arch_xchg1(ptr, x & 0xff); + case 2: + return __arch_xchg2(ptr, x & 0xffff); case 4: { - unsigned int prev = old; - - asm volatile( - " cs %[prev],%[new],%[address]\n" - : [prev] "+&d" (prev), - [address] "+Q" (*(int *)address) - : [new] "d" (new) - : "memory", "cc"); - return prev; + u32 old = READ_ONCE(*(u32 *)ptr); + + do { + } while (!arch_try_cmpxchg((u32 *)ptr, &old, x & 0xffffffff)); + return old; } case 8: { - unsigned long prev = old; - - asm volatile( - " csg %[prev],%[new],%[address]\n" - : [prev] "+&d" (prev), - [address] "+QS" (*(long *)address) - : [new] "d" (new) - : "memory", "cc"); - return prev; + u64 old = READ_ONCE(*(u64 *)ptr); + + do { + } while (!arch_try_cmpxchg((u64 *)ptr, &old, x)); + return old; } } - __cmpxchg_called_with_bad_pointer(); - return old; + __xchg_called_with_bad_pointer(); + return x; } -#define arch_cmpxchg(ptr, o, n) \ +#define arch_xchg(ptr, x) \ ({ \ - __typeof__(*(ptr)) __ret; \ - \ - __ret = (__typeof__(*(ptr))) \ - __cmpxchg((unsigned long)(ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr))); \ - __ret; \ + (__typeof__(*(ptr)))__arch_xchg((unsigned long)(ptr), \ + (unsigned long)(x), \ + sizeof(*(ptr))); \ }) -#define arch_cmpxchg64 arch_cmpxchg -#define arch_cmpxchg_local arch_cmpxchg -#define arch_cmpxchg64_local arch_cmpxchg - #define system_has_cmpxchg128() 1 static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new) { asm volatile( - " cdsg %[old],%[new],%[ptr]\n" + " cdsg %[old],%[new],%[ptr]" : [old] "+d" (old), [ptr] "+QS" (*ptr) : [new] "d" (new) : "memory", "cc"); @@ -203,5 +249,25 @@ static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 n } #define arch_cmpxchg128 arch_cmpxchg128 +#define arch_cmpxchg128_local arch_cmpxchg128 + +#ifdef __HAVE_ASM_FLAG_OUTPUTS__ + +static __always_inline bool arch_try_cmpxchg128(volatile u128 *ptr, u128 *oldp, u128 new) +{ + int cc; + + asm volatile( + " cdsg %[old],%[new],%[ptr]" + : [old] "+d" (*oldp), [ptr] "+QS" (*ptr), "=@cc" (cc) + : [new] "d" (new) + : "memory"); + return likely(cc == 0); +} + +#define arch_try_cmpxchg128 arch_try_cmpxchg128 +#define arch_try_cmpxchg128_local arch_try_cmpxchg128 + +#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */ #endif /* __ASM_CMPXCHG_H */ diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h deleted file mode 100644 index 3cb9d813f022..000000000000 --- a/arch/s390/include/asm/compat.h +++ /dev/null @@ -1,140 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_S390X_COMPAT_H -#define _ASM_S390X_COMPAT_H -/* - * Architecture specific compatibility types - */ -#include <linux/types.h> -#include <linux/sched.h> -#include <linux/sched/task_stack.h> -#include <linux/thread_info.h> -#include <asm/ptrace.h> - -#define compat_mode_t compat_mode_t -typedef u16 compat_mode_t; - -#define __compat_uid_t __compat_uid_t -typedef u16 __compat_uid_t; -typedef u16 __compat_gid_t; - -#define compat_dev_t compat_dev_t -typedef u16 compat_dev_t; - -#define compat_ipc_pid_t compat_ipc_pid_t -typedef u16 compat_ipc_pid_t; - -#define compat_statfs compat_statfs - -#include <asm-generic/compat.h> - -#define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p( \ - typeof(0?(__force t)0:0ULL), u64)) - -#define __SC_DELOUSE(t,v) ({ \ - BUILD_BUG_ON(sizeof(t) > 4 && !__TYPE_IS_PTR(t)); \ - (__force t)(__TYPE_IS_PTR(t) ? 
((v) & 0x7fffffff) : (v)); \ -}) - -#define PSW32_MASK_USER 0x0000FF00UL - -#define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \ - PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \ - PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \ - PSW32_ASC_PRIMARY) - -#define COMPAT_UTS_MACHINE "s390\0\0\0\0" - -typedef u16 compat_nlink_t; - -typedef struct { - u32 mask; - u32 addr; -} __aligned(8) psw_compat_t; - -typedef struct { - psw_compat_t psw; - u32 gprs[NUM_GPRS]; - u32 acrs[NUM_ACRS]; - u32 orig_gpr2; -} s390_compat_regs; - -typedef struct { - u32 gprs_high[NUM_GPRS]; -} s390_compat_regs_high; - -struct compat_stat { - compat_dev_t st_dev; - u16 __pad1; - compat_ino_t st_ino; - compat_mode_t st_mode; - compat_nlink_t st_nlink; - __compat_uid_t st_uid; - __compat_gid_t st_gid; - compat_dev_t st_rdev; - u16 __pad2; - u32 st_size; - u32 st_blksize; - u32 st_blocks; - u32 st_atime; - u32 st_atime_nsec; - u32 st_mtime; - u32 st_mtime_nsec; - u32 st_ctime; - u32 st_ctime_nsec; - u32 __unused4; - u32 __unused5; -}; - -struct compat_statfs { - u32 f_type; - u32 f_bsize; - u32 f_blocks; - u32 f_bfree; - u32 f_bavail; - u32 f_files; - u32 f_ffree; - compat_fsid_t f_fsid; - u32 f_namelen; - u32 f_frsize; - u32 f_flags; - u32 f_spare[4]; -}; - -struct compat_statfs64 { - u32 f_type; - u32 f_bsize; - u64 f_blocks; - u64 f_bfree; - u64 f_bavail; - u64 f_files; - u64 f_ffree; - compat_fsid_t f_fsid; - u32 f_namelen; - u32 f_frsize; - u32 f_flags; - u32 f_spare[5]; -}; - -/* - * A pointer passed in from user mode. This should not - * be used for syscall parameters, just declare them - * as pointers because the syscall entry code will have - * appropriately converted them already. - */ - -static inline void __user *compat_ptr(compat_uptr_t uptr) -{ - return (void __user *)(unsigned long)(uptr & 0x7fffffffUL); -} -#define compat_ptr(uptr) compat_ptr(uptr) - -#ifdef CONFIG_COMPAT - -static inline int is_compat_task(void) -{ - return test_thread_flag(TIF_31BIT); -} - -#endif - -#endif /* _ASM_S390X_COMPAT_H */ diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h index c786538e397c..a83683169d98 100644 --- a/arch/s390/include/asm/cpacf.h +++ b/arch/s390/include/asm/cpacf.h @@ -12,6 +12,7 @@ #define _ASM_S390_CPACF_H #include <asm/facility.h> +#include <linux/kmsan-checks.h> /* * Instruction opcodes for the CPACF instructions @@ -53,6 +54,10 @@ #define CPACF_KM_XTS_256 0x34 #define CPACF_KM_PXTS_128 0x3a #define CPACF_KM_PXTS_256 0x3c +#define CPACF_KM_XTS_128_FULL 0x52 +#define CPACF_KM_XTS_256_FULL 0x54 +#define CPACF_KM_PXTS_128_FULL 0x5a +#define CPACF_KM_PXTS_256_FULL 0x5c /* * Function codes for the KMC (CIPHER MESSAGE WITH CHAINING) @@ -120,23 +125,35 @@ #define CPACF_KMAC_DEA 0x01 #define CPACF_KMAC_TDEA_128 0x02 #define CPACF_KMAC_TDEA_192 0x03 +#define CPACF_KMAC_HMAC_SHA_224 0x70 +#define CPACF_KMAC_HMAC_SHA_256 0x71 +#define CPACF_KMAC_HMAC_SHA_384 0x72 +#define CPACF_KMAC_HMAC_SHA_512 0x73 +#define CPACF_KMAC_PHMAC_SHA_224 0x78 +#define CPACF_KMAC_PHMAC_SHA_256 0x79 +#define CPACF_KMAC_PHMAC_SHA_384 0x7a +#define CPACF_KMAC_PHMAC_SHA_512 0x7b /* * Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT) * instruction */ -#define CPACF_PCKMO_QUERY 0x00 -#define CPACF_PCKMO_ENC_DES_KEY 0x01 -#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02 -#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03 -#define CPACF_PCKMO_ENC_AES_128_KEY 0x12 -#define CPACF_PCKMO_ENC_AES_192_KEY 0x13 -#define CPACF_PCKMO_ENC_AES_256_KEY 0x14 -#define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20 -#define 
CPACF_PCKMO_ENC_ECC_P384_KEY 0x21 -#define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22 -#define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28 -#define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29 +#define CPACF_PCKMO_QUERY 0x00 +#define CPACF_PCKMO_ENC_DES_KEY 0x01 +#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02 +#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03 +#define CPACF_PCKMO_ENC_AES_128_KEY 0x12 +#define CPACF_PCKMO_ENC_AES_192_KEY 0x13 +#define CPACF_PCKMO_ENC_AES_256_KEY 0x14 +#define CPACF_PCKMO_ENC_AES_XTS_128_DOUBLE_KEY 0x15 +#define CPACF_PCKMO_ENC_AES_XTS_256_DOUBLE_KEY 0x16 +#define CPACF_PCKMO_ENC_ECC_P256_KEY 0x20 +#define CPACF_PCKMO_ENC_ECC_P384_KEY 0x21 +#define CPACF_PCKMO_ENC_ECC_P521_KEY 0x22 +#define CPACF_PCKMO_ENC_ECC_ED25519_KEY 0x28 +#define CPACF_PCKMO_ENC_ECC_ED448_KEY 0x29 +#define CPACF_PCKMO_ENC_HMAC_512_KEY 0x76 +#define CPACF_PCKMO_ENC_HMAC_1024_KEY 0x7a /* * Function codes for the PRNO (PERFORM RANDOM NUMBER OPERATION) @@ -164,7 +181,40 @@ #define CPACF_KMA_LAAD 0x200 /* Last-AAD */ #define CPACF_KMA_HS 0x400 /* Hash-subkey Supplied */ +/* + * Flags for the KIMD/KLMD (COMPUTE INTERMEDIATE/LAST MESSAGE DIGEST) + * instructions + */ +#define CPACF_KIMD_NIP 0x8000 +#define CPACF_KLMD_DUFOP 0x4000 +#define CPACF_KLMD_NIP 0x8000 + +/* + * Function codes for KDSA (COMPUTE DIGITAL SIGNATURE AUTHENTICATION) + * instruction + */ +#define CPACF_KDSA_QUERY 0x00 +#define CPACF_KDSA_ECDSA_VERIFY_P256 0x01 +#define CPACF_KDSA_ECDSA_VERIFY_P384 0x02 +#define CPACF_KDSA_ECDSA_VERIFY_P521 0x03 +#define CPACF_KDSA_ECDSA_SIGN_P256 0x09 +#define CPACF_KDSA_ECDSA_SIGN_P384 0x0a +#define CPACF_KDSA_ECDSA_SIGN_P521 0x0b +#define CPACF_KDSA_ENC_ECDSA_SIGN_P256 0x11 +#define CPACF_KDSA_ENC_ECDSA_SIGN_P384 0x12 +#define CPACF_KDSA_ENC_ECDSA_SIGN_P521 0x13 +#define CPACF_KDSA_EDDSA_VERIFY_ED25519 0x20 +#define CPACF_KDSA_EDDSA_VERIFY_ED448 0x24 +#define CPACF_KDSA_EDDSA_SIGN_ED25519 0x28 +#define CPACF_KDSA_EDDSA_SIGN_ED448 0x2c +#define CPACF_KDSA_ENC_EDDSA_SIGN_ED25519 0x30 +#define CPACF_KDSA_ENC_EDDSA_SIGN_ED448 0x34 + +#define CPACF_FC_QUERY 0x00 +#define CPACF_FC_QUERY_AUTH_INFO 0x7F + typedef struct { unsigned char bytes[16]; } cpacf_mask_t; +typedef struct { unsigned char bytes[256]; } cpacf_qai_t; /* * Prototype for a not existing function to produce a link @@ -174,80 +224,85 @@ typedef struct { unsigned char bytes[16]; } cpacf_mask_t; void __cpacf_bad_opcode(void); static __always_inline void __cpacf_query_rre(u32 opc, u8 r1, u8 r2, - cpacf_mask_t *mask) + u8 *pb, u8 fc) { asm volatile( - " la %%r1,%[mask]\n" - " xgr %%r0,%%r0\n" - " .insn rre,%[opc] << 16,%[r1],%[r2]\n" - : [mask] "=R" (*mask) - : [opc] "i" (opc), + " la %%r1,%[pb]\n" + " lghi %%r0,%[fc]\n" + " .insn rre,%[opc] << 16,%[r1],%[r2]" + : [pb] "=R" (*pb) + : [opc] "i" (opc), [fc] "i" (fc), [r1] "i" (r1), [r2] "i" (r2) - : "cc", "r0", "r1"); + : "cc", "memory", "r0", "r1"); } -static __always_inline void __cpacf_query_rrf(u32 opc, - u8 r1, u8 r2, u8 r3, u8 m4, - cpacf_mask_t *mask) +static __always_inline void __cpacf_query_rrf(u32 opc, u8 r1, u8 r2, u8 r3, + u8 m4, u8 *pb, u8 fc) { asm volatile( - " la %%r1,%[mask]\n" - " xgr %%r0,%%r0\n" - " .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]\n" - : [mask] "=R" (*mask) - : [opc] "i" (opc), [r1] "i" (r1), [r2] "i" (r2), - [r3] "i" (r3), [m4] "i" (m4) - : "cc", "r0", "r1"); + " la %%r1,%[pb]\n" + " lghi %%r0,%[fc]\n" + " .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]" + : [pb] "=R" (*pb) + : [opc] "i" (opc), [fc] "i" (fc), [r1] "i" (r1), + [r2] "i" (r2), [r3] "i" (r3), [m4] "i" (m4) + : "cc", "memory", 
"r0", "r1"); } -static __always_inline void __cpacf_query(unsigned int opcode, - cpacf_mask_t *mask) +static __always_inline void __cpacf_query_insn(unsigned int opcode, void *pb, + u8 fc) { switch (opcode) { case CPACF_KDSA: - __cpacf_query_rre(CPACF_KDSA, 0, 2, mask); + __cpacf_query_rre(CPACF_KDSA, 0, 2, pb, fc); break; case CPACF_KIMD: - __cpacf_query_rre(CPACF_KIMD, 0, 2, mask); + __cpacf_query_rre(CPACF_KIMD, 0, 2, pb, fc); break; case CPACF_KLMD: - __cpacf_query_rre(CPACF_KLMD, 0, 2, mask); + __cpacf_query_rre(CPACF_KLMD, 0, 2, pb, fc); break; case CPACF_KM: - __cpacf_query_rre(CPACF_KM, 2, 4, mask); + __cpacf_query_rre(CPACF_KM, 2, 4, pb, fc); break; case CPACF_KMA: - __cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, mask); + __cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, pb, fc); break; case CPACF_KMAC: - __cpacf_query_rre(CPACF_KMAC, 0, 2, mask); + __cpacf_query_rre(CPACF_KMAC, 0, 2, pb, fc); break; case CPACF_KMC: - __cpacf_query_rre(CPACF_KMC, 2, 4, mask); + __cpacf_query_rre(CPACF_KMC, 2, 4, pb, fc); break; case CPACF_KMCTR: - __cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, mask); + __cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, pb, fc); break; case CPACF_KMF: - __cpacf_query_rre(CPACF_KMF, 2, 4, mask); + __cpacf_query_rre(CPACF_KMF, 2, 4, pb, fc); break; case CPACF_KMO: - __cpacf_query_rre(CPACF_KMO, 2, 4, mask); + __cpacf_query_rre(CPACF_KMO, 2, 4, pb, fc); break; case CPACF_PCC: - __cpacf_query_rre(CPACF_PCC, 0, 0, mask); + __cpacf_query_rre(CPACF_PCC, 0, 0, pb, fc); break; case CPACF_PCKMO: - __cpacf_query_rre(CPACF_PCKMO, 0, 0, mask); + __cpacf_query_rre(CPACF_PCKMO, 0, 0, pb, fc); break; case CPACF_PRNO: - __cpacf_query_rre(CPACF_PRNO, 2, 4, mask); + __cpacf_query_rre(CPACF_PRNO, 2, 4, pb, fc); break; default: __cpacf_bad_opcode(); } } +static __always_inline void __cpacf_query(unsigned int opcode, + cpacf_mask_t *mask) +{ + __cpacf_query_insn(opcode, mask, CPACF_FC_QUERY); +} + static __always_inline int __cpacf_check_opcode(unsigned int opcode) { switch (opcode) { @@ -268,6 +323,8 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode) return test_facility(57); /* check for MSA5 */ case CPACF_KMA: return test_facility(146); /* check for MSA8 */ + case CPACF_KDSA: + return test_facility(155); /* check for MSA9 */ default: __cpacf_bad_opcode(); return 0; @@ -275,14 +332,15 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode) } /** - * cpacf_query() - check if a specific CPACF function is available + * cpacf_query() - Query the function code mask for this CPACF opcode * @opcode: the opcode of the crypto instruction - * @func: the function code to test for + * @mask: ptr to struct cpacf_mask_t * * Executes the query function for the given crypto instruction @opcode * and checks if @func is available * - * Returns 1 if @func is available for @opcode, 0 otherwise + * On success 1 is returned and the mask is filled with the function + * code mask for this CPACF opcode, otherwise 0 is returned. 
*/ static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask) { @@ -299,7 +357,8 @@ static inline int cpacf_test_func(cpacf_mask_t *mask, unsigned int func) return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0; } -static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int func) +static __always_inline int cpacf_query_func(unsigned int opcode, + unsigned int func) { cpacf_mask_t mask; @@ -308,6 +367,32 @@ static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int fu return 0; } +static __always_inline void __cpacf_qai(unsigned int opcode, cpacf_qai_t *qai) +{ + __cpacf_query_insn(opcode, qai, CPACF_FC_QUERY_AUTH_INFO); +} + +/** + * cpacf_qai() - Get the query authentication information for a CPACF opcode + * @opcode: the opcode of the crypto instruction + * @mask: ptr to struct cpacf_qai_t + * + * Executes the query authentication information function for the given crypto + * instruction @opcode and checks if @func is available + * + * On success 1 is returned and the mask is filled with the query authentication + * information for this CPACF opcode, otherwise 0 is returned. + */ +static __always_inline int cpacf_qai(unsigned int opcode, cpacf_qai_t *qai) +{ + if (cpacf_query_func(opcode, CPACF_FC_QUERY_AUTH_INFO)) { + __cpacf_qai(opcode, qai); + return 1; + } + memset(qai, 0, sizeof(*qai)); + return 0; +} + /** * cpacf_km() - executes the KM (CIPHER MESSAGE) instruction * @func: the function code passed to KM; see CPACF_KM_xxx defines @@ -331,7 +416,7 @@ static inline int cpacf_km(unsigned long func, void *param, " lgr 0,%[fc]\n" " lgr 1,%[pba]\n" "0: .insn rre,%[opc] << 16,%[dst],%[src]\n" - " brc 1,0b\n" /* handle partial completion */ + " brc 1,0b" /* handle partial completion */ : [src] "+&d" (s.pair), [dst] "+&d" (d.pair) : [fc] "d" (func), [pba] "d" ((unsigned long)param), [opc] "i" (CPACF_KM) @@ -363,7 +448,7 @@ static inline int cpacf_kmc(unsigned long func, void *param, " lgr 0,%[fc]\n" " lgr 1,%[pba]\n" "0: .insn rre,%[opc] << 16,%[dst],%[src]\n" - " brc 1,0b\n" /* handle partial completion */ + " brc 1,0b" /* handle partial completion */ : [src] "+&d" (s.pair), [dst] "+&d" (d.pair) : [fc] "d" (func), [pba] "d" ((unsigned long)param), [opc] "i" (CPACF_KMC) @@ -390,8 +475,8 @@ static inline void cpacf_kimd(unsigned long func, void *param, asm volatile( " lgr 0,%[fc]\n" " lgr 1,%[pba]\n" - "0: .insn rre,%[opc] << 16,0,%[src]\n" - " brc 1,0b\n" /* handle partial completion */ + "0: .insn rrf,%[opc] << 16,0,%[src],8,0\n" + " brc 1,0b" /* handle partial completion */ : [src] "+&d" (s.pair) : [fc] "d" (func), [pba] "d" ((unsigned long)(param)), [opc] "i" (CPACF_KIMD) @@ -415,8 +500,8 @@ static inline void cpacf_klmd(unsigned long func, void *param, asm volatile( " lgr 0,%[fc]\n" " lgr 1,%[pba]\n" - "0: .insn rre,%[opc] << 16,0,%[src]\n" - " brc 1,0b\n" /* handle partial completion */ + "0: .insn rrf,%[opc] << 16,0,%[src],8,0\n" + " brc 1,0b" /* handle partial completion */ : [src] "+&d" (s.pair) : [fc] "d" (func), [pba] "d" ((unsigned long)param), [opc] "i" (CPACF_KLMD) @@ -424,29 +509,30 @@ static inline void cpacf_klmd(unsigned long func, void *param, } /** - * cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) - * instruction - * @func: the function code passed to KM; see CPACF_KMAC_xxx defines + * _cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) + * instruction and updates flags in gr0 + * @gr0: pointer to gr0 (fc and flags) passed to KMAC; see CPACF_KMAC_xxx defines * 
@param: address of parameter block; see POP for details on each func * @src: address of source memory area * @src_len: length of src operand in bytes * * Returns 0 for the query func, number of processed bytes for digest funcs */ -static inline int cpacf_kmac(unsigned long func, void *param, - const u8 *src, long src_len) +static inline int _cpacf_kmac(unsigned long *gr0, void *param, + const u8 *src, long src_len) { union register_pair s; s.even = (unsigned long)src; s.odd = (unsigned long)src_len; asm volatile( - " lgr 0,%[fc]\n" + " lgr 0,%[r0]\n" " lgr 1,%[pba]\n" "0: .insn rre,%[opc] << 16,0,%[src]\n" " brc 1,0b\n" /* handle partial completion */ - : [src] "+&d" (s.pair) - : [fc] "d" (func), [pba] "d" ((unsigned long)param), + " lgr %[r0],0" + : [r0] "+d" (*gr0), [src] "+&d" (s.pair) + : [pba] "d" ((unsigned long)param), [opc] "i" (CPACF_KMAC) : "cc", "memory", "0", "1"); @@ -454,6 +540,22 @@ static inline int cpacf_kmac(unsigned long func, void *param, } /** + * cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) + * instruction + * @func: function code passed to KMAC; see CPACF_KMAC_xxx defines + * @param: address of parameter block; see POP for details on each func + * @src: address of source memory area + * @src_len: length of src operand in bytes + * + * Returns 0 for the query func, number of processed bytes for digest funcs + */ +static inline int cpacf_kmac(unsigned long func, void *param, + const u8 *src, long src_len) +{ + return _cpacf_kmac(&func, param, src, src_len); +} + +/** * cpacf_kmctr() - executes the KMCTR (CIPHER MESSAGE WITH COUNTER) instruction * @func: the function code passed to KMCTR; see CPACF_KMCTR_xxx defines * @param: address of parameter block; see POP for details on each func @@ -478,7 +580,7 @@ static inline int cpacf_kmctr(unsigned long func, void *param, u8 *dest, " lgr 0,%[fc]\n" " lgr 1,%[pba]\n" "0: .insn rrf,%[opc] << 16,%[dst],%[src],%[ctr],0\n" - " brc 1,0b\n" /* handle partial completion */ + " brc 1,0b" /* handle partial completion */ : [src] "+&d" (s.pair), [dst] "+&d" (d.pair), [ctr] "+&d" (c.pair) : [fc] "d" (func), [pba] "d" ((unsigned long)param), @@ -512,7 +614,7 @@ static inline void cpacf_prno(unsigned long func, void *param, " lgr 0,%[fc]\n" " lgr 1,%[pba]\n" "0: .insn rre,%[opc] << 16,%[dst],%[seed]\n" - " brc 1,0b\n" /* handle partial completion */ + " brc 1,0b" /* handle partial completion */ : [dst] "+&d" (d.pair) : [fc] "d" (func), [pba] "d" ((unsigned long)param), [seed] "d" (s.pair), [opc] "i" (CPACF_PRNO) @@ -538,10 +640,12 @@ static inline void cpacf_trng(u8 *ucbuf, unsigned long ucbuf_len, asm volatile ( " lghi 0,%[fc]\n" "0: .insn rre,%[opc] << 16,%[ucbuf],%[cbuf]\n" - " brc 1,0b\n" /* handle partial completion */ + " brc 1,0b" /* handle partial completion */ : [ucbuf] "+&d" (u.pair), [cbuf] "+&d" (c.pair) : [fc] "K" (CPACF_PRNO_TRNG), [opc] "i" (CPACF_PRNO) : "cc", "memory", "0"); + kmsan_unpoison_memory(ucbuf, ucbuf_len); + kmsan_unpoison_memory(cbuf, cbuf_len); } /** @@ -549,18 +653,30 @@ static inline void cpacf_trng(u8 *ucbuf, unsigned long ucbuf_len, * instruction * @func: the function code passed to PCC; see CPACF_KM_xxx defines * @param: address of parameter block; see POP for details on each func + * + * Returns the condition code, this is + * 0 - cc code 0 (normal completion) + * 1 - cc code 1 (protected key wkvp mismatch or src operand out of range) + * 2 - cc code 2 (something invalid, scalar multiply infinity, ...) 
+ * Condition code 3 (partial completion) is handled within the asm code + * and never returned. */ -static inline void cpacf_pcc(unsigned long func, void *param) +static inline int cpacf_pcc(unsigned long func, void *param) { + int cc; + asm volatile( " lgr 0,%[fc]\n" " lgr 1,%[pba]\n" "0: .insn rre,%[opc] << 16,0,0\n" /* PCC opcode */ " brc 1,0b\n" /* handle partial completion */ - : + CC_IPM(cc) + : CC_OUT(cc, cc) : [fc] "d" (func), [pba] "d" ((unsigned long)param), [opc] "i" (CPACF_PCC) - : "cc", "memory", "0", "1"); + : CC_CLOBBER_LIST("memory", "0", "1")); + + return CC_TRANSFORM(cc); } /** @@ -576,7 +692,7 @@ static inline void cpacf_pckmo(long func, void *param) asm volatile( " lgr 0,%[fc]\n" " lgr 1,%[pba]\n" - " .insn rre,%[opc] << 16,0,0\n" /* PCKMO opcode */ + " .insn rre,%[opc] << 16,0,0" /* PCKMO opcode */ : : [fc] "d" (func), [pba] "d" ((unsigned long)param), [opc] "i" (CPACF_PCKMO) @@ -609,7 +725,7 @@ static inline void cpacf_kma(unsigned long func, void *param, u8 *dest, " lgr 0,%[fc]\n" " lgr 1,%[pba]\n" "0: .insn rrf,%[opc] << 16,%[dst],%[src],%[aad],0\n" - " brc 1,0b\n" /* handle partial completion */ + " brc 1,0b" /* handle partial completion */ : [dst] "+&d" (d.pair), [src] "+&d" (s.pair), [aad] "+&d" (a.pair) : [fc] "d" (func), [pba] "d" ((unsigned long)param), diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h index 26c710cd3485..5672e3fab52b 100644 --- a/arch/s390/include/asm/cpu.h +++ b/arch/s390/include/asm/cpu.h @@ -9,7 +9,7 @@ #ifndef _ASM_S390_CPU_H #define _ASM_S390_CPU_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> #include <linux/jump_label.h> @@ -24,5 +24,5 @@ struct cpuid DECLARE_STATIC_KEY_FALSE(cpu_has_bear); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_S390_CPU_H */ diff --git a/arch/s390/include/asm/cpu_mf-insn.h b/arch/s390/include/asm/cpu_mf-insn.h index a68b362e0964..941663939cc7 100644 --- a/arch/s390/include/asm/cpu_mf-insn.h +++ b/arch/s390/include/asm/cpu_mf-insn.h @@ -8,7 +8,7 @@ #ifndef _ASM_S390_CPU_MF_INSN_H #define _ASM_S390_CPU_MF_INSN_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ /* Macro to generate the STCCTM instruction with a customized * M3 field designating the counter set. 
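Since cpacf_pcc() now returns the condition code instead of void (see the
hunk above), callers can distinguish the cc 1 and cc 2 cases. A minimal
caller sketch follows; pcc_compute() and its errno mapping are illustrative
assumptions, not part of this patch:

#include <linux/errno.h>
#include <asm/cpacf.h>

/*
 * Illustrative only: run PCC with a prepared, function-code specific
 * parameter block and map the returned condition code to an errno value.
 */
static int pcc_compute(unsigned long fc, void *param)
{
	switch (cpacf_pcc(fc, param)) {
	case 0:
		return 0;		/* normal completion */
	case 1:
		return -EKEYREJECTED;	/* wkvp mismatch or operand out of range */
	default:
		return -EIO;		/* cc 2: invalid operand, infinity, ... */
	}
}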
@@ -17,6 +17,6 @@ .insn rsy,0xeb0000000017,\r1,\m3 & 0xf,\db2 .endm -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index a0de5b9b02ea..1798fbd59068 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -10,8 +10,10 @@ #define _ASM_S390_CPU_MF_H #include <linux/errno.h> +#include <linux/kmsan-checks.h> #include <asm/asm-extable.h> #include <asm/facility.h> +#include <asm/asm.h> asm(".include \"asm/cpu_mf-insn.h\"\n"); @@ -169,7 +171,7 @@ static inline int qctri(struct cpumf_ctr_info *info) { int rc = -EINVAL; - asm volatile ( + asm_inline volatile ( "0: qctri %1\n" "1: lhi %0,0\n" "2:\n" @@ -183,12 +185,13 @@ static inline int lcctl(u64 ctl) { int cc; - asm volatile ( - " lcctl %1\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (cc) : "Q" (ctl) : "cc"); - return cc; + asm_inline volatile ( + " lcctl %[ctl]\n" + CC_IPM(cc) + : CC_OUT(cc, cc) + : [ctl] "Q" (ctl) + : CC_CLOBBER); + return CC_TRANSFORM(cc); } /* Extract CPU counter */ @@ -197,13 +200,14 @@ static inline int __ecctr(u64 ctr, u64 *content) u64 _content; int cc; - asm volatile ( - " ecctr %0,%2\n" - " ipm %1\n" - " srl %1,28\n" - : "=d" (_content), "=d" (cc) : "d" (ctr) : "cc"); + asm_inline volatile ( + " ecctr %[_content],%[ctr]\n" + CC_IPM(cc) + : CC_OUT(cc, cc), [_content] "=d" (_content) + : [ctr] "d" (ctr) + : CC_CLOBBER); *content = _content; - return cc; + return CC_TRANSFORM(cc); } /* Extract CPU counter */ @@ -233,13 +237,17 @@ static __always_inline int stcctm(enum stcctm_ctr_set set, u64 range, u64 *dest) int cc; asm volatile ( - " STCCTM %2,%3,%1\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (cc) - : "Q" (*dest), "d" (range), "i" (set) - : "cc", "memory"); - return cc; + " STCCTM %[range],%[set],%[dest]\n" + CC_IPM(cc) + : CC_OUT(cc, cc) + : [dest] "Q" (*dest), [range] "d" (range), [set] "i" (set) + : CC_CLOBBER_LIST("memory")); + /* + * If cc == 2, less than RANGE counters are stored, but it's not easy + * to tell how many. Always unpoison the whole range for simplicity. + */ + kmsan_unpoison_memory(dest, range * sizeof(u64)); + return CC_TRANSFORM(cc); } /* Query sampling information */ @@ -259,19 +267,20 @@ static inline int qsi(struct hws_qsi_info_block *info) /* Load sampling controls */ static inline int lsctl(struct hws_lsctl_request_block *req) { - int cc; + int cc, exception; - cc = 1; + exception = 1; asm volatile( - "0: lsctl 0(%1)\n" - "1: ipm %0\n" - " srl %0,28\n" + "0: lsctl %[req]\n" + "1: lhi %[exc],0\n" "2:\n" + CC_IPM(cc) EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) - : "+d" (cc), "+a" (req) - : "m" (*req) - : "cc", "memory"); - - return cc ? 
-EINVAL : 0; + : CC_OUT(cc, cc), [exc] "+d" (exception) + : [req] "Q" (*req) + : CC_CLOBBER); + if (exception || CC_TRANSFORM(cc)) + return -EINVAL; + return 0; } #endif /* _ASM_S390_CPU_MF_H */ diff --git a/arch/s390/include/asm/cpufeature.h b/arch/s390/include/asm/cpufeature.h index 931204613753..d6fb999c8c6d 100644 --- a/arch/s390/include/asm/cpufeature.h +++ b/arch/s390/include/asm/cpufeature.h @@ -9,10 +9,13 @@ #ifndef __ASM_S390_CPUFEATURE_H #define __ASM_S390_CPUFEATURE_H +#include <asm/facility.h> + enum { S390_CPU_FEATURE_MSA, S390_CPU_FEATURE_VXRS, S390_CPU_FEATURE_UV, + S390_CPU_FEATURE_D288, MAX_CPU_FEATURES }; @@ -20,4 +23,15 @@ enum { int cpu_have_feature(unsigned int nr); +#define cpu_has_bear() test_facility(193) +#define cpu_has_edat1() test_facility(8) +#define cpu_has_edat2() test_facility(78) +#define cpu_has_gs() test_facility(133) +#define cpu_has_nx() test_facility(130) +#define cpu_has_rdp() test_facility(194) +#define cpu_has_seq_insn() test_facility(85) +#define cpu_has_tlb_lc() test_facility(51) +#define cpu_has_topology() test_facility(11) +#define cpu_has_vx() test_facility(129) + #endif /* __ASM_S390_CPUFEATURE_H */ diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h index 638137d46c85..a03f64033760 100644 --- a/arch/s390/include/asm/css_chars.h +++ b/arch/s390/include/asm/css_chars.h @@ -25,7 +25,7 @@ struct css_general_char { u64 : 2; u64 : 3; - u64 aif_osa : 1; /* bit 67 */ + u64 aif_qdio : 1;/* bit 67 */ u64 : 12; u64 eadm_rf : 1; /* bit 80 */ u64 : 1; diff --git a/arch/s390/include/asm/ctlreg.h b/arch/s390/include/asm/ctlreg.h index 72a9556d04f3..1765a0320933 100644 --- a/arch/s390/include/asm/ctlreg.h +++ b/arch/s390/include/asm/ctlreg.h @@ -80,7 +80,7 @@ #define CR14_EXTERNAL_DAMAGE_SUBMASK BIT(CR14_EXTERNAL_DAMAGE_SUBMASK_BIT) #define CR14_WARNING_SUBMASK BIT(CR14_WARNING_SUBMASK_BIT) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/bug.h> @@ -100,7 +100,7 @@ struct ctlreg { BUILD_BUG_ON(sizeof(struct addrtype) != _esize); \ typecheck(struct ctlreg, array[0]); \ asm volatile( \ - " lctlg %[_low],%[_high],%[_arr]\n" \ + " lctlg %[_low],%[_high],%[_arr]" \ : \ : [_arr] "Q" (*(struct addrtype *)(&array)), \ [_low] "i" (low), [_high] "i" (high) \ @@ -119,7 +119,7 @@ struct ctlreg { BUILD_BUG_ON(sizeof(struct addrtype) != _esize); \ typecheck(struct ctlreg, array[0]); \ asm volatile( \ - " stctg %[_low],%[_high],%[_arr]\n" \ + " stctg %[_low],%[_high],%[_arr]" \ : [_arr] "=Q" (*(struct addrtype *)(&array)) \ : [_low] "i" (low), [_high] "i" (high)); \ } while (0) @@ -127,7 +127,7 @@ struct ctlreg { static __always_inline void local_ctl_load(unsigned int cr, struct ctlreg *reg) { asm volatile( - " lctlg %[cr],%[cr],%[reg]\n" + " lctlg %[cr],%[cr],%[reg]" : : [reg] "Q" (*reg), [cr] "i" (cr) : "memory"); @@ -136,7 +136,7 @@ static __always_inline void local_ctl_load(unsigned int cr, struct ctlreg *reg) static __always_inline void local_ctl_store(unsigned int cr, struct ctlreg *reg) { asm volatile( - " stctg %[cr],%[cr],%[reg]\n" + " stctg %[cr],%[cr],%[reg]" : [reg] "=Q" (*reg) : [cr] "i" (cr)); } @@ -202,8 +202,9 @@ union ctlreg0 { unsigned long : 3; unsigned long ccc : 1; /* Cryptography counter control */ unsigned long pec : 1; /* PAI extension control */ - unsigned long : 17; - unsigned long : 3; + unsigned long : 15; + unsigned long wti : 1; /* Warning-track */ + unsigned long : 4; unsigned long lap : 1; /* Low-address-protection control */ unsigned long : 4; unsigned long edat : 1; /* Enhanced-DAT-enablement 
control */ @@ -251,5 +252,5 @@ union ctlreg15 { }; }; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_S390_CTLREG_H */ diff --git a/arch/s390/include/asm/current.h b/arch/s390/include/asm/current.h index 68f84315277c..f9529f7cf62c 100644 --- a/arch/s390/include/asm/current.h +++ b/arch/s390/include/asm/current.h @@ -11,9 +11,25 @@ #define _S390_CURRENT_H #include <asm/lowcore.h> +#include <asm/machine.h> struct task_struct; -#define current ((struct task_struct *const)S390_lowcore.current_task) +static __always_inline struct task_struct *get_current(void) +{ + unsigned long ptr, lc_current; + + lc_current = offsetof(struct lowcore, current_task); + asm_inline( + ALTERNATIVE(" lg %[ptr],%[offzero](%%r0)\n", + " lg %[ptr],%[offalt](%%r0)\n", + ALT_FEATURE(MFEATURE_LOWCORE)) + : [ptr] "=d" (ptr) + : [offzero] "i" (lc_current), + [offalt] "i" (lc_current + LOWCORE_ALT_ADDRESS)); + return (struct task_struct *)ptr; +} + +#define current get_current() #endif /* !(_S390_CURRENT_H) */ diff --git a/arch/s390/include/asm/dat-bits.h b/arch/s390/include/asm/dat-bits.h new file mode 100644 index 000000000000..8d65eec2f124 --- /dev/null +++ b/arch/s390/include/asm/dat-bits.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * DAT table and related structures + * + * Copyright IBM Corp. 2024 + * + */ + +#ifndef _S390_DAT_BITS_H +#define _S390_DAT_BITS_H + +union asce { + unsigned long val; + struct { + unsigned long rsto: 52;/* Region- or Segment-Table Origin */ + unsigned long : 2; + unsigned long g : 1; /* Subspace Group control */ + unsigned long p : 1; /* Private Space control */ + unsigned long s : 1; /* Storage-Alteration-Event control */ + unsigned long x : 1; /* Space-Switch-Event control */ + unsigned long r : 1; /* Real-Space control */ + unsigned long : 1; + unsigned long dt : 2; /* Designation-Type control */ + unsigned long tl : 2; /* Region- or Segment-Table Length */ + }; +}; + +enum { + ASCE_TYPE_SEGMENT = 0, + ASCE_TYPE_REGION3 = 1, + ASCE_TYPE_REGION2 = 2, + ASCE_TYPE_REGION1 = 3 +}; + +union region1_table_entry { + unsigned long val; + struct { + unsigned long rto: 52;/* Region-Table Origin */ + unsigned long : 2; + unsigned long p : 1; /* DAT-Protection Bit */ + unsigned long : 1; + unsigned long tf : 2; /* Region-Second-Table Offset */ + unsigned long i : 1; /* Region-Invalid Bit */ + unsigned long : 1; + unsigned long tt : 2; /* Table-Type Bits */ + unsigned long tl : 2; /* Region-Second-Table Length */ + }; +}; + +union region2_table_entry { + unsigned long val; + struct { + unsigned long rto: 52;/* Region-Table Origin */ + unsigned long : 2; + unsigned long p : 1; /* DAT-Protection Bit */ + unsigned long : 1; + unsigned long tf : 2; /* Region-Third-Table Offset */ + unsigned long i : 1; /* Region-Invalid Bit */ + unsigned long : 1; + unsigned long tt : 2; /* Table-Type Bits */ + unsigned long tl : 2; /* Region-Third-Table Length */ + }; +}; + +struct region3_table_entry_fc0 { + unsigned long sto: 52;/* Segment-Table Origin */ + unsigned long : 1; + unsigned long fc : 1; /* Format-Control */ + unsigned long p : 1; /* DAT-Protection Bit */ + unsigned long : 1; + unsigned long tf : 2; /* Segment-Table Offset */ + unsigned long i : 1; /* Region-Invalid Bit */ + unsigned long cr : 1; /* Common-Region Bit */ + unsigned long tt : 2; /* Table-Type Bits */ + unsigned long tl : 2; /* Segment-Table Length */ +}; + +struct region3_table_entry_fc1 { + unsigned long rfaa: 33;/* Region-Frame Absolute Address */ + unsigned long : 14; + unsigned long av : 1; /* 
ACCF-Validity Control */ + unsigned long acc : 4; /* Access-Control Bits */ + unsigned long f : 1; /* Fetch-Protection Bit */ + unsigned long fc : 1; /* Format-Control */ + unsigned long p : 1; /* DAT-Protection Bit */ + unsigned long iep : 1; /* Instruction-Execution-Protection */ + unsigned long : 2; + unsigned long i : 1; /* Region-Invalid Bit */ + unsigned long cr : 1; /* Common-Region Bit */ + unsigned long tt : 2; /* Table-Type Bits */ + unsigned long : 2; +}; + +union region3_table_entry { + unsigned long val; + struct region3_table_entry_fc0 fc0; + struct region3_table_entry_fc1 fc1; + struct { + unsigned long : 53; + unsigned long fc: 1; /* Format-Control */ + unsigned long : 4; + unsigned long i : 1; /* Region-Invalid Bit */ + unsigned long cr: 1; /* Common-Region Bit */ + unsigned long tt: 2; /* Table-Type Bits */ + unsigned long : 2; + }; +}; + +struct segment_table_entry_fc0 { + unsigned long pto: 53;/* Page-Table Origin */ + unsigned long fc : 1; /* Format-Control */ + unsigned long p : 1; /* DAT-Protection Bit */ + unsigned long : 3; + unsigned long i : 1; /* Segment-Invalid Bit */ + unsigned long cs : 1; /* Common-Segment Bit */ + unsigned long tt : 2; /* Table-Type Bits */ + unsigned long : 2; +}; + +struct segment_table_entry_fc1 { + unsigned long sfaa: 44;/* Segment-Frame Absolute Address */ + unsigned long : 3; + unsigned long av : 1; /* ACCF-Validity Control */ + unsigned long acc : 4; /* Access-Control Bits */ + unsigned long f : 1; /* Fetch-Protection Bit */ + unsigned long fc : 1; /* Format-Control */ + unsigned long p : 1; /* DAT-Protection Bit */ + unsigned long iep : 1; /* Instruction-Execution-Protection */ + unsigned long : 2; + unsigned long i : 1; /* Segment-Invalid Bit */ + unsigned long cs : 1; /* Common-Segment Bit */ + unsigned long tt : 2; /* Table-Type Bits */ + unsigned long : 2; +}; + +union segment_table_entry { + unsigned long val; + struct segment_table_entry_fc0 fc0; + struct segment_table_entry_fc1 fc1; + struct { + unsigned long : 53; + unsigned long fc: 1; /* Format-Control */ + unsigned long : 4; + unsigned long i : 1; /* Segment-Invalid Bit */ + unsigned long cs: 1; /* Common-Segment Bit */ + unsigned long tt: 2; /* Table-Type Bits */ + unsigned long : 2; + }; +}; + +union page_table_entry { + unsigned long val; + struct { + unsigned long pfra: 52;/* Page-Frame Real Address */ + unsigned long z : 1; /* Zero Bit */ + unsigned long i : 1; /* Page-Invalid Bit */ + unsigned long p : 1; /* DAT-Protection Bit */ + unsigned long iep : 1; /* Instruction-Execution-Protection */ + unsigned long : 8; + }; +}; + +enum { + TABLE_TYPE_SEGMENT = 0, + TABLE_TYPE_REGION3 = 1, + TABLE_TYPE_REGION2 = 2, + TABLE_TYPE_REGION1 = 3 +}; + +#endif /* _S390_DAT_BITS_H */ diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h index ccd4e148b5ed..6375276d94ea 100644 --- a/arch/s390/include/asm/debug.h +++ b/arch/s390/include/asm/debug.h @@ -66,14 +66,15 @@ typedef int (debug_header_proc_t) (debug_info_t *id, struct debug_view *view, int area, debug_entry_t *entry, - char *out_buf); + char *out_buf, size_t out_buf_size); typedef int (debug_format_proc_t) (debug_info_t *id, struct debug_view *view, char *out_buf, + size_t out_buf_size, const char *in_buf); typedef int (debug_prolog_proc_t) (debug_info_t *id, struct debug_view *view, - char *out_buf); + char *out_buf, size_t out_buf_size); typedef int (debug_input_proc_t) (debug_info_t *id, struct debug_view *view, struct file *file, @@ -81,8 +82,13 @@ typedef int (debug_input_proc_t) (debug_info_t *id, 
size_t in_buf_size, loff_t *offset); int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view, - int area, debug_entry_t *entry, char *out_buf); + int area, debug_entry_t *entry, + char *out_buf, size_t out_buf_size); +#define DEBUG_SPRINTF_MAX_ARGS 10 +int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view, + char *out_buf, size_t out_buf_size, + const char *inbuf); struct debug_view { char name[DEBUG_MAX_NAME_LEN]; debug_prolog_proc_t *prolog_proc; @@ -112,6 +118,9 @@ debug_info_t *debug_register_mode(const char *name, int pages, int nr_areas, int buf_size, umode_t mode, uid_t uid, gid_t gid); +ssize_t debug_dump(debug_info_t *id, struct debug_view *view, + char *buf, size_t buf_size, bool reverse); + void debug_unregister(debug_info_t *id); void debug_set_level(debug_info_t *id, int new_level); diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h index 20b94220113b..8db8db3b1018 100644 --- a/arch/s390/include/asm/diag.h +++ b/arch/s390/include/asm/diag.h @@ -12,6 +12,7 @@ #include <linux/if_ether.h> #include <linux/percpu.h> #include <asm/asm-extable.h> +#include <asm/sclp.h> #include <asm/cio.h> enum diag_stat_enum { @@ -35,8 +36,11 @@ enum diag_stat_enum { DIAG_STAT_X2FC, DIAG_STAT_X304, DIAG_STAT_X308, + DIAG_STAT_X310, DIAG_STAT_X318, DIAG_STAT_X320, + DIAG_STAT_X324, + DIAG_STAT_X49C, DIAG_STAT_X500, NR_DIAG_STAT }; @@ -62,7 +66,7 @@ static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn) end_addr = pfn_to_phys(start_pfn + num_pfn - 1); diag_stat_inc(DIAG_STAT_X010); - asm volatile( + asm_inline volatile( "0: diag %0,%1,0x10\n" "1: nopr %%r7\n" EX_TABLE(0b, 1b) @@ -117,6 +121,8 @@ enum diag204_sc { }; #define DIAG204_SUBCODE_MASK 0xffff +#define DIAG204_BIF_BIT 0x80000000 +#define DIAG204_BUSY_WAIT (HZ / 10) /* The two available diag 204 data formats */ enum diag204_format { @@ -326,6 +332,11 @@ union diag318_info { }; }; +static inline bool diag204_has_bif(void) +{ + return sclp.has_diag204_bif; +} + int diag204(unsigned long subcode, unsigned long size, void *addr); int diag224(void *ptr); int diag26c(void *req, void *resp, enum diag26c_sc subcode); @@ -355,4 +366,12 @@ void _diag0c_amode31(unsigned long rx); void _diag308_reset_amode31(void); int _diag8c_amode31(struct diag8c *addr, struct ccw_dev_id *devno, size_t len); +/* diag 49c subcodes */ +enum diag49c_sc { + DIAG49C_SUBC_ACK = 0, + DIAG49C_SUBC_REG = 1 +}; + +int diag49c(unsigned long subcode); + #endif /* _ASM_S390_DIAG_H */ diff --git a/arch/s390/include/asm/diag288.h b/arch/s390/include/asm/diag288.h new file mode 100644 index 000000000000..5e1b43cea9d6 --- /dev/null +++ b/arch/s390/include/asm/diag288.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_S390_DIAG288_H +#define _ASM_S390_DIAG288_H + +#include <asm/asm-extable.h> +#include <asm/types.h> + +#define MIN_INTERVAL 15 /* Minimal time supported by diag288 */ +#define MAX_INTERVAL 3600 /* One hour should be enough - pure estimation */ + +#define WDT_DEFAULT_TIMEOUT 30 + +/* Function codes - init, change, cancel */ +#define WDT_FUNC_INIT 0 +#define WDT_FUNC_CHANGE 1 +#define WDT_FUNC_CANCEL 2 +#define WDT_FUNC_CONCEAL 0x80000000 + +/* Action codes for LPAR watchdog */ +#define LPARWDT_RESTART 0 + +static inline int __diag288(unsigned int func, unsigned int timeout, + unsigned long action, unsigned int len) +{ + union register_pair r1 = { .even = func, .odd = timeout, }; + union register_pair r3 = { .even = action, .odd = len, }; + int rc = -EINVAL; + + asm volatile( + " 
diag %[r1],%[r3],0x288\n" + "0: lhi %[rc],0\n" + "1:" + EX_TABLE(0b, 1b) + : [rc] "+d" (rc) + : [r1] "d" (r1.pair), [r3] "d" (r3.pair) + : "cc", "memory"); + return rc; +} + +#endif /* _ASM_S390_DIAG288_H */ diff --git a/arch/s390/include/asm/dwarf.h b/arch/s390/include/asm/dwarf.h index 390906b8e386..e3ad6798d0cd 100644 --- a/arch/s390/include/asm/dwarf.h +++ b/arch/s390/include/asm/dwarf.h @@ -2,7 +2,7 @@ #ifndef _ASM_S390_DWARF_H #define _ASM_S390_DWARF_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define CFI_STARTPROC .cfi_startproc #define CFI_ENDPROC .cfi_endproc @@ -33,6 +33,6 @@ .cfi_sections .eh_frame, .debug_frame #endif -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_S390_DWARF_H */ diff --git a/arch/s390/include/asm/ebcdic.h b/arch/s390/include/asm/ebcdic.h index efb50fc6866c..7164cb658435 100644 --- a/arch/s390/include/asm/ebcdic.h +++ b/arch/s390/include/asm/ebcdic.h @@ -22,18 +22,18 @@ extern __u8 _ebc_toupper[256]; /* EBCDIC -> uppercase */ static inline void codepage_convert(const __u8 *codepage, volatile char *addr, unsigned long nr) { - if (nr-- <= 0) + if (!nr--) return; asm volatile( - " bras 1,1f\n" - " tr 0(1,%0),0(%2)\n" - "0: tr 0(256,%0),0(%2)\n" + " j 2f\n" + "0: tr 0(1,%0),0(%2)\n" + "1: tr 0(256,%0),0(%2)\n" " la %0,256(%0)\n" - "1: ahi %1,-256\n" - " jnm 0b\n" - " ex %1,0(1)" + "2: aghi %1,-256\n" + " jnm 1b\n" + " exrl %1,0b" : "+&a" (addr), "+&a" (nr) - : "a" (codepage) : "cc", "memory", "1"); + : "a" (codepage) : "cc", "memory"); } #define ASCEBC(addr,nr) codepage_convert(_ascebc, addr, nr) diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 70a30ae258b7..bb63fa4d20bb 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -91,6 +91,14 @@ /* Keep this the last entry. */ #define R_390_NUM 61 +/* + * HWCAP flags - for AT_HWCAP + * + * Bits 32-63 are reserved for use by libc. + * Bit 31 is reserved and will be used by libc to determine if a second + * argument is passed to IFUNC resolvers. This will be implemented when + * there is a need for AT_HWCAP2. + */ enum { HWCAP_NR_ESAN3 = 0, HWCAP_NR_ZARCH = 1, @@ -150,15 +158,10 @@ enum { #define ELF_DATA ELFDATA2MSB #define ELF_ARCH EM_S390 -/* s390 specific phdr types */ -#define PT_S390_PGSTE 0x70000000 - /* * ELF register definitions.. 
*/ -#include <linux/compat.h> - #include <asm/ptrace.h> #include <asm/syscall.h> #include <asm/user.h> @@ -166,9 +169,6 @@ enum { typedef s390_fp_regs elf_fpregset_t; typedef s390_regs elf_gregset_t; -typedef s390_fp_regs compat_elf_fpregset_t; -typedef s390_compat_regs compat_elf_gregset_t; - #include <linux/sched/mm.h> /* for task_struct */ #include <asm/mmu_context.h> @@ -178,39 +178,6 @@ typedef s390_compat_regs compat_elf_gregset_t; #define elf_check_arch(x) \ (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \ && (x)->e_ident[EI_CLASS] == ELF_CLASS) -#define compat_elf_check_arch(x) \ - (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \ - && (x)->e_ident[EI_CLASS] == ELF_CLASS) -#define compat_start_thread start_thread31 - -struct arch_elf_state { - int rc; -}; - -#define INIT_ARCH_ELF_STATE { .rc = 0 } - -#define arch_check_elf(ehdr, interp, interp_ehdr, state) (0) -#ifdef CONFIG_PGSTE -#define arch_elf_pt_proc(ehdr, phdr, elf, interp, state) \ -({ \ - struct arch_elf_state *_state = state; \ - if ((phdr)->p_type == PT_S390_PGSTE && \ - !page_table_allocate_pgste && \ - !test_thread_flag(TIF_PGSTE) && \ - !current->mm->context.alloc_pgste) { \ - set_thread_flag(TIF_PGSTE); \ - set_pt_regs_flag(task_pt_regs(current), \ - PIF_EXECVE_PGSTE_RESTART); \ - _state->rc = -EAGAIN; \ - } \ - _state->rc; \ -}) -#else -#define arch_elf_pt_proc(ehdr, phdr, elf, interp, state) \ -({ \ - (state)->rc; \ -}) -#endif /* For SVR4/S390 the function pointer to be registered with `atexit` is passed in R14. */ @@ -227,9 +194,7 @@ struct arch_elf_state { the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. 64-bit tasks are aligned to 4GB. */ -#define ELF_ET_DYN_BASE (is_compat_task() ? \ - (STACK_TOP / 3 * 2) : \ - (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1)) +#define ELF_ET_DYN_BASE ((STACK_TOP / 3 * 2) & ~((1UL << 32) - 1)) /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. */ @@ -248,43 +213,21 @@ extern unsigned long elf_hwcap; extern char elf_platform[]; #define ELF_PLATFORM (elf_platform) -#ifndef CONFIG_COMPAT #define SET_PERSONALITY(ex) \ do { \ set_personality(PER_LINUX | \ (current->personality & (~PER_MASK))); \ - current->thread.sys_call_table = sys_call_table; \ -} while (0) -#else /* CONFIG_COMPAT */ -#define SET_PERSONALITY(ex) \ -do { \ - if (personality(current->personality) != PER_LINUX32) \ - set_personality(PER_LINUX | \ - (current->personality & ~PER_MASK)); \ - if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \ - set_thread_flag(TIF_31BIT); \ - current->thread.sys_call_table = \ - sys_call_table_emu; \ - } else { \ - clear_thread_flag(TIF_31BIT); \ - current->thread.sys_call_table = \ - sys_call_table; \ - } \ } while (0) -#endif /* CONFIG_COMPAT */ /* * Cache aliasing on the latest machines calls for a mapping granularity - * of 512KB for the anonymous mapping base. For 64-bit processes use a - * 512KB alignment and a randomization of up to 1GB. For 31-bit processes - * the virtual address space is limited, use no alignment and limit the - * randomization to 8MB. - * For the additional randomization of the program break use 32MB for - * 64-bit and 8MB for 31-bit. + * of 512KB for the anonymous mapping base. Use a 512KB alignment and a + * randomization of up to 1GB. + * For the additional randomization of the program break use 32MB. */ -#define BRK_RND_MASK (is_compat_task() ? 
0x7ffUL : 0x1fffUL) -#define MMAP_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ff80UL) -#define MMAP_ALIGN_MASK (is_compat_task() ? 0 : 0x7fUL) +#define BRK_RND_MASK (0x1fffUL) +#define MMAP_RND_MASK (0x3ff80UL) +#define MMAP_ALIGN_MASK (0x7fUL) #define STACK_RND_MASK MMAP_RND_MASK /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h index 35555c944630..979af986a8fe 100644 --- a/arch/s390/include/asm/entry-common.h +++ b/arch/s390/include/asm/entry-common.h @@ -59,4 +59,14 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare +static __always_inline bool arch_in_rcu_eqs(void) +{ + if (IS_ENABLED(CONFIG_KVM)) + return current->flags & PF_VCPU; + + return false; +} + +#define arch_in_rcu_eqs arch_in_rcu_eqs + #endif diff --git a/arch/s390/include/asm/extmem.h b/arch/s390/include/asm/extmem.h index e0a06060afdd..225ee89c3f5e 100644 --- a/arch/s390/include/asm/extmem.h +++ b/arch/s390/include/asm/extmem.h @@ -6,7 +6,7 @@ #ifndef _ASM_S390X_DCSS_H #define _ASM_S390X_DCSS_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* * DCSS segment is defined as a contiguous range of pages using DEFSEG command. diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h index 796007125dff..5f5b1aa6c233 100644 --- a/arch/s390/include/asm/facility.h +++ b/arch/s390/include/asm/facility.h @@ -14,13 +14,12 @@ #include <linux/string.h> #include <linux/types.h> #include <linux/preempt.h> - +#include <asm/alternative.h> #include <asm/lowcore.h> #define MAX_FACILITY_BIT (sizeof(stfle_fac_list) * 8) extern u64 stfle_fac_list[16]; -extern u64 alt_stfle_fac_list[16]; static inline void __set_facility(unsigned long nr, void *facilities) { @@ -40,33 +39,56 @@ static inline void __clear_facility(unsigned long nr, void *facilities) ptr[nr >> 3] &= ~(0x80 >> (nr & 7)); } -static inline int __test_facility(unsigned long nr, void *facilities) +static __always_inline bool __test_facility(unsigned long nr, void *facilities) { unsigned char *ptr; if (nr >= MAX_FACILITY_BIT) - return 0; + return false; ptr = (unsigned char *) facilities + (nr >> 3); return (*ptr & (0x80 >> (nr & 7))) != 0; } /* + * __test_facility_constant() generates a single instruction branch. If the + * tested facility is available (likely) the branch is patched into a nop. + * + * Do not use this function unless you know what you are doing. All users are + * supposed to use test_facility() which will do the right thing. + */ +static __always_inline bool __test_facility_constant(unsigned long nr) +{ + asm goto( + ALTERNATIVE("brcl 15,%l[l_no]", "brcl 0,0", ALT_FACILITY(%[nr])) + : + : [nr] "i" (nr) + : + : l_no); + return true; +l_no: + return false; +} + +/* * The test_facility function uses the bit ordering where the MSB is bit 0. * That makes it easier to query facility bits with the bit number as * documented in the Principles of Operation. 
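 *
 * A short usage sketch (facility 129 is the vector facility, matching
 * cpu_has_vx() in cpufeature.h above; enable_vx_paths() is a hypothetical
 * caller):
 *
 *	if (test_facility(129))
 *		enable_vx_paths();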
*/ -static inline int test_facility(unsigned long nr) +static __always_inline bool test_facility(unsigned long nr) { unsigned long facilities_als[] = { FACILITIES_ALS }; - if (__builtin_constant_p(nr) && nr < sizeof(facilities_als) * 8) { - if (__test_facility(nr, &facilities_als)) - return 1; + if (!__is_defined(__DECOMPRESSOR) && __builtin_constant_p(nr)) { + if (nr < sizeof(facilities_als) * 8) { + if (__test_facility(nr, &facilities_als)) + return true; + } + return __test_facility_constant(nr); } return __test_facility(nr, &stfle_fac_list); } -static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size) +static inline unsigned long __stfle_asm(u64 *fac_list, int size) { unsigned long reg0 = size - 1; @@ -74,7 +96,7 @@ static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size) " lgr 0,%[reg0]\n" " .insn s,0xb2b00000,%[list]\n" /* stfle */ " lgr %[reg0],0\n" - : [reg0] "+&d" (reg0), [list] "+Q" (*stfle_fac_list) + : [reg0] "+&d" (reg0), [list] "+Q" (*fac_list) : : "memory", "cc", "0"); return reg0; @@ -82,32 +104,32 @@ static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size) /** * stfle - Store facility list extended - * @stfle_fac_list: array where facility list can be stored + * @fac_list: array where facility list can be stored * @size: size of passed in array in double words */ -static inline void __stfle(u64 *stfle_fac_list, int size) +static inline void __stfle(u64 *fac_list, int size) { unsigned long nr; u32 stfl_fac_list; asm volatile( " stfl 0(0)\n" - : "=m" (S390_lowcore.stfl_fac_list)); - stfl_fac_list = S390_lowcore.stfl_fac_list; - memcpy(stfle_fac_list, &stfl_fac_list, 4); + : "=m" (get_lowcore()->stfl_fac_list)); + stfl_fac_list = get_lowcore()->stfl_fac_list; + memcpy(fac_list, &stfl_fac_list, 4); nr = 4; /* bytes stored by stfl */ if (stfl_fac_list & 0x01000000) { /* More facility bits available with stfle */ - nr = __stfle_asm(stfle_fac_list, size); + nr = __stfle_asm(fac_list, size); nr = min_t(unsigned long, (nr + 1) * 8, size * 8); } - memset((char *) stfle_fac_list + nr, 0, size * 8 - nr); + memset((char *)fac_list + nr, 0, size * 8 - nr); } -static inline void stfle(u64 *stfle_fac_list, int size) +static inline void stfle(u64 *fac_list, int size) { preempt_disable(); - __stfle(stfle_fac_list, size); + __stfle(fac_list, size); preempt_enable(); } diff --git a/arch/s390/include/asm/fprobe.h b/arch/s390/include/asm/fprobe.h new file mode 100644 index 000000000000..5ef600b372f4 --- /dev/null +++ b/arch/s390/include/asm/fprobe.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_FPROBE_H +#define _ASM_S390_FPROBE_H + +#include <asm-generic/fprobe.h> + +#undef FPROBE_HEADER_MSB_PATTERN +#define FPROBE_HEADER_MSB_PATTERN 0 + +#endif /* _ASM_S390_FPROBE_H */ diff --git a/arch/s390/include/asm/fpu-insn-asm.h b/arch/s390/include/asm/fpu-insn-asm.h index 02ccfe46050a..cc0468fdf2d0 100644 --- a/arch/s390/include/asm/fpu-insn-asm.h +++ b/arch/s390/include/asm/fpu-insn-asm.h @@ -16,7 +16,7 @@ #error only <asm/fpu-insn.h> can be included directly #endif -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ /* Macros to generate vector instruction byte code */ @@ -407,6 +407,28 @@ MRXBOPC 0, 0x0E, v1 .endm +/* VECTOR STORE BYTE REVERSED ELEMENTS */ + .macro VSTBR vr1, disp, index="%r0", base, m + VX_NUM v1, \vr1 + GR_NUM x2, \index + GR_NUM b2, \base + .word 0xE600 | ((v1&15) << 4) | (x2&15) + .word (b2 << 12) | (\disp) + MRXBOPC \m, 0x0E, v1 +.endm +.macro VSTBRH vr1, disp, index="%r0", base + VSTBR \vr1, \disp, \index, \base, 1 
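+	/* m=1: 2-byte (halfword) elements; VSTBRF/VSTBRG/VSTBRQ below use m=2/3/4 */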
+.endm +.macro VSTBRF vr1, disp, index="%r0", base + VSTBR \vr1, \disp, \index, \base, 2 +.endm +.macro VSTBRG vr1, disp, index="%r0", base + VSTBR \vr1, \disp, \index, \base, 3 +.endm +.macro VSTBRQ vr1, disp, index="%r0", base + VSTBR \vr1, \disp, \index, \base, 4 +.endm + /* VECTOR STORE MULTIPLE */ .macro VSTM vfrom, vto, disp, base, hint=3 VX_NUM v1, \vfrom @@ -728,5 +750,5 @@ MRXBOPC 0, 0x77, v1, v2, v3 .endm -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_S390_FPU_INSN_ASM_H */ diff --git a/arch/s390/include/asm/fpu-insn.h b/arch/s390/include/asm/fpu-insn.h index c1e2e521d9af..96727f3bd0dc 100644 --- a/arch/s390/include/asm/fpu-insn.h +++ b/arch/s390/include/asm/fpu-insn.h @@ -9,9 +9,10 @@ #include <asm/fpu-insn-asm.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/instrumented.h> +#include <linux/kmsan.h> #include <asm/asm-extable.h> asm(".include \"asm/fpu-insn-asm.h\"\n"); @@ -38,7 +39,7 @@ asm(".include \"asm/fpu-insn-asm.h\"\n"); static __always_inline void fpu_cefbr(u8 f1, s32 val) { - asm volatile("cefbr %[f1],%[val]\n" + asm volatile("cefbr %[f1],%[val]" : : [f1] "I" (f1), [val] "d" (val) : "memory"); @@ -48,7 +49,7 @@ static __always_inline unsigned long fpu_cgebr(u8 f2, u8 mode) { unsigned long val; - asm volatile("cgebr %[val],%[mode],%[f2]\n" + asm volatile("cgebr %[val],%[mode],%[f2]" : [val] "=d" (val) : [f2] "I" (f2), [mode] "I" (mode) : "memory"); @@ -57,7 +58,7 @@ static __always_inline unsigned long fpu_cgebr(u8 f2, u8 mode) static __always_inline void fpu_debr(u8 f1, u8 f2) { - asm volatile("debr %[f1],%[f2]\n" + asm volatile("debr %[f1],%[f2]" : : [f1] "I" (f1), [f2] "I" (f2) : "memory"); @@ -66,7 +67,7 @@ static __always_inline void fpu_debr(u8 f1, u8 f2) static __always_inline void fpu_ld(unsigned short fpr, freg_t *reg) { instrument_read(reg, sizeof(*reg)); - asm volatile("ld %[fpr],%[reg]\n" + asm volatile("ld %[fpr],%[reg]" : : [fpr] "I" (fpr), [reg] "Q" (reg->ui) : "memory"); @@ -74,7 +75,7 @@ static __always_inline void fpu_ld(unsigned short fpr, freg_t *reg) static __always_inline void fpu_ldgr(u8 f1, u32 val) { - asm volatile("ldgr %[f1],%[val]\n" + asm volatile("ldgr %[f1],%[val]" : : [f1] "I" (f1), [val] "d" (val) : "memory"); @@ -100,19 +101,12 @@ static __always_inline void fpu_lfpc(unsigned int *fpc) */ static inline void fpu_lfpc_safe(unsigned int *fpc) { - u32 tmp; - instrument_read(fpc, sizeof(*fpc)); - asm volatile("\n" - "0: lfpc %[fpc]\n" - "1: nopr %%r7\n" - ".pushsection .fixup, \"ax\"\n" - "2: lghi %[tmp],0\n" - " sfpc %[tmp]\n" - " jg 1b\n" - ".popsection\n" - EX_TABLE(1b, 2b) - : [tmp] "=d" (tmp) + asm_inline volatile( + " lfpc %[fpc]\n" + "0: nopr %%r7\n" + EX_TABLE_FPC(0b, 0b) + : : [fpc] "Q" (*fpc) : "memory"); } @@ -120,7 +114,7 @@ static inline void fpu_lfpc_safe(unsigned int *fpc) static __always_inline void fpu_std(unsigned short fpr, freg_t *reg) { instrument_write(reg, sizeof(*reg)); - asm volatile("std %[fpr],%[reg]\n" + asm volatile("std %[fpr],%[reg]" : [reg] "=Q" (reg->ui) : [fpr] "I" (fpr) : "memory"); @@ -183,33 +177,33 @@ static __always_inline void fpu_vgfmg(u8 v1, u8 v2, u8 v3) : "memory"); } -#ifdef CONFIG_CC_IS_CLANG +#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS static __always_inline void fpu_vl(u8 v1, const void *vxr) { instrument_read(vxr, sizeof(__vector128)); - asm volatile("\n" - " la 1,%[vxr]\n" - " VL %[v1],0,,1\n" - : - : [vxr] "R" (*(__vector128 *)vxr), - [v1] "I" (v1) - : "memory", "1"); + asm volatile("VL %[v1],%O[vxr],,%R[vxr]" + : + : [vxr] "Q" (*(__vector128 *)vxr), + 
[v1] "I" (v1) + : "memory"); } -#else /* CONFIG_CC_IS_CLANG */ +#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ static __always_inline void fpu_vl(u8 v1, const void *vxr) { instrument_read(vxr, sizeof(__vector128)); - asm volatile("VL %[v1],%O[vxr],,%R[vxr]\n" - : - : [vxr] "Q" (*(__vector128 *)vxr), - [v1] "I" (v1) - : "memory"); + asm volatile( + " la 1,%[vxr]\n" + " VL %[v1],0,,1" + : + : [vxr] "R" (*(__vector128 *)vxr), + [v1] "I" (v1) + : "memory", "1"); } -#endif /* CONFIG_CC_IS_CLANG */ +#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ static __always_inline void fpu_vleib(u8 v, s16 val, u8 index) { @@ -238,7 +232,7 @@ static __always_inline u64 fpu_vlgvf(u8 v, u16 index) return val; } -#ifdef CONFIG_CC_IS_CLANG +#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr) { @@ -246,17 +240,15 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr) size = min(index + 1, sizeof(__vector128)); instrument_read(vxr, size); - asm volatile("\n" - " la 1,%[vxr]\n" - " VLL %[v1],%[index],0,1\n" - : - : [vxr] "R" (*(u8 *)vxr), - [index] "d" (index), - [v1] "I" (v1) - : "memory", "1"); + asm volatile("VLL %[v1],%[index],%O[vxr],%R[vxr]" + : + : [vxr] "Q" (*(u8 *)vxr), + [index] "d" (index), + [v1] "I" (v1) + : "memory"); } -#else /* CONFIG_CC_IS_CLANG */ +#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr) { @@ -264,17 +256,19 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr) size = min(index + 1, sizeof(__vector128)); instrument_read(vxr, size); - asm volatile("VLL %[v1],%[index],%O[vxr],%R[vxr]\n" - : - : [vxr] "Q" (*(u8 *)vxr), - [index] "d" (index), - [v1] "I" (v1) - : "memory"); + asm volatile( + " la 1,%[vxr]\n" + " VLL %[v1],%[index],0,1" + : + : [vxr] "R" (*(u8 *)vxr), + [index] "d" (index), + [v1] "I" (v1) + : "memory", "1"); } -#endif /* CONFIG_CC_IS_CLANG */ +#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ -#ifdef CONFIG_CC_IS_CLANG +#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS #define fpu_vlm(_v1, _v3, _vxrs) \ ({ \ @@ -284,17 +278,15 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr) } *_v = (void *)(_vxrs); \ \ instrument_read(_v, size); \ - asm volatile("\n" \ - " la 1,%[vxrs]\n" \ - " VLM %[v1],%[v3],0,1\n" \ - : \ - : [vxrs] "R" (*_v), \ - [v1] "I" (_v1), [v3] "I" (_v3) \ - : "memory", "1"); \ + asm volatile("VLM %[v1],%[v3],%O[vxrs],%R[vxrs]" \ + : \ + : [vxrs] "Q" (*_v), \ + [v1] "I" (_v1), [v3] "I" (_v3) \ + : "memory"); \ (_v3) - (_v1) + 1; \ }) -#else /* CONFIG_CC_IS_CLANG */ +#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ #define fpu_vlm(_v1, _v3, _vxrs) \ ({ \ @@ -304,15 +296,17 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr) } *_v = (void *)(_vxrs); \ \ instrument_read(_v, size); \ - asm volatile("VLM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \ - : \ - : [vxrs] "Q" (*_v), \ - [v1] "I" (_v1), [v3] "I" (_v3) \ - : "memory"); \ + asm volatile( \ + " la 1,%[vxrs]\n" \ + " VLM %[v1],%[v3],0,1" \ + : \ + : [vxrs] "R" (*_v), \ + [v1] "I" (_v1), [v3] "I" (_v3) \ + : "memory", "1"); \ (_v3) - (_v1) + 1; \ }) -#endif /* CONFIG_CC_IS_CLANG */ +#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ static __always_inline void fpu_vlr(u8 v1, u8 v2) { @@ -362,33 +356,33 @@ static __always_inline void fpu_vsrlb(u8 v1, u8 v2, u8 v3) : "memory"); } -#ifdef CONFIG_CC_IS_CLANG +#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS static __always_inline void fpu_vst(u8 v1, const void *vxr) { instrument_write(vxr, 
sizeof(__vector128)); - asm volatile("\n" - " la 1,%[vxr]\n" - " VST %[v1],0,,1\n" - : [vxr] "=R" (*(__vector128 *)vxr) - : [v1] "I" (v1) - : "memory", "1"); + asm volatile("VST %[v1],%O[vxr],,%R[vxr]" + : [vxr] "=Q" (*(__vector128 *)vxr) + : [v1] "I" (v1) + : "memory"); } -#else /* CONFIG_CC_IS_CLANG */ +#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ static __always_inline void fpu_vst(u8 v1, const void *vxr) { instrument_write(vxr, sizeof(__vector128)); - asm volatile("VST %[v1],%O[vxr],,%R[vxr]\n" - : [vxr] "=Q" (*(__vector128 *)vxr) - : [v1] "I" (v1) - : "memory"); + asm volatile( + " la 1,%[vxr]\n" + " VST %[v1],0,,1" + : [vxr] "=R" (*(__vector128 *)vxr) + : [v1] "I" (v1) + : "memory", "1"); } -#endif /* CONFIG_CC_IS_CLANG */ +#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ -#ifdef CONFIG_CC_IS_CLANG +#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr) { @@ -396,15 +390,14 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr) size = min(index + 1, sizeof(__vector128)); instrument_write(vxr, size); - asm volatile("\n" - " la 1,%[vxr]\n" - " VSTL %[v1],%[index],0,1\n" - : [vxr] "=R" (*(u8 *)vxr) - : [index] "d" (index), [v1] "I" (v1) - : "memory", "1"); + asm volatile("VSTL %[v1],%[index],%O[vxr],%R[vxr]" + : [vxr] "=Q" (*(u8 *)vxr) + : [index] "d" (index), [v1] "I" (v1) + : "memory"); + kmsan_unpoison_memory(vxr, size); } -#else /* CONFIG_CC_IS_CLANG */ +#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr) { @@ -412,15 +405,18 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr) size = min(index + 1, sizeof(__vector128)); instrument_write(vxr, size); - asm volatile("VSTL %[v1],%[index],%O[vxr],%R[vxr]\n" - : [vxr] "=Q" (*(u8 *)vxr) - : [index] "d" (index), [v1] "I" (v1) - : "memory"); + asm volatile( + " la 1,%[vxr]\n" + " VSTL %[v1],%[index],0,1" + : [vxr] "=R" (*(u8 *)vxr) + : [index] "d" (index), [v1] "I" (v1) + : "memory", "1"); + kmsan_unpoison_memory(vxr, size); } -#endif /* CONFIG_CC_IS_CLANG */ +#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ -#ifdef CONFIG_CC_IS_CLANG +#ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS #define fpu_vstm(_v1, _v3, _vxrs) \ ({ \ @@ -430,16 +426,14 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr) } *_v = (void *)(_vxrs); \ \ instrument_write(_v, size); \ - asm volatile("\n" \ - " la 1,%[vxrs]\n" \ - " VSTM %[v1],%[v3],0,1\n" \ - : [vxrs] "=R" (*_v) \ - : [v1] "I" (_v1), [v3] "I" (_v3) \ - : "memory", "1"); \ + asm volatile("VSTM %[v1],%[v3],%O[vxrs],%R[vxrs]" \ + : [vxrs] "=Q" (*_v) \ + : [v1] "I" (_v1), [v3] "I" (_v3) \ + : "memory"); \ (_v3) - (_v1) + 1; \ }) -#else /* CONFIG_CC_IS_CLANG */ +#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ #define fpu_vstm(_v1, _v3, _vxrs) \ ({ \ @@ -449,14 +443,16 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr) } *_v = (void *)(_vxrs); \ \ instrument_write(_v, size); \ - asm volatile("VSTM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \ - : [vxrs] "=Q" (*_v) \ - : [v1] "I" (_v1), [v3] "I" (_v3) \ - : "memory"); \ + asm volatile( \ + " la 1,%[vxrs]\n" \ + " VSTM %[v1],%[v3],0,1" \ + : [vxrs] "=R" (*_v) \ + : [v1] "I" (_v1), [v3] "I" (_v3) \ + : "memory", "1"); \ (_v3) - (_v1) + 1; \ }) -#endif /* CONFIG_CC_IS_CLANG */ +#endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ static __always_inline void fpu_vupllf(u8 v1, u8 v2) { @@ -482,5 +478,5 @@ static __always_inline void fpu_vzero(u8 v) : "memory"); } -#endif /* 
__ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_S390_FPU_INSN_H */ diff --git a/arch/s390/include/asm/fpu.h b/arch/s390/include/asm/fpu.h index c84cb33913e2..960c6c67ad6c 100644 --- a/arch/s390/include/asm/fpu.h +++ b/arch/s390/include/asm/fpu.h @@ -44,6 +44,7 @@ #ifndef _ASM_S390_FPU_H #define _ASM_S390_FPU_H +#include <linux/cpufeature.h> #include <linux/processor.h> #include <linux/preempt.h> #include <linux/string.h> @@ -51,12 +52,6 @@ #include <asm/sigcontext.h> #include <asm/fpu-types.h> #include <asm/fpu-insn.h> -#include <asm/facility.h> - -static inline bool cpu_has_vx(void) -{ - return likely(test_facility(129)); -} enum { KERNEL_FPC_BIT = 0, diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index 77e479d44f1e..692c484ec163 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -2,13 +2,27 @@ #ifndef _ASM_S390_FTRACE_H #define _ASM_S390_FTRACE_H -#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR #define ARCH_SUPPORTS_FTRACE_OPS 1 #define MCOUNT_INSN_SIZE 6 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ +#include <asm/stacktrace.h> -unsigned long return_address(unsigned int n); +static __always_inline unsigned long return_address(unsigned int n) +{ + struct stack_frame *sf; + + if (!n) + return (unsigned long)__builtin_return_address(0); + + sf = (struct stack_frame *)current_frame_address(); + do { + sf = (struct stack_frame *)sf->back_chain; + if (!sf) + return 0; + } while (--n); + return sf->gprs[8]; +} #define ftrace_return_address(n) return_address(n) void ftrace_caller(void); @@ -25,6 +39,7 @@ struct dyn_arch_ftrace { }; struct module; struct dyn_ftrace; +struct ftrace_ops; bool ftrace_need_init_nop(void); #define ftrace_need_init_nop ftrace_need_init_nop @@ -36,62 +51,44 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; } +#define ftrace_get_symaddr(fentry_ip) ((unsigned long)(fentry_ip)) -struct ftrace_regs { - struct pt_regs regs; -}; +#include <linux/ftrace_regs.h> static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs) { - struct pt_regs *regs = &fregs->regs; + struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs; if (test_pt_regs_flag(regs, PIF_FTRACE_FULL_REGS)) return regs; return NULL; } -#ifdef CONFIG_FUNCTION_GRAPH_TRACER -struct fgraph_ret_regs { - unsigned long gpr2; - unsigned long fp; -}; - -static __always_inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs) -{ - return ret_regs->gpr2; -} - -static __always_inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs) +static __always_inline void +ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, + unsigned long ip) { - return ret_regs->fp; + arch_ftrace_regs(fregs)->regs.psw.addr = ip; } -#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +#undef ftrace_regs_get_frame_pointer static __always_inline unsigned long -ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs) +ftrace_regs_get_frame_pointer(struct ftrace_regs *fregs) { - return fregs->regs.psw.addr; + return ftrace_regs_get_stack_pointer(fregs); } -static __always_inline void -ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, - unsigned long ip) +static __always_inline unsigned long +ftrace_regs_get_return_address(const struct ftrace_regs *fregs) { - fregs->regs.psw.addr = ip; + return arch_ftrace_regs(fregs)->regs.gprs[14]; } -#define ftrace_regs_get_argument(fregs, n) \ - regs_get_kernel_argument(&(fregs)->regs, n) -#define 
ftrace_regs_get_stack_pointer(fregs) \ - kernel_stack_pointer(&(fregs)->regs) -#define ftrace_regs_return_value(fregs) \ - regs_return_value(&(fregs)->regs) -#define ftrace_regs_set_return_value(fregs, ret) \ - regs_set_return_value(&(fregs)->regs, ret) -#define ftrace_override_function_with_return(fregs) \ - override_function_with_return(&(fregs)->regs) -#define ftrace_regs_query_register_offset(name) \ - regs_query_register_offset(name) +#define arch_ftrace_fill_perf_regs(fregs, _regs) do { \ + (_regs)->psw.mask = 0; \ + (_regs)->psw.addr = arch_ftrace_regs(fregs)->regs.psw.addr; \ + (_regs)->gprs[15] = arch_ftrace_regs(fregs)->regs.gprs[15]; \ + } while (0) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS /* @@ -103,37 +100,24 @@ ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, */ static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr) { - struct pt_regs *regs = &fregs->regs; + struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs; regs->orig_gpr2 = addr; } #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ -/* - * Even though the system call numbers are identical for s390/s390x a - * different system call table is used for compat tasks. This may lead - * to e.g. incorrect or missing trace event sysfs files. - * Therefore simply do not trace compat system calls at all. - * See kernel/trace/trace_syscalls.c. - */ -#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS -static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs) -{ - return is_compat_task(); -} - #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) { - /* - * Skip __s390_ and __s390x_ prefix - due to compat wrappers - * and aliasing some symbols of 64 bit system call functions - * may get the __s390_ prefix instead of the __s390x_ prefix. - */ + /* Skip the __s390x_ prefix. 
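+	 * E.g. "__s390x_sys_mmap" matches "sys_mmap" via sym + 8, while an
+	 * aliased 64 bit entry point that kept the shorter "__s390_" prefix
+	 * (7 bytes instead of 8) matches via sym + 7.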
*/ return !strcmp(sym + 7, name) || !strcmp(sym + 8, name); } -#endif /* __ASSEMBLY__ */ +void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct ftrace_regs *fregs); +#define ftrace_graph_func ftrace_graph_func + +#endif /* __ASSEMBLER__ */ #ifdef CONFIG_FUNCTION_TRACER diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h index eaeaeb3ff0be..942f21c39697 100644 --- a/arch/s390/include/asm/futex.h +++ b/arch/s390/include/asm/futex.h @@ -2,80 +2,101 @@ #ifndef _ASM_S390_FUTEX_H #define _ASM_S390_FUTEX_H +#include <linux/instrumented.h> #include <linux/uaccess.h> #include <linux/futex.h> #include <asm/asm-extable.h> #include <asm/mmu_context.h> #include <asm/errno.h> -#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \ - asm volatile( \ - " sacf 256\n" \ - "0: l %1,0(%6)\n" \ - "1:"insn \ - "2: cs %1,%2,0(%6)\n" \ - "3: jl 1b\n" \ - " lhi %0,0\n" \ - "4: sacf 768\n" \ - EX_TABLE(0b,4b) EX_TABLE(1b,4b) \ - EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ - : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ - "=m" (*uaddr) \ - : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ - "m" (*uaddr) : "cc"); +#define FUTEX_OP_FUNC(name, insn) \ +static uaccess_kmsan_or_inline int \ +__futex_atomic_##name(int oparg, int *old, u32 __user *uaddr) \ +{ \ + bool sacf_flag; \ + int rc, new; \ + \ + instrument_copy_from_user_before(old, uaddr, sizeof(*old)); \ + sacf_flag = enable_sacf_uaccess(); \ + asm_inline volatile( \ + " sacf 256\n" \ + "0: l %[old],%[uaddr]\n" \ + "1:"insn \ + "2: cs %[old],%[new],%[uaddr]\n" \ + "3: jl 1b\n" \ + " lhi %[rc],0\n" \ + "4: sacf 768\n" \ + EX_TABLE_UA_FAULT(0b, 4b, %[rc]) \ + EX_TABLE_UA_FAULT(1b, 4b, %[rc]) \ + EX_TABLE_UA_FAULT(2b, 4b, %[rc]) \ + EX_TABLE_UA_FAULT(3b, 4b, %[rc]) \ + : [rc] "=d" (rc), [old] "=&d" (*old), \ + [new] "=&d" (new), [uaddr] "+Q" (*uaddr) \ + : [oparg] "d" (oparg) \ + : "cc"); \ + disable_sacf_uaccess(sacf_flag); \ + if (!rc) \ + instrument_copy_from_user_after(old, uaddr, sizeof(*old), 0); \ + return rc; \ +} + +FUTEX_OP_FUNC(set, "lr %[new],%[oparg]\n") +FUTEX_OP_FUNC(add, "lr %[new],%[old]\n ar %[new],%[oparg]\n") +FUTEX_OP_FUNC(or, "lr %[new],%[old]\n or %[new],%[oparg]\n") +FUTEX_OP_FUNC(and, "lr %[new],%[old]\n nr %[new],%[oparg]\n") +FUTEX_OP_FUNC(xor, "lr %[new],%[old]\n xr %[new],%[oparg]\n") -static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, - u32 __user *uaddr) +static inline +int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) { - int oldval = 0, newval, ret; + int old, rc; switch (op) { case FUTEX_OP_SET: - __futex_atomic_op("lr %2,%5\n", - ret, oldval, newval, uaddr, oparg); + rc = __futex_atomic_set(oparg, &old, uaddr); break; case FUTEX_OP_ADD: - __futex_atomic_op("lr %2,%1\nar %2,%5\n", - ret, oldval, newval, uaddr, oparg); + rc = __futex_atomic_add(oparg, &old, uaddr); break; case FUTEX_OP_OR: - __futex_atomic_op("lr %2,%1\nor %2,%5\n", - ret, oldval, newval, uaddr, oparg); + rc = __futex_atomic_or(oparg, &old, uaddr); break; case FUTEX_OP_ANDN: - __futex_atomic_op("lr %2,%1\nnr %2,%5\n", - ret, oldval, newval, uaddr, oparg); + rc = __futex_atomic_and(~oparg, &old, uaddr); break; case FUTEX_OP_XOR: - __futex_atomic_op("lr %2,%1\nxr %2,%5\n", - ret, oldval, newval, uaddr, oparg); + rc = __futex_atomic_xor(oparg, &old, uaddr); break; default: - ret = -ENOSYS; + rc = -ENOSYS; } - - if (!ret) - *oval = oldval; - - return ret; + if (!rc) + *oval = old; + return rc; } -static inline int futex_atomic_cmpxchg_inatomic(u32 
*uval, u32 __user *uaddr, - u32 oldval, u32 newval) +static uaccess_kmsan_or_inline +int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { - int ret; + bool sacf_flag; + int rc; - asm volatile( - " sacf 256\n" - "0: cs %1,%4,0(%5)\n" - "1: la %0,0\n" - "2: sacf 768\n" - EX_TABLE(0b,2b) EX_TABLE(1b,2b) - : "=d" (ret), "+d" (oldval), "=m" (*uaddr) - : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) + instrument_copy_from_user_before(uval, uaddr, sizeof(*uval)); + sacf_flag = enable_sacf_uaccess(); + asm_inline volatile( + " sacf 256\n" + "0: cs %[old],%[new],%[uaddr]\n" + "1: lhi %[rc],0\n" + "2: sacf 768\n" + EX_TABLE_UA_FAULT(0b, 2b, %[rc]) + EX_TABLE_UA_FAULT(1b, 2b, %[rc]) + : [rc] "=d" (rc), [old] "+d" (oldval), [uaddr] "+Q" (*uaddr) + : [new] "d" (newval) : "cc", "memory"); + disable_sacf_uaccess(sacf_flag); *uval = oldval; - return ret; + instrument_copy_from_user_after(uval, uaddr, sizeof(*uval), 0); + return rc; } #endif /* _ASM_S390_FUTEX_H */ diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h index 9725586f4259..66c5808fd011 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -17,13 +17,12 @@ #define GMAP_NOTIFY_MPROT 0x1 /* Status bits only for huge segment entries */ -#define _SEGMENT_ENTRY_GMAP_IN 0x8000 /* invalidation notify bit */ -#define _SEGMENT_ENTRY_GMAP_UC 0x4000 /* dirty (migration) */ +#define _SEGMENT_ENTRY_GMAP_IN 0x0800 /* invalidation notify bit */ +#define _SEGMENT_ENTRY_GMAP_UC 0x0002 /* dirty (migration) */ /** * struct gmap_struct - guest address space * @list: list head for the mm->context gmap list - * @crst_list: list of all crst tables used in the guest address space * @mm: pointer to the parent mm_struct * @guest_to_host: radix tree with guest to host address translation * @host_to_guest: radix tree with pointer to segment table entries @@ -35,7 +34,6 @@ * @guest_handle: protected virtual machine handle for the ultravisor * @host_to_rmap: radix tree with gmap_rmap lists * @children: list of shadow gmap structures - * @pt_list: list of all page tables used in the shadow guest address space * @shadow_lock: spinlock to protect the shadow gmap list * @parent: pointer to the parent gmap for shadow guest address spaces * @orig_asce: ASCE for which the shadow page table has been created @@ -45,7 +43,6 @@ */ struct gmap { struct list_head list; - struct list_head crst_list; struct mm_struct *mm; struct radix_tree_root guest_to_host; struct radix_tree_root host_to_guest; @@ -61,7 +58,6 @@ struct gmap { /* Additional data for shadow guest address spaces */ struct radix_tree_root host_to_rmap; struct list_head children; - struct list_head pt_list; spinlock_t shadow_lock; struct gmap *parent; unsigned long orig_asce; @@ -106,26 +102,20 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit); void gmap_remove(struct gmap *gmap); struct gmap *gmap_get(struct gmap *gmap); void gmap_put(struct gmap *gmap); +void gmap_free(struct gmap *gmap); +struct gmap *gmap_alloc(unsigned long limit); -void gmap_enable(struct gmap *gmap); -void gmap_disable(struct gmap *gmap); -struct gmap *gmap_get_enabled(void); int gmap_map_segment(struct gmap *gmap, unsigned long from, unsigned long to, unsigned long len); int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len); unsigned long __gmap_translate(struct gmap *, unsigned long gaddr); -unsigned long gmap_translate(struct gmap *, unsigned long gaddr); int __gmap_link(struct gmap *gmap, unsigned long gaddr, 
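The futex helpers above all follow the same shape: load the old value, apply the operation, and retry with COMPARE AND SWAP until no other CPU raced in between. A user-space C analogue of that retry loop (sketch only; the kernel version additionally runs with the secondary address space selected via sacf and recovers from faults through the EX_TABLE_UA_FAULT entries):

	#include <stdatomic.h>

	/* Mirror of the "0: l ... 1: <insn> ... 2: cs ... 3: jl 1b" loop,
	 * shown for the FUTEX_OP_ADD case. */
	static int futex_op_add(int oparg, int *old, _Atomic int *uaddr)
	{
		int cur = atomic_load(uaddr);
		int new;

		do {
			new = cur + oparg;	/* the per-op "insn" step */
		} while (!atomic_compare_exchange_weak(uaddr, &cur, new));
		*old = cur;
		return 0;
	}
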
unsigned long vmaddr); -int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags); -void gmap_discard(struct gmap *, unsigned long from, unsigned long to); void __gmap_zap(struct gmap *, unsigned long gaddr); void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr); int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val); -struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, - int edat_level); -int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level); +void gmap_unshadow(struct gmap *sg); int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t, int fake); int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t, @@ -134,24 +124,20 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt, int fake); int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt, int fake); -int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, - unsigned long *pgt, int *dat_protection, int *fake); int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte); void gmap_register_pte_notifier(struct gmap_notifier *); void gmap_unregister_pte_notifier(struct gmap_notifier *); -int gmap_mprotect_notify(struct gmap *, unsigned long start, - unsigned long len, int prot); +int gmap_protect_one(struct gmap *gmap, unsigned long gaddr, int prot, unsigned long bits); void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4], unsigned long gaddr, unsigned long vmaddr); -int s390_disable_cow_sharing(void); -void s390_unlist_old_asce(struct gmap *gmap); int s390_replace_asce(struct gmap *gmap); void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns); int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start, unsigned long end, bool interruptible); +unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level); /** * s390_uv_destroy_range - Destroy a range of pages in the given mm. diff --git a/arch/s390/include/asm/gmap_helpers.h b/arch/s390/include/asm/gmap_helpers.h new file mode 100644 index 000000000000..5356446a61c4 --- /dev/null +++ b/arch/s390/include/asm/gmap_helpers.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Helper functions for KVM guest address space mapping code + * + * Copyright IBM Corp. 
2025 + */ + +#ifndef _ASM_S390_GMAP_HELPERS_H +#define _ASM_S390_GMAP_HELPERS_H + +void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr); +void gmap_helper_discard(struct mm_struct *mm, unsigned long vmaddr, unsigned long end); +int gmap_helper_disable_cow_sharing(void); + +#endif /* _ASM_S390_GMAP_HELPERS_H */ diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h index 58668ffb5488..a5b45388c91f 100644 --- a/arch/s390/include/asm/hardirq.h +++ b/arch/s390/include/asm/hardirq.h @@ -13,9 +13,9 @@ #include <asm/lowcore.h> -#define local_softirq_pending() (S390_lowcore.softirq_pending) -#define set_softirq_pending(x) (S390_lowcore.softirq_pending = (x)) -#define or_softirq_pending(x) (S390_lowcore.softirq_pending |= (x)) +#define local_softirq_pending() (get_lowcore()->softirq_pending) +#define set_softirq_pending(x) (get_lowcore()->softirq_pending = (x)) +#define or_softirq_pending(x) (get_lowcore()->softirq_pending |= (x)) #define __ARCH_IRQ_STAT #define __ARCH_IRQ_EXIT_IRQS_DISABLED diff --git a/arch/s390/include/asm/hiperdispatch.h b/arch/s390/include/asm/hiperdispatch.h new file mode 100644 index 000000000000..27e23aa27a24 --- /dev/null +++ b/arch/s390/include/asm/hiperdispatch.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2024 + */ + +#ifndef _ASM_HIPERDISPATCH_H +#define _ASM_HIPERDISPATCH_H + +void hd_reset_state(void); +void hd_add_core(int cpu); +void hd_disable_hiperdispatch(void); +int hd_enable_hiperdispatch(void); + +#endif /* _ASM_HIPERDISPATCH_H */ diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h index ce5f4fe8be4d..69131736daaa 100644 --- a/arch/s390/include/asm/hugetlb.h +++ b/arch/s390/include/asm/hugetlb.h @@ -9,42 +9,41 @@ #ifndef _ASM_S390_HUGETLB_H #define _ASM_S390_HUGETLB_H +#include <linux/cpufeature.h> #include <linux/pgtable.h> +#include <linux/swap.h> +#include <linux/swapops.h> #include <asm/page.h> -#define hugetlb_free_pgd_range free_pgd_range -#define hugepages_supported() (MACHINE_HAS_EDAT1) +#define hugepages_supported() cpu_has_edat1() +#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned long sz); void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte); -pte_t huge_ptep_get(pte_t *ptep); -pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep); + pte_t *ptep, pte_t pte); -/* - * If the arch doesn't supply something else, assume that hugepage - * size aligned regions are ok without further preparation. 
- */ -static inline int prepare_hugepage_range(struct file *file, - unsigned long addr, unsigned long len) -{ - struct hstate *h = hstate_file(file); +#define __HAVE_ARCH_HUGE_PTEP_GET +pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep); - if (len & ~huge_page_mask(h)) - return -EINVAL; - if (addr & ~huge_page_mask(h)) - return -EINVAL; - return 0; +pte_t __huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + unsigned long sz) +{ + return __huge_ptep_get_and_clear(mm, addr, ptep); } static inline void arch_clear_hugetlb_flags(struct folio *folio) { - clear_bit(PG_arch_1, &folio->flags); + clear_bit(PG_arch_1, &folio->flags.f); } #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags +#define __HAVE_ARCH_HUGE_PTE_CLEAR static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long sz) { @@ -54,94 +53,54 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, set_pte(ptep, __pte(_SEGMENT_ENTRY_EMPTY)); } +#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { - return huge_ptep_get_and_clear(vma->vm_mm, address, ptep); + return __huge_ptep_get_and_clear(vma->vm_mm, address, ptep); } +#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty) { - int changed = !pte_same(huge_ptep_get(ptep), pte); + int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte); + if (changed) { - huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + __huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); __set_huge_pte_at(vma->vm_mm, addr, ptep, pte); } return changed; } +#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep); - __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte)); -} + pte_t pte = __huge_ptep_get_and_clear(mm, addr, ptep); -static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) -{ - return mk_pte(page, pgprot); -} - -static inline int huge_pte_none(pte_t pte) -{ - return pte_none(pte); -} - -static inline int huge_pte_none_mostly(pte_t pte) -{ - return huge_pte_none(pte); -} - -static inline int huge_pte_write(pte_t pte) -{ - return pte_write(pte); -} - -static inline int huge_pte_dirty(pte_t pte) -{ - return pte_dirty(pte); -} - -static inline pte_t huge_pte_mkwrite(pte_t pte) -{ - return pte_mkwrite_novma(pte); -} - -static inline pte_t huge_pte_mkdirty(pte_t pte) -{ - return pte_mkdirty(pte); -} - -static inline pte_t huge_pte_wrprotect(pte_t pte) -{ - return pte_wrprotect(pte); -} - -static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) -{ - return pte_modify(pte, newprot); + __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte)); } +#define __HAVE_ARCH_HUGE_PTE_MKUFFD_WP static inline pte_t huge_pte_mkuffd_wp(pte_t pte) { return pte; } +#define __HAVE_ARCH_HUGE_PTE_CLEAR_UFFD_WP static inline pte_t huge_pte_clear_uffd_wp(pte_t pte) { return pte; } +#define __HAVE_ARCH_HUGE_PTE_UFFD_WP static inline int huge_pte_uffd_wp(pte_t pte) { return 0; } -static inline bool gigantic_page_runtime_supported(void) -{ - return true; -} +#include <asm-generic/hugetlb.h> 
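With the __HAVE_ARCH_HUGE_* defines added above, the newly included asm-generic header only provides fallbacks for the hooks s390 does not override, which is what made the long list of trivial wrappers removable. A reduced sketch of that pattern (assumed shape of asm-generic/hugetlb.h, trimmed for illustration):

	#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR
	static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
					  pte_t *ptep, unsigned long sz)
	{
		pte_clear(mm, addr, ptep);
	}
	#endif
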
#endif /* _ASM_S390_HUGETLB_H */ diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h index ac68c657b28c..e5000ee6cdc6 100644 --- a/arch/s390/include/asm/idals.h +++ b/arch/s390/include/asm/idals.h @@ -181,6 +181,82 @@ static inline void idal_buffer_free(struct idal_buffer *ib) } /* + * Allocate an array of IDAL buffers to cover a total data size of @size. The + * resulting array is null-terminated. + * + * The amount of individual IDAL buffers is determined based on @size. + * Each IDAL buffer can have a maximum size of @CCW_MAX_BYTE_COUNT. + */ +static inline struct idal_buffer **idal_buffer_array_alloc(size_t size, int page_order) +{ + struct idal_buffer **ibs; + size_t ib_size; /* Size of a single idal buffer */ + int count; /* Amount of individual idal buffers */ + int i; + + count = (size + CCW_MAX_BYTE_COUNT - 1) / CCW_MAX_BYTE_COUNT; + ibs = kmalloc_array(count + 1, sizeof(*ibs), GFP_KERNEL); + for (i = 0; i < count; i++) { + /* Determine size for the current idal buffer */ + ib_size = min(size, CCW_MAX_BYTE_COUNT); + size -= ib_size; + ibs[i] = idal_buffer_alloc(ib_size, page_order); + if (IS_ERR(ibs[i])) { + while (i--) + idal_buffer_free(ibs[i]); + kfree(ibs); + ibs = NULL; + return ERR_PTR(-ENOMEM); + } + } + ibs[i] = NULL; + return ibs; +} + +/* + * Free array of IDAL buffers + */ +static inline void idal_buffer_array_free(struct idal_buffer ***ibs) +{ + struct idal_buffer **p; + + if (!ibs || !*ibs) + return; + for (p = *ibs; *p; p++) + idal_buffer_free(*p); + kfree(*ibs); + *ibs = NULL; +} + +/* + * Determine size of IDAL buffer array + */ +static inline int idal_buffer_array_size(struct idal_buffer **ibs) +{ + int size = 0; + + while (ibs && *ibs) { + size++; + ibs++; + } + return size; +} + +/* + * Determine total data size covered by IDAL buffer array + */ +static inline size_t idal_buffer_array_datasize(struct idal_buffer **ibs) +{ + size_t size = 0; + + while (ibs && *ibs) { + size += (*ibs)->size; + ibs++; + } + return size; +} + +/* * Test if a idal list is really needed. 
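A caller sketch for the new IDAL buffer array helpers above (hypothetical driver code; error handling shortened):

	static int example_covering_idals(size_t size)
	{
		struct idal_buffer **ibs;

		/* One IDAL buffer per CCW_MAX_BYTE_COUNT chunk of data. */
		ibs = idal_buffer_array_alloc(size, 0);
		if (IS_ERR(ibs))
			return PTR_ERR(ibs);
		/* Combined capacity covers at least the requested size. */
		WARN_ON(idal_buffer_array_datasize(ibs) < size);
		/* ... wire the individual buffers up to CCWs ... */
		idal_buffer_array_free(&ibs);
		return 0;
	}
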
*/ static inline bool __idal_buffer_is_needed(struct idal_buffer *ib) diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index 0fbc992d7a5e..faddb9aef3b8 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -16,8 +16,10 @@ #include <asm/pci_io.h> #define xlate_dev_mem_ptr xlate_dev_mem_ptr +#define kc_xlate_dev_mem_ptr xlate_dev_mem_ptr void *xlate_dev_mem_ptr(phys_addr_t phys); #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr +#define kc_unxlate_dev_mem_ptr unxlate_dev_mem_ptr void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); #define IO_SPACE_LIMIT 0 @@ -31,9 +33,7 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); #define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL) #define ioremap_wc(addr, size) \ - ioremap_prot((addr), (size), pgprot_val(pgprot_writecombine(PAGE_KERNEL))) -#define ioremap_wt(addr, size) \ - ioremap_prot((addr), (size), pgprot_val(pgprot_writethrough(PAGE_KERNEL))) + ioremap_prot((addr), (size), pgprot_writecombine(PAGE_KERNEL)) static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) { diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h index 54b42817f70a..697497e7d13e 100644 --- a/arch/s390/include/asm/irq.h +++ b/arch/s390/include/asm/irq.h @@ -25,7 +25,7 @@ #define EXT_IRQ_CP_SERVICE 0x2603 #define EXT_IRQ_IUCV 0x4000 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/hardirq.h> #include <linux/percpu.h> @@ -47,13 +47,13 @@ enum interruption_class { IRQEXT_CMS, IRQEXT_CMC, IRQEXT_FTP, + IRQEXT_WTI, IRQIO_CIO, IRQIO_DAS, IRQIO_C15, IRQIO_C70, IRQIO_TAP, IRQIO_VMR, - IRQIO_LCS, IRQIO_CTC, IRQIO_ADM, IRQIO_CSC, @@ -99,6 +99,7 @@ int unregister_external_irq(u16 code, ext_int_handler_t handler); enum irq_subclass { IRQ_SUBCLASS_MEASUREMENT_ALERT = 5, IRQ_SUBCLASS_SERVICE_SIGNAL = 9, + IRQ_SUBCLASS_WARNING_TRACK = 33, }; #define CR0_IRQ_SUBCLASS_MASK \ @@ -119,6 +120,6 @@ void irq_subclass_unregister(enum irq_subclass subclass); #define irq_canonicalize(irq) (irq) -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_IRQ_H */ diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h index 02427b205c11..bcab456dfb80 100644 --- a/arch/s390/include/asm/irqflags.h +++ b/arch/s390/include/asm/irqflags.h @@ -37,12 +37,18 @@ static __always_inline void __arch_local_irq_ssm(unsigned long flags) asm volatile("ssm %0" : : "Q" (flags) : "memory"); } -static __always_inline unsigned long arch_local_save_flags(void) +#ifdef CONFIG_KMSAN +#define arch_local_irq_attributes noinline notrace __no_sanitize_memory __maybe_unused +#else +#define arch_local_irq_attributes __always_inline +#endif + +static arch_local_irq_attributes unsigned long arch_local_save_flags(void) { return __arch_local_irq_stnsm(0xff); } -static __always_inline unsigned long arch_local_irq_save(void) +static arch_local_irq_attributes unsigned long arch_local_irq_save(void) { return __arch_local_irq_stnsm(0xfc); } @@ -52,7 +58,12 @@ static __always_inline void arch_local_irq_disable(void) arch_local_irq_save(); } -static __always_inline void arch_local_irq_enable(void) +static arch_local_irq_attributes void arch_local_irq_enable_external(void) +{ + __arch_local_irq_stosm(0x01); +} + +static arch_local_irq_attributes void arch_local_irq_enable(void) { __arch_local_irq_stosm(0x03); } diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h index bf78cf381dfc..d9cbc18f6b2e 100644 --- a/arch/s390/include/asm/jump_label.h +++ b/arch/s390/include/asm/jump_label.h @@ 
-4,7 +4,7 @@ #define HAVE_JUMP_LABEL_BATCH -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> #include <linux/stringify.h> @@ -51,5 +51,5 @@ label: return true; } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h index 1bd08eb56d5f..9084b750350d 100644 --- a/arch/s390/include/asm/kexec.h +++ b/arch/s390/include/asm/kexec.h @@ -94,6 +94,9 @@ void arch_kexec_protect_crashkres(void); void arch_kexec_unprotect_crashkres(void); #define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres + +bool is_kdump_kernel(void); +#define is_kdump_kernel is_kdump_kernel #endif #ifdef CONFIG_KEXEC_FILE diff --git a/arch/s390/include/asm/kfence.h b/arch/s390/include/asm/kfence.h index e47fd8cbe701..e95e35eb8a3f 100644 --- a/arch/s390/include/asm/kfence.h +++ b/arch/s390/include/asm/kfence.h @@ -12,27 +12,16 @@ void __kernel_map_pages(struct page *page, int numpages, int enable); static __always_inline bool arch_kfence_init_pool(void) { - return true; -} - -#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK) - -/* - * Do not split kfence pool to 4k mapping with arch_kfence_init_pool(), - * but earlier where page table allocations still happen with memblock. - * Reason is that arch_kfence_init_pool() gets called when the system - * is still in a limbo state - disabling and enabling bottom halves is - * not yet allowed, but that is what our page_table_alloc() would do. - */ -static __always_inline void kfence_split_mapping(void) -{ #ifdef CONFIG_KFENCE unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT; set_memory_4k((unsigned long)__kfence_pool, pool_pages); #endif + return true; } +#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK) + static inline bool kfence_protect_page(unsigned long addr, bool protect) { __kernel_map_pages(virt_to_page((void *)addr), 1, !protect); diff --git a/arch/s390/include/asm/kmsan.h b/arch/s390/include/asm/kmsan.h new file mode 100644 index 000000000000..f73e181d09ae --- /dev/null +++ b/arch/s390/include/asm/kmsan.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_KMSAN_H +#define _ASM_S390_KMSAN_H + +#include <asm/lowcore.h> +#include <asm/page.h> +#include <linux/kmsan.h> +#include <linux/mmzone.h> +#include <linux/stddef.h> + +#ifndef MODULE + +static inline bool is_lowcore_addr(void *addr) +{ + return addr >= (void *)get_lowcore() && + addr < (void *)(get_lowcore() + 1); +} + +static inline void *arch_kmsan_get_meta_or_null(void *addr, bool is_origin) +{ + if (is_lowcore_addr(addr)) { + /* + * Different lowcores accessed via S390_lowcore are described + * by the same struct page. Resolve the prefix manually in + * order to get a distinct struct page. + */ + addr += (void *)lowcore_ptr[raw_smp_processor_id()] - + (void *)get_lowcore(); + if (KMSAN_WARN_ON(is_lowcore_addr(addr))) + return NULL; + return kmsan_get_metadata(addr, is_origin); + } + return NULL; +} + +static inline bool kmsan_virt_addr_valid(void *addr) +{ + bool ret; + + /* + * pfn_valid() relies on RCU, and may call into the scheduler on exiting + * the critical section. However, this would result in recursion with + * KMSAN. Therefore, disable preemption here, and re-enable preemption + * below while suppressing reschedules to avoid recursion. + * + * Note, this sacrifices occasionally breaking scheduling guarantees. + * Although, a kernel compiled with KMSAN has already given up on any + * performance guarantees due to being heavily instrumented. 
+ */ + preempt_disable(); + ret = virt_addr_valid(addr); + preempt_enable_no_resched(); + + return ret; +} + +#endif /* !MODULE */ + +#endif /* _ASM_S390_KMSAN_H */ diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 9281063636a7..ae1223264d3c 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -15,22 +15,22 @@ #include <linux/hrtimer.h> #include <linux/interrupt.h> #include <linux/kvm_types.h> -#include <linux/kvm_host.h> #include <linux/kvm.h> #include <linux/seqlock.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/mmu_notifier.h> +#include <asm/kvm_host_types.h> #include <asm/debug.h> #include <asm/cpu.h> #include <asm/fpu.h> #include <asm/isc.h> #include <asm/guarded_storage.h> -#define KVM_S390_BSCA_CPU_SLOTS 64 -#define KVM_S390_ESCA_CPU_SLOTS 248 #define KVM_MAX_VCPUS 255 +#define KVM_INTERNAL_MEM_SLOTS 1 + /* * These seem to be used for allocating ->chip in the routing table, which we * don't use. 1 is as small as we can get to reduce the needed memory. If we @@ -50,336 +50,6 @@ #define KVM_REQ_REFRESH_GUEST_PREFIX \ KVM_ARCH_REQ_FLAGS(6, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) -#define SIGP_CTRL_C 0x80 -#define SIGP_CTRL_SCN_MASK 0x3f - -union bsca_sigp_ctrl { - __u8 value; - struct { - __u8 c : 1; - __u8 r : 1; - __u8 scn : 6; - }; -}; - -union esca_sigp_ctrl { - __u16 value; - struct { - __u8 c : 1; - __u8 reserved: 7; - __u8 scn; - }; -}; - -struct esca_entry { - union esca_sigp_ctrl sigp_ctrl; - __u16 reserved1[3]; - __u64 sda; - __u64 reserved2[6]; -}; - -struct bsca_entry { - __u8 reserved0; - union bsca_sigp_ctrl sigp_ctrl; - __u16 reserved[3]; - __u64 sda; - __u64 reserved2[2]; -}; - -union ipte_control { - unsigned long val; - struct { - unsigned long k : 1; - unsigned long kh : 31; - unsigned long kg : 32; - }; -}; - -union sca_utility { - __u16 val; - struct { - __u16 mtcr : 1; - __u16 reserved : 15; - }; -}; - -struct bsca_block { - union ipte_control ipte_control; - __u64 reserved[5]; - __u64 mcn; - union sca_utility utility; - __u8 reserved2[6]; - struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS]; -}; - -struct esca_block { - union ipte_control ipte_control; - __u64 reserved1[6]; - union sca_utility utility; - __u8 reserved2[6]; - __u64 mcn[4]; - __u64 reserved3[20]; - struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS]; -}; - -/* - * This struct is used to store some machine check info from lowcore - * for machine checks that happen while the guest is running. - * This info in host's lowcore might be overwritten by a second machine - * check from host when host is in the machine check's high-level handling. - * The size is 24 bytes. 
- */ -struct mcck_volatile_info { - __u64 mcic; - __u64 failing_storage_address; - __u32 ext_damage_code; - __u32 reserved; -}; - -#define CR0_INITIAL_MASK (CR0_UNUSED_56 | CR0_INTERRUPT_KEY_SUBMASK | \ - CR0_MEASUREMENT_ALERT_SUBMASK) -#define CR14_INITIAL_MASK (CR14_UNUSED_32 | CR14_UNUSED_33 | \ - CR14_EXTERNAL_DAMAGE_SUBMASK) - -#define SIDAD_SIZE_MASK 0xff -#define sida_addr(sie_block) phys_to_virt((sie_block)->sidad & PAGE_MASK) -#define sida_size(sie_block) \ - ((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE) - -#define CPUSTAT_STOPPED 0x80000000 -#define CPUSTAT_WAIT 0x10000000 -#define CPUSTAT_ECALL_PEND 0x08000000 -#define CPUSTAT_STOP_INT 0x04000000 -#define CPUSTAT_IO_INT 0x02000000 -#define CPUSTAT_EXT_INT 0x01000000 -#define CPUSTAT_RUNNING 0x00800000 -#define CPUSTAT_RETAINED 0x00400000 -#define CPUSTAT_TIMING_SUB 0x00020000 -#define CPUSTAT_SIE_SUB 0x00010000 -#define CPUSTAT_RRF 0x00008000 -#define CPUSTAT_SLSV 0x00004000 -#define CPUSTAT_SLSR 0x00002000 -#define CPUSTAT_ZARCH 0x00000800 -#define CPUSTAT_MCDS 0x00000100 -#define CPUSTAT_KSS 0x00000200 -#define CPUSTAT_SM 0x00000080 -#define CPUSTAT_IBS 0x00000040 -#define CPUSTAT_GED2 0x00000010 -#define CPUSTAT_G 0x00000008 -#define CPUSTAT_GED 0x00000004 -#define CPUSTAT_J 0x00000002 -#define CPUSTAT_P 0x00000001 - -struct kvm_s390_sie_block { - atomic_t cpuflags; /* 0x0000 */ - __u32 : 1; /* 0x0004 */ - __u32 prefix : 18; - __u32 : 1; - __u32 ibc : 12; - __u8 reserved08[4]; /* 0x0008 */ -#define PROG_IN_SIE (1<<0) - __u32 prog0c; /* 0x000c */ - union { - __u8 reserved10[16]; /* 0x0010 */ - struct { - __u64 pv_handle_cpu; - __u64 pv_handle_config; - }; - }; -#define PROG_BLOCK_SIE (1<<0) -#define PROG_REQUEST (1<<1) - atomic_t prog20; /* 0x0020 */ - __u8 reserved24[4]; /* 0x0024 */ - __u64 cputm; /* 0x0028 */ - __u64 ckc; /* 0x0030 */ - __u64 epoch; /* 0x0038 */ - __u32 svcc; /* 0x0040 */ -#define LCTL_CR0 0x8000 -#define LCTL_CR6 0x0200 -#define LCTL_CR9 0x0040 -#define LCTL_CR10 0x0020 -#define LCTL_CR11 0x0010 -#define LCTL_CR14 0x0002 - __u16 lctl; /* 0x0044 */ - __s16 icpua; /* 0x0046 */ -#define ICTL_OPEREXC 0x80000000 -#define ICTL_PINT 0x20000000 -#define ICTL_LPSW 0x00400000 -#define ICTL_STCTL 0x00040000 -#define ICTL_ISKE 0x00004000 -#define ICTL_SSKE 0x00002000 -#define ICTL_RRBE 0x00001000 -#define ICTL_TPROT 0x00000200 - __u32 ictl; /* 0x0048 */ -#define ECA_CEI 0x80000000 -#define ECA_IB 0x40000000 -#define ECA_SIGPI 0x10000000 -#define ECA_MVPGI 0x01000000 -#define ECA_AIV 0x00200000 -#define ECA_VX 0x00020000 -#define ECA_PROTEXCI 0x00002000 -#define ECA_APIE 0x00000008 -#define ECA_SII 0x00000001 - __u32 eca; /* 0x004c */ -#define ICPT_INST 0x04 -#define ICPT_PROGI 0x08 -#define ICPT_INSTPROGI 0x0C -#define ICPT_EXTREQ 0x10 -#define ICPT_EXTINT 0x14 -#define ICPT_IOREQ 0x18 -#define ICPT_WAIT 0x1c -#define ICPT_VALIDITY 0x20 -#define ICPT_STOP 0x28 -#define ICPT_OPEREXC 0x2C -#define ICPT_PARTEXEC 0x38 -#define ICPT_IOINST 0x40 -#define ICPT_KSS 0x5c -#define ICPT_MCHKREQ 0x60 -#define ICPT_INT_ENABLE 0x64 -#define ICPT_PV_INSTR 0x68 -#define ICPT_PV_NOTIFY 0x6c -#define ICPT_PV_PREF 0x70 - __u8 icptcode; /* 0x0050 */ - __u8 icptstatus; /* 0x0051 */ - __u16 ihcpu; /* 0x0052 */ - __u8 reserved54; /* 0x0054 */ -#define IICTL_CODE_NONE 0x00 -#define IICTL_CODE_MCHK 0x01 -#define IICTL_CODE_EXT 0x02 -#define IICTL_CODE_IO 0x03 -#define IICTL_CODE_RESTART 0x04 -#define IICTL_CODE_SPECIFICATION 0x10 -#define IICTL_CODE_OPERAND 0x11 - __u8 iictl; /* 0x0055 */ - __u16 ipa; /* 0x0056 */ - __u32 ipb; /* 
0x0058 */ - __u32 scaoh; /* 0x005c */ -#define FPF_BPBC 0x20 - __u8 fpf; /* 0x0060 */ -#define ECB_GS 0x40 -#define ECB_TE 0x10 -#define ECB_SPECI 0x08 -#define ECB_SRSI 0x04 -#define ECB_HOSTPROTINT 0x02 -#define ECB_PTF 0x01 - __u8 ecb; /* 0x0061 */ -#define ECB2_CMMA 0x80 -#define ECB2_IEP 0x20 -#define ECB2_PFMFI 0x08 -#define ECB2_ESCA 0x04 -#define ECB2_ZPCI_LSI 0x02 - __u8 ecb2; /* 0x0062 */ -#define ECB3_AISI 0x20 -#define ECB3_AISII 0x10 -#define ECB3_DEA 0x08 -#define ECB3_AES 0x04 -#define ECB3_RI 0x01 - __u8 ecb3; /* 0x0063 */ -#define ESCA_SCAOL_MASK ~0x3fU - __u32 scaol; /* 0x0064 */ - __u8 sdf; /* 0x0068 */ - __u8 epdx; /* 0x0069 */ - __u8 cpnc; /* 0x006a */ - __u8 reserved6b; /* 0x006b */ - __u32 todpr; /* 0x006c */ -#define GISA_FORMAT1 0x00000001 - __u32 gd; /* 0x0070 */ - __u8 reserved74[12]; /* 0x0074 */ - __u64 mso; /* 0x0080 */ - __u64 msl; /* 0x0088 */ - psw_t gpsw; /* 0x0090 */ - __u64 gg14; /* 0x00a0 */ - __u64 gg15; /* 0x00a8 */ - __u8 reservedb0[8]; /* 0x00b0 */ -#define HPID_KVM 0x4 -#define HPID_VSIE 0x5 - __u8 hpid; /* 0x00b8 */ - __u8 reservedb9[7]; /* 0x00b9 */ - union { - struct { - __u32 eiparams; /* 0x00c0 */ - __u16 extcpuaddr; /* 0x00c4 */ - __u16 eic; /* 0x00c6 */ - }; - __u64 mcic; /* 0x00c0 */ - } __packed; - __u32 reservedc8; /* 0x00c8 */ - union { - struct { - __u16 pgmilc; /* 0x00cc */ - __u16 iprcc; /* 0x00ce */ - }; - __u32 edc; /* 0x00cc */ - } __packed; - union { - struct { - __u32 dxc; /* 0x00d0 */ - __u16 mcn; /* 0x00d4 */ - __u8 perc; /* 0x00d6 */ - __u8 peratmid; /* 0x00d7 */ - }; - __u64 faddr; /* 0x00d0 */ - } __packed; - __u64 peraddr; /* 0x00d8 */ - __u8 eai; /* 0x00e0 */ - __u8 peraid; /* 0x00e1 */ - __u8 oai; /* 0x00e2 */ - __u8 armid; /* 0x00e3 */ - __u8 reservede4[4]; /* 0x00e4 */ - union { - __u64 tecmc; /* 0x00e8 */ - struct { - __u16 subchannel_id; /* 0x00e8 */ - __u16 subchannel_nr; /* 0x00ea */ - __u32 io_int_parm; /* 0x00ec */ - __u32 io_int_word; /* 0x00f0 */ - }; - } __packed; - __u8 reservedf4[8]; /* 0x00f4 */ -#define CRYCB_FORMAT_MASK 0x00000003 -#define CRYCB_FORMAT0 0x00000000 -#define CRYCB_FORMAT1 0x00000001 -#define CRYCB_FORMAT2 0x00000003 - __u32 crycbd; /* 0x00fc */ - __u64 gcr[16]; /* 0x0100 */ - union { - __u64 gbea; /* 0x0180 */ - __u64 sidad; - }; - __u8 reserved188[8]; /* 0x0188 */ - __u64 sdnxo; /* 0x0190 */ - __u8 reserved198[8]; /* 0x0198 */ - __u32 fac; /* 0x01a0 */ - __u8 reserved1a4[20]; /* 0x01a4 */ - __u64 cbrlo; /* 0x01b8 */ - __u8 reserved1c0[8]; /* 0x01c0 */ -#define ECD_HOSTREGMGMT 0x20000000 -#define ECD_MEF 0x08000000 -#define ECD_ETOKENF 0x02000000 -#define ECD_ECC 0x00200000 - __u32 ecd; /* 0x01c8 */ - __u8 reserved1cc[18]; /* 0x01cc */ - __u64 pp; /* 0x01de */ - __u8 reserved1e6[2]; /* 0x01e6 */ - __u64 itdba; /* 0x01e8 */ - __u64 riccbd; /* 0x01f0 */ - __u64 gvrd; /* 0x01f8 */ -} __packed __aligned(512); - -struct kvm_s390_itdb { - __u8 data[256]; -}; - -struct sie_page { - struct kvm_s390_sie_block sie_block; - struct mcck_volatile_info mcck_info; /* 0x0200 */ - __u8 reserved218[360]; /* 0x0218 */ - __u64 pv_grregs[16]; /* 0x0380 */ - __u8 reserved400[512]; /* 0x0400 */ - struct kvm_s390_itdb itdb; /* 0x0600 */ - __u8 reserved700[2304]; /* 0x0700 */ -}; - struct kvm_vcpu_stat { struct kvm_vcpu_stat_generic generic; u64 exit_userspace; @@ -476,6 +146,7 @@ struct kvm_vcpu_stat { u64 instruction_diagnose_500; u64 instruction_diagnose_other; u64 pfault_sync; + u64 signal_exits; }; #define PGM_OPERATION 0x01 @@ -528,6 +199,9 @@ struct kvm_vcpu_stat { #define PGM_REGION_FIRST_TRANS 0x39 #define 
PGM_REGION_SECOND_TRANS 0x3a #define PGM_REGION_THIRD_TRANS 0x3b +#define PGM_SECURE_STORAGE_ACCESS 0x3d +#define PGM_NON_SECURE_STORAGE_ACCESS 0x3e +#define PGM_SECURE_STORAGE_VIOLATION 0x3f #define PGM_MONITOR 0x40 #define PGM_PER 0x80 #define PGM_CRYPTO_OPERATION 0x119 @@ -683,7 +357,7 @@ struct kvm_s390_float_interrupt { int counters[FIRQ_MAX_COUNT]; struct kvm_s390_mchk_info mchk; struct kvm_s390_ext_info srv_signal; - int next_rr_cpu; + int last_sleep_cpu; struct mutex ais_lock; u8 simm; u8 nimm; @@ -748,8 +422,6 @@ struct kvm_vcpu_arch { struct hrtimer ckc_timer; struct kvm_s390_pgm_info pgm; struct gmap *gmap; - /* backup location for the currently enabled gmap when scheduled out */ - struct gmap *enabled_gmap; struct kvm_guestdbg_info_arch guestdbg; unsigned long pfault_token; unsigned long pfault_select; @@ -925,12 +597,14 @@ struct sie_page2 { u8 reserved928[0x1000 - 0x928]; /* 0x0928 */ }; +struct vsie_page; + struct kvm_s390_vsie { struct mutex mutex; struct radix_tree_root addr_to_page; int page_count; int next; - struct page *pages[KVM_MAX_VCPUS]; + struct vsie_page *pages[KVM_MAX_VCPUS]; }; struct kvm_s390_gisa_iam { @@ -958,10 +632,8 @@ struct kvm_s390_pv { struct mmu_notifier mmu_notifier; }; -struct kvm_arch{ - void *sca; - int use_esca; - rwlock_t sca_lock; +struct kvm_arch { + struct esca_block *sca; debug_info_t *dbf; struct kvm_s390_float_interrupt float_int; struct kvm_device *flic; @@ -977,6 +649,7 @@ struct kvm_arch{ int user_sigp; int user_stsi; int user_instr0; + int user_operexec; struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; wait_queue_head_t ipte_wq; int ipte_lock_count; @@ -1030,11 +703,12 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm); void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm, unsigned long *aqm, unsigned long *adm); -int __sie64a(phys_addr_t sie_block_phys, struct kvm_s390_sie_block *sie_block, u64 *rsa); +int __sie64a(phys_addr_t sie_block_phys, struct kvm_s390_sie_block *sie_block, u64 *rsa, + unsigned long gasce); -static inline int sie64a(struct kvm_s390_sie_block *sie_block, u64 *rsa) +static inline int sie64a(struct kvm_s390_sie_block *sie_block, u64 *rsa, unsigned long gasce) { - return __sie64a(virt_to_phys(sie_block), sie_block, rsa); + return __sie64a(virt_to_phys(sie_block), sie_block, rsa, gasce); } extern char sie_exit; @@ -1042,11 +716,14 @@ extern char sie_exit; bool kvm_s390_pv_is_protected(struct kvm *kvm); bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu); +extern int kvm_s390_enter_exit_sie(struct kvm_s390_sie_block *scb, + u64 *gprs, unsigned long gasce); + extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc); extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc); -static inline void kvm_arch_sync_events(struct kvm *kvm) {} -static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} +bool kvm_s390_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa); + static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {} static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} diff --git a/arch/s390/include/asm/kvm_host_types.h b/arch/s390/include/asm/kvm_host_types.h new file mode 100644 index 000000000000..1394d3fb648f --- /dev/null +++ b/arch/s390/include/asm/kvm_host_types.h @@ -0,0 +1,348 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_KVM_HOST_TYPES_H +#define _ASM_KVM_HOST_TYPES_H + +#include <linux/atomic.h> +#include <linux/types.h> + +#define KVM_S390_BSCA_CPU_SLOTS 64 +#define KVM_S390_ESCA_CPU_SLOTS 248 + 
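Because the SCA and SIE control block definitions are now shared through this header, the sizes implied by the offset comments can be pinned with compile-time checks. Illustrative assertions (not part of the patch; values derived from the structure comments):

	static_assert(sizeof(union bsca_sigp_ctrl) == 1);
	static_assert(sizeof(union esca_sigp_ctrl) == 2);
	static_assert(sizeof(struct esca_entry) == 64);
	static_assert(sizeof(struct kvm_s390_sie_block) == 512);
	static_assert(sizeof(struct sie_page) == 4096);
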
+#define SIGP_CTRL_C 0x80 +#define SIGP_CTRL_SCN_MASK 0x3f + +union bsca_sigp_ctrl { + __u8 value; + struct { + __u8 c : 1; + __u8 r : 1; + __u8 scn : 6; + }; +}; + +union esca_sigp_ctrl { + __u16 value; + struct { + __u8 c : 1; + __u8 reserved: 7; + __u8 scn; + }; +}; + +struct esca_entry { + union esca_sigp_ctrl sigp_ctrl; + __u16 reserved1[3]; + __u64 sda; + __u64 reserved2[6]; +}; + +struct bsca_entry { + __u8 reserved0; + union bsca_sigp_ctrl sigp_ctrl; + __u16 reserved[3]; + __u64 sda; + __u64 reserved2[2]; +}; + +union ipte_control { + unsigned long val; + struct { + unsigned long k : 1; + unsigned long kh : 31; + unsigned long kg : 32; + }; +}; + +/* + * Utility is defined as two bytes but having it four bytes wide + * generates more efficient code. Since the following bytes are + * reserved this makes no functional difference. + */ +union sca_utility { + __u32 val; + struct { + __u32 mtcr : 1; + __u32 : 31; + }; +}; + +struct bsca_block { + union ipte_control ipte_control; + __u64 reserved[5]; + __u64 mcn; + union sca_utility utility; + __u8 reserved2[4]; + struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS]; +}; + +struct esca_block { + union ipte_control ipte_control; + __u64 reserved1[6]; + union sca_utility utility; + __u8 reserved2[4]; + __u64 mcn[4]; + __u64 reserved3[20]; + struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS]; +}; + +/* + * This struct is used to store some machine check info from lowcore + * for machine checks that happen while the guest is running. + * This info in host's lowcore might be overwritten by a second machine + * check from host when host is in the machine check's high-level handling. + * The size is 24 bytes. + */ +struct mcck_volatile_info { + __u64 mcic; + __u64 failing_storage_address; + __u32 ext_damage_code; + __u32 reserved; +}; + +#define CR0_INITIAL_MASK (CR0_UNUSED_56 | CR0_INTERRUPT_KEY_SUBMASK | \ + CR0_MEASUREMENT_ALERT_SUBMASK) +#define CR14_INITIAL_MASK (CR14_UNUSED_32 | CR14_UNUSED_33 | \ + CR14_EXTERNAL_DAMAGE_SUBMASK) + +#define SIDAD_SIZE_MASK 0xff +#define sida_addr(sie_block) phys_to_virt((sie_block)->sidad & PAGE_MASK) +#define sida_size(sie_block) \ + ((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE) + +#define CPUSTAT_STOPPED 0x80000000 +#define CPUSTAT_WAIT 0x10000000 +#define CPUSTAT_ECALL_PEND 0x08000000 +#define CPUSTAT_STOP_INT 0x04000000 +#define CPUSTAT_IO_INT 0x02000000 +#define CPUSTAT_EXT_INT 0x01000000 +#define CPUSTAT_RUNNING 0x00800000 +#define CPUSTAT_RETAINED 0x00400000 +#define CPUSTAT_TIMING_SUB 0x00020000 +#define CPUSTAT_SIE_SUB 0x00010000 +#define CPUSTAT_RRF 0x00008000 +#define CPUSTAT_SLSV 0x00004000 +#define CPUSTAT_SLSR 0x00002000 +#define CPUSTAT_ZARCH 0x00000800 +#define CPUSTAT_MCDS 0x00000100 +#define CPUSTAT_KSS 0x00000200 +#define CPUSTAT_SM 0x00000080 +#define CPUSTAT_IBS 0x00000040 +#define CPUSTAT_GED2 0x00000010 +#define CPUSTAT_G 0x00000008 +#define CPUSTAT_GED 0x00000004 +#define CPUSTAT_J 0x00000002 +#define CPUSTAT_P 0x00000001 + +struct kvm_s390_sie_block { + atomic_t cpuflags; /* 0x0000 */ + __u32 : 1; /* 0x0004 */ + __u32 prefix : 18; + __u32 : 1; + __u32 ibc : 12; + __u8 reserved08[4]; /* 0x0008 */ +#define PROG_IN_SIE (1<<0) + __u32 prog0c; /* 0x000c */ + union { + __u8 reserved10[16]; /* 0x0010 */ + struct { + __u64 pv_handle_cpu; + __u64 pv_handle_config; + }; + }; +#define PROG_BLOCK_SIE (1<<0) +#define PROG_REQUEST (1<<1) + atomic_t prog20; /* 0x0020 */ + __u8 reserved24[4]; /* 0x0024 */ + __u64 cputm; /* 0x0028 */ + __u64 ckc; /* 0x0030 */ + __u64 epoch; /* 0x0038 */ + __u32 
svcc; /* 0x0040 */ +#define LCTL_CR0 0x8000 +#define LCTL_CR6 0x0200 +#define LCTL_CR9 0x0040 +#define LCTL_CR10 0x0020 +#define LCTL_CR11 0x0010 +#define LCTL_CR14 0x0002 + __u16 lctl; /* 0x0044 */ + __s16 icpua; /* 0x0046 */ +#define ICTL_OPEREXC 0x80000000 +#define ICTL_PINT 0x20000000 +#define ICTL_LPSW 0x00400000 +#define ICTL_STCTL 0x00040000 +#define ICTL_ISKE 0x00004000 +#define ICTL_SSKE 0x00002000 +#define ICTL_RRBE 0x00001000 +#define ICTL_TPROT 0x00000200 + __u32 ictl; /* 0x0048 */ +#define ECA_CEI 0x80000000 +#define ECA_IB 0x40000000 +#define ECA_SIGPI 0x10000000 +#define ECA_MVPGI 0x01000000 +#define ECA_AIV 0x00200000 +#define ECA_VX 0x00020000 +#define ECA_PROTEXCI 0x00002000 +#define ECA_APIE 0x00000008 +#define ECA_SII 0x00000001 + __u32 eca; /* 0x004c */ +#define ICPT_INST 0x04 +#define ICPT_PROGI 0x08 +#define ICPT_INSTPROGI 0x0C +#define ICPT_EXTREQ 0x10 +#define ICPT_EXTINT 0x14 +#define ICPT_IOREQ 0x18 +#define ICPT_WAIT 0x1c +#define ICPT_VALIDITY 0x20 +#define ICPT_STOP 0x28 +#define ICPT_OPEREXC 0x2C +#define ICPT_PARTEXEC 0x38 +#define ICPT_IOINST 0x40 +#define ICPT_KSS 0x5c +#define ICPT_MCHKREQ 0x60 +#define ICPT_INT_ENABLE 0x64 +#define ICPT_PV_INSTR 0x68 +#define ICPT_PV_NOTIFY 0x6c +#define ICPT_PV_PREF 0x70 + __u8 icptcode; /* 0x0050 */ + __u8 icptstatus; /* 0x0051 */ + __u16 ihcpu; /* 0x0052 */ + __u8 reserved54; /* 0x0054 */ +#define IICTL_CODE_NONE 0x00 +#define IICTL_CODE_MCHK 0x01 +#define IICTL_CODE_EXT 0x02 +#define IICTL_CODE_IO 0x03 +#define IICTL_CODE_RESTART 0x04 +#define IICTL_CODE_SPECIFICATION 0x10 +#define IICTL_CODE_OPERAND 0x11 + __u8 iictl; /* 0x0055 */ + __u16 ipa; /* 0x0056 */ + __u32 ipb; /* 0x0058 */ + __u32 scaoh; /* 0x005c */ +#define FPF_BPBC 0x20 + __u8 fpf; /* 0x0060 */ +#define ECB_GS 0x40 +#define ECB_TE 0x10 +#define ECB_SPECI 0x08 +#define ECB_SRSI 0x04 +#define ECB_HOSTPROTINT 0x02 +#define ECB_PTF 0x01 + __u8 ecb; /* 0x0061 */ +#define ECB2_CMMA 0x80 +#define ECB2_IEP 0x20 +#define ECB2_PFMFI 0x08 +#define ECB2_ESCA 0x04 +#define ECB2_ZPCI_LSI 0x02 + __u8 ecb2; /* 0x0062 */ +#define ECB3_AISI 0x20 +#define ECB3_AISII 0x10 +#define ECB3_DEA 0x08 +#define ECB3_AES 0x04 +#define ECB3_RI 0x01 + __u8 ecb3; /* 0x0063 */ +#define ESCA_SCAOL_MASK ~0x3fU + __u32 scaol; /* 0x0064 */ + __u8 sdf; /* 0x0068 */ + __u8 epdx; /* 0x0069 */ + __u8 cpnc; /* 0x006a */ + __u8 reserved6b; /* 0x006b */ + __u32 todpr; /* 0x006c */ +#define GISA_FORMAT1 0x00000001 + __u32 gd; /* 0x0070 */ + __u8 reserved74[12]; /* 0x0074 */ + __u64 mso; /* 0x0080 */ + __u64 msl; /* 0x0088 */ + psw_t gpsw; /* 0x0090 */ + __u64 gg14; /* 0x00a0 */ + __u64 gg15; /* 0x00a8 */ + __u8 reservedb0[8]; /* 0x00b0 */ +#define HPID_KVM 0x4 +#define HPID_VSIE 0x5 + __u8 hpid; /* 0x00b8 */ + __u8 reservedb9[7]; /* 0x00b9 */ + union { + struct { + __u32 eiparams; /* 0x00c0 */ + __u16 extcpuaddr; /* 0x00c4 */ + __u16 eic; /* 0x00c6 */ + }; + __u64 mcic; /* 0x00c0 */ + } __packed; + __u32 reservedc8; /* 0x00c8 */ + union { + struct { + __u16 pgmilc; /* 0x00cc */ + __u16 iprcc; /* 0x00ce */ + }; + __u32 edc; /* 0x00cc */ + } __packed; + union { + struct { + __u32 dxc; /* 0x00d0 */ + __u16 mcn; /* 0x00d4 */ + __u8 perc; /* 0x00d6 */ + __u8 peratmid; /* 0x00d7 */ + }; + __u64 faddr; /* 0x00d0 */ + } __packed; + __u64 peraddr; /* 0x00d8 */ + __u8 eai; /* 0x00e0 */ + __u8 peraid; /* 0x00e1 */ + __u8 oai; /* 0x00e2 */ + __u8 armid; /* 0x00e3 */ + __u8 reservede4[4]; /* 0x00e4 */ + union { + __u64 tecmc; /* 0x00e8 */ + struct { + __u16 subchannel_id; /* 0x00e8 */ + __u16 subchannel_nr; /* 
0x00ea */ + __u32 io_int_parm; /* 0x00ec */ + __u32 io_int_word; /* 0x00f0 */ + }; + } __packed; + __u8 reservedf4[8]; /* 0x00f4 */ +#define CRYCB_FORMAT_MASK 0x00000003 +#define CRYCB_FORMAT0 0x00000000 +#define CRYCB_FORMAT1 0x00000001 +#define CRYCB_FORMAT2 0x00000003 + __u32 crycbd; /* 0x00fc */ + __u64 gcr[16]; /* 0x0100 */ + union { + __u64 gbea; /* 0x0180 */ + __u64 sidad; + }; + __u8 reserved188[8]; /* 0x0188 */ + __u64 sdnxo; /* 0x0190 */ + __u8 reserved198[8]; /* 0x0198 */ + __u32 fac; /* 0x01a0 */ + __u8 reserved1a4[20]; /* 0x01a4 */ + __u64 cbrlo; /* 0x01b8 */ + __u8 reserved1c0[8]; /* 0x01c0 */ +#define ECD_HOSTREGMGMT 0x20000000 +#define ECD_MEF 0x08000000 +#define ECD_ETOKENF 0x02000000 +#define ECD_ECC 0x00200000 +#define ECD_HMAC 0x00004000 + __u32 ecd; /* 0x01c8 */ + __u8 reserved1cc[18]; /* 0x01cc */ + __u64 pp; /* 0x01de */ + __u8 reserved1e6[2]; /* 0x01e6 */ + __u64 itdba; /* 0x01e8 */ + __u64 riccbd; /* 0x01f0 */ + __u64 gvrd; /* 0x01f8 */ +} __packed __aligned(512); + +struct kvm_s390_itdb { + __u8 data[256]; +}; + +struct sie_page { + struct kvm_s390_sie_block sie_block; + struct mcck_volatile_info mcck_info; /* 0x0200 */ + __u8 reserved218[360]; /* 0x0218 */ + __u64 pv_grregs[16]; /* 0x0380 */ + __u8 reserved400[512]; /* 0x0400 */ + struct kvm_s390_itdb itdb; /* 0x0600 */ + __u8 reserved700[2304]; /* 0x0700 */ +}; + +#endif /* _ASM_KVM_HOST_TYPES_H */ diff --git a/arch/s390/include/asm/kvm_para.h b/arch/s390/include/asm/kvm_para.h index df73a052760c..00cc8c916cfb 100644 --- a/arch/s390/include/asm/kvm_para.h +++ b/arch/s390/include/asm/kvm_para.h @@ -76,7 +76,7 @@ long __kvm_hypercall##args(unsigned long nr HYPERCALL_PARM_##args) \ HYPERCALL_REGS_##args; \ \ asm volatile ( \ - " diag 2,4,0x500\n" \ + " diag 2,4,0x500" \ : "=d" (__rc) \ : "d" (__nr) HYPERCALL_FMT_##args \ : "memory", "cc"); \ diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 8c5f16857539..50ffe75adeb4 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -10,14 +10,20 @@ #define _ASM_S390_LOWCORE_H #include <linux/types.h> +#include <asm/machine.h> #include <asm/ptrace.h> #include <asm/ctlreg.h> #include <asm/cpu.h> #include <asm/types.h> +#include <asm/alternative.h> #define LC_ORDER 1 #define LC_PAGES 2 +#define LOWCORE_ALT_ADDRESS _AC(0x70000, UL) + +#ifndef __ASSEMBLER__ + struct pgm_tdb { u64 data[32]; }; @@ -93,12 +99,12 @@ struct lowcore { psw_t io_new_psw; /* 0x01f0 */ /* Save areas. */ - __u64 save_area_sync[8]; /* 0x0200 */ - __u64 save_area_async[8]; /* 0x0240 */ + __u64 save_area[8]; /* 0x0200 */ + __u64 stack_canary; /* 0x0240 */ + __u8 pad_0x0248[0x0280-0x0248]; /* 0x0248 */ __u64 save_area_restart[1]; /* 0x0280 */ - /* CPU flags. */ - __u64 cpu_flags; /* 0x0288 */ + __u64 pcpu; /* 0x0288 */ /* Return psws. */ psw_t return_psw; /* 0x0290 */ @@ -122,7 +128,7 @@ struct lowcore { __u64 int_clock; /* 0x0318 */ __u8 pad_0x0320[0x0328-0x0320]; /* 0x0320 */ __u64 clock_comparator; /* 0x0328 */ - __u64 boot_clock[2]; /* 0x0330 */ + __u8 pad_0x0330[0x0340-0x0330]; /* 0x0330 */ /* Current process. 
*/ __u64 current_task; /* 0x0340 */ @@ -159,10 +165,7 @@ struct lowcore { __u32 spinlock_index; /* 0x03b0 */ __u8 pad_0x03b4[0x03b8-0x03b4]; /* 0x03b4 */ __u64 percpu_offset; /* 0x03b8 */ - __u8 pad_0x03c0[0x03c8-0x03c0]; /* 0x03c0 */ - __u64 machine_flags; /* 0x03c8 */ - __u64 gmap; /* 0x03d0 */ - __u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */ + __u8 pad_0x03c0[0x0400-0x03c0]; /* 0x03c0 */ __u32 return_lpswe; /* 0x0400 */ __u32 return_mcck_lpswe; /* 0x0404 */ @@ -213,7 +216,20 @@ struct lowcore { __u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */ } __packed __aligned(8192); -#define S390_lowcore (*((struct lowcore *) 0)) +static __always_inline struct lowcore *get_lowcore(void) +{ + struct lowcore *lc; + + if (__is_defined(__DECOMPRESSOR)) + return NULL; + asm_inline( + ALTERNATIVE(" lghi %[lc],0", + " llilh %[lc],%[alt]", + ALT_FEATURE(MFEATURE_LOWCORE)) + : [lc] "=d" (lc) + : [alt] "i" (LOWCORE_ALT_ADDRESS >> 16)); + return lc; +} extern struct lowcore *lowcore_ptr[]; @@ -222,4 +238,19 @@ static inline void set_prefix(__u32 address) asm volatile("spx %0" : : "Q" (address) : "memory"); } +#else /* __ASSEMBLER__ */ + +.macro GET_LC reg + ALTERNATIVE "lghi \reg,0", \ + __stringify(llilh \reg, LOWCORE_ALT_ADDRESS >> 16), \ + ALT_FEATURE(MFEATURE_LOWCORE) +.endm + +.macro STMG_LC start, end, savearea + ALTERNATIVE "stmg \start, \end, \savearea", \ + __stringify(stmg \start, \end, LOWCORE_ALT_ADDRESS + \savearea), \ + ALT_FEATURE(MFEATURE_LOWCORE) +.endm + +#endif /* __ASSEMBLER__ */ #endif /* _ASM_S390_LOWCORE_H */ diff --git a/arch/s390/include/asm/machine.h b/arch/s390/include/asm/machine.h new file mode 100644 index 000000000000..9bd4a9dc7778 --- /dev/null +++ b/arch/s390/include/asm/machine.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 
2024 + */ + +#ifndef __ASM_S390_MACHINE_H +#define __ASM_S390_MACHINE_H + +#include <linux/const.h> + +#define MFEATURE_LOWCORE 0 +#define MFEATURE_PCI_MIO 1 +#define MFEATURE_SCC 2 +#define MFEATURE_TLB_GUEST 3 +#define MFEATURE_TX 4 +#define MFEATURE_ESOP 5 +#define MFEATURE_DIAG9C 6 +#define MFEATURE_VM 7 +#define MFEATURE_KVM 8 +#define MFEATURE_LPAR 9 +#define MFEATURE_DIAG288 10 + +#ifndef __ASSEMBLER__ + +#include <linux/bitops.h> +#include <asm/alternative.h> + +extern unsigned long machine_features[1]; + +#define MAX_MFEATURE_BIT (sizeof(machine_features) * BITS_PER_BYTE) + +static inline void __set_machine_feature(unsigned int nr, unsigned long *mfeatures) +{ + if (nr >= MAX_MFEATURE_BIT) + return; + __set_bit(nr, mfeatures); +} + +static inline void set_machine_feature(unsigned int nr) +{ + __set_machine_feature(nr, machine_features); +} + +static inline void __clear_machine_feature(unsigned int nr, unsigned long *mfeatures) +{ + if (nr >= MAX_MFEATURE_BIT) + return; + __clear_bit(nr, mfeatures); +} + +static inline void clear_machine_feature(unsigned int nr) +{ + __clear_machine_feature(nr, machine_features); +} + +static inline bool __test_machine_feature(unsigned int nr, unsigned long *mfeatures) +{ + if (nr >= MAX_MFEATURE_BIT) + return false; + return test_bit(nr, mfeatures); +} + +static inline bool test_machine_feature(unsigned int nr) +{ + return __test_machine_feature(nr, machine_features); +} + +static __always_inline bool __test_machine_feature_constant(unsigned int nr) +{ + asm goto( + ALTERNATIVE("brcl 15,%l[l_no]", "brcl 0,0", ALT_FEATURE(%[nr])) + : + : [nr] "i" (nr) + : + : l_no); + return true; +l_no: + return false; +} + +#define DEFINE_MACHINE_HAS_FEATURE(name, feature) \ +static __always_inline bool machine_has_##name(void) \ +{ \ + if (!__is_defined(__DECOMPRESSOR) && __builtin_constant_p(feature)) \ + return __test_machine_feature_constant(feature); \ + return test_machine_feature(feature); \ +} + +DEFINE_MACHINE_HAS_FEATURE(relocated_lowcore, MFEATURE_LOWCORE) +DEFINE_MACHINE_HAS_FEATURE(scc, MFEATURE_SCC) +DEFINE_MACHINE_HAS_FEATURE(tlb_guest, MFEATURE_TLB_GUEST) +DEFINE_MACHINE_HAS_FEATURE(tx, MFEATURE_TX) +DEFINE_MACHINE_HAS_FEATURE(esop, MFEATURE_ESOP) +DEFINE_MACHINE_HAS_FEATURE(diag9c, MFEATURE_DIAG9C) +DEFINE_MACHINE_HAS_FEATURE(vm, MFEATURE_VM) +DEFINE_MACHINE_HAS_FEATURE(kvm, MFEATURE_KVM) +DEFINE_MACHINE_HAS_FEATURE(lpar, MFEATURE_LPAR) + +#define machine_is_vm machine_has_vm +#define machine_is_kvm machine_has_kvm +#define machine_is_lpar machine_has_lpar + +#endif /* __ASSEMBLER__ */ +#endif /* __ASM_S390_MACHINE_H */ diff --git a/arch/s390/include/asm/march.h b/arch/s390/include/asm/march.h new file mode 100644 index 000000000000..11a71bd14954 --- /dev/null +++ b/arch/s390/include/asm/march.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_S390_MARCH_H +#define __ASM_S390_MARCH_H + +#include <linux/kconfig.h> + +#define MARCH_HAS_Z10_FEATURES 1 + +#ifndef __DECOMPRESSOR + +#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES +#define MARCH_HAS_Z196_FEATURES 1 +#endif + +#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES +#define MARCH_HAS_ZEC12_FEATURES 1 +#endif + +#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES +#define MARCH_HAS_Z13_FEATURES 1 +#endif + +#ifdef CONFIG_HAVE_MARCH_Z14_FEATURES +#define MARCH_HAS_Z14_FEATURES 1 +#endif + +#ifdef CONFIG_HAVE_MARCH_Z15_FEATURES +#define MARCH_HAS_Z15_FEATURES 1 +#endif + +#ifdef CONFIG_HAVE_MARCH_Z16_FEATURES +#define MARCH_HAS_Z16_FEATURES 1 +#endif + +#ifdef CONFIG_HAVE_MARCH_Z17_FEATURES +#define 
MARCH_HAS_Z17_FEATURES 1 +#endif + +#endif /* __DECOMPRESSOR */ + +#endif /* __ASM_S390_MARCH_H */ diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h index b85e13505a0f..28c83ec1f243 100644 --- a/arch/s390/include/asm/mem_encrypt.h +++ b/arch/s390/include/asm/mem_encrypt.h @@ -2,11 +2,11 @@ #ifndef S390_MEM_ENCRYPT_H__ #define S390_MEM_ENCRYPT_H__ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ int set_memory_encrypted(unsigned long vaddr, int numpages); int set_memory_decrypted(unsigned long vaddr, int numpages); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* S390_MEM_ENCRYPT_H__ */ diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index 4c2dc7abc285..f07e49b419ab 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h @@ -22,10 +22,7 @@ typedef struct { * The following bitfields need a down_write on the mm * semaphore when they are written to. As they are only * written once, they can be read without a lock. - * - * The mmu context allocates 4K page tables. */ - unsigned int alloc_pgste:1; /* The mmu context uses extended page tables. */ unsigned int has_pgste:1; /* The mmu context uses storage keys. */ diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index a7789a9f6218..d9b8501bc93d 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -13,6 +13,7 @@ #include <linux/mm_types.h> #include <asm/tlbflush.h> #include <asm/ctlreg.h> +#include <asm/asce.h> #include <asm-generic/mm_hooks.h> #define init_new_context init_new_context @@ -29,9 +30,6 @@ static inline int init_new_context(struct task_struct *tsk, mm->context.gmap_asce = 0; mm->context.flush_mm = 0; #ifdef CONFIG_PGSTE - mm->context.alloc_pgste = page_table_allocate_pgste || - test_thread_flag(TIF_PGSTE) || - (current->mm && current->mm->context.alloc_pgste); mm->context.has_pgste = 0; mm->context.uses_skeys = 0; mm->context.uses_cmm = 0; @@ -76,11 +74,12 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct * int cpu = smp_processor_id(); if (next == &init_mm) - S390_lowcore.user_asce = s390_invalid_asce; + get_lowcore()->user_asce = s390_invalid_asce; else - S390_lowcore.user_asce.val = next->context.asce; + get_lowcore()->user_asce.val = next->context.asce; cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); - /* Clear previous user-ASCE from CR7 */ + /* Clear previous user-ASCE from CR1 and CR7 */ + local_ctl_load(1, &s390_invalid_asce); local_ctl_load(7, &s390_invalid_asce); if (prev != next) cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); @@ -102,6 +101,7 @@ static inline void finish_arch_post_lock_switch(void) { struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; + unsigned long flags; if (mm) { preempt_disable(); @@ -111,16 +111,26 @@ static inline void finish_arch_post_lock_switch(void) __tlb_flush_mm_lazy(mm); preempt_enable(); } - local_ctl_load(7, &S390_lowcore.user_asce); + local_irq_save(flags); + if (test_thread_flag(TIF_ASCE_PRIMARY)) + local_ctl_load(1, &get_lowcore()->kernel_asce); + else + local_ctl_load(1, &get_lowcore()->user_asce); + local_ctl_load(7, &get_lowcore()->user_asce); + local_irq_restore(flags); } #define activate_mm activate_mm static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { - switch_mm(prev, next, current); + switch_mm_irqs_off(prev, next, current); cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); - local_ctl_load(7, &S390_lowcore.user_asce); 
+ if (test_thread_flag(TIF_ASCE_PRIMARY)) + local_ctl_load(1, &get_lowcore()->kernel_asce); + else + local_ctl_load(1, &get_lowcore()->user_asce); + local_ctl_load(7, &get_lowcore()->user_asce); } #include <asm-generic/mmu_context.h> diff --git a/arch/s390/include/asm/mmzone.h b/arch/s390/include/asm/mmzone.h deleted file mode 100644 index 73e3e7c6976c..000000000000 --- a/arch/s390/include/asm/mmzone.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * NUMA support for s390 - * - * Copyright IBM Corp. 2015 - */ - -#ifndef _ASM_S390_MMZONE_H -#define _ASM_S390_MMZONE_H - -#ifdef CONFIG_NUMA - -extern struct pglist_data *node_data[]; -#define NODE_DATA(nid) (node_data[nid]) - -#endif /* CONFIG_NUMA */ -#endif /* _ASM_S390_MMZONE_H */ diff --git a/arch/s390/include/asm/module.h b/arch/s390/include/asm/module.h index 9f1eea15872c..916ab59e458a 100644 --- a/arch/s390/include/asm/module.h +++ b/arch/s390/include/asm/module.h @@ -38,4 +38,18 @@ struct mod_arch_specific { #endif /* CONFIG_FUNCTION_TRACER */ }; +static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + const char *name) +{ + const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + const Elf_Shdr *s, *se; + + for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) { + if (strcmp(name, secstrs + s->sh_name) == 0) + return s; + } + return NULL; +} + #endif /* _ASM_S390_MODULE_H */ diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h index 227466ce9e41..6454c1531854 100644 --- a/arch/s390/include/asm/nmi.h +++ b/arch/s390/include/asm/nmi.h @@ -33,7 +33,7 @@ #define MCCK_CODE_FC_VALID BIT(63 - 43) #define MCCK_CODE_CPU_TIMER_VALID BIT(63 - 46) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ union mci { unsigned long val; @@ -104,5 +104,5 @@ void nmi_free_mcesa(u64 *mcesad); void s390_handle_mcck(void); void s390_do_machine_check(struct pt_regs *regs); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_S390_NMI_H */ diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h index b9c1f3cae842..81c4813cff18 100644 --- a/arch/s390/include/asm/nospec-branch.h +++ b/arch/s390/include/asm/nospec-branch.h @@ -2,11 +2,20 @@ #ifndef _ASM_S390_EXPOLINE_H #define _ASM_S390_EXPOLINE_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> +#include <asm/facility.h> extern int nospec_disable; +extern int nobp; + +static inline bool nobp_enabled(void) +{ + if (__is_defined(__DECOMPRESSOR)) + return false; + return nobp && test_facility(82); +} void nospec_init_branches(void); void nospec_auto_detect(void); @@ -17,8 +26,6 @@ static inline bool nospec_uses_trampoline(void) return __is_defined(CC_USING_EXPOLINE) && !nospec_disable; } -#ifdef CONFIG_EXPOLINE_EXTERN - void __s390_indirect_jump_r1(void); void __s390_indirect_jump_r2(void); void __s390_indirect_jump_r3(void); @@ -35,8 +42,6 @@ void __s390_indirect_jump_r13(void); void __s390_indirect_jump_r14(void); void __s390_indirect_jump_r15(void); -#endif - -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_S390_EXPOLINE_H */ diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h index cb15dd25bf21..46f92bb4c9e5 100644 --- a/arch/s390/include/asm/nospec-insn.h +++ b/arch/s390/include/asm/nospec-insn.h @@ -3,9 +3,10 @@ #define _ASM_S390_NOSPEC_ASM_H #include <linux/linkage.h> +#include <linux/export.h> #include <asm/dwarf.h> -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #ifdef CC_USING_EXPOLINE 
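The nobp_enabled() helper introduced in nospec-branch.h above combines the runtime nobp setting with a check for facility bit 82, and is forced off in the decompressor. A minimal usage sketch follows; the reporting function is hypothetical and not part of this diff (its messages mirror what the kernel's nospec reporting prints):

#include <linux/printk.h>
#include <asm/nospec-branch.h>

/* Hypothetical helper: report which Spectre V2 mitigations are in effect. */
static void report_spectre_v2_mitigations(void)
{
	/* nobp_enabled(): nobp set and facility 82 installed (always false in the decompressor) */
	if (nobp_enabled())
		pr_info("Spectre V2 mitigation: limited branch prediction\n");
	/* expolines compiled in and not disabled at runtime */
	if (nospec_uses_trampoline())
		pr_info("Spectre V2 mitigation: execute trampolines\n");
}
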
@@ -18,7 +19,7 @@ #ifdef CONFIG_EXPOLINE_EXTERN SYM_CODE_START(\name) #else - .pushsection .text.\name,"axG",@progbits,\name,comdat + .pushsection .text..\name,"axG",@progbits,\name,comdat .globl \name .hidden \name .type \name,@function @@ -128,6 +129,6 @@ .endm #endif /* CC_USING_EXPOLINE */ -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_S390_NOSPEC_ASM_H */ diff --git a/arch/s390/include/asm/page-states.h b/arch/s390/include/asm/page-states.h index 08fcbd628120..794fdb21500a 100644 --- a/arch/s390/include/asm/page-states.h +++ b/arch/s390/include/asm/page-states.h @@ -7,7 +7,6 @@ #ifndef PAGE_STATES_H #define PAGE_STATES_H -#include <asm/sections.h> #include <asm/page.h> #define ESSA_GET_STATE 0 @@ -21,7 +20,7 @@ #define ESSA_MAX ESSA_SET_STABLE_NODAT -extern int __bootdata_preserved(cmma_flag); +extern int cmma_flag; static __always_inline unsigned long essa(unsigned long paddr, unsigned char cmd) { diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 224ff9d433ea..9240a363c893 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -10,15 +10,10 @@ #include <linux/const.h> #include <asm/types.h> +#include <asm/asm.h> -#define _PAGE_SHIFT CONFIG_PAGE_SHIFT -#define _PAGE_SIZE (_AC(1, UL) << _PAGE_SHIFT) -#define _PAGE_MASK (~(_PAGE_SIZE - 1)) +#include <vdso/page.h> -/* PAGE_SHIFT determines the page size */ -#define PAGE_SHIFT _PAGE_SHIFT -#define PAGE_SIZE _PAGE_SIZE -#define PAGE_MASK _PAGE_MASK #define PAGE_DEFAULT_ACC _AC(0, UL) /* storage-protection override */ #define PAGE_SPO_ACC 9 @@ -38,7 +33,7 @@ #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #include <asm/setup.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ void __storage_key_init_range(unsigned long start, unsigned long end); @@ -74,11 +69,13 @@ static inline void copy_page(void *to, void *from) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) #define vma_alloc_zeroed_movable_folio(vma, vaddr) \ - vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false) + vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr) -/* - * These are used to make use of C type-checking.. 
- */ +#ifdef CONFIG_STRICT_MM_TYPECHECKS +#define STRICT_MM_TYPECHECKS +#endif + +#ifdef STRICT_MM_TYPECHECKS typedef struct { unsigned long pgprot; } pgprot_t; typedef struct { unsigned long pgste; } pgste_t; @@ -87,52 +84,65 @@ typedef struct { unsigned long pmd; } pmd_t; typedef struct { unsigned long pud; } pud_t; typedef struct { unsigned long p4d; } p4d_t; typedef struct { unsigned long pgd; } pgd_t; -typedef pte_t *pgtable_t; -#define pgprot_val(x) ((x).pgprot) -#define pgste_val(x) ((x).pgste) - -static inline unsigned long pte_val(pte_t pte) -{ - return pte.pte; +#define DEFINE_PGVAL_FUNC(name) \ +static __always_inline unsigned long name ## _val(name ## _t name) \ +{ \ + return name.name; \ } -static inline unsigned long pmd_val(pmd_t pmd) -{ - return pmd.pmd; -} +#else /* STRICT_MM_TYPECHECKS */ -static inline unsigned long pud_val(pud_t pud) -{ - return pud.pud; -} +typedef unsigned long pgprot_t; +typedef unsigned long pgste_t; +typedef unsigned long pte_t; +typedef unsigned long pmd_t; +typedef unsigned long pud_t; +typedef unsigned long p4d_t; +typedef unsigned long pgd_t; -static inline unsigned long p4d_val(p4d_t p4d) -{ - return p4d.p4d; +#define DEFINE_PGVAL_FUNC(name) \ +static __always_inline unsigned long name ## _val(name ## _t name) \ +{ \ + return name; \ } -static inline unsigned long pgd_val(pgd_t pgd) -{ - return pgd.pgd; -} +#endif /* STRICT_MM_TYPECHECKS */ + +DEFINE_PGVAL_FUNC(pgprot) +DEFINE_PGVAL_FUNC(pgste) +DEFINE_PGVAL_FUNC(pte) +DEFINE_PGVAL_FUNC(pmd) +DEFINE_PGVAL_FUNC(pud) +DEFINE_PGVAL_FUNC(p4d) +DEFINE_PGVAL_FUNC(pgd) +typedef pte_t *pgtable_t; + +#define __pgprot(x) ((pgprot_t) { (x) } ) #define __pgste(x) ((pgste_t) { (x) } ) #define __pte(x) ((pte_t) { (x) } ) #define __pmd(x) ((pmd_t) { (x) } ) #define __pud(x) ((pud_t) { (x) } ) #define __p4d(x) ((p4d_t) { (x) } ) #define __pgd(x) ((pgd_t) { (x) } ) -#define __pgprot(x) ((pgprot_t) { (x) } ) static inline void page_set_storage_key(unsigned long addr, unsigned char skey, int mapped) { - if (!mapped) - asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0" - : : "d" (skey), "a" (addr)); - else - asm volatile("sske %0,%1" : : "d" (skey), "a" (addr)); + if (!mapped) { + asm volatile( + " .insn rrf,0xb22b0000,%[skey],%[addr],8,0" + : + : [skey] "d" (skey), [addr] "a" (addr) + : "memory"); + } else { + asm volatile( + " sske %[skey],%[addr]" + : + : [skey] "d" (skey), [addr] "a" (addr) + : "memory"); + } } static inline unsigned char page_get_storage_key(unsigned long addr) @@ -148,11 +158,12 @@ static inline int page_reset_referenced(unsigned long addr) int cc; asm volatile( - " rrbe 0,%1\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (cc) : "a" (addr) : "cc"); - return cc; + " rrbe 0,%[addr]\n" + CC_IPM(cc) + : CC_OUT(cc, cc) + : [addr] "a" (addr) + : CC_CLOBBER); + return CC_TRANSFORM(cc); } /* Bits int the storage key */ @@ -162,6 +173,7 @@ static inline int page_reset_referenced(unsigned long addr) #define _PAGE_ACC_BITS 0xf0 /* HW access control bits */ struct page; +struct folio; void arch_free_page(struct page *page, int order); void arch_alloc_page(struct page *page, int order); @@ -173,10 +185,8 @@ static inline int devmem_is_allowed(unsigned long pfn) #define HAVE_ARCH_FREE_PAGE #define HAVE_ARCH_ALLOC_PAGE -#if IS_ENABLED(CONFIG_PGSTE) -int arch_make_page_accessible(struct page *page); -#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE -#endif +int arch_make_folio_accessible(struct folio *folio); +#define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE struct vm_layout { unsigned long kaslr_offset; @@ -189,7 +199,11 @@ extern 
struct vm_layout vm_layout; #define __kaslr_offset vm_layout.kaslr_offset #define __kaslr_offset_phys vm_layout.kaslr_offset_phys +#ifdef CONFIG_RANDOMIZE_IDENTITY_BASE #define __identity_base vm_layout.identity_base +#else +#define __identity_base 0UL +#endif #define ident_map_size vm_layout.identity_size static inline unsigned long kaslr_offset(void) @@ -246,8 +260,8 @@ static inline unsigned long __phys_addr(unsigned long x, bool is_31bit) #define phys_to_pfn(phys) ((phys) >> PAGE_SHIFT) #define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) -#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys)) -#define page_to_phys(page) pfn_to_phys(page_to_pfn(page)) +#define phys_to_folio(phys) page_folio(phys_to_page(phys)) +#define folio_to_phys(page) pfn_to_phys(folio_pfn(folio)) static inline void *pfn_to_virt(unsigned long pfn) { @@ -268,7 +282,7 @@ static inline unsigned long virt_to_pfn(const void *kaddr) #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> @@ -276,8 +290,9 @@ static inline unsigned long virt_to_pfn(const void *kaddr) #define AMODE31_SIZE (3 * PAGE_SIZE) #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) -#define __START_KERNEL 0x100000 #define __NO_KASLR_START_KERNEL CONFIG_KERNEL_IMAGE_BASE #define __NO_KASLR_END_KERNEL (__NO_KASLR_START_KERNEL + KERNEL_IMAGE_SIZE) +#define TEXT_OFFSET 0x100000 + #endif /* _S390_PAGE_H */ diff --git a/arch/s390/include/asm/pai.h b/arch/s390/include/asm/pai.h index 3f609565734b..534d0320e2aa 100644 --- a/arch/s390/include/asm/pai.h +++ b/arch/s390/include/asm/pai.h @@ -11,6 +11,7 @@ #include <linux/jump_label.h> #include <asm/lowcore.h> #include <asm/ptrace.h> +#include <asm/asm.h> struct qpaci_info_block { u64 header; @@ -33,12 +34,11 @@ static inline int qpaci(struct qpaci_info_block *info) " lgr 0,%[size]\n" " .insn s,0xb28f0000,%[info]\n" " lgr %[size],0\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=d" (cc), [info] "=Q" (*info), [size] "+&d" (size) + CC_IPM(cc) + : CC_OUT(cc, cc), [info] "=Q" (*info), [size] "+&d" (size) : - : "0", "cc", "memory"); - return cc ? (size + 1) * sizeof(u64) : 0; + : CC_CLOBBER_LIST("0", "memory")); + return CC_TRANSFORM(cc) ? 
(size + 1) * sizeof(u64) : 0; } #define PAI_CRYPTO_BASE 0x1000 /* First event number */ @@ -55,11 +55,11 @@ static __always_inline void pai_kernel_enter(struct pt_regs *regs) return; if (!static_branch_unlikely(&pai_key)) return; - if (!S390_lowcore.ccd) + if (!get_lowcore()->ccd) return; if (!user_mode(regs)) return; - WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd | PAI_CRYPTO_KERNEL_OFFSET); + WRITE_ONCE(get_lowcore()->ccd, get_lowcore()->ccd | PAI_CRYPTO_KERNEL_OFFSET); } static __always_inline void pai_kernel_exit(struct pt_regs *regs) @@ -68,18 +68,16 @@ static __always_inline void pai_kernel_exit(struct pt_regs *regs) return; if (!static_branch_unlikely(&pai_key)) return; - if (!S390_lowcore.ccd) + if (!get_lowcore()->ccd) return; if (!user_mode(regs)) return; - WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd & ~PAI_CRYPTO_KERNEL_OFFSET); + WRITE_ONCE(get_lowcore()->ccd, get_lowcore()->ccd & ~PAI_CRYPTO_KERNEL_OFFSET); } -enum paievt_mode { - PAI_MODE_NONE, - PAI_MODE_SAMPLING, - PAI_MODE_COUNTING, -}; - #define PAI_SAVE_AREA(x) ((x)->hw.event_base) +#define PAI_CPU_MASK(x) ((x)->hw.addr_filters) +#define PAI_PMU_IDX(x) ((x)->hw.last_tag) +#define PAI_SWLIST(x) (&(x)->hw.tp_list) + #endif diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 30820a649e6e..a32f465ecf73 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -11,6 +11,9 @@ #include <asm/pci_insn.h> #include <asm/sclp.h> +#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1 +#define arch_can_pci_mmap_wc() 1 + #define PCIBIOS_MIN_IO 0x1000 #define PCIBIOS_MIN_MEM 0x10000000 @@ -96,7 +99,6 @@ struct zpci_bar_struct { u8 size; /* order 2 exponent */ }; -struct s390_domain; struct kvm_zdev; #define ZPCI_FUNCTIONS_PER_BUS 256 @@ -107,9 +109,10 @@ struct zpci_bus { struct list_head resources; struct list_head bus_next; struct resource bus_resource; - int pchid; + int topo; /* TID if topo_is_tid, PCHID otherwise */ int domain_nr; - bool multifunction; + u8 multifunction : 1; + u8 topo_is_tid : 1; enum pci_bus_speed max_bus_speed; }; @@ -130,17 +133,20 @@ struct zpci_dev { u16 vfn; /* virtual function number */ u16 pchid; /* physical channel ID */ u16 maxstbl; /* Maximum store block size */ + u16 rid; /* RID as supplied by firmware */ + u16 tid; /* Topology for which RID is valid */ u8 pfgid; /* function group ID */ u8 pft; /* pci function type */ u8 port; + u8 fidparm; u8 dtsm; /* Supported DT mask */ u8 rid_available : 1; u8 has_hp_slot : 1; u8 has_resources : 1; u8 is_physfn : 1; u8 util_str_avail : 1; - u8 irqs_registered : 1; - u8 reserved : 2; + u8 tid_avail : 1; + u8 rtr_avail : 1; /* Relaxed translation allowed */ unsigned int devfn; /* DEVFN part of the RID*/ u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */ @@ -181,9 +187,10 @@ struct zpci_dev { struct dentry *debugfs_dev; /* IOMMU and passthrough */ - struct s390_domain *s390_domain; /* s390 IOMMU domain data */ + struct iommu_domain *s390_domain; /* attached IOMMU domain */ struct kvm_zdev *kzdev; struct mutex kzdev_lock; + spinlock_t dom_lock; /* protect s390_domain change */ }; static inline bool zdev_enabled(struct zpci_dev *zdev) @@ -191,7 +198,14 @@ static inline bool zdev_enabled(struct zpci_dev *zdev) return (zdev->fh & (1UL << 31)) ? 
true : false; } -extern const struct attribute_group *zpci_attr_groups[]; +extern const struct attribute_group zpci_attr_group; +extern const struct attribute_group pfip_attr_group; +extern const struct attribute_group zpci_ident_attr_group; + +#define ARCH_PCI_DEV_GROUPS &zpci_attr_group, \ + &pfip_attr_group, \ + &zpci_ident_attr_group, + extern unsigned int s390_pci_force_floating __initdata; extern unsigned int s390_pci_no_rid; @@ -203,12 +217,15 @@ extern struct airq_iv *zpci_aif_sbv; ----------------------------------------------------------------------------- */ /* Base stuff */ struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state); +int zpci_add_device(struct zpci_dev *zdev); int zpci_enable_device(struct zpci_dev *); +int zpci_reenable_device(struct zpci_dev *zdev); int zpci_disable_device(struct zpci_dev *); int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh); int zpci_deconfigure_device(struct zpci_dev *zdev); void zpci_device_reserved(struct zpci_dev *zdev); bool zpci_is_device_configured(struct zpci_dev *zdev); +int zpci_scan_devices(void); int zpci_hot_reset_device(struct zpci_dev *zdev); int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64, u8 *); @@ -218,7 +235,7 @@ void zpci_update_fh(struct zpci_dev *zdev, u32 fh); /* CLP */ int clp_setup_writeback_mio(void); -int clp_scan_pci_devices(void); +int clp_scan_pci_devices(struct list_head *scan_list); int clp_query_pci_fn(struct zpci_dev *zdev); int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as); int clp_disable_fh(struct zpci_dev *zdev, u32 *fh); @@ -228,9 +245,20 @@ int clp_refresh_fh(u32 fid, u32 *fh); /* UID */ void update_uid_checking(bool new); +/* Firmware Sysfs */ +int __init __zpci_fw_sysfs_init(void); + +static inline int __init zpci_fw_sysfs_init(void) +{ + if (IS_ENABLED(CONFIG_SYSFS)) + return __zpci_fw_sysfs_init(); + return 0; +} + /* IOMMU Interface */ int zpci_init_iommu(struct zpci_dev *zdev); void zpci_destroy_iommu(struct zpci_dev *zdev); +int zpci_iommu_register_ioat(struct zpci_dev *zdev, u8 *status); #ifdef CONFIG_PCI static inline bool zpci_use_mio(struct zpci_dev *zdev) diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h index f0c677ddd270..7ebff39c84b3 100644 --- a/arch/s390/include/asm/pci_clp.h +++ b/arch/s390/include/asm/pci_clp.h @@ -110,7 +110,8 @@ struct clp_req_query_pci { struct clp_rsp_query_pci { struct clp_rsp_hdr hdr; u16 vfn; /* virtual fn number */ - u16 : 3; + u16 : 2; + u16 tid_avail : 1; u16 rid_avail : 1; u16 is_physfn : 1; u16 reserved1 : 1; @@ -122,16 +123,18 @@ struct clp_rsp_query_pci { u16 pchid; __le32 bar[PCI_STD_NUM_BARS]; u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */ - u16 : 12; - u16 port : 4; + u8 fidparm; + u8 reserved3 : 4; + u8 port : 4; u8 fmb_len; u8 pft; /* pci function type */ u64 sdma; /* start dma as */ u64 edma; /* end dma as */ #define ZPCI_RID_MASK_DEVFN 0x00ff u16 rid; /* BUS/DEVFN PCI address */ - u16 reserved0; - u32 reserved[10]; + u32 reserved0; + u16 tid; + u32 reserved[9]; u32 uid; /* user defined id */ u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */ u32 reserved2[16]; @@ -153,7 +156,9 @@ struct clp_rsp_query_pci_grp { u16 : 4; u16 noi : 12; /* number of interrupts */ u8 version; - u8 : 6; + u8 : 2; + u8 rtr : 1; /* Relaxed translation requirement */ + u8 : 3; u8 frame : 1; u8 refresh : 1; /* TLB refresh mode */ u16 : 3; diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h index 42d7cc4262ca..d12e17201661 100644 --- 
a/arch/s390/include/asm/pci_dma.h +++ b/arch/s390/include/asm/pci_dma.h @@ -25,6 +25,7 @@ enum zpci_ioat_dtype { #define ZPCI_KEY (PAGE_DEFAULT_KEY << 5) #define ZPCI_TABLE_SIZE_RT (1UL << 42) +#define ZPCI_TABLE_SIZE_RS (1UL << 53) #define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST) #define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT) @@ -55,6 +56,8 @@ enum zpci_ioat_dtype { #define ZPCI_PT_BITS 8 #define ZPCI_ST_SHIFT (ZPCI_PT_BITS + PAGE_SHIFT) #define ZPCI_RT_SHIFT (ZPCI_ST_SHIFT + ZPCI_TABLE_BITS) +#define ZPCI_RS_SHIFT (ZPCI_RT_SHIFT + ZPCI_TABLE_BITS) +#define ZPCI_RF_SHIFT (ZPCI_RS_SHIFT + ZPCI_TABLE_BITS) #define ZPCI_RTE_FLAG_MASK 0x3fffUL #define ZPCI_RTE_ADDR_MASK (~ZPCI_RTE_FLAG_MASK) diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h index e5f57cfe1d45..025c6dcbf893 100644 --- a/arch/s390/include/asm/pci_insn.h +++ b/arch/s390/include/asm/pci_insn.h @@ -16,11 +16,11 @@ #define ZPCI_PCI_ST_FUNC_NOT_AVAIL 40 #define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE 44 -/* Load/Store return codes */ -#define ZPCI_PCI_LS_OK 0 -#define ZPCI_PCI_LS_ERR 1 -#define ZPCI_PCI_LS_BUSY 2 -#define ZPCI_PCI_LS_INVAL_HANDLE 3 +/* PCI instruction condition codes */ +#define ZPCI_CC_OK 0 +#define ZPCI_CC_ERR 1 +#define ZPCI_CC_BUSY 2 +#define ZPCI_CC_INVAL_HANDLE 3 /* Load/Store address space identifiers */ #define ZPCI_PCIAS_MEMIO_0 0 diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h index 2686bee800e3..43a5ea4ee20f 100644 --- a/arch/s390/include/asm/pci_io.h +++ b/arch/s390/include/asm/pci_io.h @@ -143,7 +143,7 @@ static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max) static inline int zpci_memcpy_fromio(void *dst, const volatile void __iomem *src, - unsigned long n) + size_t n) { int size, rc = 0; @@ -162,7 +162,7 @@ static inline int zpci_memcpy_fromio(void *dst, } static inline int zpci_memcpy_toio(volatile void __iomem *dst, - const void *src, unsigned long n) + const void *src, size_t n) { int size, rc = 0; @@ -187,7 +187,7 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst, } static inline int zpci_memset_io(volatile void __iomem *dst, - unsigned char val, size_t count) + int val, size_t count) { u8 *src = kmalloc(count, GFP_KERNEL); int rc; diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h index 264095dd84bc..5899f57f17d1 100644 --- a/arch/s390/include/asm/percpu.h +++ b/arch/s390/include/asm/percpu.h @@ -4,21 +4,13 @@ #include <linux/preempt.h> #include <asm/cmpxchg.h> +#include <asm/march.h> /* * s390 uses its own implementation for per cpu data, the offset of * the cpu local data area is cached in the cpu's lowcore memory. */ -#define __my_cpu_offset S390_lowcore.percpu_offset - -/* - * For 64 bit module code, the module may be more than 4G above the - * per cpu area, use weak definitions to force the compiler to - * generate external references. 
- */ -#if defined(MODULE) -#define ARCH_NEEDS_WEAK_PER_CPU -#endif +#define __my_cpu_offset get_lowcore()->percpu_offset /* * We use a compare-and-swap loop since that uses less cpu cycles than @@ -50,7 +42,7 @@ #define this_cpu_or_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) #define this_cpu_or_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) -#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES +#ifndef MARCH_HAS_Z196_FEATURES #define this_cpu_add_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) #define this_cpu_add_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) @@ -61,7 +53,7 @@ #define this_cpu_or_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) #define this_cpu_or_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) -#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ +#else /* MARCH_HAS_Z196_FEATURES */ #define arch_this_cpu_add(pcp, val, op1, op2, szcast) \ { \ @@ -73,13 +65,13 @@ if (__builtin_constant_p(val__) && \ ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \ asm volatile( \ - op2 " %[ptr__],%[val__]\n" \ + op2 " %[ptr__],%[val__]" \ : [ptr__] "+Q" (*ptr__) \ : [val__] "i" ((szcast)val__) \ : "cc"); \ } else { \ asm volatile( \ - op1 " %[old__],%[val__],%[ptr__]\n" \ + op1 " %[old__],%[val__],%[ptr__]" \ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ : [val__] "d" (val__) \ : "cc"); \ @@ -98,7 +90,7 @@ preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ asm volatile( \ - op " %[old__],%[val__],%[ptr__]\n" \ + op " %[old__],%[val__],%[ptr__]" \ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ : [val__] "d" (val__) \ : "cc"); \ @@ -117,7 +109,7 @@ preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ asm volatile( \ - op " %[old__],%[val__],%[ptr__]\n" \ + op " %[old__],%[val__],%[ptr__]" \ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ : [val__] "d" (val__) \ : "cc"); \ @@ -129,7 +121,7 @@ #define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lao") #define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, "laog") -#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ +#endif /* MARCH_HAS_Z196_FEATURES */ #define arch_this_cpu_cmpxchg(pcp, oval, nval) \ ({ \ diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index 9917e2717b2b..e53894cedf08 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h @@ -37,9 +37,9 @@ extern ssize_t cpumf_events_sysfs_show(struct device *dev, /* Perf callbacks */ struct pt_regs; -extern unsigned long perf_instruction_pointer(struct pt_regs *regs); -extern unsigned long perf_misc_flags(struct pt_regs *regs); -#define perf_misc_flags(regs) perf_misc_flags(regs) +extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs); +extern unsigned long perf_arch_misc_flags(struct pt_regs *regs); +#define perf_arch_misc_flags(regs) perf_arch_misc_flags(regs) #define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs /* Perf pt_regs extension for sample-data-entry indicators */ @@ -48,31 +48,8 @@ struct perf_sf_sde_regs { unsigned long reserved:63; /* reserved */ }; -/* Perf PMU definitions for the counter facility */ -#define PERF_CPUM_CF_MAX_CTR 0xffffUL /* Max ctr for ECCTR */ - -/* Perf PMU definitions for the sampling facility */ -#define PERF_CPUM_SF_MAX_CTR 2 -#define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */ -#define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */ -#define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */ -#define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */ -#define PERF_CPUM_SF_DIAG_MODE 
0x0002 /* Diagnostic-sampling flag */ -#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \ - PERF_CPUM_SF_DIAG_MODE) -#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */ - -#define REG_NONE 0 -#define REG_OVERFLOW 1 -#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config) -#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc) -#define TEAR_REG(hwc) ((hwc)->last_tag) -#define SAMPL_RATE(hwc) ((hwc)->event_base) -#define SAMPL_FLAGS(hwc) ((hwc)->config_base) -#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE) -#define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE) - #define perf_arch_fetch_caller_regs(regs, __ip) do { \ + (regs)->psw.mask = 0; \ (regs)->psw.addr = (__ip); \ (regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \ offsetof(struct stack_frame, back_chain); \ diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 7b84ef6dc4b6..a16e65072371 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -19,14 +19,17 @@ #define CRST_ALLOC_ORDER 2 -unsigned long *crst_table_alloc(struct mm_struct *); +unsigned long *crst_table_alloc_noprof(struct mm_struct *); +#define crst_table_alloc(...) alloc_hooks(crst_table_alloc_noprof(__VA_ARGS__)) void crst_table_free(struct mm_struct *, unsigned long *); -unsigned long *page_table_alloc(struct mm_struct *); -struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm); +unsigned long *page_table_alloc_noprof(struct mm_struct *); +#define page_table_alloc(...) alloc_hooks(page_table_alloc_noprof(__VA_ARGS__)) void page_table_free(struct mm_struct *, unsigned long *); + +struct ptdesc *page_table_alloc_pgste_noprof(struct mm_struct *mm); +#define page_table_alloc_pgste(...) alloc_hooks(page_table_alloc_pgste_noprof(__VA_ARGS__)) void page_table_free_pgste(struct ptdesc *ptdesc); -extern int page_table_allocate_pgste; static inline void crst_table_init(unsigned long *crst, unsigned long entry) { @@ -49,54 +52,70 @@ static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long return addr; } -static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address) +static inline p4d_t *p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long address) { - unsigned long *table = crst_table_alloc(mm); + unsigned long *table = crst_table_alloc_noprof(mm); + + if (!table) + return NULL; + crst_table_init(table, _REGION2_ENTRY_EMPTY); + pagetable_p4d_ctor(virt_to_ptdesc(table)); - if (table) - crst_table_init(table, _REGION2_ENTRY_EMPTY); return (p4d_t *) table; } +#define p4d_alloc_one(...) alloc_hooks(p4d_alloc_one_noprof(__VA_ARGS__)) static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) { - if (!mm_p4d_folded(mm)) - crst_table_free(mm, (unsigned long *) p4d); + if (mm_p4d_folded(mm)) + return; + + pagetable_dtor(virt_to_ptdesc(p4d)); + crst_table_free(mm, (unsigned long *) p4d); } -static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) +static inline pud_t *pud_alloc_one_noprof(struct mm_struct *mm, unsigned long address) { - unsigned long *table = crst_table_alloc(mm); - if (table) - crst_table_init(table, _REGION3_ENTRY_EMPTY); + unsigned long *table = crst_table_alloc_noprof(mm); + + if (!table) + return NULL; + crst_table_init(table, _REGION3_ENTRY_EMPTY); + pagetable_pud_ctor(virt_to_ptdesc(table)); + return (pud_t *) table; } +#define pud_alloc_one(...) 
alloc_hooks(pud_alloc_one_noprof(__VA_ARGS__)) static inline void pud_free(struct mm_struct *mm, pud_t *pud) { - if (!mm_pud_folded(mm)) - crst_table_free(mm, (unsigned long *) pud); + if (mm_pud_folded(mm)) + return; + + pagetable_dtor(virt_to_ptdesc(pud)); + crst_table_free(mm, (unsigned long *) pud); } -static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) +static inline pmd_t *pmd_alloc_one_noprof(struct mm_struct *mm, unsigned long vmaddr) { - unsigned long *table = crst_table_alloc(mm); + unsigned long *table = crst_table_alloc_noprof(mm); if (!table) return NULL; crst_table_init(table, _SEGMENT_ENTRY_EMPTY); - if (!pagetable_pmd_ctor(virt_to_ptdesc(table))) { + if (!pagetable_pmd_ctor(mm, virt_to_ptdesc(table))) { crst_table_free(mm, table); return NULL; } return (pmd_t *) table; } +#define pmd_alloc_one(...) alloc_hooks(pmd_alloc_one_noprof(__VA_ARGS__)) static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { if (mm_pmd_folded(mm)) return; - pagetable_pmd_dtor(virt_to_ptdesc(pmd)); + pagetable_dtor(virt_to_ptdesc(pmd)); crst_table_free(mm, (unsigned long *) pmd); } @@ -115,13 +134,21 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) set_pud(pud, __pud(_REGION3_ENTRY | __pa(pmd))); } -static inline pgd_t *pgd_alloc(struct mm_struct *mm) +static inline pgd_t *pgd_alloc_noprof(struct mm_struct *mm) { - return (pgd_t *) crst_table_alloc(mm); + unsigned long *table = crst_table_alloc_noprof(mm); + + if (!table) + return NULL; + pagetable_pgd_ctor(virt_to_ptdesc(table)); + + return (pgd_t *) table; } +#define pgd_alloc(...) alloc_hooks(pgd_alloc_noprof(__VA_ARGS__)) static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { + pagetable_dtor(virt_to_ptdesc(pgd)); crst_table_free(mm, (unsigned long *) pgd); } diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 70b6ee557eb2..bca9b29778c3 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -14,10 +14,10 @@ #include <linux/sched.h> #include <linux/mm_types.h> +#include <linux/cpufeature.h> #include <linux/page-flags.h> #include <linux/radix-tree.h> #include <linux/atomic.h> -#include <asm/sections.h> #include <asm/ctlreg.h> #include <asm/bug.h> #include <asm/page.h> @@ -35,7 +35,7 @@ enum { PG_DIRECT_MAP_MAX }; -extern atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]); +extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX]; static inline void update_page_count(int level, long count) { @@ -85,14 +85,14 @@ extern unsigned long zero_page_mask; * happen without trampolines and in addition the placement within a * 2GB frame is branch prediction unit friendly. 
*/ -extern unsigned long __bootdata_preserved(VMALLOC_START); -extern unsigned long __bootdata_preserved(VMALLOC_END); +extern unsigned long VMALLOC_START; +extern unsigned long VMALLOC_END; #define VMALLOC_DEFAULT_SIZE ((512UL << 30) - MODULES_LEN) -extern struct page *__bootdata_preserved(vmemmap); -extern unsigned long __bootdata_preserved(vmemmap_size); +extern struct page *vmemmap; +extern unsigned long vmemmap_size; -extern unsigned long __bootdata_preserved(MODULES_VADDR); -extern unsigned long __bootdata_preserved(MODULES_END); +extern unsigned long MODULES_VADDR; +extern unsigned long MODULES_END; #define MODULES_VADDR MODULES_VADDR #define MODULES_END MODULES_END #define MODULES_LEN (1UL << 31) @@ -107,12 +107,26 @@ static inline int is_module_addr(void *addr) return 1; } +#ifdef CONFIG_KMSAN +#define KMSAN_VMALLOC_SIZE (VMALLOC_END - VMALLOC_START) +#define KMSAN_VMALLOC_SHADOW_START VMALLOC_END +#define KMSAN_VMALLOC_SHADOW_END (KMSAN_VMALLOC_SHADOW_START + KMSAN_VMALLOC_SIZE) +#define KMSAN_VMALLOC_ORIGIN_START KMSAN_VMALLOC_SHADOW_END +#define KMSAN_VMALLOC_ORIGIN_END (KMSAN_VMALLOC_ORIGIN_START + KMSAN_VMALLOC_SIZE) +#define KMSAN_MODULES_SHADOW_START KMSAN_VMALLOC_ORIGIN_END +#define KMSAN_MODULES_SHADOW_END (KMSAN_MODULES_SHADOW_START + MODULES_LEN) +#define KMSAN_MODULES_ORIGIN_START KMSAN_MODULES_SHADOW_END +#define KMSAN_MODULES_ORIGIN_END (KMSAN_MODULES_ORIGIN_START + MODULES_LEN) +#endif + #ifdef CONFIG_RANDOMIZE_BASE #define KASLR_LEN (1UL << 31) #else #define KASLR_LEN 0UL #endif +void setup_protection_map(void); + /* * A 64 bit pagetable entry of S390 has following format: * | PFRA |0IPC| OS | @@ -265,7 +279,8 @@ static inline int is_module_addr(void *addr) #define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID) #define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH) #define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID) -#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH) +#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH | \ + _REGION3_ENTRY_PRESENT) #define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID) #define _REGION3_ENTRY_HARDWARE_BITS 0xfffffffffffff6ffUL @@ -273,18 +288,27 @@ static inline int is_module_addr(void *addr) #define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */ #define _REGION3_ENTRY_DIRTY 0x2000 /* SW region dirty bit */ #define _REGION3_ENTRY_YOUNG 0x1000 /* SW region young bit */ +#define _REGION3_ENTRY_COMM 0x0010 /* Common-Region, marks swap entry */ #define _REGION3_ENTRY_LARGE 0x0400 /* RTTE-format control, large page */ -#define _REGION3_ENTRY_WRITE 0x0002 /* SW region write bit */ -#define _REGION3_ENTRY_READ 0x0001 /* SW region read bit */ +#define _REGION3_ENTRY_WRITE 0x8000 /* SW region write bit */ +#define _REGION3_ENTRY_READ 0x4000 /* SW region read bit */ #ifdef CONFIG_MEM_SOFT_DIRTY -#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */ +#define _REGION3_ENTRY_SOFT_DIRTY 0x0002 /* SW region soft dirty bit */ #else #define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */ #endif #define _REGION_ENTRY_BITS 0xfffffffffffff22fUL +/* + * SW region present bit. For non-leaf region-third-table entries, bits 62-63 + * indicate the TABLE LENGTH and both must be set to 1. But such entries + * would always be considered as present, so it is safe to use bit 63 as + * PRESENT bit for PUD. 
+ */ +#define _REGION3_ENTRY_PRESENT 0x0001 + /* Bits in the segment table entry */ #define _SEGMENT_ENTRY_BITS 0xfffffffffffffe3fUL #define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe3cUL @@ -296,21 +320,29 @@ static inline int is_module_addr(void *addr) #define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */ #define _SEGMENT_ENTRY_TYPE_MASK 0x0c /* segment table type mask */ -#define _SEGMENT_ENTRY (0) +#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PRESENT) #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID) #define _SEGMENT_ENTRY_DIRTY 0x2000 /* SW segment dirty bit */ #define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */ + +#define _SEGMENT_ENTRY_COMM 0x0010 /* Common-Segment, marks swap entry */ #define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */ -#define _SEGMENT_ENTRY_WRITE 0x0002 /* SW segment write bit */ -#define _SEGMENT_ENTRY_READ 0x0001 /* SW segment read bit */ +#define _SEGMENT_ENTRY_WRITE 0x8000 /* SW segment write bit */ +#define _SEGMENT_ENTRY_READ 0x4000 /* SW segment read bit */ #ifdef CONFIG_MEM_SOFT_DIRTY -#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */ +#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0002 /* SW segment soft dirty bit */ #else #define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */ #endif +#define _SEGMENT_ENTRY_PRESENT 0x0001 /* SW segment present bit */ + +/* Common bits in region and segment table entries, for swap entries */ +#define _RST_ENTRY_COMM 0x0010 /* Common-Region/Segment, marks swap entry */ +#define _RST_ENTRY_INVALID 0x0020 /* invalid region/segment table entry */ + #define _CRST_ENTRIES 2048 /* number of region/segment table entries */ #define _PAGE_ENTRIES 256 /* number of page table entries */ @@ -326,7 +358,7 @@ static inline int is_module_addr(void *addr) #define _REGION2_INDEX (0x7ffUL << _REGION2_SHIFT) #define _REGION3_INDEX (0x7ffUL << _REGION3_SHIFT) #define _SEGMENT_INDEX (0x7ffUL << _SEGMENT_SHIFT) -#define _PAGE_INDEX (0xffUL << _PAGE_SHIFT) +#define _PAGE_INDEX (0xffUL << PAGE_SHIFT) #define _REGION1_SIZE (1UL << _REGION1_SHIFT) #define _REGION2_SIZE (1UL << _REGION2_SHIFT) @@ -389,9 +421,10 @@ static inline int is_module_addr(void *addr) #define PGSTE_HC_BIT 0x0020000000000000UL #define PGSTE_GR_BIT 0x0004000000000000UL #define PGSTE_GC_BIT 0x0002000000000000UL -#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */ -#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */ -#define PGSTE_VSIE_BIT 0x0000200000000000UL /* ref'd in a shadow table */ +#define PGSTE_ST2_MASK 0x0000ffff00000000UL +#define PGSTE_UC_BIT 0x0000000000008000UL /* user dirty (migration) */ +#define PGSTE_IN_BIT 0x0000000000004000UL /* IPTE notify bit */ +#define PGSTE_VSIE_BIT 0x0000000000002000UL /* ref'd in a shadow table */ /* Guest Page State used for virtualization */ #define _PGSTE_GPS_ZERO 0x0000000080000000UL @@ -413,90 +446,107 @@ static inline int is_module_addr(void *addr) /* * Page protection definitions. 
*/ -#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT) -#define PAGE_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | \ +#define __PAGE_NONE (_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT) +#define __PAGE_RO (_PAGE_PRESENT | _PAGE_READ | \ _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT) -#define PAGE_RX __pgprot(_PAGE_PRESENT | _PAGE_READ | \ +#define __PAGE_RX (_PAGE_PRESENT | _PAGE_READ | \ _PAGE_INVALID | _PAGE_PROTECT) -#define PAGE_RW __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ +#define __PAGE_RW (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT) -#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ +#define __PAGE_RWX (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ _PAGE_INVALID | _PAGE_PROTECT) - -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ +#define __PAGE_SHARED (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC) -#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ +#define __PAGE_KERNEL (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC) -#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \ +#define __PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \ _PAGE_PROTECT | _PAGE_NOEXEC) -#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ - _PAGE_YOUNG | _PAGE_DIRTY) -/* - * On s390 the page table entry has an invalid bit and a read-only bit. - * Read permission implies execute permission and write permission - * implies read permission. - */ - /*xwr*/ +extern unsigned long page_noexec_mask; + +#define __pgprot_page_mask(x) __pgprot((x) & page_noexec_mask) + +#define PAGE_NONE __pgprot_page_mask(__PAGE_NONE) +#define PAGE_RO __pgprot_page_mask(__PAGE_RO) +#define PAGE_RX __pgprot_page_mask(__PAGE_RX) +#define PAGE_RW __pgprot_page_mask(__PAGE_RW) +#define PAGE_RWX __pgprot_page_mask(__PAGE_RWX) +#define PAGE_SHARED __pgprot_page_mask(__PAGE_SHARED) +#define PAGE_KERNEL __pgprot_page_mask(__PAGE_KERNEL) +#define PAGE_KERNEL_RO __pgprot_page_mask(__PAGE_KERNEL_RO) /* * Segment entry (large page) protection definitions. 
*/ -#define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \ +#define __SEGMENT_NONE (_SEGMENT_ENTRY_PRESENT | \ + _SEGMENT_ENTRY_INVALID | \ _SEGMENT_ENTRY_PROTECT) -#define SEGMENT_RO __pgprot(_SEGMENT_ENTRY_PROTECT | \ +#define __SEGMENT_RO (_SEGMENT_ENTRY_PRESENT | \ + _SEGMENT_ENTRY_PROTECT | \ _SEGMENT_ENTRY_READ | \ _SEGMENT_ENTRY_NOEXEC) -#define SEGMENT_RX __pgprot(_SEGMENT_ENTRY_PROTECT | \ +#define __SEGMENT_RX (_SEGMENT_ENTRY_PRESENT | \ + _SEGMENT_ENTRY_PROTECT | \ _SEGMENT_ENTRY_READ) -#define SEGMENT_RW __pgprot(_SEGMENT_ENTRY_READ | \ +#define __SEGMENT_RW (_SEGMENT_ENTRY_PRESENT | \ + _SEGMENT_ENTRY_READ | \ _SEGMENT_ENTRY_WRITE | \ _SEGMENT_ENTRY_NOEXEC) -#define SEGMENT_RWX __pgprot(_SEGMENT_ENTRY_READ | \ +#define __SEGMENT_RWX (_SEGMENT_ENTRY_PRESENT | \ + _SEGMENT_ENTRY_READ | \ _SEGMENT_ENTRY_WRITE) -#define SEGMENT_KERNEL __pgprot(_SEGMENT_ENTRY | \ +#define __SEGMENT_KERNEL (_SEGMENT_ENTRY | \ _SEGMENT_ENTRY_LARGE | \ _SEGMENT_ENTRY_READ | \ _SEGMENT_ENTRY_WRITE | \ _SEGMENT_ENTRY_YOUNG | \ _SEGMENT_ENTRY_DIRTY | \ _SEGMENT_ENTRY_NOEXEC) -#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \ +#define __SEGMENT_KERNEL_RO (_SEGMENT_ENTRY | \ _SEGMENT_ENTRY_LARGE | \ _SEGMENT_ENTRY_READ | \ _SEGMENT_ENTRY_YOUNG | \ _SEGMENT_ENTRY_PROTECT | \ _SEGMENT_ENTRY_NOEXEC) -#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \ - _SEGMENT_ENTRY_LARGE | \ - _SEGMENT_ENTRY_READ | \ - _SEGMENT_ENTRY_WRITE | \ - _SEGMENT_ENTRY_YOUNG | \ - _SEGMENT_ENTRY_DIRTY) + +extern unsigned long segment_noexec_mask; + +#define __pgprot_segment_mask(x) __pgprot((x) & segment_noexec_mask) + +#define SEGMENT_NONE __pgprot_segment_mask(__SEGMENT_NONE) +#define SEGMENT_RO __pgprot_segment_mask(__SEGMENT_RO) +#define SEGMENT_RX __pgprot_segment_mask(__SEGMENT_RX) +#define SEGMENT_RW __pgprot_segment_mask(__SEGMENT_RW) +#define SEGMENT_RWX __pgprot_segment_mask(__SEGMENT_RWX) +#define SEGMENT_KERNEL __pgprot_segment_mask(__SEGMENT_KERNEL) +#define SEGMENT_KERNEL_RO __pgprot_segment_mask(__SEGMENT_KERNEL_RO) /* * Region3 entry (large page) protection definitions. 
*/ -#define REGION3_KERNEL __pgprot(_REGION_ENTRY_TYPE_R3 | \ - _REGION3_ENTRY_LARGE | \ - _REGION3_ENTRY_READ | \ - _REGION3_ENTRY_WRITE | \ - _REGION3_ENTRY_YOUNG | \ +#define __REGION3_KERNEL (_REGION_ENTRY_TYPE_R3 | \ + _REGION3_ENTRY_PRESENT | \ + _REGION3_ENTRY_LARGE | \ + _REGION3_ENTRY_READ | \ + _REGION3_ENTRY_WRITE | \ + _REGION3_ENTRY_YOUNG | \ _REGION3_ENTRY_DIRTY | \ _REGION_ENTRY_NOEXEC) -#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \ - _REGION3_ENTRY_LARGE | \ - _REGION3_ENTRY_READ | \ - _REGION3_ENTRY_YOUNG | \ - _REGION_ENTRY_PROTECT | \ - _REGION_ENTRY_NOEXEC) -#define REGION3_KERNEL_EXEC __pgprot(_REGION_ENTRY_TYPE_R3 | \ - _REGION3_ENTRY_LARGE | \ - _REGION3_ENTRY_READ | \ - _REGION3_ENTRY_WRITE | \ - _REGION3_ENTRY_YOUNG | \ - _REGION3_ENTRY_DIRTY) +#define __REGION3_KERNEL_RO (_REGION_ENTRY_TYPE_R3 | \ + _REGION3_ENTRY_PRESENT | \ + _REGION3_ENTRY_LARGE | \ + _REGION3_ENTRY_READ | \ + _REGION3_ENTRY_YOUNG | \ + _REGION_ENTRY_PROTECT | \ + _REGION_ENTRY_NOEXEC) + +extern unsigned long region_noexec_mask; + +#define __pgprot_region_mask(x) __pgprot((x) & region_noexec_mask) + +#define REGION3_KERNEL __pgprot_region_mask(__REGION3_KERNEL) +#define REGION3_KERNEL_RO __pgprot_region_mask(__REGION3_KERNEL_RO) static inline bool mm_p4d_folded(struct mm_struct *mm) { @@ -534,13 +584,14 @@ static inline int mm_is_protected(struct mm_struct *mm) return 0; } -static inline int mm_alloc_pgste(struct mm_struct *mm) +static inline pgste_t clear_pgste_bit(pgste_t pgste, unsigned long mask) { -#ifdef CONFIG_PGSTE - if (unlikely(mm->context.alloc_pgste)) - return 1; -#endif - return 0; + return __pgste(pgste_val(pgste) & ~mask); +} + +static inline pgste_t set_pgste_bit(pgste_t pgste, unsigned long mask) +{ + return __pgste(pgste_val(pgste) | mask); } static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) @@ -597,19 +648,15 @@ static inline int mm_uses_skeys(struct mm_struct *mm) return 0; } -static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new) -{ - union register_pair r1 = { .even = old, .odd = new, }; - unsigned long address = (unsigned long)ptr | 1; - - asm volatile( - " csp %[r1],%[address]" - : [r1] "+&d" (r1.pair), "+m" (*ptr) - : [address] "d" (address) - : "cc"); -} - -static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new) +/** + * cspg() - Compare and Swap and Purge (CSPG) + * @ptr: Pointer to the value to be exchanged + * @old: The expected old value + * @new: The new value + * + * Return: True if compare and swap was successful, otherwise false. 
+ */ +static inline bool cspg(unsigned long *ptr, unsigned long old, unsigned long new) { union register_pair r1 = { .even = old, .odd = new, }; unsigned long address = (unsigned long)ptr | 1; @@ -619,6 +666,7 @@ static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new : [r1] "+&d" (r1.pair), "+m" (*ptr) : [address] "d" (address) : "cc"); + return old == r1.even; } #define CRDTE_DTT_PAGE 0x00UL @@ -627,7 +675,18 @@ static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new #define CRDTE_DTT_REGION2 0x18UL #define CRDTE_DTT_REGION1 0x1cUL -static inline void crdte(unsigned long old, unsigned long new, +/** + * crdte() - Compare and Replace DAT Table Entry + * @old: The expected old value + * @new: The new value + * @table: Pointer to the value to be exchanged + * @dtt: Table type of the table to be exchanged + * @address: The address mapped by the entry to be replaced + * @asce: The ASCE of this entry + * + * Return: True if compare and replace was successful, otherwise false. + */ +static inline bool crdte(unsigned long old, unsigned long new, unsigned long *table, unsigned long dtt, unsigned long address, unsigned long asce) { @@ -638,6 +697,7 @@ static inline void crdte(unsigned long old, unsigned long new, : [r1] "+&d" (r1.pair) : [r2] "d" (r2.pair), [asce] "a" (asce) : "memory", "cc"); + return old == r1.even; } /* @@ -713,7 +773,7 @@ static inline int pud_present(pud_t pud) { if (pud_folded(pud)) return 1; - return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL; + return (pud_val(pud) & _REGION3_ENTRY_PRESENT) != 0; } static inline int pud_none(pud_t pud) @@ -728,13 +788,18 @@ static inline bool pud_leaf(pud_t pud) { if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3) return 0; - return !!(pud_val(pud) & _REGION3_ENTRY_LARGE); + return (pud_present(pud) && (pud_val(pud) & _REGION3_ENTRY_LARGE) != 0); +} + +static inline int pmd_present(pmd_t pmd) +{ + return (pmd_val(pmd) & _SEGMENT_ENTRY_PRESENT) != 0; } #define pmd_leaf pmd_leaf static inline bool pmd_leaf(pmd_t pmd) { - return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0; + return (pmd_present(pmd) && (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0); } static inline int pmd_bad(pmd_t pmd) @@ -766,11 +831,6 @@ static inline int p4d_bad(p4d_t p4d) return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0; } -static inline int pmd_present(pmd_t pmd) -{ - return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY; -} - static inline int pmd_none(pmd_t pmd) { return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY; @@ -843,7 +903,7 @@ static inline int pmd_protnone(pmd_t pmd) } #endif -static inline int pte_swp_exclusive(pte_t pte) +static inline bool pte_swp_exclusive(pte_t pte) { return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; } @@ -891,6 +951,12 @@ static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY)); } +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION +#define pmd_swp_soft_dirty(pmd) pmd_soft_dirty(pmd) +#define pmd_swp_mksoft_dirty(pmd) pmd_mksoft_dirty(pmd) +#define pmd_swp_clear_soft_dirty(pmd) pmd_clear_soft_dirty(pmd) +#endif + /* * query functions pte_write/pte_dirty/pte_young only work if * pte_present() is true. Undefined behaviour if not.. @@ -922,6 +988,7 @@ static inline int pte_unused(pte_t pte) * young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID * must not be set. 
*/ +#define pte_pgprot pte_pgprot static inline pgprot_t pte_pgprot(pte_t pte) { unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK; @@ -1075,17 +1142,15 @@ static inline pte_t pte_mkhuge(pte_t pte) #define IPTE_NODAT 0x400 #define IPTE_GUEST_ASCE 0x800 -static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep, - unsigned long opt, unsigned long asce, - int local) +static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep, int local) { unsigned long pto; pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1); - asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%[asce],%[m4]" + asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%%r0,%[m4]" : "+m" (*ptep) - : [r1] "a" (pto), [r2] "a" ((addr & PAGE_MASK) | opt), - [asce] "a" (asce), [m4] "i" (local)); + : [r1] "a" (pto), [r2] "a" (addr & PAGE_MASK), + [m4] "i" (local)); } static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep, @@ -1167,7 +1232,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID)); /* At this point the reference through the mapping is still present */ if (mm_is_protected(mm) && pte_present(res)) - uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK); + uv_convert_from_secure_pte(res); return res; } @@ -1185,7 +1250,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID)); /* At this point the reference through the mapping is still present */ if (mm_is_protected(vma->vm_mm) && pte_present(res)) - uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK); + uv_convert_from_secure_pte(res); return res; } @@ -1217,14 +1282,14 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, * The notifier should have destroyed all protected vCPUs at this * point, so the destroy should be successful. */ - if (full && !uv_destroy_owned_page(pte_val(res) & PAGE_MASK)) + if (full && !uv_destroy_pte(res)) return res; /* * If something went wrong and the page could not be destroyed, or * if this is not a mm teardown, the slower export is used as * fallback instead. */ - uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK); + uv_convert_from_secure_pte(res); return res; } @@ -1268,8 +1333,8 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma, * PTE does not have _PAGE_PROTECT set, to avoid unnecessary overhead. * A local RDP can be used to do the flush. 
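pte_pgprot() is now advertised to generic code via the #define pte_pgprot pte_pgprot marker above. A hedged sketch of a typical consumer, with a hypothetical helper name, that rebuilds a PTE for a new physical page while preserving the old protection bits:

/* Hypothetical helper: keep the protection of an existing mapping
 * while pointing it at a new physical page. mk_pte_phys() and
 * pte_pgprot() are the s390 primitives from this header.
 */
static pte_t pte_replace_page(pte_t old, unsigned long physpage)
{
        return mk_pte_phys(physpage, pte_pgprot(old));
}

Note that mk_pte_phys() also marks the result young, which is usually what a freshly built mapping wants.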
*/ - if (MACHINE_HAS_RDP && !(pte_val(*ptep) & _PAGE_PROTECT)) - __ptep_rdp(address, ptep, 0, 0, 1); + if (cpu_has_rdp() && !(pte_val(*ptep) & _PAGE_PROTECT)) + __ptep_rdp(address, ptep, 1); } #define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault @@ -1283,7 +1348,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma, { if (pte_same(*ptep, entry)) return 0; - if (MACHINE_HAS_RDP && !mm_has_pgste(vma->vm_mm) && pte_allow_rdp(*ptep, entry)) + if (cpu_has_rdp() && !mm_has_pgste(vma->vm_mm) && pte_allow_rdp(*ptep, entry)) ptep_reset_dat_prot(vma->vm_mm, addr, ptep, entry); else ptep_xchg_direct(vma->vm_mm, addr, ptep, entry); @@ -1323,7 +1388,6 @@ int set_pgste_bits(struct mm_struct *mm, unsigned long addr, int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep); int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, unsigned long *oldpte, unsigned long *oldpgste); -void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr); void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr); void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr); void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr); @@ -1331,9 +1395,6 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr); #define pgprot_writecombine pgprot_writecombine pgprot_t pgprot_writecombine(pgprot_t prot); -#define pgprot_writethrough pgprot_writethrough -pgprot_t pgprot_writethrough(pgprot_t prot); - #define PFN_PTE_SHIFT PAGE_SHIFT /* @@ -1375,21 +1436,9 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) pte_t __pte; __pte = __pte(physpage | pgprot_val(pgprot)); - if (!MACHINE_HAS_NX) - __pte = clear_pte_bit(__pte, __pgprot(_PAGE_NOEXEC)); return pte_mkyoung(__pte); } -static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) -{ - unsigned long physpage = page_to_phys(page); - pte_t __pte = mk_pte_phys(physpage, pgprot); - - if (pte_write(__pte) && PageDirty(page)) - __pte = pte_mkdirty(__pte); - return __pte; -} - #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) #define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1)) #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) @@ -1628,10 +1677,10 @@ static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ -static inline void __pmdp_csp(pmd_t *pmdp) +static inline void __pmdp_cspg(pmd_t *pmdp) { - csp((unsigned int *)pmdp + 1, pmd_val(*pmdp), - pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); + cspg((unsigned long *)pmdp, pmd_val(*pmdp), + pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); } #define IDTE_GLOBAL 0 @@ -1744,8 +1793,6 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma, static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t entry) { - if (!MACHINE_HAS_NX) - entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC)); set_pmd(pmdp, entry); } @@ -1813,17 +1860,16 @@ static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, #define pmdp_collapse_flush pmdp_collapse_flush #define pfn_pmd(pfn, pgprot) mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot)) -#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) static inline int pmd_trans_huge(pmd_t pmd) { - return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE; + return pmd_leaf(pmd); } #define has_transparent_hugepage has_transparent_hugepage static inline int has_transparent_hugepage(void) { - 
return MACHINE_HAS_EDAT1 ? 1 : 0; + return cpu_has_edat1() ? 1 : 0; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -1877,6 +1923,92 @@ static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) +/* + * 64 bit swap entry format for REGION3 and SEGMENT table entries (RSTE) + * Bits 59 and 63 are used to indicate the swap entry. Bit 58 marks the rste + * as invalid. + * A swap entry is indicated by bit pattern (rste & 0x011) == 0x010 + * | offset |Xtype |11TT|S0| + * |0000000000111111111122222222223333333333444444444455|555555|5566|66| + * |0123456789012345678901234567890123456789012345678901|234567|8901|23| + * + * Bits 0-51 store the offset. + * Bits 53-57 store the type. + * Bit 62 (S) is used for softdirty tracking. + * Bits 60-61 (TT) indicate the table type: 0x01 for REGION3 and 0x00 for SEGMENT. + * Bit 52 (X) is unused. + */ + +#define __SWP_OFFSET_MASK_RSTE ((1UL << 52) - 1) +#define __SWP_OFFSET_SHIFT_RSTE 12 +#define __SWP_TYPE_MASK_RSTE ((1UL << 5) - 1) +#define __SWP_TYPE_SHIFT_RSTE 6 + +/* + * TT bits set to 0x00 == SEGMENT. For REGION3 entries, caller must add R3 + * bits 0x01. See also __set_huge_pte_at(). + */ +static inline unsigned long mk_swap_rste(unsigned long type, unsigned long offset) +{ + unsigned long rste; + + rste = _RST_ENTRY_INVALID | _RST_ENTRY_COMM; + rste |= (offset & __SWP_OFFSET_MASK_RSTE) << __SWP_OFFSET_SHIFT_RSTE; + rste |= (type & __SWP_TYPE_MASK_RSTE) << __SWP_TYPE_SHIFT_RSTE; + return rste; +} + +static inline unsigned long __swp_type_rste(swp_entry_t entry) +{ + return (entry.val >> __SWP_TYPE_SHIFT_RSTE) & __SWP_TYPE_MASK_RSTE; +} + +static inline unsigned long __swp_offset_rste(swp_entry_t entry) +{ + return (entry.val >> __SWP_OFFSET_SHIFT_RSTE) & __SWP_OFFSET_MASK_RSTE; +} + +#define __rste_to_swp_entry(rste) ((swp_entry_t) { rste }) + +/* + * s390 has different layout for PTE and region / segment table entries (RSTE). + * This is also true for swap entries, and their swap type and offset encoding. + * For hugetlbfs PTE_MARKER support, s390 has internal __swp_type_rste() and + * __swp_offset_rste() helpers to correctly handle RSTE swap entries. + * + * But common swap code does not know about this difference, and only uses + * __swp_type(), __swp_offset() and __swp_entry() helpers for conversion between + * arch-dependent and arch-independent representation of swp_entry_t for all + * pagetable levels. On s390, those helpers only work for PTE swap entries. + * + * Therefore, implement __pmd_to_swp_entry() to build a fake PTE swap entry + * and return the arch-dependent representation of that. Correspondingly, + * implement __swp_entry_to_pmd() to convert that into a proper PMD swap + * entry again. With this, the arch-dependent swp_entry_t representation will + * always look like a PTE swap entry in common code. + * + * This is somewhat similar to fake PTEs in hugetlbfs code for s390, but only + * requires conversion of the swap type and offset, and not all the possible + * PTE bits. 
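The mk_swap_rste()/__swp_type_rste()/__swp_offset_rste() encoding above can be sanity-checked in isolation. A small user-space sketch with the constants copied from this hunk; the _RST_ENTRY_INVALID and _RST_ENTRY_COMM marker bits are omitted since only the type/offset packing is exercised:

#include <assert.h>
#include <stdio.h>

#define __SWP_OFFSET_MASK_RSTE  ((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT_RSTE 12
#define __SWP_TYPE_MASK_RSTE    ((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT_RSTE   6

int main(void)
{
        unsigned long type = 3, offset = 0x123456789aUL;
        unsigned long rste;

        /* encode as mk_swap_rste() does, minus the marker bits */
        rste = (offset & __SWP_OFFSET_MASK_RSTE) << __SWP_OFFSET_SHIFT_RSTE;
        rste |= (type & __SWP_TYPE_MASK_RSTE) << __SWP_TYPE_SHIFT_RSTE;
        /* decode and verify the round trip */
        assert(((rste >> __SWP_TYPE_SHIFT_RSTE) & __SWP_TYPE_MASK_RSTE) == type);
        assert(((rste >> __SWP_OFFSET_SHIFT_RSTE) & __SWP_OFFSET_MASK_RSTE) == offset);
        printf("rste=%#lx type=%lu offset=%#lx\n", rste, type, offset);
        return 0;
}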
+ */ +static inline swp_entry_t __pmd_to_swp_entry(pmd_t pmd) +{ + swp_entry_t arch_entry; + pte_t pte; + + arch_entry = __rste_to_swp_entry(pmd_val(pmd)); + pte = mk_swap_pte(__swp_type_rste(arch_entry), __swp_offset_rste(arch_entry)); + return __pte_to_swp_entry(pte); +} + +static inline pmd_t __swp_entry_to_pmd(swp_entry_t arch_entry) +{ + pmd_t pmd; + + pmd = __pmd(mk_swap_rste(__swp_type(arch_entry), __swp_offset(arch_entry))); + return pmd; +} + extern int vmem_add_mapping(unsigned long start, unsigned long size); extern void vmem_remove_mapping(unsigned long start, unsigned long size); extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc); @@ -1894,4 +2026,40 @@ extern void s390_reset_cmma(struct mm_struct *mm); #define pmd_pgtable(pmd) \ ((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)) +static inline unsigned long gmap_pgste_get_pgt_addr(unsigned long *pgt) +{ + unsigned long *pgstes, res; + + pgstes = pgt + _PAGE_ENTRIES; + + res = (pgstes[0] & PGSTE_ST2_MASK) << 16; + res |= pgstes[1] & PGSTE_ST2_MASK; + res |= (pgstes[2] & PGSTE_ST2_MASK) >> 16; + res |= (pgstes[3] & PGSTE_ST2_MASK) >> 32; + + return res; +} + +static inline pgste_t pgste_get_lock(pte_t *ptep) +{ + unsigned long value = 0; +#ifdef CONFIG_PGSTE + unsigned long *ptr = (unsigned long *)(ptep + PTRS_PER_PTE); + + do { + value = __atomic64_or_barrier(PGSTE_PCL_BIT, ptr); + } while (value & PGSTE_PCL_BIT); + value |= PGSTE_PCL_BIT; +#endif + return __pgste(value); +} + +static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste) +{ +#ifdef CONFIG_PGSTE + barrier(); + WRITE_ONCE(*(unsigned long *)(ptep + PTRS_PER_PTE), pgste_val(pgste) & ~PGSTE_PCL_BIT); +#endif +} + #endif /* _S390_PAGE_H */ diff --git a/arch/s390/include/asm/physmem_info.h b/arch/s390/include/asm/physmem_info.h index f45cfc8bc233..7ef3bbec98b0 100644 --- a/arch/s390/include/asm/physmem_info.h +++ b/arch/s390/include/asm/physmem_info.h @@ -9,6 +9,7 @@ enum physmem_info_source { MEM_DETECT_NONE = 0, MEM_DETECT_SCLP_STOR_INFO, MEM_DETECT_DIAG260, + MEM_DETECT_DIAG500_STOR_LIMIT, MEM_DETECT_SCLP_READ_INFO, MEM_DETECT_BIN_SEARCH }; @@ -25,7 +26,7 @@ enum reserved_range_type { RR_AMODE31, RR_IPLREPORT, RR_CERT_COMP_LIST, - RR_MEM_DETECT_EXTENDED, + RR_MEM_DETECT_EXT, RR_VMEM, RR_MAX }; @@ -107,6 +108,8 @@ static inline const char *get_physmem_info_source(void) return "sclp storage info"; case MEM_DETECT_DIAG260: return "diag260"; + case MEM_DETECT_DIAG500_STOR_LIMIT: + return "diag500 storage limit"; case MEM_DETECT_SCLP_READ_INFO: return "sclp read info"; case MEM_DETECT_BIN_SEARCH: @@ -125,7 +128,7 @@ static inline const char *get_rr_type_name(enum reserved_range_type t) RR_TYPE_NAME(AMODE31); RR_TYPE_NAME(IPLREPORT); RR_TYPE_NAME(CERT_COMP_LIST); - RR_TYPE_NAME(MEM_DETECT_EXTENDED); + RR_TYPE_NAME(MEM_DETECT_EXT); RR_TYPE_NAME(VMEM); default: return "UNKNOWN"; diff --git a/arch/s390/include/asm/pkey.h b/arch/s390/include/asm/pkey.h index 47d80a7451a6..b7b59faf16f4 100644 --- a/arch/s390/include/asm/pkey.h +++ b/arch/s390/include/asm/pkey.h @@ -20,9 +20,22 @@ * @param key pointer to a buffer containing the key blob * @param keylen size of the key blob in bytes * @param protkey pointer to buffer receiving the protected key + * @param xflags additional execution flags (see PKEY_XFLAG_* definitions below) + * As of now the only supported flag is PKEY_XFLAG_NOMEMALLOC. 
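A sketch of a pkey_key2protkey() caller that must not trigger memory allocation, for example from within a cipher's encrypt path; the buffer size, helper name, and fallback policy below are illustrative only:

/* Hypothetical atomic-context caller: derive the protected key
 * without allocations; map -ENOMEM to a retry from sleepable context.
 */
static int derive_protkey_atomic(const u8 *key, u32 keylen)
{
        u8 protkey[64];         /* illustrative buffer size */
        u32 protkeylen = sizeof(protkey), protkeytype;
        int rc;

        rc = pkey_key2protkey(key, keylen, protkey, &protkeylen,
                              &protkeytype, PKEY_XFLAG_NOMEMALLOC);
        if (rc == -ENOMEM)
                return -EAGAIN; /* retry later from a sleepable context */
        return rc;
}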
* @return 0 on success, negative errno value on failure */ -int pkey_keyblob2pkey(const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); +int pkey_key2protkey(const u8 *key, u32 keylen, + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); + +/* + * If this flag is given in the xflags parameter, the pkey implementation + * is not allowed to allocate memory but instead should fall back to use + * preallocated memory or simple fail with -ENOMEM. + * This flag is for protected key derive within a cipher or similar + * which must not allocate memory which would cause io operations - see + * also the CRYPTO_ALG_ALLOCATES_MEMORY flag in crypto.h. + */ +#define PKEY_XFLAG_NOMEMALLOC 0x0001 #endif /* _KAPI_PKEY_H */ diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h index 0e3da500e98c..6ccd033acfe5 100644 --- a/arch/s390/include/asm/preempt.h +++ b/arch/s390/include/asm/preempt.h @@ -5,43 +5,59 @@ #include <asm/current.h> #include <linux/thread_info.h> #include <asm/atomic_ops.h> - -#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES +#include <asm/cmpxchg.h> +#include <asm/march.h> /* We use the MSB mostly because its available */ #define PREEMPT_NEED_RESCHED 0x80000000 + +/* + * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such + * that a decrement hitting 0 means we can and should reschedule. + */ #define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED) +/* + * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users + * that think a non-zero value indicates we cannot preempt. + */ static __always_inline int preempt_count(void) { - return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED; + return READ_ONCE(get_lowcore()->preempt_count) & ~PREEMPT_NEED_RESCHED; } static __always_inline void preempt_count_set(int pc) { int old, new; + old = READ_ONCE(get_lowcore()->preempt_count); do { - old = READ_ONCE(S390_lowcore.preempt_count); - new = (old & PREEMPT_NEED_RESCHED) | - (pc & ~PREEMPT_NEED_RESCHED); - } while (__atomic_cmpxchg(&S390_lowcore.preempt_count, - old, new) != old); + new = (old & PREEMPT_NEED_RESCHED) | (pc & ~PREEMPT_NEED_RESCHED); + } while (!arch_try_cmpxchg(&get_lowcore()->preempt_count, &old, new)); } +/* + * We fold the NEED_RESCHED bit into the preempt count such that + * preempt_enable() can decrement and test for needing to reschedule with a + * short instruction sequence. + * + * We invert the actual bit, so that when the decrement hits 0 we know we both + * need to resched (the bit is cleared) and can resched (no preempt count). 
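The inverted-bit trick described above can be modelled in plain user-space C; the steps below mirror preempt_disable(), set_preempt_need_resched() and the decrement done by preempt_enable():

#include <stdio.h>

#define PREEMPT_NEED_RESCHED    0x80000000u
#define PREEMPT_ENABLED         (0 + PREEMPT_NEED_RESCHED)

int main(void)
{
        unsigned int count = PREEMPT_ENABLED;

        count += 1;                      /* preempt_disable() */
        count &= ~PREEMPT_NEED_RESCHED;  /* set_preempt_need_resched() */
        count -= 1;                      /* preempt_enable()'s decrement */
        /* hits zero only if there is no nesting AND resched is needed;
         * without the &= step the result would be 0x80000000, not 0
         */
        printf("%s\n", count == 0 ? "reschedule" : "keep running");
        return 0;
}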
+ */ + static __always_inline void set_preempt_need_resched(void) { - __atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count); + __atomic_and(~PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count); } static __always_inline void clear_preempt_need_resched(void) { - __atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count); + __atomic_or(PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count); } static __always_inline bool test_preempt_need_resched(void) { - return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED); + return !(READ_ONCE(get_lowcore()->preempt_count) & PREEMPT_NEED_RESCHED); } static __always_inline void __preempt_count_add(int val) @@ -52,11 +68,11 @@ static __always_inline void __preempt_count_add(int val) */ if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) { if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) { - __atomic_add_const(val, &S390_lowcore.preempt_count); + __atomic_add_const(val, &get_lowcore()->preempt_count); return; } } - __atomic_add(val, &S390_lowcore.preempt_count); + __atomic_add(val, &get_lowcore()->preempt_count); } static __always_inline void __preempt_count_sub(int val) @@ -64,76 +80,47 @@ static __always_inline void __preempt_count_sub(int val) __preempt_count_add(-val); } +/* + * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule + * a decrement which hits zero means we have no preempt_count and should + * reschedule. + */ static __always_inline bool __preempt_count_dec_and_test(void) { - return __atomic_add(-1, &S390_lowcore.preempt_count) == 1; + return __atomic_add_const_and_test(-1, &get_lowcore()->preempt_count); } +/* + * Returns true when we need to resched and can (barring IRQ state). + */ static __always_inline bool should_resched(int preempt_offset) { - return unlikely(READ_ONCE(S390_lowcore.preempt_count) == - preempt_offset); -} - -#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ - -#define PREEMPT_ENABLED (0) - -static __always_inline int preempt_count(void) -{ - return READ_ONCE(S390_lowcore.preempt_count); -} - -static __always_inline void preempt_count_set(int pc) -{ - S390_lowcore.preempt_count = pc; -} - -static __always_inline void set_preempt_need_resched(void) -{ + return unlikely(READ_ONCE(get_lowcore()->preempt_count) == preempt_offset); } -static __always_inline void clear_preempt_need_resched(void) -{ -} +#define init_task_preempt_count(p) do { } while (0) +/* Deferred to CPU bringup time */ +#define init_idle_preempt_count(p, cpu) do { } while (0) -static __always_inline bool test_preempt_need_resched(void) -{ - return false; -} +#ifdef CONFIG_PREEMPTION -static __always_inline void __preempt_count_add(int val) -{ - S390_lowcore.preempt_count += val; -} +void preempt_schedule(void); +void preempt_schedule_notrace(void); -static __always_inline void __preempt_count_sub(int val) -{ - S390_lowcore.preempt_count -= val; -} +#ifdef CONFIG_PREEMPT_DYNAMIC -static __always_inline bool __preempt_count_dec_and_test(void) -{ - return !--S390_lowcore.preempt_count && tif_need_resched(); -} +void dynamic_preempt_schedule(void); +void dynamic_preempt_schedule_notrace(void); +#define __preempt_schedule() dynamic_preempt_schedule() +#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace() -static __always_inline bool should_resched(int preempt_offset) -{ - return unlikely(preempt_count() == preempt_offset && - tif_need_resched()); -} +#else /* CONFIG_PREEMPT_DYNAMIC */ -#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ +#define __preempt_schedule() preempt_schedule() +#define 
__preempt_schedule_notrace() preempt_schedule_notrace() -#define init_task_preempt_count(p) do { } while (0) -/* Deferred to CPU bringup time */ -#define init_idle_preempt_count(p, cpu) do { } while (0) +#endif /* CONFIG_PREEMPT_DYNAMIC */ -#ifdef CONFIG_PREEMPTION -extern void preempt_schedule(void); -#define __preempt_schedule() preempt_schedule() -extern void preempt_schedule_notrace(void); -#define __preempt_schedule_notrace() preempt_schedule_notrace() #endif /* CONFIG_PREEMPTION */ #endif /* __ASM_PREEMPT_H */ diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 07ad5a1df878..3affba95845b 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -14,13 +14,11 @@ #include <linux/bits.h> -#define CIF_SIE 0 /* CPU needs SIE exit cleanup */ #define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */ #define CIF_ENABLED_WAIT 5 /* in enabled wait state */ #define CIF_MCCK_GUEST 6 /* machine check happening in guest */ #define CIF_DEDICATED_CPU 7 /* this CPU is dedicated */ -#define _CIF_SIE BIT(CIF_SIE) #define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY) #define _CIF_ENABLED_WAIT BIT(CIF_ENABLED_WAIT) #define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST) @@ -28,11 +26,12 @@ #define RESTART_FLAG_CTLREGS _AC(1 << 0, U) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/cpumask.h> #include <linux/linkage.h> #include <linux/irqflags.h> +#include <linux/bitops.h> #include <asm/fpu-types.h> #include <asm/cpu.h> #include <asm/page.h> @@ -41,38 +40,50 @@ #include <asm/runtime_instr.h> #include <asm/irqflags.h> #include <asm/alternative.h> +#include <asm/fault.h> + +struct pcpu { + unsigned long ec_mask; /* bit mask for ec_xxx functions */ + unsigned long ec_clk; /* sigp timestamp for ec_xxx */ + unsigned long flags; /* per CPU flags */ + unsigned long capacity; /* cpu capacity for scheduler */ + signed char state; /* physical cpu state */ + signed char polarization; /* physical polarization */ + u16 address; /* physical cpu address */ +}; + +DECLARE_PER_CPU(struct pcpu, pcpu_devices); typedef long (*sys_call_ptr_t)(struct pt_regs *regs); +static __always_inline struct pcpu *this_pcpu(void) +{ + return (struct pcpu *)(get_lowcore()->pcpu); +} + static __always_inline void set_cpu_flag(int flag) { - S390_lowcore.cpu_flags |= (1UL << flag); + set_bit(flag, &this_pcpu()->flags); } static __always_inline void clear_cpu_flag(int flag) { - S390_lowcore.cpu_flags &= ~(1UL << flag); + clear_bit(flag, &this_pcpu()->flags); } static __always_inline bool test_cpu_flag(int flag) { - return S390_lowcore.cpu_flags & (1UL << flag); + return test_bit(flag, &this_pcpu()->flags); } static __always_inline bool test_and_set_cpu_flag(int flag) { - if (test_cpu_flag(flag)) - return true; - set_cpu_flag(flag); - return false; + return test_and_set_bit(flag, &this_pcpu()->flags); } static __always_inline bool test_and_clear_cpu_flag(int flag) { - if (!test_cpu_flag(flag)) - return false; - clear_cpu_flag(flag); - return true; + return test_and_clear_bit(flag, &this_pcpu()->flags); } /* @@ -81,9 +92,7 @@ static __always_inline bool test_and_clear_cpu_flag(int flag) */ static __always_inline bool test_cpu_flag_of(int flag, int cpu) { - struct lowcore *lc = lowcore_ptr[cpu]; - - return lc->cpu_flags & (1UL << flag); + return test_bit(flag, &per_cpu(pcpu_devices, cpu).flags); } #define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY) @@ -110,18 +119,12 @@ extern void execve_tail(void); unsigned long vdso_text_size(void); unsigned long vdso_size(void); -/* - * User space 
process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. - */ - -#define TASK_SIZE (test_thread_flag(TIF_31BIT) ? \ - _REGION3_SIZE : TASK_SIZE_MAX) -#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ - (_REGION3_SIZE >> 1) : (_REGION2_SIZE >> 1)) +#define TASK_SIZE (TASK_SIZE_MAX) +#define TASK_UNMAPPED_BASE (_REGION2_SIZE >> 1) #define TASK_SIZE_MAX (-PAGE_SIZE) #define VDSO_BASE (STACK_TOP + PAGE_SIZE) -#define VDSO_LIMIT (test_thread_flag(TIF_31BIT) ? _REGION3_SIZE : _REGION2_SIZE) +#define VDSO_LIMIT (_REGION2_SIZE) #define STACK_TOP (VDSO_LIMIT - vdso_size() - PAGE_SIZE) #define STACK_TOP_MAX (_REGION2_SIZE - vdso_size() - PAGE_SIZE) @@ -149,13 +152,12 @@ static __always_inline void __stackleak_poison(unsigned long erase_low, " la %[addr],256(%[addr])\n" " brctg %[tmp],0b\n" "1: stg %[poison],0(%[addr])\n" - " larl %[tmp],3f\n" - " ex %[count],0(%[tmp])\n" + " exrl %[count],3f\n" " j 4f\n" "2: stg %[poison],0(%[addr])\n" " j 4f\n" "3: mvc 8(1,%[addr]),0(%[addr])\n" - "4:\n" + "4:" : [addr] "+&a" (erase_low), [count] "+&d" (count), [tmp] "=&a" (tmp) : [poison] "d" (poison) : "memory", "cc" @@ -173,11 +175,8 @@ struct thread_struct { unsigned long system_timer; /* task cputime in kernel space */ unsigned long hardirq_timer; /* task cputime in hardirq context */ unsigned long softirq_timer; /* task cputime in softirq context */ - const sys_call_ptr_t *sys_call_table; /* system call table address */ - unsigned long gmap_addr; /* address of last gmap fault. */ - unsigned int gmap_write_flag; /* gmap fault write indication */ + union teid gmap_teid; /* address and flags of last gmap fault */ unsigned int gmap_int_code; /* int code of last gmap fault */ - unsigned int gmap_pfault; /* signal of a pending guest pfault */ int ufpu_flags; /* user fpu flags */ int kfpu_flags; /* kernel fpu flags */ @@ -269,7 +268,7 @@ static __always_inline unsigned long __current_stack_pointer(void) static __always_inline bool on_thread_stack(void) { - unsigned long ksp = S390_lowcore.kernel_stack; + unsigned long ksp = get_lowcore()->kernel_stack; return !((ksp ^ current_stack_pointer) & ~(THREAD_SIZE - 1)); } @@ -373,14 +372,19 @@ static inline void local_mcck_enable(void) /* * Rewind PSW instruction address by specified number of bytes. */ -static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc) +static inline unsigned long __rewind_psw(psw_t psw, long ilen) { unsigned long mask; mask = (psw.mask & PSW_MASK_EA) ? -1UL : (psw.mask & PSW_MASK_BA) ? 
(1UL << 31) - 1 : (1UL << 24) - 1; - return (psw.addr - ilc) & mask; + return (psw.addr - ilen) & mask; +} + +static inline unsigned long __forward_psw(psw_t psw, long ilen) +{ + return __rewind_psw(psw, -ilen); } /* @@ -405,9 +409,13 @@ static __always_inline bool regs_irqs_disabled(struct pt_regs *regs) static __always_inline void bpon(void) { - asm volatile(ALTERNATIVE("nop", ".insn rrf,0xb2e80000,0,0,13,0", 82)); + asm_inline volatile( + ALTERNATIVE(" nop\n", + " .insn rrf,0xb2e80000,0,0,13,0\n", + ALT_SPEC(82)) + ); } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_S390_PROCESSOR_H */ diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index 2ad9324f6338..962cf042c66d 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -8,17 +8,21 @@ #define _S390_PTRACE_H #include <linux/bits.h> +#include <linux/typecheck.h> #include <uapi/asm/ptrace.h> +#include <asm/thread_info.h> #include <asm/tpi.h> #define PIF_SYSCALL 0 /* inside a system call */ -#define PIF_EXECVE_PGSTE_RESTART 1 /* restart execve for PGSTE binaries */ +#define PIF_PSW_ADDR_ADJUSTED 1 /* psw address has been adjusted */ #define PIF_SYSCALL_RET_SET 2 /* return value was set via ptrace */ +#define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */ #define PIF_FTRACE_FULL_REGS 4 /* all register contents valid (ftrace) */ #define _PIF_SYSCALL BIT(PIF_SYSCALL) -#define _PIF_EXECVE_PGSTE_RESTART BIT(PIF_EXECVE_PGSTE_RESTART) +#define _PIF_ADDR_PSW_ADJUSTED BIT(PIF_PSW_ADDR_ADJUSTED) #define _PIF_SYSCALL_RET_SET BIT(PIF_SYSCALL_RET_SET) +#define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT) #define _PIF_FTRACE_FULL_REGS BIT(PIF_FTRACE_FULL_REGS) #define PSW32_MASK_PER _AC(0x40000000, UL) @@ -53,7 +57,7 @@ PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \ PSW_MASK_PSTATE | PSW_ASC_PRIMARY) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct psw_bits { unsigned long : 1; @@ -98,7 +102,7 @@ enum { typedef struct { unsigned int mask; unsigned int addr; -} psw_t32 __aligned(8); +} psw32_t __aligned(8); #define PGM_INT_CODE_MASK 0x7f #define PGM_INT_CODE_PER 0x80 @@ -126,7 +130,6 @@ struct pt_regs { struct tpi_info tpi_info; }; unsigned long flags; - unsigned long cr1; unsigned long last_break; }; @@ -229,8 +232,44 @@ static inline void instruction_pointer_set(struct pt_regs *regs, int regs_query_register_offset(const char *name); const char *regs_query_register_name(unsigned int offset); -unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset); -unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n); + +static __always_inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + return regs->gprs[15]; +} + +static __always_inline unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset) +{ + if (offset >= NUM_GPRS) + return 0; + return regs->gprs[offset]; +} + +static __always_inline int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + unsigned long ksp = kernel_stack_pointer(regs); + + return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1)); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs:pt_regs which contains kernel stack pointer. + * @n:stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specifined by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. 
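Together with regs_get_kernel_argument() further below, the stack accessor defined here lets instrumentation such as a kprobes handler read arguments that were spilled to the stack. A hedged sketch with a hypothetical handler name; on s390 arguments 0-4 live in gprs[2..6], so argument 5 is the first one fetched from the stack:

/* Hypothetical kprobe pre-handler: read the sixth argument of the
 * probed function, which regs_get_kernel_argument() resolves via
 * regs_get_kernel_stack_nth().
 */
static int pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        unsigned long arg5 = regs_get_kernel_argument(regs, 5);

        pr_debug("arg5=%lx\n", arg5);
        return 0;
}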
+ */ +static __always_inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long addr; + + addr = kernel_stack_pointer(regs) + n * sizeof(long); + if (!regs_within_kernel_stack(regs, addr)) + return 0; + return READ_ONCE_NOCHECK(*(unsigned long *)addr); +} /** * regs_get_kernel_argument() - get Nth function argument in kernel @@ -251,15 +290,10 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs, return regs_get_kernel_stack_nth(regs, argoffset + n); } -static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) -{ - return regs->gprs[15]; -} - static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc) { regs->gprs[2] = rc; } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _S390_PTRACE_H */ diff --git a/arch/s390/include/asm/purgatory.h b/arch/s390/include/asm/purgatory.h index e297bcfc476f..4c7a43bc43a1 100644 --- a/arch/s390/include/asm/purgatory.h +++ b/arch/s390/include/asm/purgatory.h @@ -7,11 +7,11 @@ #ifndef _S390_PURGATORY_H_ #define _S390_PURGATORY_H_ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/purgatory.h> int verify_sha256_digest(void); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _S390_PURGATORY_H_ */ diff --git a/arch/s390/include/asm/runtime-const.h b/arch/s390/include/asm/runtime-const.h new file mode 100644 index 000000000000..17878b1d048c --- /dev/null +++ b/arch/s390/include/asm/runtime-const.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_RUNTIME_CONST_H +#define _ASM_S390_RUNTIME_CONST_H + +#include <linux/uaccess.h> + +#define runtime_const_ptr(sym) \ +({ \ + typeof(sym) __ret; \ + \ + asm_inline( \ + "0: iihf %[__ret],%[c1]\n" \ + " iilf %[__ret],%[c2]\n" \ + ".pushsection runtime_ptr_" #sym ",\"a\"\n" \ + ".long 0b - .\n" \ + ".popsection" \ + : [__ret] "=d" (__ret) \ + : [c1] "i" (0x01234567UL), \ + [c2] "i" (0x89abcdefUL)); \ + __ret; \ +}) + +#define runtime_const_shift_right_32(val, sym) \ +({ \ + unsigned int __ret = (val); \ + \ + asm_inline( \ + "0: srl %[__ret],12\n" \ + ".pushsection runtime_shift_" #sym ",\"a\"\n" \ + ".long 0b - .\n" \ + ".popsection" \ + : [__ret] "+d" (__ret)); \ + __ret; \ +}) + +#define runtime_const_init(type, sym) do { \ + extern s32 __start_runtime_##type##_##sym[]; \ + extern s32 __stop_runtime_##type##_##sym[]; \ + \ + runtime_const_fixup(__runtime_fixup_##type, \ + (unsigned long)(sym), \ + __start_runtime_##type##_##sym, \ + __stop_runtime_##type##_##sym); \ +} while (0) + +/* 32-bit immediate for iihf and iilf in bits in I2 field */ +static inline void __runtime_fixup_32(u32 *p, unsigned int val) +{ + s390_kernel_write(p, &val, sizeof(val)); +} + +static inline void __runtime_fixup_ptr(void *where, unsigned long val) +{ + __runtime_fixup_32(where + 2, val >> 32); + __runtime_fixup_32(where + 8, val); +} + +/* Immediate value is lower 12 bits of D2 field of srl */ +static inline void __runtime_fixup_shift(void *where, unsigned long val) +{ + u32 insn = *(u32 *)where; + + insn &= 0xfffff000; + insn |= (val & 63); + s390_kernel_write(where, &insn, sizeof(insn)); +} + +static inline void runtime_const_fixup(void (*fn)(void *, unsigned long), + unsigned long val, s32 *start, s32 *end) +{ + while (start < end) { + fn(*start + (void *)start, val); + start++; + } +} + +#endif /* _ASM_S390_RUNTIME_CONST_H */ diff --git a/arch/s390/include/asm/rwonce.h b/arch/s390/include/asm/rwonce.h index 91fc24520e82..402325ec20f0 100644 --- 
a/arch/s390/include/asm/rwonce.h +++ b/arch/s390/include/asm/rwonce.h @@ -19,7 +19,7 @@ \ BUILD_BUG_ON(sizeof(x) != 16); \ asm volatile( \ - " lpq %[val],%[_x]\n" \ + " lpq %[val],%[_x]" \ : [val] "=d" (__u.val) \ : [_x] "QS" (x) \ : "memory"); \ diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 5742d23bba13..0f184dbdbe5e 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -16,7 +16,12 @@ /* 24 + 16 * SCLP_MAX_CORES */ #define EXT_SCCB_READ_CPU (3 * PAGE_SIZE) -#ifndef __ASSEMBLY__ +#define SCLP_ERRNOTIFY_AQ_RESET 0 +#define SCLP_ERRNOTIFY_AQ_REPAIR 1 +#define SCLP_ERRNOTIFY_AQ_INFO_LOG 2 +#define SCLP_ERRNOTIFY_AQ_OPTICS_DATA 3 + +#ifndef __ASSEMBLER__ #include <linux/uio.h> #include <asm/chpid.h> #include <asm/cpu.h> @@ -72,6 +77,7 @@ struct sclp_info { unsigned char has_core_type : 1; unsigned char has_sprp : 1; unsigned char has_hvs : 1; + unsigned char has_wti : 1; unsigned char has_esca : 1; unsigned char has_sief2 : 1; unsigned char has_64bscao : 1; @@ -84,9 +90,12 @@ struct sclp_info { unsigned char has_ibs : 1; unsigned char has_skey : 1; unsigned char has_kss : 1; + unsigned char has_diag204_bif : 1; unsigned char has_gisaf : 1; + unsigned char has_diag310 : 1; unsigned char has_diag318 : 1; unsigned char has_diag320 : 1; + unsigned char has_diag324 : 1; unsigned char has_sipl : 1; unsigned char has_sipl_eckd : 1; unsigned char has_dirq : 1; @@ -109,6 +118,34 @@ struct sclp_info { }; extern struct sclp_info sclp; +struct sccb_header { + u16 length; + u8 function_code; + u8 control_mask[3]; + u16 response_code; +} __packed; + +struct evbuf_header { + u16 length; + u8 type; + u8 flags; + u16 _reserved; +} __packed; + +struct err_notify_evbuf { + struct evbuf_header header; + u8 action; + u8 atype; + u32 fh; + u32 fid; + u8 data[]; +} __packed; + +struct err_notify_sccb { + struct sccb_header header; + struct err_notify_evbuf evbuf; +} __packed; + struct zpci_report_error_header { u8 version; /* Interface version byte */ u8 action; /* Action qualifier byte @@ -131,10 +168,12 @@ int sclp_early_read_storage_info(void); int sclp_early_get_core_info(struct sclp_core_info *info); void sclp_early_get_ipl_info(struct sclp_ipl_info *info); void sclp_early_detect(void); +void sclp_early_detect_machine_features(void); void sclp_early_printk(const char *s); void __sclp_early_printk(const char *s, unsigned int len); void sclp_emergency_printk(const char *s); +int sclp_init(void); int sclp_early_get_memsize(unsigned long *mem); int sclp_early_get_hsa_size(unsigned long *hsa_size); int _sclp_get_core_info(struct sclp_core_info *info); @@ -160,5 +199,5 @@ static inline int sclp_get_core_info(struct sclp_core_info *info, int early) return _sclp_get_core_info(info); } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_S390_SCLP_H */ diff --git a/arch/s390/include/asm/seccomp.h b/arch/s390/include/asm/seccomp.h index 71d46f0ba97b..f904b674fee0 100644 --- a/arch/s390/include/asm/seccomp.h +++ b/arch/s390/include/asm/seccomp.h @@ -19,10 +19,5 @@ #define SECCOMP_ARCH_NATIVE AUDIT_ARCH_S390X #define SECCOMP_ARCH_NATIVE_NR NR_syscalls #define SECCOMP_ARCH_NATIVE_NAME "s390x" -#ifdef CONFIG_COMPAT -# define SECCOMP_ARCH_COMPAT AUDIT_ARCH_S390 -# define SECCOMP_ARCH_COMPAT_NR NR_syscalls -# define SECCOMP_ARCH_COMPAT_NAME "s390" -#endif #endif /* _ASM_S390_SECCOMP_H */ diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h index 06fbabe2f66c..94092f4ae764 100644 --- a/arch/s390/include/asm/set_memory.h 
+++ b/arch/s390/include/asm/set_memory.h @@ -62,5 +62,7 @@ __SET_MEMORY_FUNC(set_memory_4k, SET_MEMORY_4K) int set_direct_map_invalid_noflush(struct page *page); int set_direct_map_default_noflush(struct page *page); +int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid); +bool kernel_page_present(struct page *page); #endif diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 32f70873e2b7..7c57ac968bf6 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -13,27 +13,6 @@ #define PARMAREA 0x10400 #define COMMAND_LINE_SIZE CONFIG_COMMAND_LINE_SIZE -/* - * Machine features detected in early.c - */ - -#define MACHINE_FLAG_VM BIT(0) -#define MACHINE_FLAG_KVM BIT(1) -#define MACHINE_FLAG_LPAR BIT(2) -#define MACHINE_FLAG_DIAG9C BIT(3) -#define MACHINE_FLAG_ESOP BIT(4) -#define MACHINE_FLAG_IDTE BIT(5) -#define MACHINE_FLAG_EDAT1 BIT(7) -#define MACHINE_FLAG_EDAT2 BIT(8) -#define MACHINE_FLAG_TOPOLOGY BIT(10) -#define MACHINE_FLAG_TE BIT(11) -#define MACHINE_FLAG_TLB_LC BIT(12) -#define MACHINE_FLAG_TLB_GUEST BIT(14) -#define MACHINE_FLAG_NX BIT(15) -#define MACHINE_FLAG_GS BIT(16) -#define MACHINE_FLAG_SCC BIT(17) -#define MACHINE_FLAG_PCI_MIO BIT(18) -#define MACHINE_FLAG_RDP BIT(19) #define LPP_MAGIC BIT(31) #define LPP_PID_MASK _AC(0xffffffff, UL) @@ -45,7 +24,7 @@ #define LEGACY_COMMAND_LINE_SIZE 896 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/lowcore.h> #include <asm/types.h> @@ -62,6 +41,8 @@ struct parmarea { char command_line[COMMAND_LINE_SIZE]; /* 0x10480 */ }; +extern char arch_hw_string[128]; + extern struct parmarea parmarea; extern unsigned int zlib_dfltcc_support; @@ -77,25 +58,6 @@ extern unsigned long max_mappable; /* The Write Back bit position in the physaddr is given by the SLPC PCI */ extern unsigned long mio_wb_bit_mask; -#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) -#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) -#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR) - -#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C) -#define MACHINE_HAS_ESOP (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP) -#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE) -#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1) -#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2) -#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) -#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE) -#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC) -#define MACHINE_HAS_TLB_GUEST (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_GUEST) -#define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX) -#define MACHINE_HAS_GS (S390_lowcore.machine_flags & MACHINE_FLAG_GS) -#define MACHINE_HAS_SCC (S390_lowcore.machine_flags & MACHINE_FLAG_SCC) -#define MACHINE_HAS_PCI_MIO (S390_lowcore.machine_flags & MACHINE_FLAG_PCI_MIO) -#define MACHINE_HAS_RDP (S390_lowcore.machine_flags & MACHINE_FLAG_RDP) - /* * Console mode. 
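The set_direct_map_valid_noflush() and kernel_page_present() prototypes added to set_memory.h above operate on ranges of direct-map pages. A hedged usage sketch; the helper, its caller, and the decision to leave TLB flushing to the caller are all illustrative:

/* Hypothetical debug helper: drop nr pages from the direct map, then
 * restore them; the _noflush variants leave TLB flushing to the caller.
 */
static int toggle_direct_map(struct page *page, unsigned int nr)
{
        int rc;

        rc = set_direct_map_valid_noflush(page, nr, false);
        if (rc)
                return rc;
        /* ... work while the linear mapping is invalid ... */
        return set_direct_map_valid_noflush(page, nr, true);
}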
Override with conmode= */ @@ -115,6 +77,8 @@ extern unsigned int console_irq; #define SET_CONSOLE_VT220 do { console_mode = 4; } while (0) #define SET_CONSOLE_HVC do { console_mode = 5; } while (0) +void register_early_console(void); + #ifdef CONFIG_VMCP void vmcp_cma_reserve(void); #else @@ -138,5 +102,5 @@ static __always_inline u32 gen_lpswe(unsigned long addr) BUILD_BUG_ON(addr > 0xfff); return 0xb2b20000 | addr; } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_S390_SETUP_H */ diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h index edee63da08e7..97d77868f83c 100644 --- a/arch/s390/include/asm/sigp.h +++ b/arch/s390/include/asm/sigp.h @@ -36,7 +36,9 @@ #define SIGP_STATUS_INCORRECT_STATE 0x00000200UL #define SIGP_STATUS_NOT_RUNNING 0x00000400UL -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ + +#include <asm/asm.h> static inline int ____pcpu_sigp(u16 addr, u8 order, unsigned long parm, u32 *status) @@ -46,13 +48,12 @@ static inline int ____pcpu_sigp(u16 addr, u8 order, unsigned long parm, asm volatile( " sigp %[r1],%[addr],0(%[order])\n" - " ipm %[cc]\n" - " srl %[cc],28\n" - : [cc] "=&d" (cc), [r1] "+&d" (r1.pair) + CC_IPM(cc) + : CC_OUT(cc, cc), [r1] "+d" (r1.pair) : [addr] "d" (addr), [order] "a" (order) - : "cc"); + : CC_CLOBBER); *status = r1.even; - return cc; + return CC_TRANSFORM(cc); } static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm, @@ -67,6 +68,6 @@ static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm, return cc; } -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __S390_ASM_SIGP_H */ diff --git a/arch/s390/include/asm/skey.h b/arch/s390/include/asm/skey.h new file mode 100644 index 000000000000..84e7cf28b712 --- /dev/null +++ b/arch/s390/include/asm/skey.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_SKEY_H +#define __ASM_SKEY_H + +#include <asm/rwonce.h> + +struct skey_region { + unsigned long start; + unsigned long end; +}; + +#define SKEY_REGION(_start, _end) \ + stringify_in_c(.section .skey_region,"a";) \ + stringify_in_c(.balign 8;) \ + stringify_in_c(.quad (_start);) \ + stringify_in_c(.quad (_end);) \ + stringify_in_c(.previous) + +extern int skey_regions_initialized; +extern struct skey_region __skey_region_start[]; +extern struct skey_region __skey_region_end[]; + +void __skey_regions_initialize(void); + +static inline void skey_regions_initialize(void) +{ + if (READ_ONCE(skey_regions_initialized)) + return; + __skey_regions_initialize(); +} + +#endif /* __ASM_SKEY_H */ diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index 6e5b1b4b19a9..fb2bdbf35da5 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h @@ -7,11 +7,30 @@ #ifndef __ASM_SMP_H #define __ASM_SMP_H -#include <asm/sigp.h> -#include <asm/lowcore.h> #include <asm/processor.h> +#include <asm/lowcore.h> +#include <asm/machine.h> +#include <asm/sigp.h> + +static __always_inline unsigned int raw_smp_processor_id(void) +{ + unsigned long lc_cpu_nr; + unsigned int cpu; + + BUILD_BUG_ON(sizeof_field(struct lowcore, cpu_nr) != sizeof(cpu)); + lc_cpu_nr = offsetof(struct lowcore, cpu_nr); + asm_inline( + ALTERNATIVE(" ly %[cpu],%[offzero](%%r0)\n", + " ly %[cpu],%[offalt](%%r0)\n", + ALT_FEATURE(MFEATURE_LOWCORE)) + : [cpu] "=d" (cpu) + : [offzero] "i" (lc_cpu_nr), + [offalt] "i" (lc_cpu_nr + LOWCORE_ALT_ADDRESS), + "m" (((struct lowcore *)0)->cpu_nr)); + return cpu; +} -#define raw_smp_processor_id() (S390_lowcore.cpu_nr) +#define 
arch_scale_cpu_capacity smp_cpu_get_capacity extern struct mutex smp_cpu_state_mutex; extern unsigned int smp_cpu_mt_shift; @@ -24,8 +43,7 @@ extern int __cpu_up(unsigned int cpu, struct task_struct *tidle); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); -extern void smp_call_online_cpu(void (*func)(void *), void *); -extern void smp_call_ipl_cpu(void (*func)(void *), void *); +extern void __noreturn smp_call_ipl_cpu(void (*func)(void *), void *data); extern void smp_emergency_stop(void); extern int smp_find_processor_id(u16 address); @@ -35,6 +53,9 @@ extern void smp_save_dump_secondary_cpus(void); extern void smp_yield_cpu(int cpu); extern void smp_cpu_set_polarization(int cpu, int val); extern int smp_cpu_get_polarization(int cpu); +extern void smp_cpu_set_capacity(int cpu, unsigned long val); +extern void smp_set_core_capacity(int cpu, unsigned long val); +extern unsigned long smp_cpu_get_capacity(int cpu); extern int smp_cpu_get_cpu_address(int cpu); extern void smp_fill_possible_mask(void); extern void smp_detect_cpus(void); @@ -59,7 +80,7 @@ static inline void smp_cpus_done(unsigned int max_cpus) { } -extern int smp_rescan_cpus(void); +extern int smp_rescan_cpus(bool early); extern void __noreturn cpu_die(void); extern void __cpu_die(unsigned int cpu); extern int __cpu_disable(void); diff --git a/arch/s390/include/asm/softirq_stack.h b/arch/s390/include/asm/softirq_stack.h index 1ac5115d3115..42d61296bbad 100644 --- a/arch/s390/include/asm/softirq_stack.h +++ b/arch/s390/include/asm/softirq_stack.h @@ -8,7 +8,7 @@ #ifdef CONFIG_SOFTIRQ_ON_OWN_STACK static inline void do_softirq_own_stack(void) { - call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq); + call_on_stack(0, get_lowcore()->async_stack, void, __do_softirq); } #endif #endif /* __ASM_S390_SOFTIRQ_STACK_H */ diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h index c549893602ea..668dfc5de538 100644 --- a/arch/s390/include/asm/sparsemem.h +++ b/arch/s390/include/asm/sparsemem.h @@ -2,7 +2,23 @@ #ifndef _ASM_S390_SPARSEMEM_H #define _ASM_S390_SPARSEMEM_H -#define SECTION_SIZE_BITS 28 +#define SECTION_SIZE_BITS 27 #define MAX_PHYSMEM_BITS CONFIG_MAX_PHYSMEM_BITS +#ifdef CONFIG_NUMA + +static inline int memory_add_physaddr_to_nid(u64 addr) +{ + return 0; +} +#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid + +static inline int phys_to_target_node(u64 start) +{ + return 0; +} +#define phys_to_target_node phys_to_target_node + +#endif /* CONFIG_NUMA */ + #endif /* _ASM_S390_SPARSEMEM_H */ diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 37127cd7749e..b06b183b7246 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -16,7 +16,23 @@ #include <asm/processor.h> #include <asm/alternative.h> -#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval) +static __always_inline unsigned int spinlock_lockval(void) +{ + unsigned long lc_lockval; + unsigned int lockval; + + BUILD_BUG_ON(sizeof_field(struct lowcore, spinlock_lockval) != sizeof(lockval)); + lc_lockval = offsetof(struct lowcore, spinlock_lockval); + asm_inline( + ALTERNATIVE(" ly %[lockval],%[offzero](%%r0)\n", + " ly %[lockval],%[offalt](%%r0)\n", + ALT_FEATURE(MFEATURE_LOWCORE)) + : [lockval] "=d" (lockval) + : [offzero] "i" (lc_lockval), + [offalt] "i" (lc_lockval + LOWCORE_ALT_ADDRESS), + "m" (((struct lowcore *)0)->spinlock_lockval)); + return lockval; +} extern int 
spin_retry; @@ -57,8 +73,10 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lp) static inline int arch_spin_trylock_once(arch_spinlock_t *lp) { + int old = 0; + barrier(); - return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL)); + return likely(arch_try_cmpxchg(&lp->lock, &old, spinlock_lockval())); } static inline void arch_spin_lock(arch_spinlock_t *lp) @@ -79,10 +97,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp) typecheck(int, lp->lock); kcsan_release(); asm_inline volatile( - ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */ - " sth %1,%0\n" - : "=R" (((unsigned short *) &lp->lock)[1]) - : "d" (0) : "cc", "memory"); + ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", ALT_FACILITY(49)) /* NIAI 7 */ + " mvhhi %[lock],0" + : [lock] "=Q" (((unsigned short *)&lp->lock)[1]) + : + : "memory"); } /* @@ -118,7 +137,9 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) static inline void arch_write_lock(arch_rwlock_t *rw) { - if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000)) + int old = 0; + + if (!arch_try_cmpxchg(&rw->cnts, &old, 0x30000)) arch_write_lock_wait(rw); } @@ -133,8 +154,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) int old; old = READ_ONCE(rw->cnts); - return (!(old & 0xffff0000) && - __atomic_cmpxchg_bool(&rw->cnts, old, old + 1)); + return (!(old & 0xffff0000) && arch_try_cmpxchg(&rw->cnts, &old, old + 1)); } static inline int arch_write_trylock(arch_rwlock_t *rw) @@ -142,7 +162,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) int old; old = READ_ONCE(rw->cnts); - return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000); + return !old && arch_try_cmpxchg(&rw->cnts, &old, 0x30000); } #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h index b69695e39957..3653ff57d6d9 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ b/arch/s390/include/asm/spinlock_types.h @@ -3,7 +3,7 @@ #define __ASM_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_RAW_H -# error "please don't include this file directly" +# error "Please do not include this file directly." 
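The conversions from __atomic_cmpxchg_bool() to arch_try_cmpxchg() in spinlock.h above rely on the try-cmpxchg contract that the expected value is refreshed from memory on failure, which is why the trylock paths now declare a local old variable. A user-space model of that contract:

#include <stdbool.h>
#include <stdio.h>

/* Model of the try_cmpxchg() contract: on failure, *old is updated
 * from memory, so retry loops need no explicit re-read.
 */
static bool try_cmpxchg_int(int *ptr, int *old, int new)
{
        if (*ptr == *old) {
                *ptr = new;
                return true;
        }
        *old = *ptr;
        return false;
}

int main(void)
{
        int cnts = 0, old = 0;

        /* arch_write_trylock() pattern: 0 -> 0x30000 */
        if (try_cmpxchg_int(&cnts, &old, 0x30000))
                printf("write lock taken, cnts=%#x\n", cnts);
        return 0;
}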
#endif typedef struct { diff --git a/arch/s390/include/asm/stackprotector.h b/arch/s390/include/asm/stackprotector.h new file mode 100644 index 000000000000..0497850103dd --- /dev/null +++ b/arch/s390/include/asm/stackprotector.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_S390_STACKPROTECTOR_H +#define _ASM_S390_STACKPROTECTOR_H + +#include <linux/sched.h> +#include <asm/current.h> +#include <asm/lowcore.h> + +static __always_inline void boot_init_stack_canary(void) +{ + current->stack_canary = get_random_canary(); + get_lowcore()->stack_canary = current->stack_canary; +} + +#endif /* _ASM_S390_STACKPROTECTOR_H */ diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h index 85b6738b826a..c9ae680a28af 100644 --- a/arch/s390/include/asm/stacktrace.h +++ b/arch/s390/include/asm/stacktrace.h @@ -65,6 +65,8 @@ struct stack_frame { unsigned long sie_reason; unsigned long sie_flags; unsigned long sie_control_block_phys; + unsigned long sie_guest_asce; + unsigned long sie_irq; }; }; unsigned long gprs[10]; @@ -198,7 +200,7 @@ static __always_inline unsigned long get_stack_pointer(struct task_struct *task, " lg 15,%[_stack]\n" \ " stg %[_frame],%[_bc](15)\n" \ " brasl 14,%[_fn]\n" \ - " lgr 15,%[_prev]\n" \ + " lgr 15,%[_prev]" \ : [_prev] "=&d" (prev), CALL_FMT_##nr \ : [_stack] "R" (__stack), \ [_bc] "i" (offsetof(struct stack_frame, back_chain)), \ @@ -249,7 +251,7 @@ static __always_inline unsigned long get_stack_pointer(struct task_struct *task, " lra 14,0(1)\n" \ " lpswe %[psw_enter]\n" \ "0: lpswe 0(7)\n" \ - "1:\n" \ + "1:" \ : CALL_FMT_##nr, [psw_leave] "=Q" (psw_leave) \ : [psw_enter] "Q" (psw_enter) \ : "7", CALL_CLOBBER_##nr); \ diff --git a/arch/s390/include/asm/stp.h b/arch/s390/include/asm/stp.h index 4d74d7e33340..827cb208de86 100644 --- a/arch/s390/include/asm/stp.h +++ b/arch/s390/include/asm/stp.h @@ -94,5 +94,6 @@ struct stp_stzi { int stp_sync_check(void); int stp_island_check(void); void stp_queue_work(void); +bool stp_enabled(void); #endif /* __S390_STP_H */ diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h index 351685de53d2..238e721e5a22 100644 --- a/arch/s390/include/asm/string.h +++ b/arch/s390/include/asm/string.h @@ -15,36 +15,33 @@ #define __HAVE_ARCH_MEMCPY /* gcc builtin & arch function */ #define __HAVE_ARCH_MEMMOVE /* gcc builtin & arch function */ #define __HAVE_ARCH_MEMSET /* gcc builtin & arch function */ -#define __HAVE_ARCH_MEMSET16 /* arch function */ -#define __HAVE_ARCH_MEMSET32 /* arch function */ -#define __HAVE_ARCH_MEMSET64 /* arch function */ void *memcpy(void *dest, const void *src, size_t n); void *memset(void *s, int c, size_t n); void *memmove(void *dest, const void *src, size_t n); -#ifndef CONFIG_KASAN +#if !defined(CONFIG_KASAN) && !defined(CONFIG_KMSAN) #define __HAVE_ARCH_MEMCHR /* inline & arch function */ #define __HAVE_ARCH_MEMCMP /* arch function */ #define __HAVE_ARCH_MEMSCAN /* inline & arch function */ #define __HAVE_ARCH_STRCAT /* inline & arch function */ #define __HAVE_ARCH_STRCMP /* arch function */ -#define __HAVE_ARCH_STRCPY /* inline & arch function */ #define __HAVE_ARCH_STRLCAT /* arch function */ #define __HAVE_ARCH_STRLEN /* inline & arch function */ #define __HAVE_ARCH_STRNCAT /* arch function */ -#define __HAVE_ARCH_STRNCPY /* arch function */ #define __HAVE_ARCH_STRNLEN /* inline & arch function */ #define __HAVE_ARCH_STRSTR /* arch function */ +#define __HAVE_ARCH_MEMSET16 /* arch function */ +#define __HAVE_ARCH_MEMSET32 /* arch 
function */ +#define __HAVE_ARCH_MEMSET64 /* arch function */ /* Prototypes for non-inlined arch strings functions. */ int memcmp(const void *s1, const void *s2, size_t n); int strcmp(const char *s1, const char *s2); size_t strlcat(char *dest, const char *src, size_t n); char *strncat(char *dest, const char *src, size_t n); -char *strncpy(char *dest, const char *src, size_t n); char *strstr(const char *s1, const char *s2); -#endif /* !CONFIG_KASAN */ +#endif /* !defined(CONFIG_KASAN) && !defined(CONFIG_KMSAN) */ #undef __HAVE_ARCH_STRCHR #undef __HAVE_ARCH_STRNCHR @@ -74,20 +71,30 @@ void *__memset16(uint16_t *s, uint16_t v, size_t count); void *__memset32(uint32_t *s, uint32_t v, size_t count); void *__memset64(uint64_t *s, uint64_t v, size_t count); +#ifdef __HAVE_ARCH_MEMSET16 static inline void *memset16(uint16_t *s, uint16_t v, size_t count) { return __memset16(s, v, count * sizeof(v)); } +#endif +#ifdef __HAVE_ARCH_MEMSET32 static inline void *memset32(uint32_t *s, uint32_t v, size_t count) { return __memset32(s, v, count * sizeof(v)); } +#endif +#ifdef __HAVE_ARCH_MEMSET64 +#ifdef IN_BOOT_STRING_C +void *memset64(uint64_t *s, uint64_t v, size_t count); +#else static inline void *memset64(uint64_t *s, uint64_t v, size_t count) { return __memset64(s, v, count * sizeof(v)); } +#endif +#endif #if !defined(IN_ARCH_STRING_C) && (!defined(CONFIG_FORTIFY_SOURCE) || defined(__NO_FORTIFY)) @@ -118,7 +125,7 @@ static inline void *memscan(void *s, int c, size_t n) asm volatile( " lgr 0,%[c]\n" "0: srst %[ret],%[s]\n" - " jo 0b\n" + " jo 0b" : [ret] "+&a" (ret), [s] "+&a" (s) : [c] "d" (c) : "cc", "memory", "0"); @@ -145,22 +152,6 @@ static inline char *strcat(char *dst, const char *src) } #endif -#ifdef __HAVE_ARCH_STRCPY -static inline char *strcpy(char *dst, const char *src) -{ - char *ret = dst; - - asm volatile( - " lghi 0,0\n" - "0: mvst %[dst],%[src]\n" - " jo 0b" - : [dst] "+&a" (dst), [src] "+&a" (src) - : - : "cc", "memory", "0"); - return ret; -} -#endif - #if defined(__HAVE_ARCH_STRLEN) || (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)) static inline size_t __no_sanitize_prefix_strfunc(strlen)(const char *s) { @@ -198,7 +189,6 @@ static inline size_t strnlen(const char * s, size_t n) void *memchr(const void * s, int c, size_t n); void *memscan(void *s, int c, size_t n); char *strcat(char *dst, const char *src); -char *strcpy(char *dst, const char *src); size_t strlen(const char *s); size_t strnlen(const char * s, size_t n); #endif /* !IN_ARCH_STRING_C */ diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index 27e3d804b311..4271e4169f45 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -15,7 +15,6 @@ #include <asm/ptrace.h> extern const sys_call_ptr_t sys_call_table[]; -extern const sys_call_ptr_t sys_call_table_emu[]; static inline long syscall_get_nr(struct task_struct *task, struct pt_regs *regs) @@ -24,6 +23,18 @@ static inline long syscall_get_nr(struct task_struct *task, (regs->int_code & 0xffff) : -1; } +static inline void syscall_set_nr(struct task_struct *task, + struct pt_regs *regs, + int nr) +{ + /* + * Unlike syscall_get_nr(), syscall_set_nr() can be called only when + * the target task is stopped for tracing on entering syscall, so + * there is no need to have the same check syscall_get_nr() has. 
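syscall_set_nr() just below splices the new syscall number into the lower halfword of int_code while preserving the rest of the interruption code. A quick stand-alone check of that masking:

#include <assert.h>

int main(void)
{
        unsigned int int_code = 0x00020000 | 122; /* upper half arbitrary */
        int nr = 64;

        int_code = (int_code & ~0xffff) | (nr & 0xffff);
        assert((int_code & 0xffff) == 64);          /* new syscall number */
        assert((int_code & ~0xffff) == 0x00020000); /* upper half intact */
        return 0;
}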
+ */ + regs->int_code = (regs->int_code & ~0xffff) | (nr & 0xffff); +} + static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { @@ -34,15 +45,7 @@ static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs) { unsigned long error = regs->gprs[2]; -#ifdef CONFIG_COMPAT - if (test_tsk_thread_flag(task, TIF_31BIT)) { - /* - * Sign-extend the value so (int)-EFOO becomes (long)-EFOO - * and will match correctly in comparisons. - */ - error = (long)(int)error; - } -#endif + return IS_ERR_VALUE(error) ? error : 0; } @@ -65,25 +68,24 @@ static inline void syscall_get_arguments(struct task_struct *task, unsigned long *args) { unsigned long mask = -1UL; - unsigned int n = 6; -#ifdef CONFIG_COMPAT - if (test_tsk_thread_flag(task, TIF_31BIT)) - mask = 0xffffffff; -#endif - while (n-- > 0) - if (n > 0) - args[n] = regs->gprs[2 + n] & mask; + for (int i = 1; i < 6; i++) + args[i] = regs->gprs[2 + i] & mask; args[0] = regs->orig_gpr2 & mask; } +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + const unsigned long *args) +{ + regs->orig_gpr2 = args[0]; + for (int n = 1; n < 6; n++) + regs->gprs[2 + n] = args[n]; +} + static inline int syscall_get_arch(struct task_struct *task) { -#ifdef CONFIG_COMPAT - if (test_tsk_thread_flag(task, TIF_31BIT)) - return AUDIT_ARCH_S390; -#endif return AUDIT_ARCH_S390X; } @@ -136,7 +138,7 @@ long syscall##nr(unsigned long syscall SYSCALL_PARM_##nr) \ SYSCALL_REGS_##nr; \ \ asm volatile ( \ - " svc 0\n" \ + " svc 0" \ : "=d" (rc) \ : "d" (r1) SYSCALL_FMT_##nr \ : "memory"); \ diff --git a/arch/s390/include/asm/syscall_wrapper.h b/arch/s390/include/asm/syscall_wrapper.h index 35c1d1b860d8..9eb58d5348d8 100644 --- a/arch/s390/include/asm/syscall_wrapper.h +++ b/arch/s390/include/asm/syscall_wrapper.h @@ -13,101 +13,12 @@ ,, regs->orig_gpr2,, regs->gprs[3],, regs->gprs[4] \ ,, regs->gprs[5],, regs->gprs[6],, regs->gprs[7]) -#ifdef CONFIG_COMPAT - -#define __SC_COMPAT_CAST(t, a) \ -({ \ - long __ReS = a; \ - \ - BUILD_BUG_ON((sizeof(t) > 4) && !__TYPE_IS_L(t) && \ - !__TYPE_IS_UL(t) && !__TYPE_IS_PTR(t) && \ - !__TYPE_IS_LL(t)); \ - if (__TYPE_IS_L(t)) \ - __ReS = (s32)a; \ - if (__TYPE_IS_UL(t)) \ - __ReS = (u32)a; \ - if (__TYPE_IS_PTR(t)) \ - __ReS = a & 0x7fffffff; \ - if (__TYPE_IS_LL(t)) \ - return -ENOSYS; \ - (t)__ReS; \ -}) - -/* - * To keep the naming coherent, re-define SYSCALL_DEFINE0 to create an alias - * named __s390x_sys_*() - */ -#define COMPAT_SYSCALL_DEFINE0(sname) \ - long __s390_compat_sys_##sname(void); \ - ALLOW_ERROR_INJECTION(__s390_compat_sys_##sname, ERRNO); \ - long __s390_compat_sys_##sname(void) - -#define SYSCALL_DEFINE0(sname) \ - SYSCALL_METADATA(_##sname, 0); \ - long __s390_sys_##sname(void); \ - ALLOW_ERROR_INJECTION(__s390_sys_##sname, ERRNO); \ - long __s390x_sys_##sname(void); \ - ALLOW_ERROR_INJECTION(__s390x_sys_##sname, ERRNO); \ - static inline long __do_sys_##sname(void); \ - long __s390_sys_##sname(void) \ - { \ - return __do_sys_##sname(); \ - } \ - long __s390x_sys_##sname(void) \ - { \ - return __do_sys_##sname(); \ - } \ - static inline long __do_sys_##sname(void) - -#define COND_SYSCALL(name) \ - cond_syscall(__s390x_sys_##name); \ - cond_syscall(__s390_sys_##name) - -#define COMPAT_SYSCALL_DEFINEx(x, name, ...) 
\ - long __s390_compat_sys##name(struct pt_regs *regs); \ - ALLOW_ERROR_INJECTION(__s390_compat_sys##name, ERRNO); \ - static inline long __se_compat_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)); \ - static inline long __do_compat_sys##name(__MAP(x, __SC_DECL, __VA_ARGS__)); \ - long __s390_compat_sys##name(struct pt_regs *regs) \ - { \ - return __se_compat_sys##name(SC_S390_REGS_TO_ARGS(x, __VA_ARGS__)); \ - } \ - static inline long __se_compat_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)) \ - { \ - __MAP(x, __SC_TEST, __VA_ARGS__); \ - return __do_compat_sys##name(__MAP(x, __SC_DELOUSE, __VA_ARGS__)); \ - } \ - static inline long __do_compat_sys##name(__MAP(x, __SC_DECL, __VA_ARGS__)) - -/* - * As some compat syscalls may not be implemented, we need to expand - * COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well. - */ -#define COND_SYSCALL_COMPAT(name) \ - cond_syscall(__s390_compat_sys_##name) - -#define __S390_SYS_STUBx(x, name, ...) \ - long __s390_sys##name(struct pt_regs *regs); \ - ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \ - static inline long ___se_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)); \ - long __s390_sys##name(struct pt_regs *regs) \ - { \ - return ___se_sys##name(SC_S390_REGS_TO_ARGS(x, __VA_ARGS__)); \ - } \ - static inline long ___se_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)) \ - { \ - __MAP(x, __SC_TEST, __VA_ARGS__); \ - return __do_sys##name(__MAP(x, __SC_COMPAT_CAST, __VA_ARGS__)); \ - } - -#else /* CONFIG_COMPAT */ - #define SYSCALL_DEFINE0(sname) \ SYSCALL_METADATA(_##sname, 0); \ - long __s390x_sys_##sname(void); \ + long __s390x_sys_##sname(struct pt_regs *__unused); \ ALLOW_ERROR_INJECTION(__s390x_sys_##sname, ERRNO); \ static inline long __do_sys_##sname(void); \ - long __s390x_sys_##sname(void) \ + long __s390x_sys_##sname(struct pt_regs *__unused) \ { \ return __do_sys_##sname(); \ } \ @@ -118,8 +29,6 @@ #define __S390_SYS_STUBx(x, fullname, name, ...) -#endif /* CONFIG_COMPAT */ - #define __SYSCALL_DEFINEx(x, name, ...) \ long __s390x_sys##name(struct pt_regs *regs); \ ALLOW_ERROR_INJECTION(__s390x_sys##name, ERRNO); \ diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h index edca5a751df4..9088c5267f35 100644 --- a/arch/s390/include/asm/sysinfo.h +++ b/arch/s390/include/asm/sysinfo.h @@ -11,8 +11,34 @@ #ifndef __ASM_S390_SYSINFO_H #define __ASM_S390_SYSINFO_H -#include <asm/bitsperlong.h> #include <linux/uuid.h> +#include <asm/bitsperlong.h> +#include <asm/asm.h> + +/* + * stsi - store system information + * + * Returns the current configuration level if function code 0 was specified. + * Otherwise returns 0 on success or a negative value on error. + */ +static inline int stsi(void *sysinfo, int fc, int sel1, int sel2) +{ + int r0 = (fc << 28) | sel1; + int cc; + + asm volatile( + " lr %%r0,%[r0]\n" + " lr %%r1,%[r1]\n" + " stsi %[sysinfo]\n" + " lr %[r0],%%r0\n" + CC_IPM(cc) + : CC_OUT(cc, cc), [r0] "+d" (r0), [sysinfo] "=Q" (*(char *)sysinfo) + : [r1] "d" (sel2) + : CC_CLOBBER_LIST("0", "1", "memory")); + if (cc == 3) + return -EOPNOTSUPP; + return fc ? 
0 : (unsigned int)r0 >> 28; +} struct sysinfo_1_1_1 { unsigned char p:1; diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index a674c7d25da5..6a548a819400 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -9,14 +9,12 @@ #define _ASM_THREAD_INFO_H #include <linux/bits.h> -#ifndef ASM_OFFSETS_C -#include <asm/asm-offsets.h> -#endif +#include <vdso/page.h> /* * General size of kernel stacks */ -#ifdef CONFIG_KASAN +#if defined(CONFIG_KASAN) || defined(CONFIG_KMSAN) #define THREAD_SIZE_ORDER 4 #else #define THREAD_SIZE_ORDER 2 @@ -26,9 +24,7 @@ #define STACK_INIT_OFFSET (THREAD_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE) -#ifndef __ASSEMBLY__ -#include <asm/lowcore.h> -#include <asm/page.h> +#ifndef __ASSEMBLER__ /* * low level task data that entry.S needs immediate access to @@ -40,6 +36,7 @@ struct thread_info { unsigned long flags; /* low level flags */ unsigned long syscall_work; /* SYSCALL_WORK_ flags */ unsigned int cpu; /* current CPU */ + unsigned char sie; /* running in SIE context */ }; /* @@ -59,48 +56,29 @@ void arch_setup_new_exec(void); /* * thread information flags bit numbers + * + * Tell the generic TIF infrastructure which special bits s390 supports */ -/* _TIF_WORK bits */ -#define TIF_NOTIFY_RESUME 0 /* callback before returning to user */ -#define TIF_SIGPENDING 1 /* signal pending */ -#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ -#define TIF_UPROBE 3 /* breakpointed or single-stepping */ -#define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */ -#define TIF_PATCH_PENDING 5 /* pending live patching update */ -#define TIF_PGSTE 6 /* New mm's will use 4K page tables */ -#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ -#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */ -#define TIF_PER_TRAP 10 /* Need to handle PER trap on exit to usermode */ +#define HAVE_TIF_NEED_RESCHED_LAZY +#define HAVE_TIF_RESTORE_SIGMASK -#define TIF_31BIT 16 /* 32bit process */ -#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ -#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */ -#define TIF_SINGLE_STEP 19 /* This task is single stepped */ -#define TIF_BLOCK_STEP 20 /* This task is block stepped */ -#define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */ +#include <asm-generic/thread_info_tif.h> -/* _TIF_TRACE bits */ -#define TIF_SYSCALL_TRACE 24 /* syscall trace active */ -#define TIF_SYSCALL_AUDIT 25 /* syscall auditing active */ -#define TIF_SECCOMP 26 /* secure computing */ -#define TIF_SYSCALL_TRACEPOINT 27 /* syscall tracepoint instrumentation */ +/* Architecture specific bits */ +#define TIF_ASCE_PRIMARY 16 /* primary asce is kernel asce */ +#define TIF_GUARDED_STORAGE 17 /* load guarded storage control block */ +#define TIF_ISOLATE_BP_GUEST 18 /* Run KVM guests with isolated BP */ +#define TIF_PER_TRAP 19 /* Need to handle PER trap on exit to usermode */ +#define TIF_SINGLE_STEP 21 /* This task is single stepped */ +#define TIF_BLOCK_STEP 22 /* This task is block stepped */ +#define TIF_UPROBE_SINGLESTEP 23 /* This task is uprobe single stepped */ -#define _TIF_NOTIFY_RESUME BIT(TIF_NOTIFY_RESUME) -#define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL) -#define _TIF_SIGPENDING BIT(TIF_SIGPENDING) -#define _TIF_NEED_RESCHED BIT(TIF_NEED_RESCHED) -#define _TIF_UPROBE BIT(TIF_UPROBE) +#define _TIF_ASCE_PRIMARY BIT(TIF_ASCE_PRIMARY) #define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE) -#define _TIF_PATCH_PENDING 
BIT(TIF_PATCH_PENDING) #define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST) #define _TIF_PER_TRAP BIT(TIF_PER_TRAP) - -#define _TIF_31BIT BIT(TIF_31BIT) #define _TIF_SINGLE_STEP BIT(TIF_SINGLE_STEP) - -#define _TIF_SYSCALL_TRACE BIT(TIF_SYSCALL_TRACE) -#define _TIF_SYSCALL_AUDIT BIT(TIF_SYSCALL_AUDIT) -#define _TIF_SECCOMP BIT(TIF_SECCOMP) -#define _TIF_SYSCALL_TRACEPOINT BIT(TIF_SYSCALL_TRACEPOINT) +#define _TIF_BLOCK_STEP BIT(TIF_BLOCK_STEP) +#define _TIF_UPROBE_SINGLESTEP BIT(TIF_UPROBE_SINGLESTEP) #endif /* _ASM_THREAD_INFO_H */ diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 4d646659a5f5..49447b40f038 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -13,6 +13,8 @@ #include <linux/preempt.h> #include <linux/time64.h> #include <asm/lowcore.h> +#include <asm/machine.h> +#include <asm/asm.h> /* The value of the TOD clock for 1.1.1970. */ #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL @@ -44,11 +46,12 @@ static inline int set_tod_clock(__u64 time) int cc; asm volatile( - " sck %1\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (cc) : "Q" (time) : "cc"); - return cc; + " sck %[time]\n" + CC_IPM(cc) + : CC_OUT(cc, cc) + : [time] "Q" (time) + : CC_CLOBBER); + return CC_TRANSFORM(cc); } static inline int store_tod_clock_ext_cc(union tod_clock *clk) @@ -56,11 +59,12 @@ static inline int store_tod_clock_ext_cc(union tod_clock *clk) int cc; asm volatile( - " stcke %1\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (cc), "=Q" (*clk) : : "cc"); - return cc; + " stcke %[clk]\n" + CC_IPM(cc) + : CC_OUT(cc, cc), [clk] "=Q" (*clk) + : + : CC_CLOBBER); + return CC_TRANSFORM(cc); } static __always_inline void store_tod_clock_ext(union tod_clock *tod) @@ -77,7 +81,7 @@ static inline void set_tod_programmable_field(u16 val) { asm volatile( " lgr 0,%[val]\n" - " sckpf\n" + " sckpf" : : [val] "d" ((unsigned long)val) : "0"); @@ -93,6 +97,7 @@ extern unsigned char ptff_function_mask[16]; #define PTFF_QAF 0x00 /* query available functions */ #define PTFF_QTO 0x01 /* query tod offset */ #define PTFF_QSI 0x02 /* query steering information */ +#define PTFF_QPT 0x03 /* query physical clock */ #define PTFF_QUI 0x04 /* query UTC information */ #define PTFF_ATO 0x40 /* adjust tod offset */ #define PTFF_STO 0x41 /* set tod offset */ @@ -149,28 +154,27 @@ struct ptff_qui { " lgr 0,%[reg0]\n" \ " lgr 1,%[reg1]\n" \ " ptff\n" \ - " ipm %[rc]\n" \ - " srl %[rc],28\n" \ - : [rc] "=&d" (rc), "+m" (*(struct addrtype *)reg1) \ + CC_IPM(rc) \ + : CC_OUT(rc, rc), "+m" (*(struct addrtype *)reg1) \ : [reg0] "d" (reg0), [reg1] "d" (reg1) \ - : "cc", "0", "1"); \ - rc; \ + : CC_CLOBBER_LIST("0", "1")); \ + CC_TRANSFORM(rc); \ }) static inline unsigned long local_tick_disable(void) { unsigned long old; - old = S390_lowcore.clock_comparator; - S390_lowcore.clock_comparator = clock_comparator_max; - set_clock_comparator(S390_lowcore.clock_comparator); + old = get_lowcore()->clock_comparator; + get_lowcore()->clock_comparator = clock_comparator_max; + set_clock_comparator(get_lowcore()->clock_comparator); return old; } static inline void local_tick_enable(unsigned long comp) { - S390_lowcore.clock_comparator = comp; - set_clock_comparator(S390_lowcore.clock_comparator); + get_lowcore()->clock_comparator = comp; + set_clock_comparator(get_lowcore()->clock_comparator); } #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ @@ -192,13 +196,6 @@ static inline unsigned long get_tod_clock_fast(void) asm volatile("stckf %0" : "=Q" (clk) : : "cc"); return clk; } - -static inline cycles_t 
get_cycles(void) -{ - return (cycles_t) get_tod_clock() >> 2; -} -#define get_cycles get_cycles - int get_phys_clock(unsigned long *clock); void init_cpu_timer(void); @@ -226,6 +223,12 @@ static inline unsigned long get_tod_clock_monotonic(void) return tod; } +static inline cycles_t get_cycles(void) +{ + return (cycles_t)get_tod_clock_monotonic() >> 2; +} +#define get_cycles get_cycles + /** * tod_to_ns - convert a TOD format value to nanoseconds * @todval: to be converted TOD format value @@ -250,6 +253,11 @@ static __always_inline unsigned long tod_to_ns(unsigned long todval) return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9); } +static __always_inline u128 eitod_to_ns(u128 todval) +{ + return (todval * 125) >> 9; +} + /** * tod_after - compare two 64 bit TOD values * @a: first 64 bit TOD timestamp @@ -259,7 +267,7 @@ static __always_inline unsigned long tod_to_ns(unsigned long todval) */ static inline int tod_after(unsigned long a, unsigned long b) { - if (MACHINE_HAS_SCC) + if (machine_has_scc()) return (long) a > (long) b; return a > b; } @@ -273,7 +281,7 @@ static inline int tod_after(unsigned long a, unsigned long b) */ static inline int tod_after_eq(unsigned long a, unsigned long b) { - if (MACHINE_HAS_SCC) + if (machine_has_scc()) return (long) a >= (long) b; return a >= b; } diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index e95b2c8081eb..1e50f6f1ad9d 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -22,7 +22,6 @@ * Pages used for the page tables is a different story. FIXME: more */ -void __tlb_remove_table(void *_table); static inline void tlb_flush(struct mmu_gather *tlb); static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, bool delay_rmap, int page_size); @@ -37,11 +36,12 @@ static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb, #include <asm/tlbflush.h> #include <asm-generic/tlb.h> +#include <asm/gmap.h> /* * Release the page cache reference for a pte removed by * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page - * has already been freed, so just do free_page_and_swap_cache. + * has already been freed, so just do free_folio_and_swap_cache. * * s390 doesn't delay rmap removal. 
*/ @@ -50,7 +50,7 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, { VM_WARN_ON_ONCE(delay_rmap); - free_page_and_swap_cache(page); + free_folio_and_swap_cache(page_folio(page)); return false; } @@ -85,9 +85,9 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; tlb->cleared_pmds = 1; - if (mm_alloc_pgste(tlb->mm)) + if (mm_has_pgste(tlb->mm)) gmap_unlink(tlb->mm, (unsigned long *)pte, address); - tlb_remove_ptdesc(tlb, pte); + tlb_remove_ptdesc(tlb, virt_to_ptdesc(pte)); } /* @@ -102,12 +102,11 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, { if (mm_pmd_folded(tlb->mm)) return; - pagetable_pmd_dtor(virt_to_ptdesc(pmd)); __tlb_adjust_range(tlb, address, PAGE_SIZE); tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; tlb->cleared_puds = 1; - tlb_remove_ptdesc(tlb, pmd); + tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd)); } /* @@ -125,7 +124,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, __tlb_adjust_range(tlb, address, PAGE_SIZE); tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; - tlb_remove_ptdesc(tlb, p4d); + tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d)); } /* @@ -140,11 +139,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, { if (mm_pud_folded(tlb->mm)) return; + __tlb_adjust_range(tlb, address, PAGE_SIZE); tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; tlb->cleared_p4ds = 1; - tlb_remove_ptdesc(tlb, pud); + tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud)); } - #endif /* _S390_TLB_H */ diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index a6e2cd89b609..163ccbbe8c47 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h @@ -2,9 +2,11 @@ #ifndef _S390_TLBFLUSH_H #define _S390_TLBFLUSH_H +#include <linux/cpufeature.h> #include <linux/mm.h> #include <linux/sched.h> #include <asm/processor.h> +#include <asm/machine.h> /* * Flush all TLB entries on the local CPU. @@ -22,7 +24,7 @@ static inline void __tlb_flush_idte(unsigned long asce) unsigned long opt; opt = IDTE_PTOA; - if (MACHINE_HAS_TLB_GUEST) + if (machine_has_tlb_guest()) opt |= IDTE_GUEST_ASCE; /* Global TLB flush for the mm */ asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc"); @@ -33,9 +35,9 @@ static inline void __tlb_flush_idte(unsigned long asce) */ static inline void __tlb_flush_global(void) { - unsigned int dummy = 0; + unsigned long dummy = 0; - csp(&dummy, 0, 0); + cspg(&dummy, 0, 0); } /* @@ -46,18 +48,13 @@ static inline void __tlb_flush_mm(struct mm_struct *mm) { unsigned long gmap_asce; - /* - * If the machine has IDTE we prefer to do a per mm flush - * on all cpus instead of doing a local flush if the mm - * only ran on the local cpu. 
- */ preempt_disable(); atomic_inc(&mm->context.flush_count); /* Reset TLB flush mask */ cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask); barrier(); gmap_asce = READ_ONCE(mm->context.gmap_asce); - if (MACHINE_HAS_IDTE && gmap_asce != -1UL) { + if (gmap_asce != -1UL) { if (gmap_asce) __tlb_flush_idte(gmap_asce); __tlb_flush_idte(mm->context.asce); @@ -71,10 +68,7 @@ static inline void __tlb_flush_mm(struct mm_struct *mm) static inline void __tlb_flush_kernel(void) { - if (MACHINE_HAS_IDTE) - __tlb_flush_idte(init_mm.context.asce); - else - __tlb_flush_global(); + __tlb_flush_idte(init_mm.context.asce); } static inline void __tlb_flush_mm_lazy(struct mm_struct * mm) @@ -89,7 +83,6 @@ static inline void __tlb_flush_mm_lazy(struct mm_struct * mm) /* * TLB flushing: - * flush_tlb() - flushes the current mm struct TLBs * flush_tlb_all() - flushes all processes TLBs * flush_tlb_mm(mm) - flushes the specified mm context TLB's * flush_tlb_page(vma, vmaddr) - flushes one page @@ -105,7 +98,6 @@ static inline void __tlb_flush_mm_lazy(struct mm_struct * mm) * only one user. At the end of the update the flush_tlb_mm and * flush_tlb_range functions need to do the flush. */ -#define flush_tlb() do { } while (0) #define flush_tlb_all() do { } while (0) #define flush_tlb_page(vma, addr) do { } while (0) diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 3a0ac0c7a9a3..44110847342a 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -61,12 +61,21 @@ static inline void topology_expect_change(void) { } #endif /* CONFIG_SCHED_TOPOLOGY */ +static inline bool topology_is_primary_thread(unsigned int cpu) +{ + return smp_get_base_cpu(cpu) == cpu; +} +#define topology_is_primary_thread topology_is_primary_thread + #define POLARIZATION_UNKNOWN (-1) #define POLARIZATION_HRZ (0) #define POLARIZATION_VL (1) #define POLARIZATION_VM (2) #define POLARIZATION_VH (3) +#define CPU_CAPACITY_HIGH SCHED_CAPACITY_SCALE +#define CPU_CAPACITY_LOW (SCHED_CAPACITY_SCALE >> 3) + #define SD_BOOK_INIT SD_CPU_INIT #ifdef CONFIG_NUMA diff --git a/arch/s390/include/asm/tpi.h b/arch/s390/include/asm/tpi.h index f76e5fdff23a..71c8b6f76cdd 100644 --- a/arch/s390/include/asm/tpi.h +++ b/arch/s390/include/asm/tpi.h @@ -5,7 +5,7 @@ #include <linux/types.h> #include <uapi/asm/schid.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* I/O-Interruption Code as stored by TEST PENDING INTERRUPTION (TPI). */ struct tpi_info { @@ -32,6 +32,6 @@ struct tpi_adapter_info { u32 :27; } __packed __aligned(4); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_S390_TPI_H */ diff --git a/arch/s390/include/asm/trace/ap.h b/arch/s390/include/asm/trace/ap.h new file mode 100644 index 000000000000..5c2e6c664b4d --- /dev/null +++ b/arch/s390/include/asm/trace/ap.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Tracepoint definitions for s390 ap bus related trace events + * + * There are two AP bus related tracepoint events defined here: + * There is a tracepoint s390_ap_nqap event immediately after a request + * has been pushed into the AP firmware queue with the NQAP AP command. + * The other tracepoint s390_ap_dqap event fires immediately after a + * reply has been pulled out of the AP firmware queue via DQAP AP command. + * The idea of these two trace events focuses on performance to measure + * the runtime of a crypto request/reply as close as possible at the + * firmware level. 
In combination with the two zcrypt tracepoints (see the
+ * zcrypt.h trace event definition file) this gives measurement data about
+ * the runtime of a request/reply within the zcrypt and AP bus layer.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM s390
+
+#if !defined(_TRACE_S390_AP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_S390_AP_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(s390_ap_nqapdqap_template,
+	    TP_PROTO(u16 card, u16 dom, u32 status, u64 psmid),
+	    TP_ARGS(card, dom, status, psmid),
+	    TP_STRUCT__entry(
+		    __field(u16, card)
+		    __field(u16, dom)
+		    __field(u32, status)
+		    __field(u64, psmid)),
+	    TP_fast_assign(
+		    __entry->card = card;
+		    __entry->dom = dom;
+		    __entry->status = status;
+		    __entry->psmid = psmid;),
+	    TP_printk("card=%u dom=%u status=0x%08x psmid=0x%016lx",
+		      (unsigned short)__entry->card,
+		      (unsigned short)__entry->dom,
+		      (unsigned int)__entry->status,
+		      (unsigned long)__entry->psmid)
+);
+
+/**
+ * trace_s390_ap_nqap - ap msg nqap tracepoint function
+ * @card: Crypto card number addressed.
+ * @dom: Domain within the crypto card addressed.
+ * @status: AP queue status (GR1 on return of nqap).
+ * @psmid: Unique id identifying this request/reply.
+ *
+ * Called immediately after a request has been enqueued into
+ * the AP firmware queue with the NQAP command.
+ */
+DEFINE_EVENT(s390_ap_nqapdqap_template,
+	     s390_ap_nqap,
+	     TP_PROTO(u16 card, u16 dom, u32 status, u64 psmid),
+	     TP_ARGS(card, dom, status, psmid)
+);
+
+/**
+ * trace_s390_ap_dqap - ap msg dqap tracepoint function
+ * @card: Crypto card number addressed.
+ * @dom: Domain within the crypto card addressed.
+ * @status: AP queue status (GR1 on return of dqap).
+ * @psmid: Unique id identifying this request/reply.
+ *
+ * Called immediately after a reply has been dequeued from
+ * the AP firmware queue with the DQAP command.
+ */
+DEFINE_EVENT(s390_ap_nqapdqap_template,
+	     s390_ap_dqap,
+	     TP_PROTO(u16 card, u16 dom, u32 status, u64 psmid),
+	     TP_ARGS(card, dom, status, psmid)
+);
+
+#endif /* _TRACE_S390_AP_H */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH asm/trace
+#define TRACE_INCLUDE_FILE ap
+
+#include <trace/define_trace.h>
diff --git a/arch/s390/include/asm/trace/hiperdispatch.h b/arch/s390/include/asm/trace/hiperdispatch.h
new file mode 100644
index 000000000000..46462ee645b0
--- /dev/null
+++ b/arch/s390/include/asm/trace/hiperdispatch.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Tracepoint header for hiperdispatch
+ *
+ * Copyright IBM Corp.
2024 + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM s390 + +#if !defined(_TRACE_S390_HIPERDISPATCH_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_S390_HIPERDISPATCH_H + +#include <linux/tracepoint.h> + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE + +#define TRACE_INCLUDE_PATH asm/trace +#define TRACE_INCLUDE_FILE hiperdispatch + +TRACE_EVENT(s390_hd_work_fn, + TP_PROTO(int steal_time_percentage, + int entitled_core_count, + int highcap_core_count), + TP_ARGS(steal_time_percentage, + entitled_core_count, + highcap_core_count), + TP_STRUCT__entry(__field(int, steal_time_percentage) + __field(int, entitled_core_count) + __field(int, highcap_core_count)), + TP_fast_assign(__entry->steal_time_percentage = steal_time_percentage; + __entry->entitled_core_count = entitled_core_count; + __entry->highcap_core_count = highcap_core_count;), + TP_printk("steal: %d entitled_core_count: %d highcap_core_count: %d", + __entry->steal_time_percentage, + __entry->entitled_core_count, + __entry->highcap_core_count) +); + +TRACE_EVENT(s390_hd_rebuild_domains, + TP_PROTO(int current_highcap_core_count, + int new_highcap_core_count), + TP_ARGS(current_highcap_core_count, + new_highcap_core_count), + TP_STRUCT__entry(__field(int, current_highcap_core_count) + __field(int, new_highcap_core_count)), + TP_fast_assign(__entry->current_highcap_core_count = current_highcap_core_count; + __entry->new_highcap_core_count = new_highcap_core_count), + TP_printk("change highcap_core_count: %u -> %u", + __entry->current_highcap_core_count, + __entry->new_highcap_core_count) +); + +#endif /* _TRACE_S390_HIPERDISPATCH_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/arch/s390/include/asm/trace/zcrypt.h b/arch/s390/include/asm/trace/zcrypt.h index 457ddaa99e19..bfb01e335f31 100644 --- a/arch/s390/include/asm/trace/zcrypt.h +++ b/arch/s390/include/asm/trace/zcrypt.h @@ -2,7 +2,7 @@ /* * Tracepoint definitions for the s390 zcrypt device driver * - * Copyright IBM Corp. 2016 + * Copyright IBM Corp. 2016,2025 * Author(s): Harald Freudenberger <freude@de.ibm.com> * * Currently there are two tracepoint events defined here. @@ -73,14 +73,15 @@ TRACE_EVENT(s390_zcrypt_req, /** * trace_s390_zcrypt_rep - zcrypt reply tracepoint function - * @ptr: Address of the local buffer where the request from userspace - * is stored. Can be used as a unique id to match together - * request and reply. - * @fc: Function code. - * @rc: The bare returncode as returned by the device driver ioctl - * function. - * @dev: The adapter nr where this request was actually processed. - * @dom: Domain id of the device where this request was processed. + * @ptr: Address of the local buffer where the request from userspace + * is stored. Can be used as a unique id to match together + * request and reply. + * @fc: Function code. + * @rc: The bare returncode as returned by the device driver ioctl + * function. + * @card: The adapter nr where this request was actually processed. + * @dom: Domain id of the device where this request was processed. + * @psmid: Unique id identifying this request/reply. * * Called upon recognising the reply from the crypto adapter. This * message may act as the exit timestamp for the request but also @@ -88,26 +89,29 @@ TRACE_EVENT(s390_zcrypt_req, * and the returncode from the device driver. 
*/ TRACE_EVENT(s390_zcrypt_rep, - TP_PROTO(void *ptr, u32 fc, u32 rc, u16 dev, u16 dom), - TP_ARGS(ptr, fc, rc, dev, dom), + TP_PROTO(void *ptr, u32 fc, u32 rc, u16 card, u16 dom, u64 psmid), + TP_ARGS(ptr, fc, rc, card, dom, psmid), TP_STRUCT__entry( __field(void *, ptr) __field(u32, fc) __field(u32, rc) - __field(u16, device) - __field(u16, domain)), + __field(u16, card) + __field(u16, dom) + __field(u64, psmid)), TP_fast_assign( __entry->ptr = ptr; __entry->fc = fc; __entry->rc = rc; - __entry->device = dev; - __entry->domain = dom;), - TP_printk("ptr=%p fc=0x%04x rc=%d dev=0x%02hx domain=0x%04hx", + __entry->card = card; + __entry->dom = dom; + __entry->psmid = psmid;), + TP_printk("ptr=%p fc=0x%04x rc=%d card=%u dom=%u psmid=0x%016lx", __entry->ptr, - (unsigned int) __entry->fc, - (int) __entry->rc, - (unsigned short) __entry->device, - (unsigned short) __entry->domain) + (unsigned int)__entry->fc, + (int)__entry->rc, + (unsigned short)__entry->card, + (unsigned short)__entry->dom, + (unsigned long)__entry->psmid) ); #endif /* _TRACE_S390_ZCRYPT_H */ diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h index 0b5d550a0478..53695b2196f7 100644 --- a/arch/s390/include/asm/types.h +++ b/arch/s390/include/asm/types.h @@ -5,7 +5,7 @@ #include <uapi/asm/types.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ union register_pair { unsigned __int128 pair; @@ -15,5 +15,5 @@ union register_pair { }; }; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _ASM_S390_TYPES_H */ diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 81ae8a98e7ec..c5e02addcd67 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -13,24 +13,81 @@ /* * User space memory access functions */ +#include <linux/pgtable.h> #include <asm/asm-extable.h> #include <asm/processor.h> #include <asm/extable.h> #include <asm/facility.h> #include <asm-generic/access_ok.h> +#include <asm/asce.h> +#include <linux/instrumented.h> void debug_user_asce(int exit); -unsigned long __must_check -raw_copy_from_user(void *to, const void __user *from, unsigned long n); - -unsigned long __must_check -raw_copy_to_user(void __user *to, const void *from, unsigned long n); +#ifdef CONFIG_KMSAN +#define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory +#else +#define uaccess_kmsan_or_inline __always_inline +#endif -#ifndef CONFIG_KASAN #define INLINE_COPY_FROM_USER #define INLINE_COPY_TO_USER -#endif + +static uaccess_kmsan_or_inline __must_check unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long size) +{ + unsigned long osize; + int cc; + + while (1) { + osize = size; + asm_inline volatile( + " lhi %%r0,%[spec]\n" + "0: mvcos %[to],%[from],%[size]\n" + "1: nopr %%r7\n" + CC_IPM(cc) + EX_TABLE_UA_MVCOS_FROM(0b, 0b) + EX_TABLE_UA_MVCOS_FROM(1b, 0b) + : CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char *)to) + : [spec] "I" (0x81), [from] "Q" (*(const char __user *)from) + : CC_CLOBBER_LIST("memory", "0")); + if (__builtin_constant_p(osize) && osize <= 4096) + return osize - size; + if (likely(CC_TRANSFORM(cc) == 0)) + return osize - size; + size -= 4096; + to += 4096; + from += 4096; + } +} + +static uaccess_kmsan_or_inline __must_check unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long size) +{ + unsigned long osize; + int cc; + + while (1) { + osize = size; + asm_inline volatile( + " llilh %%r0,%[spec]\n" + "0: mvcos %[to],%[from],%[size]\n" + "1: nopr %%r7\n" + CC_IPM(cc) + 
EX_TABLE_UA_MVCOS_TO(0b, 0b) + EX_TABLE_UA_MVCOS_TO(1b, 0b) + : CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to) + : [spec] "I" (0x81), [from] "Q" (*(const char *)from) + : CC_CLOBBER_LIST("memory", "0")); + if (__builtin_constant_p(osize) && osize <= 4096) + return osize - size; + if (likely(CC_TRANSFORM(cc) == 0)) + return osize - size; + size -= 4096; + to += 4096; + from += 4096; + } +} unsigned long __must_check _copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key); @@ -54,163 +111,113 @@ copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned lo return n; } -union oac { - unsigned int val; - struct { - struct { - unsigned short key : 4; - unsigned short : 4; - unsigned short as : 2; - unsigned short : 4; - unsigned short k : 1; - unsigned short a : 1; - } oac1; - struct { - unsigned short key : 4; - unsigned short : 4; - unsigned short as : 2; - unsigned short : 4; - unsigned short k : 1; - unsigned short a : 1; - } oac2; - }; -}; - int __noreturn __put_user_bad(void); -#define __put_user_asm(to, from, size) \ -({ \ - union oac __oac_spec = { \ - .oac1.as = PSW_BITS_AS_SECONDARY, \ - .oac1.a = 1, \ - }; \ - int __rc; \ - \ - asm volatile( \ - " lr 0,%[spec]\n" \ - "0: mvcos %[_to],%[_from],%[_size]\n" \ - "1: xr %[rc],%[rc]\n" \ - "2:\n" \ - EX_TABLE_UA_STORE(0b, 2b, %[rc]) \ - EX_TABLE_UA_STORE(1b, 2b, %[rc]) \ - : [rc] "=&d" (__rc), [_to] "+Q" (*(to)) \ - : [_size] "d" (size), [_from] "Q" (*(from)), \ - [spec] "d" (__oac_spec.val) \ - : "cc", "0"); \ - __rc; \ -}) - -static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) -{ - int rc; - - switch (size) { - case 1: - rc = __put_user_asm((unsigned char __user *)ptr, - (unsigned char *)x, - size); - break; - case 2: - rc = __put_user_asm((unsigned short __user *)ptr, - (unsigned short *)x, - size); - break; - case 4: - rc = __put_user_asm((unsigned int __user *)ptr, - (unsigned int *)x, - size); - break; - case 8: - rc = __put_user_asm((unsigned long __user *)ptr, - (unsigned long *)x, - size); - break; - default: - __put_user_bad(); - break; - } - return rc; +#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT + +#define DEFINE_PUT_USER_NOINSTR(type) \ +static uaccess_kmsan_or_inline int \ +__put_user_##type##_noinstr(unsigned type __user *to, \ + unsigned type *from, \ + unsigned long size) \ +{ \ + asm goto( \ + " llilh %%r0,%[spec]\n" \ + "0: mvcos %[to],%[from],%[size]\n" \ + "1: nopr %%r7\n" \ + EX_TABLE(0b, %l[Efault]) \ + EX_TABLE(1b, %l[Efault]) \ + : [to] "+Q" (*to) \ + : [size] "d" (size), [from] "Q" (*from), \ + [spec] "I" (0x81) \ + : "cc", "0" \ + : Efault \ + ); \ + return 0; \ +Efault: \ + return -EFAULT; \ } -int __noreturn __get_user_bad(void); +#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ -#define __get_user_asm(to, from, size) \ -({ \ - union oac __oac_spec = { \ - .oac2.as = PSW_BITS_AS_SECONDARY, \ - .oac2.a = 1, \ - }; \ - int __rc; \ +#define DEFINE_PUT_USER_NOINSTR(type) \ +static uaccess_kmsan_or_inline int \ +__put_user_##type##_noinstr(unsigned type __user *to, \ + unsigned type *from, \ + unsigned long size) \ +{ \ + int rc; \ \ - asm volatile( \ - " lr 0,%[spec]\n" \ - "0: mvcos 0(%[_to]),%[_from],%[_size]\n" \ - "1: xr %[rc],%[rc]\n" \ + asm_inline volatile( \ + " llilh %%r0,%[spec]\n" \ + "0: mvcos %[to],%[from],%[size]\n" \ + "1: lhi %[rc],0\n" \ "2:\n" \ - EX_TABLE_UA_LOAD_MEM(0b, 2b, %[rc], %[_to], %[_ksize]) \ - EX_TABLE_UA_LOAD_MEM(1b, 2b, %[rc], %[_to], %[_ksize]) \ - : [rc] "=&d" (__rc), "=Q" (*(to)) \ - : 
[_size] "d" (size), [_from] "Q" (*(from)), \ - [spec] "d" (__oac_spec.val), [_to] "a" (to), \ - [_ksize] "K" (size) \ + EX_TABLE_UA_FAULT(0b, 2b, %[rc]) \ + EX_TABLE_UA_FAULT(1b, 2b, %[rc]) \ + : [rc] "=d" (rc), [to] "+Q" (*to) \ + : [size] "d" (size), [from] "Q" (*from), \ + [spec] "I" (0x81) \ : "cc", "0"); \ - __rc; \ -}) + return rc; \ +} -static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) -{ - int rc; +#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ - switch (size) { - case 1: - rc = __get_user_asm((unsigned char *)x, - (unsigned char __user *)ptr, - size); - break; - case 2: - rc = __get_user_asm((unsigned short *)x, - (unsigned short __user *)ptr, - size); - break; - case 4: - rc = __get_user_asm((unsigned int *)x, - (unsigned int __user *)ptr, - size); - break; - case 8: - rc = __get_user_asm((unsigned long *)x, - (unsigned long __user *)ptr, - size); - break; - default: - __get_user_bad(); - break; - } - return rc; +DEFINE_PUT_USER_NOINSTR(char); +DEFINE_PUT_USER_NOINSTR(short); +DEFINE_PUT_USER_NOINSTR(int); +DEFINE_PUT_USER_NOINSTR(long); + +#define DEFINE_PUT_USER(type) \ +static __always_inline int \ +__put_user_##type(unsigned type __user *to, unsigned type *from, \ + unsigned long size) \ +{ \ + int rc; \ + \ + rc = __put_user_##type##_noinstr(to, from, size); \ + instrument_put_user(*from, to, size); \ + return rc; \ } -/* - * These are the main single-value transfer routines. They automatically - * use the right size if we just have the right pointer type. - */ +DEFINE_PUT_USER(char); +DEFINE_PUT_USER(short); +DEFINE_PUT_USER(int); +DEFINE_PUT_USER(long); + #define __put_user(x, ptr) \ ({ \ __typeof__(*(ptr)) __x = (x); \ - int __pu_err = -EFAULT; \ + int __prc; \ \ __chk_user_ptr(ptr); \ switch (sizeof(*(ptr))) { \ case 1: \ + __prc = __put_user_char((unsigned char __user *)(ptr), \ + (unsigned char *)&__x, \ + sizeof(*(ptr))); \ + break; \ case 2: \ + __prc = __put_user_short((unsigned short __user *)(ptr),\ + (unsigned short *)&__x, \ + sizeof(*(ptr))); \ + break; \ case 4: \ + __prc = __put_user_int((unsigned int __user *)(ptr), \ + (unsigned int *)&__x, \ + sizeof(*(ptr))); \ + break; \ case 8: \ - __pu_err = __put_user_fn(&__x, ptr, sizeof(*(ptr))); \ + __prc = __put_user_long((unsigned long __user *)(ptr), \ + (unsigned long *)&__x, \ + sizeof(*(ptr))); \ break; \ default: \ - __put_user_bad(); \ + __prc = __put_user_bad(); \ break; \ } \ - __builtin_expect(__pu_err, 0); \ + __builtin_expect(__prc, 0); \ }) #define put_user(x, ptr) \ @@ -219,45 +226,129 @@ static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsign __put_user(x, ptr); \ }) +int __noreturn __get_user_bad(void); + +#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT + +#define DEFINE_GET_USER_NOINSTR(type) \ +static uaccess_kmsan_or_inline int \ +__get_user_##type##_noinstr(unsigned type *to, \ + const unsigned type __user *from, \ + unsigned long size) \ +{ \ + asm goto( \ + " lhi %%r0,%[spec]\n" \ + "0: mvcos %[to],%[from],%[size]\n" \ + "1: nopr %%r7\n" \ + EX_TABLE(0b, %l[Efault]) \ + EX_TABLE(1b, %l[Efault]) \ + : [to] "=Q" (*to) \ + : [size] "d" (size), [from] "Q" (*from), \ + [spec] "I" (0x81) \ + : "cc", "0" \ + : Efault \ + ); \ + return 0; \ +Efault: \ + *to = 0; \ + return -EFAULT; \ +} + +#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ + +#define DEFINE_GET_USER_NOINSTR(type) \ +static uaccess_kmsan_or_inline int \ +__get_user_##type##_noinstr(unsigned type *to, \ + const unsigned type __user *from, \ + unsigned long size) \ +{ \ + int rc; \ + \ + 
asm_inline volatile( \ + " lhi %%r0,%[spec]\n" \ + "0: mvcos %[to],%[from],%[size]\n" \ + "1: lhi %[rc],0\n" \ + "2:\n" \ + EX_TABLE_UA_FAULT(0b, 2b, %[rc]) \ + EX_TABLE_UA_FAULT(1b, 2b, %[rc]) \ + : [rc] "=d" (rc), [to] "=Q" (*to) \ + : [size] "d" (size), [from] "Q" (*from), \ + [spec] "I" (0x81) \ + : "cc", "0"); \ + if (likely(!rc)) \ + return 0; \ + *to = 0; \ + return rc; \ +} + +#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ + +DEFINE_GET_USER_NOINSTR(char); +DEFINE_GET_USER_NOINSTR(short); +DEFINE_GET_USER_NOINSTR(int); +DEFINE_GET_USER_NOINSTR(long); + +#define DEFINE_GET_USER(type) \ +static __always_inline int \ +__get_user_##type(unsigned type *to, const unsigned type __user *from, \ + unsigned long size) \ +{ \ + int rc; \ + \ + rc = __get_user_##type##_noinstr(to, from, size); \ + instrument_get_user(*to); \ + return rc; \ +} + +DEFINE_GET_USER(char); +DEFINE_GET_USER(short); +DEFINE_GET_USER(int); +DEFINE_GET_USER(long); + #define __get_user(x, ptr) \ ({ \ - int __gu_err = -EFAULT; \ + const __user void *____guptr = (ptr); \ + int __grc; \ \ __chk_user_ptr(ptr); \ switch (sizeof(*(ptr))) { \ case 1: { \ + const unsigned char __user *__guptr = ____guptr; \ unsigned char __x; \ \ - __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ + __grc = __get_user_char(&__x, __guptr, sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *)&__x; \ break; \ }; \ case 2: { \ + const unsigned short __user *__guptr = ____guptr; \ unsigned short __x; \ \ - __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ + __grc = __get_user_short(&__x, __guptr, sizeof(*(ptr)));\ (x) = *(__force __typeof__(*(ptr)) *)&__x; \ break; \ }; \ case 4: { \ + const unsigned int __user *__guptr = ____guptr; \ unsigned int __x; \ \ - __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ + __grc = __get_user_int(&__x, __guptr, sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *)&__x; \ break; \ }; \ case 8: { \ + const unsigned long __user *__guptr = ____guptr; \ unsigned long __x; \ \ - __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ + __grc = __get_user_long(&__x, __guptr, sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *)&__x; \ break; \ }; \ default: \ - __get_user_bad(); \ + __grc = __get_user_bad(); \ break; \ } \ - __builtin_expect(__gu_err, 0); \ + __builtin_expect(__grc, 0); \ }) #define get_user(x, ptr) \ @@ -273,295 +364,139 @@ long __must_check strncpy_from_user(char *dst, const char __user *src, long coun long __must_check strnlen_user(const char __user *src, long count); -/* - * Zero Userspace - */ -unsigned long __must_check __clear_user(void __user *to, unsigned long size); +static uaccess_kmsan_or_inline __must_check unsigned long +__clear_user(void __user *to, unsigned long size) +{ + unsigned long osize; + int cc; + + while (1) { + osize = size; + asm_inline volatile( + " llilh %%r0,%[spec]\n" + "0: mvcos %[to],%[from],%[size]\n" + "1: nopr %%r7\n" + CC_IPM(cc) + EX_TABLE_UA_MVCOS_TO(0b, 0b) + EX_TABLE_UA_MVCOS_TO(1b, 0b) + : CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to) + : [spec] "I" (0x81), [from] "Q" (*(const char *)empty_zero_page) + : CC_CLOBBER_LIST("memory", "0")); + if (__builtin_constant_p(osize) && osize <= 4096) + return osize - size; + if (CC_TRANSFORM(cc) == 0) + return osize - size; + size -= 4096; + to += 4096; + } +} -static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) +static __always_inline unsigned long __must_check clear_user(void __user *to, unsigned long n) { might_fault(); return __clear_user(to, n); } 
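For context, a minimal usage sketch (not part of this patch; the helper name is invented for illustration) of how the get_user()/put_user() primitives defined above are consumed by ordinary kernel code. With this patch, a 4-byte access expands into the inlined MVCOS-based __get_user_int()/__put_user_int() helpers shown above:

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	/* Hypothetical caller, not from this patch. */
	static int example_update_flag(int __user *uptr)
	{
		int val;

		/* Inlined MVCOS load; returns -EFAULT if uptr is inaccessible. */
		if (get_user(val, uptr))
			return -EFAULT;
		val |= 0x1;
		/* Same machinery in the store direction; returns 0 or -EFAULT. */
		return put_user(val, uptr);
	}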
-void *s390_kernel_write(void *dst, const void *src, size_t size); +void *__s390_kernel_write(void *dst, const void *src, size_t size); -int __noreturn __put_kernel_bad(void); +static inline void *s390_kernel_write(void *dst, const void *src, size_t size) +{ + if (__is_defined(__DECOMPRESSOR)) + return memcpy(dst, src, size); + return __s390_kernel_write(dst, src, size); +} -#define __put_kernel_asm(val, to, insn) \ -({ \ - int __rc; \ - \ - asm volatile( \ - "0: " insn " %[_val],%[_to]\n" \ - "1: xr %[rc],%[rc]\n" \ - "2:\n" \ - EX_TABLE_UA_STORE(0b, 2b, %[rc]) \ - EX_TABLE_UA_STORE(1b, 2b, %[rc]) \ - : [rc] "=d" (__rc), [_to] "+Q" (*(to)) \ - : [_val] "d" (val) \ - : "cc"); \ - __rc; \ -}) +void __noreturn __mvc_kernel_nofault_bad(void); + +#if defined(CONFIG_CC_HAS_ASM_GOTO_OUTPUT) && defined(CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS) -#define __put_kernel_nofault(dst, src, type, err_label) \ +#define __mvc_kernel_nofault(dst, src, type, err_label) \ do { \ - unsigned long __x = (unsigned long)(*((type *)(src))); \ - int __pk_err; \ - \ switch (sizeof(type)) { \ case 1: \ - __pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \ - break; \ case 2: \ - __pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \ - break; \ case 4: \ - __pk_err = __put_kernel_asm(__x, (type *)(dst), "st"); \ - break; \ case 8: \ - __pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \ + asm goto( \ + "0: mvc %O[_dst](%[_len],%R[_dst]),%[_src]\n" \ + "1: nopr %%r7\n" \ + EX_TABLE(0b, %l[err_label]) \ + EX_TABLE(1b, %l[err_label]) \ + : [_dst] "=Q" (*(type *)dst) \ + : [_src] "Q" (*(type *)(src)), \ + [_len] "I" (sizeof(type)) \ + : \ + : err_label); \ break; \ default: \ - __pk_err = __put_kernel_bad(); \ + __mvc_kernel_nofault_bad(); \ break; \ } \ - if (unlikely(__pk_err)) \ - goto err_label; \ } while (0) -int __noreturn __get_kernel_bad(void); - -#define __get_kernel_asm(val, from, insn) \ -({ \ - int __rc; \ - \ - asm volatile( \ - "0: " insn " %[_val],%[_from]\n" \ - "1: xr %[rc],%[rc]\n" \ - "2:\n" \ - EX_TABLE_UA_LOAD_REG(0b, 2b, %[rc], %[_val]) \ - EX_TABLE_UA_LOAD_REG(1b, 2b, %[rc], %[_val]) \ - : [rc] "=d" (__rc), [_val] "=d" (val) \ - : [_from] "Q" (*(from)) \ - : "cc"); \ - __rc; \ -}) +#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT) && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ -#define __get_kernel_nofault(dst, src, type, err_label) \ +#define __mvc_kernel_nofault(dst, src, type, err_label) \ do { \ - int __gk_err; \ + type *(__dst) = (type *)(dst); \ + int __rc; \ \ switch (sizeof(type)) { \ - case 1: { \ - unsigned char __x; \ - \ - __gk_err = __get_kernel_asm(__x, (type *)(src), "ic"); \ - *((type *)(dst)) = (type)__x; \ - break; \ - }; \ - case 2: { \ - unsigned short __x; \ - \ - __gk_err = __get_kernel_asm(__x, (type *)(src), "lh"); \ - *((type *)(dst)) = (type)__x; \ - break; \ - }; \ - case 4: { \ - unsigned int __x; \ - \ - __gk_err = __get_kernel_asm(__x, (type *)(src), "l"); \ - *((type *)(dst)) = (type)__x; \ - break; \ - }; \ - case 8: { \ - unsigned long __x; \ - \ - __gk_err = __get_kernel_asm(__x, (type *)(src), "lg"); \ - *((type *)(dst)) = (type)__x; \ + case 1: \ + case 2: \ + case 4: \ + case 8: \ + asm_inline volatile( \ + "0: mvc 0(%[_len],%[_dst]),%[_src]\n" \ + "1: lhi %[_rc],0\n" \ + "2:\n" \ + EX_TABLE_UA_FAULT(0b, 2b, %[_rc]) \ + EX_TABLE_UA_FAULT(1b, 2b, %[_rc]) \ + : [_rc] "=d" (__rc), \ + "=m" (*__dst) \ + : [_src] "Q" (*(type *)(src)), \ + [_dst] "a" (__dst), \ + [_len] "I" (sizeof(type))); \ + if (__rc) \ + goto err_label; \ break; \ - }; \ default: \ - __gk_err = 
__get_kernel_bad(); \ + __mvc_kernel_nofault_bad(); \ break; \ } \ - if (unlikely(__gk_err)) \ - goto err_label; \ } while (0) -void __cmpxchg_user_key_called_with_bad_pointer(void); +#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ -#define CMPXCHG_USER_KEY_MAX_LOOPS 128 +#define arch_get_kernel_nofault __mvc_kernel_nofault +#define arch_put_kernel_nofault __mvc_kernel_nofault -static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, - __uint128_t old, __uint128_t new, - unsigned long key, int size) -{ - int rc = 0; +void __cmpxchg_user_key_called_with_bad_pointer(void); +int __cmpxchg_user_key1(unsigned long address, unsigned char *uval, + unsigned char old, unsigned char new, unsigned long key); +int __cmpxchg_user_key2(unsigned long address, unsigned short *uval, + unsigned short old, unsigned short new, unsigned long key); +int __cmpxchg_user_key4(unsigned long address, unsigned int *uval, + unsigned int old, unsigned int new, unsigned long key); +int __cmpxchg_user_key8(unsigned long address, unsigned long *uval, + unsigned long old, unsigned long new, unsigned long key); +int __cmpxchg_user_key16(unsigned long address, __uint128_t *uval, + __uint128_t old, __uint128_t new, unsigned long key); + +static __always_inline int _cmpxchg_user_key(unsigned long address, void *uval, + __uint128_t old, __uint128_t new, + unsigned long key, int size) +{ switch (size) { - case 1: { - unsigned int prev, shift, mask, _old, _new; - unsigned long count; - - shift = (3 ^ (address & 3)) << 3; - address ^= address & 3; - _old = ((unsigned int)old & 0xff) << shift; - _new = ((unsigned int)new & 0xff) << shift; - mask = ~(0xff << shift); - asm volatile( - " spka 0(%[key])\n" - " sacf 256\n" - " llill %[count],%[max_loops]\n" - "0: l %[prev],%[address]\n" - "1: nr %[prev],%[mask]\n" - " xilf %[mask],0xffffffff\n" - " or %[new],%[prev]\n" - " or %[prev],%[tmp]\n" - "2: lr %[tmp],%[prev]\n" - "3: cs %[prev],%[new],%[address]\n" - "4: jnl 5f\n" - " xr %[tmp],%[prev]\n" - " xr %[new],%[tmp]\n" - " nr %[tmp],%[mask]\n" - " jnz 5f\n" - " brct %[count],2b\n" - "5: sacf 768\n" - " spka %[default_key]\n" - EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev]) - EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev]) - EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev]) - EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev]) - : [rc] "+&d" (rc), - [prev] "=&d" (prev), - [address] "+Q" (*(int *)address), - [tmp] "+&d" (_old), - [new] "+&d" (_new), - [mask] "+&d" (mask), - [count] "=a" (count) - : [key] "%[count]" (key << 4), - [default_key] "J" (PAGE_DEFAULT_KEY), - [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS) - : "memory", "cc"); - *(unsigned char *)uval = prev >> shift; - if (!count) - rc = -EAGAIN; - return rc; - } - case 2: { - unsigned int prev, shift, mask, _old, _new; - unsigned long count; - - shift = (2 ^ (address & 2)) << 3; - address ^= address & 2; - _old = ((unsigned int)old & 0xffff) << shift; - _new = ((unsigned int)new & 0xffff) << shift; - mask = ~(0xffff << shift); - asm volatile( - " spka 0(%[key])\n" - " sacf 256\n" - " llill %[count],%[max_loops]\n" - "0: l %[prev],%[address]\n" - "1: nr %[prev],%[mask]\n" - " xilf %[mask],0xffffffff\n" - " or %[new],%[prev]\n" - " or %[prev],%[tmp]\n" - "2: lr %[tmp],%[prev]\n" - "3: cs %[prev],%[new],%[address]\n" - "4: jnl 5f\n" - " xr %[tmp],%[prev]\n" - " xr %[new],%[tmp]\n" - " nr %[tmp],%[mask]\n" - " jnz 5f\n" - " brct %[count],2b\n" - "5: sacf 768\n" - " spka %[default_key]\n" - EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev]) - 
EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev]) - EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev]) - EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev]) - : [rc] "+&d" (rc), - [prev] "=&d" (prev), - [address] "+Q" (*(int *)address), - [tmp] "+&d" (_old), - [new] "+&d" (_new), - [mask] "+&d" (mask), - [count] "=a" (count) - : [key] "%[count]" (key << 4), - [default_key] "J" (PAGE_DEFAULT_KEY), - [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS) - : "memory", "cc"); - *(unsigned short *)uval = prev >> shift; - if (!count) - rc = -EAGAIN; - return rc; - } - case 4: { - unsigned int prev = old; - - asm volatile( - " spka 0(%[key])\n" - " sacf 256\n" - "0: cs %[prev],%[new],%[address]\n" - "1: sacf 768\n" - " spka %[default_key]\n" - EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev]) - EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev]) - : [rc] "+&d" (rc), - [prev] "+&d" (prev), - [address] "+Q" (*(int *)address) - : [new] "d" ((unsigned int)new), - [key] "a" (key << 4), - [default_key] "J" (PAGE_DEFAULT_KEY) - : "memory", "cc"); - *(unsigned int *)uval = prev; - return rc; - } - case 8: { - unsigned long prev = old; - - asm volatile( - " spka 0(%[key])\n" - " sacf 256\n" - "0: csg %[prev],%[new],%[address]\n" - "1: sacf 768\n" - " spka %[default_key]\n" - EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev]) - EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev]) - : [rc] "+&d" (rc), - [prev] "+&d" (prev), - [address] "+QS" (*(long *)address) - : [new] "d" ((unsigned long)new), - [key] "a" (key << 4), - [default_key] "J" (PAGE_DEFAULT_KEY) - : "memory", "cc"); - *(unsigned long *)uval = prev; - return rc; - } - case 16: { - __uint128_t prev = old; - - asm volatile( - " spka 0(%[key])\n" - " sacf 256\n" - "0: cdsg %[prev],%[new],%[address]\n" - "1: sacf 768\n" - " spka %[default_key]\n" - EX_TABLE_UA_LOAD_REGPAIR(0b, 1b, %[rc], %[prev]) - EX_TABLE_UA_LOAD_REGPAIR(1b, 1b, %[rc], %[prev]) - : [rc] "+&d" (rc), - [prev] "+&d" (prev), - [address] "+QS" (*(__int128_t *)address) - : [new] "d" (new), - [key] "a" (key << 4), - [default_key] "J" (PAGE_DEFAULT_KEY) - : "memory", "cc"); - *(__uint128_t *)uval = prev; - return rc; - } + case 1: return __cmpxchg_user_key1(address, uval, old, new, key); + case 2: return __cmpxchg_user_key2(address, uval, old, new, key); + case 4: return __cmpxchg_user_key4(address, uval, old, new, key); + case 8: return __cmpxchg_user_key8(address, uval, old, new, key); + case 16: return __cmpxchg_user_key16(address, uval, old, new, key); + default: __cmpxchg_user_key_called_with_bad_pointer(); } - __cmpxchg_user_key_called_with_bad_pointer(); - return rc; + return 0; } /** @@ -593,8 +528,8 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, BUILD_BUG_ON(sizeof(*(__ptr)) != sizeof(*(__uval))); \ might_fault(); \ __chk_user_ptr(__ptr); \ - __cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval), \ - (old), (new), (key), sizeof(*(__ptr))); \ + _cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval), \ + (old), (new), (key), sizeof(*(__ptr))); \ }) #endif /* __S390_UACCESS_H */ diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index 4260bc5ce7f8..921c3fb3586b 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -8,7 +8,8 @@ #define _ASM_S390_UNISTD_H_ #include <uapi/asm/unistd.h> -#include <asm/unistd_nr.h> + +#define NR_syscalls (__NR_syscalls) #define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_OLD_READDIR @@ -27,14 +28,8 @@ #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK -# 
ifdef CONFIG_COMPAT -# define __ARCH_WANT_COMPAT_STAT -# define __ARCH_WANT_SYS_TIME32 -# define __ARCH_WANT_SYS_UTIME32 -# endif #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE -#define __ARCH_WANT_SYS_CLONE3 #endif /* _ASM_S390_UNISTD_H_ */ diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h index 0e7bd3873907..8018549a1ad2 100644 --- a/arch/s390/include/asm/uv.h +++ b/arch/s390/include/asm/uv.h @@ -2,7 +2,7 @@ /* * Ultravisor Interfaces * - * Copyright IBM Corp. 2019, 2022 + * Copyright IBM Corp. 2019, 2024 * * Author(s): * Vasily Gorbik <gor@linux.ibm.com> @@ -16,7 +16,7 @@ #include <linux/bug.h> #include <linux/sched.h> #include <asm/page.h> -#include <asm/gmap.h> +#include <asm/asm.h> #define UVC_CC_OK 0 #define UVC_CC_ERROR 1 @@ -28,9 +28,11 @@ #define UVC_RC_INV_STATE 0x0003 #define UVC_RC_INV_LEN 0x0005 #define UVC_RC_NO_RESUME 0x0007 +#define UVC_RC_MORE_DATA 0x0100 #define UVC_RC_NEED_DESTROY 0x8000 #define UVC_CMD_QUI 0x0001 +#define UVC_CMD_QUERY_KEYS 0x0002 #define UVC_CMD_INIT_UV 0x000f #define UVC_CMD_CREATE_SEC_CONF 0x0100 #define UVC_CMD_DESTROY_SEC_CONF 0x0101 @@ -61,6 +63,7 @@ #define UVC_CMD_ADD_SECRET 0x1031 #define UVC_CMD_LIST_SECRETS 0x1033 #define UVC_CMD_LOCK_SECRETS 0x1034 +#define UVC_CMD_RETR_SECRET 0x1035 /* Bits in installed uv calls */ enum uv_cmds_inst { @@ -94,6 +97,8 @@ enum uv_cmds_inst { BIT_UVC_CMD_ADD_SECRET = 29, BIT_UVC_CMD_LIST_SECRETS = 30, BIT_UVC_CMD_LOCK_SECRETS = 31, + BIT_UVC_CMD_RETR_SECRET = 33, + BIT_UVC_CMD_QUERY_KEYS = 34, }; enum uv_feat_ind { @@ -140,11 +145,27 @@ struct uv_cb_qui { u64 reservedf0; /* 0x00f0 */ u64 supp_add_secret_req_ver; /* 0x00f8 */ u64 supp_add_secret_pcf; /* 0x0100 */ - u64 supp_secret_types; /* 0x0180 */ - u16 max_secrets; /* 0x0110 */ - u8 reserved112[0x120 - 0x112]; /* 0x0112 */ + u64 supp_secret_types; /* 0x0108 */ + u16 max_assoc_secrets; /* 0x0110 */ + u16 max_retr_secrets; /* 0x0112 */ + u8 reserved114[0x120 - 0x114]; /* 0x0114 */ } __packed __aligned(8); +struct uv_key_hash { + u64 dword[4]; +} __packed __aligned(8); + +#define UVC_QUERY_KEYS_IDX_HK 0 +#define UVC_QUERY_KEYS_IDX_BACK_HK 1 + +/* Query Ultravisor Keys */ +struct uv_cb_query_keys { + struct uv_cb_header header; /* 0x0000 */ + u64 reserved08[3]; /* 0x0008 */ + struct uv_key_hash key_hashes[15]; /* 0x0020 */ +} __packed __aligned(8); +static_assert(sizeof(struct uv_cb_query_keys) == 0x200); + /* Initialize Ultravisor */ struct uv_cb_init { struct uv_cb_header header; @@ -317,7 +338,6 @@ struct uv_cb_dump_complete { * A common UV call struct for pv guests that contains a single address * Examples: * Add Secret - * List Secrets */ struct uv_cb_guest_addr { struct uv_cb_header header; @@ -326,18 +346,102 @@ struct uv_cb_guest_addr { u64 reserved28[4]; } __packed __aligned(8); +#define UVC_RC_RETR_SECR_BUF_SMALL 0x0109 +#define UVC_RC_RETR_SECR_STORE_EMPTY 0x010f +#define UVC_RC_RETR_SECR_INV_IDX 0x0110 +#define UVC_RC_RETR_SECR_INV_SECRET 0x0111 + +struct uv_cb_retr_secr { + struct uv_cb_header header; + u64 reserved08[2]; + u16 secret_idx; + u16 reserved1a; + u32 buf_size; + u64 buf_addr; + u64 reserved28[4]; +} __packed __aligned(8); + +struct uv_cb_list_secrets { + struct uv_cb_header header; + u64 reserved08[2]; + u8 reserved18[6]; + u16 start_idx; + u64 list_addr; + u64 reserved28[4]; +} __packed __aligned(8); + +enum uv_secret_types { + UV_SECRET_INVAL = 0x0, + UV_SECRET_NULL = 0x1, + UV_SECRET_ASSOCIATION = 0x2, + UV_SECRET_PLAIN = 0x3, + UV_SECRET_AES_128 = 0x4, + UV_SECRET_AES_192 
= 0x5,
+	UV_SECRET_AES_256 = 0x6,
+	UV_SECRET_AES_XTS_128 = 0x7,
+	UV_SECRET_AES_XTS_256 = 0x8,
+	UV_SECRET_HMAC_SHA_256 = 0x9,
+	UV_SECRET_HMAC_SHA_512 = 0xa,
+	/* 0x0b - 0x10 reserved */
+	UV_SECRET_ECDSA_P256 = 0x11,
+	UV_SECRET_ECDSA_P384 = 0x12,
+	UV_SECRET_ECDSA_P521 = 0x13,
+	UV_SECRET_ECDSA_ED25519 = 0x14,
+	UV_SECRET_ECDSA_ED448 = 0x15,
+};
+
+/**
+ * uv_secret_list_item_hdr - UV secret metadata.
+ * @index: Index of the secret in the secret list.
+ * @type: Type of the secret. See `enum uv_secret_types`.
+ * @length: Length of the stored secret.
+ */
+struct uv_secret_list_item_hdr {
+	u16 index;
+	u16 type;
+	u32 length;
+} __packed __aligned(8);
+
+#define UV_SECRET_ID_LEN 32
+/**
+ * uv_secret_list_item - UV secret entry.
+ * @hdr: The metadata of this secret.
+ * @id: The ID of this secret, not the secret itself.
+ */
+struct uv_secret_list_item {
+	struct uv_secret_list_item_hdr hdr;
+	u64 reserverd08;
+	u8 id[UV_SECRET_ID_LEN];
+} __packed __aligned(8);
+
+/**
+ * uv_secret_list - UV secret-metadata list.
+ * @num_secr_stored: Number of secrets stored in this list.
+ * @total_num_secrets: Number of secrets stored in the UV for this guest.
+ * @next_secret_idx: Positive index of the next secret, if more secrets are
+ *		     available; zero otherwise.
+ * @secrets: Up to 85 UV-secret metadata entries.
+ */
+struct uv_secret_list {
+	u16 num_secr_stored;
+	u16 total_num_secrets;
+	u16 next_secret_idx;
+	u16 reserved_06;
+	u64 reserved_08;
+	struct uv_secret_list_item secrets[85];
+} __packed __aligned(8);
+static_assert(sizeof(struct uv_secret_list) == PAGE_SIZE);
+
 static inline int __uv_call(unsigned long r1, unsigned long r2)
 {
 	int cc;
 
 	asm volatile(
-		"	.insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
-		: [cc] "=d" (cc)
+		"	.insn rrf,0xb9a40000,%[r1],%[r2],0,0\n"
+		CC_IPM(cc)
+		: CC_OUT(cc, cc)
 		: [r1] "a" (r1), [r2] "a" (r2)
-		: "memory", "cc");
-	return cc;
+		: CC_CLOBBER_LIST("memory"));
+	return CC_TRANSFORM(cc);
 }
 
 static inline int uv_call(unsigned long r1, unsigned long r2)
@@ -382,6 +486,48 @@ static inline int uv_cmd_nodata(u64 handle, u16 cmd, u16 *rc, u16 *rrc)
 	return cc ? -EINVAL : 0;
 }
 
+/**
+ * uv_list_secrets() - Do a List Secrets UVC.
+ *
+ * @buf: Buffer to write list into; size of one page.
+ * @start_idx: The smallest index that should be included in the list.
+ *	       For the first invocation use 0.
+ * @rc: Pointer to store the return code or NULL.
+ * @rrc: Pointer to store the return reason code or NULL.
+ *
+ * This function calls the List Secrets UVC. The result is written into `buf`,
+ * which needs to be at least one page of writable memory. `buf` is laid out
+ * as a %struct uv_secret_list: the list header fields followed by multiple
+ * %struct uv_secret_list_item entries.
+ *
+ * For `start_idx` use _0_ for the first call. If more secrets are available
+ * than fit into the page, `rc` is `UVC_RC_MORE_DATA`. In this case use
+ * `uv_secret_list.next_secret_idx` for `start_idx`.
+ *
+ * Context: might sleep.
+ *
+ * Return: The UVC condition code.
+ */ +static inline int uv_list_secrets(struct uv_secret_list *buf, u16 start_idx, + u16 *rc, u16 *rrc) +{ + struct uv_cb_list_secrets uvcb = { + .header.len = sizeof(uvcb), + .header.cmd = UVC_CMD_LIST_SECRETS, + .start_idx = start_idx, + .list_addr = (u64)buf, + }; + int cc = uv_call_sched(0, (u64)&uvcb); + + if (rc) + *rc = uvcb.header.rc; + if (rrc) + *rrc = uvcb.header.rrc; + + return cc; +} + struct uv_info { unsigned long inst_calls_list[4]; unsigned long uv_base_stor_len; @@ -402,7 +548,8 @@ struct uv_info { unsigned long supp_add_secret_req_ver; unsigned long supp_add_secret_pcf; unsigned long supp_secret_types; - unsigned short max_secrets; + unsigned short max_assoc_secrets; + unsigned short max_retr_secrets; }; extern struct uv_info uv_info; @@ -414,7 +561,6 @@ static inline bool uv_has_feature(u8 feature_bit) return test_bit_inv(feature_bit, &uv_info.uv_feature_indications); } -#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST extern int prot_virt_guest; static inline int is_prot_virt_guest(void) @@ -442,7 +588,10 @@ static inline int share(unsigned long addr, u16 cmd) if (!uv_call(0, (u64)&uvcb)) return 0; - return -EINVAL; + pr_err("%s UVC failed (rc: 0x%x, rrc: 0x%x), possible hypervisor bug.\n", + uvcb.header.cmd == UVC_CMD_SET_SHARED_ACCESS ? "Share" : "Unshare", + uvcb.header.rc, uvcb.header.rrc); + panic("System security cannot be guaranteed unless the system panics now.\n"); } /* @@ -466,13 +615,11 @@ static inline int uv_remove_shared(unsigned long addr) return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS); } -#else -#define is_prot_virt_guest() 0 -static inline int uv_set_shared(unsigned long addr) { return 0; } -static inline int uv_remove_shared(unsigned long addr) { return 0; } -#endif +int uv_find_secret(const u8 secret_id[UV_SECRET_ID_LEN], + struct uv_secret_list *list, + struct uv_secret_list_item_hdr *secret); +int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size); -#if IS_ENABLED(CONFIG_KVM) extern int prot_virt_host; static inline int is_prot_virt_host(void) @@ -481,37 +628,13 @@ static inline int is_prot_virt_host(void) } int uv_pin_shared(unsigned long paddr); -int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb); -int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr); -int uv_destroy_owned_page(unsigned long paddr); +int uv_destroy_folio(struct folio *folio); +int uv_destroy_pte(pte_t pte); +int uv_convert_from_secure_pte(pte_t pte); +int make_hva_secure(struct mm_struct *mm, unsigned long hva, struct uv_cb_header *uvcb); int uv_convert_from_secure(unsigned long paddr); -int uv_convert_owned_from_secure(unsigned long paddr); -int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr); +int uv_convert_from_secure_folio(struct folio *folio); void setup_uv(void); -#else -#define is_prot_virt_host() 0 -static inline void setup_uv(void) {} - -static inline int uv_pin_shared(unsigned long paddr) -{ - return 0; -} - -static inline int uv_destroy_owned_page(unsigned long paddr) -{ - return 0; -} - -static inline int uv_convert_from_secure(unsigned long paddr) -{ - return 0; -} - -static inline int uv_convert_owned_from_secure(unsigned long paddr) -{ - return 0; -} -#endif #endif /* _ASM_S390_UV_H */ diff --git a/arch/s390/include/asm/vdso-symbols.h b/arch/s390/include/asm/vdso-symbols.h new file mode 100644 index 000000000000..e3561e67c4e3 --- /dev/null +++ b/arch/s390/include/asm/vdso-symbols.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __S390_VDSO_SYMBOLS_H__ +#define __S390_VDSO_SYMBOLS_H__ + 
+#include <generated/vdso-offsets.h> + +#define VDSO_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso_offset_##name)) + +#endif /* __S390_VDSO_SYMBOLS_H__ */ diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h index 53165aa7813a..8e2fffa0ca68 100644 --- a/arch/s390/include/asm/vdso.h +++ b/arch/s390/include/asm/vdso.h @@ -4,30 +4,13 @@ #include <vdso/datapage.h> -#ifndef __ASSEMBLY__ - -#include <generated/vdso64-offsets.h> -#ifdef CONFIG_COMPAT -#include <generated/vdso32-offsets.h> -#endif - -#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name)) -#ifdef CONFIG_COMPAT -#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name)) -#else -#define VDSO32_SYMBOL(tsk, name) (-1UL) -#endif - -extern struct vdso_data *vdso_data; +#ifndef __ASSEMBLER__ int vdso_getcpu_init(void); -#endif /* __ASSEMBLY__ */ - -/* Default link address for the vDSO */ -#define VDSO_LBASE 0 +#endif /* __ASSEMBLER__ */ -#define __VVAR_PAGES 2 +#define __VDSO_PAGES 4 #define VDSO_VERSION_STRING LINUX_2.6.29 diff --git a/arch/s390/include/asm/vdso/data.h b/arch/s390/include/asm/vdso/data.h deleted file mode 100644 index 0e2b40ef69b0..000000000000 --- a/arch/s390/include/asm/vdso/data.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __S390_ASM_VDSO_DATA_H -#define __S390_ASM_VDSO_DATA_H - -#include <linux/types.h> - -struct arch_vdso_data { - __s64 tod_steering_delta; - __u64 tod_steering_end; -}; - -#endif /* __S390_ASM_VDSO_DATA_H */ diff --git a/arch/s390/include/asm/vdso/getrandom.h b/arch/s390/include/asm/vdso/getrandom.h new file mode 100644 index 000000000000..6741a27199f8 --- /dev/null +++ b/arch/s390/include/asm/vdso/getrandom.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_VDSO_GETRANDOM_H +#define __ASM_VDSO_GETRANDOM_H + +#ifndef __ASSEMBLER__ + +#include <vdso/datapage.h> +#include <asm/vdso/vsyscall.h> +#include <asm/syscall.h> +#include <asm/unistd.h> +#include <asm/page.h> + +/** + * getrandom_syscall - Invoke the getrandom() syscall. + * @buffer: Destination buffer to fill with random bytes. + * @len: Size of @buffer in bytes. + * @flags: Zero or more GRND_* flags. + * Returns: The number of random bytes written to @buffer, or a negative value indicating an error. + */ +static __always_inline ssize_t getrandom_syscall(void *buffer, size_t len, unsigned int flags) +{ + return syscall3(__NR_getrandom, (long)buffer, (long)len, (long)flags); +} + +#endif /* !__ASSEMBLER__ */ + +#endif /* __ASM_VDSO_GETRANDOM_H */ diff --git a/arch/s390/include/asm/vdso/gettimeofday.h b/arch/s390/include/asm/vdso/gettimeofday.h index 7937765ccfa5..c31ac5f61c83 100644 --- a/arch/s390/include/asm/vdso/gettimeofday.h +++ b/arch/s390/include/asm/vdso/gettimeofday.h @@ -14,20 +14,9 @@ #include <linux/compiler.h> -static __always_inline const struct vdso_data *__arch_get_vdso_data(void) +static inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_time_data *vd) { - return _vdso_data; -} - -static inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_data *vd) -{ - u64 adj, now; - - now = get_tod_clock(); - adj = vd->arch_data.tod_steering_end - now; - if (unlikely((s64) adj > 0)) - now += (vd->arch_data.tod_steering_delta < 0) ? 
(adj >> 15) : -(adj >> 15); - return now; + return get_tod_clock() - vd->arch_data.tod_delta; } static __always_inline @@ -49,12 +38,4 @@ long clock_getres_fallback(clockid_t clkid, struct __kernel_timespec *ts) return syscall2(__NR_clock_getres, (long)clkid, (long)ts); } -#ifdef CONFIG_TIME_NS -static __always_inline -const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd) -{ - return _timens_data; -} -#endif - #endif diff --git a/arch/s390/include/asm/vdso/time_data.h b/arch/s390/include/asm/vdso/time_data.h new file mode 100644 index 000000000000..25c4e0d9f596 --- /dev/null +++ b/arch/s390/include/asm/vdso/time_data.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __S390_ASM_VDSO_TIME_DATA_H +#define __S390_ASM_VDSO_TIME_DATA_H + +#include <linux/types.h> + +struct arch_vdso_time_data { + __s64 tod_delta; +}; + +#endif /* __S390_ASM_VDSO_TIME_DATA_H */ diff --git a/arch/s390/include/asm/vdso/vsyscall.h b/arch/s390/include/asm/vdso/vsyscall.h index 6c67c08cefdd..b00acec8ddbc 100644 --- a/arch/s390/include/asm/vdso/vsyscall.h +++ b/arch/s390/include/asm/vdso/vsyscall.h @@ -2,25 +2,15 @@ #ifndef __ASM_VDSO_VSYSCALL_H #define __ASM_VDSO_VSYSCALL_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/hrtimer.h> -#include <linux/timekeeper_internal.h> #include <vdso/datapage.h> #include <asm/vdso.h> -/* - * Update the vDSO data page to keep in sync with kernel timekeeping. - */ - -static __always_inline struct vdso_data *__s390_get_k_vdso_data(void) -{ - return vdso_data; -} -#define __arch_get_k_vdso_data __s390_get_k_vdso_data /* The asm-generic header needs to be included after the definitions above */ #include <asm-generic/vdso/vsyscall.h> -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_VDSO_VSYSCALL_H */ diff --git a/arch/s390/include/asm/vtime.h b/arch/s390/include/asm/vtime.h index 561c91c1a87c..9d25fb35a042 100644 --- a/arch/s390/include/asm/vtime.h +++ b/arch/s390/include/asm/vtime.h @@ -4,16 +4,20 @@ static inline void update_timer_sys(void) { - S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer; - S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.sys_enter_timer; - S390_lowcore.last_update_timer = S390_lowcore.sys_enter_timer; + struct lowcore *lc = get_lowcore(); + + lc->system_timer += lc->last_update_timer - lc->exit_timer; + lc->user_timer += lc->exit_timer - lc->sys_enter_timer; + lc->last_update_timer = lc->sys_enter_timer; } static inline void update_timer_mcck(void) { - S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer; - S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.mcck_enter_timer; - S390_lowcore.last_update_timer = S390_lowcore.mcck_enter_timer; + struct lowcore *lc = get_lowcore(); + + lc->system_timer += lc->last_update_timer - lc->exit_timer; + lc->user_timer += lc->exit_timer - lc->mcck_enter_timer; + lc->last_update_timer = lc->mcck_enter_timer; } #endif /* _S390_VTIME_H */ diff --git a/arch/s390/include/asm/word-at-a-time.h b/arch/s390/include/asm/word-at-a-time.h index 203acd6e431b..eaa19dee7699 100644 --- a/arch/s390/include/asm/word-at-a-time.h +++ b/arch/s390/include/asm/word-at-a-time.h @@ -52,7 +52,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr) { unsigned long data; - asm volatile( + asm_inline volatile( "0: lg %[data],0(%[addr])\n" "1: nopr %%r7\n" EX_TABLE_ZEROPAD(0b, 1b, %[data], %[addr]) diff --git a/arch/s390/include/uapi/asm/bitsperlong.h 
b/arch/s390/include/uapi/asm/bitsperlong.h index cceaf47b021a..7af27a985f25 100644 --- a/arch/s390/include/uapi/asm/bitsperlong.h +++ b/arch/s390/include/uapi/asm/bitsperlong.h @@ -2,11 +2,7 @@ #ifndef __ASM_S390_BITSPERLONG_H #define __ASM_S390_BITSPERLONG_H -#ifndef __s390x__ -#define __BITS_PER_LONG 32 -#else #define __BITS_PER_LONG 64 -#endif #include <asm-generic/bitsperlong.h> diff --git a/arch/s390/include/uapi/asm/dasd.h b/arch/s390/include/uapi/asm/dasd.h index b11d98800458..7c364b33c84d 100644 --- a/arch/s390/include/uapi/asm/dasd.h +++ b/arch/s390/include/uapi/asm/dasd.h @@ -294,7 +294,7 @@ struct dasd_snid_ioctl_data { /******************************************************************************** * SECTION: Definition of IOCTLs * - * Here ist how the ioctl-nr should be used: + * Here is how the ioctl-nr should be used: * 0 - 31 DASD driver itself * 32 - 239 still open * 240 - 255 reserved for EMC diff --git a/arch/s390/include/uapi/asm/diag.h b/arch/s390/include/uapi/asm/diag.h new file mode 100644 index 000000000000..b7e6ccb4ff6e --- /dev/null +++ b/arch/s390/include/uapi/asm/diag.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Diag ioctls and their associated structure definitions. + * + * Copyright IBM Corp. 2024 + */ + +#ifndef __S390_UAPI_ASM_DIAG_H +#define __S390_UAPI_ASM_DIAG_H + +#include <linux/types.h> + +#define DIAG_MAGIC_STR 'D' + +struct diag324_pib { + __u64 address; + __u64 sequence; +}; + +struct diag310_memtop { + __u64 address; + __u64 nesting_lvl; +}; + +/* Diag ioctl definitions */ +#define DIAG324_GET_PIBBUF _IOWR(DIAG_MAGIC_STR, 0x77, struct diag324_pib) +#define DIAG324_GET_PIBLEN _IOR(DIAG_MAGIC_STR, 0x78, size_t) +#define DIAG310_GET_STRIDE _IOR(DIAG_MAGIC_STR, 0x79, size_t) +#define DIAG310_GET_MEMTOPLEN _IOWR(DIAG_MAGIC_STR, 0x7a, size_t) +#define DIAG310_GET_MEMTOPBUF _IOWR(DIAG_MAGIC_STR, 0x7b, struct diag310_memtop) + +#endif /* __S390_UAPI_ASM_DIAG_H */ diff --git a/arch/s390/include/uapi/asm/ipcbuf.h b/arch/s390/include/uapi/asm/ipcbuf.h index 1030cd186899..9277e76d6d72 100644 --- a/arch/s390/include/uapi/asm/ipcbuf.h +++ b/arch/s390/include/uapi/asm/ipcbuf.h @@ -24,9 +24,6 @@ struct ipc64_perm __kernel_mode_t mode; unsigned short __pad1; unsigned short seq; -#ifndef __s390x__ - unsigned short __pad2; -#endif /* !
__s390x__ */ unsigned long __unused1; unsigned long __unused2; }; diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 05eaf6db3ad4..60345dd2cba2 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -469,7 +469,8 @@ struct kvm_s390_vm_cpu_subfunc { __u8 kdsa[16]; /* with MSA9 */ __u8 sortl[32]; /* with STFLE.150 */ __u8 dfltcc[32]; /* with STFLE.151 */ - __u8 reserved[1728]; + __u8 pfcr[16]; /* with STFLE.201 */ + __u8 reserved[1712]; }; #define KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST 6 diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h index 5ad76471e73f..ca42e941675d 100644 --- a/arch/s390/include/uapi/asm/pkey.h +++ b/arch/s390/include/uapi/asm/pkey.h @@ -41,23 +41,29 @@ #define PKEY_KEYTYPE_ECC_P521 7 #define PKEY_KEYTYPE_ECC_ED25519 8 #define PKEY_KEYTYPE_ECC_ED448 9 +#define PKEY_KEYTYPE_AES_XTS_128 10 +#define PKEY_KEYTYPE_AES_XTS_256 11 +#define PKEY_KEYTYPE_HMAC_512 12 +#define PKEY_KEYTYPE_HMAC_1024 13 /* the newer ioctls use a pkey_key_type enum for type information */ enum pkey_key_type { - PKEY_TYPE_CCA_DATA = (__u32) 1, - PKEY_TYPE_CCA_CIPHER = (__u32) 2, - PKEY_TYPE_EP11 = (__u32) 3, - PKEY_TYPE_CCA_ECC = (__u32) 0x1f, - PKEY_TYPE_EP11_AES = (__u32) 6, - PKEY_TYPE_EP11_ECC = (__u32) 7, + PKEY_TYPE_CCA_DATA = (__u32)1, + PKEY_TYPE_CCA_CIPHER = (__u32)2, + PKEY_TYPE_EP11 = (__u32)3, + PKEY_TYPE_CCA_ECC = (__u32)0x1f, + PKEY_TYPE_EP11_AES = (__u32)6, + PKEY_TYPE_EP11_ECC = (__u32)7, + PKEY_TYPE_PROTKEY = (__u32)8, + PKEY_TYPE_UVSECRET = (__u32)9, }; /* the newer ioctls use a pkey_key_size enum for key size information */ enum pkey_key_size { - PKEY_SIZE_AES_128 = (__u32) 128, - PKEY_SIZE_AES_192 = (__u32) 192, - PKEY_SIZE_AES_256 = (__u32) 256, - PKEY_SIZE_UNKNOWN = (__u32) 0xFFFFFFFF, + PKEY_SIZE_AES_128 = (__u32)128, + PKEY_SIZE_AES_192 = (__u32)192, + PKEY_SIZE_AES_256 = (__u32)256, + PKEY_SIZE_UNKNOWN = (__u32)0xFFFFFFFF, }; /* some of the newer ioctls use these flags */ @@ -120,6 +126,7 @@ struct pkey_genseck { __u32 keytype; /* in: key type to generate */ struct pkey_seckey seckey; /* out: the secure key blob */ }; + #define PKEY_GENSECK _IOWR(PKEY_IOCTL_MAGIC, 0x01, struct pkey_genseck) /* @@ -132,6 +139,7 @@ struct pkey_clr2seck { struct pkey_clrkey clrkey; /* in: the clear key value */ struct pkey_seckey seckey; /* out: the secure key blob */ }; + #define PKEY_CLR2SECK _IOWR(PKEY_IOCTL_MAGIC, 0x02, struct pkey_clr2seck) /* @@ -143,6 +151,7 @@ struct pkey_sec2protk { struct pkey_seckey seckey; /* in: the secure key blob */ struct pkey_protkey protkey; /* out: the protected key */ }; + #define PKEY_SEC2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x03, struct pkey_sec2protk) /* @@ -153,6 +162,7 @@ struct pkey_clr2protk { struct pkey_clrkey clrkey; /* in: the clear key value */ struct pkey_protkey protkey; /* out: the protected key */ }; + #define PKEY_CLR2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x04, struct pkey_clr2protk) /* @@ -164,6 +174,7 @@ struct pkey_findcard { __u16 cardnr; /* out: card number */ __u16 domain; /* out: domain number */ }; + #define PKEY_FINDCARD _IOWR(PKEY_IOCTL_MAGIC, 0x05, struct pkey_findcard) /* @@ -173,6 +184,7 @@ struct pkey_skey2pkey { struct pkey_seckey seckey; /* in: the secure key blob */ struct pkey_protkey protkey; /* out: the protected key */ }; + #define PKEY_SKEY2PKEY _IOWR(PKEY_IOCTL_MAGIC, 0x06, struct pkey_skey2pkey) /* @@ -190,6 +202,7 @@ struct pkey_verifykey { __u16 keysize; /* out: key size in bits */ __u32 attributes; /* out: attribute bits */ }; + 
#define PKEY_VERIFYKEY _IOWR(PKEY_IOCTL_MAGIC, 0x07, struct pkey_verifykey) #define PKEY_VERIFY_ATTR_AES 0x00000001 /* key is an AES key */ #define PKEY_VERIFY_ATTR_OLD_MKVP 0x00000100 /* key has old MKVP value */ @@ -221,6 +234,7 @@ struct pkey_kblob2pkey { __u32 keylen; /* in: the key blob length */ struct pkey_protkey protkey; /* out: the protected key */ }; + #define PKEY_KBLOB2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x0A, struct pkey_kblob2pkey) /* @@ -253,6 +267,7 @@ struct pkey_genseck2 { __u32 keylen; /* in: available key blob buffer size */ /* out: actual key blob size */ }; + #define PKEY_GENSECK2 _IOWR(PKEY_IOCTL_MAGIC, 0x11, struct pkey_genseck2) /* @@ -287,6 +302,7 @@ struct pkey_clr2seck2 { __u32 keylen; /* in: available key blob buffer size */ /* out: actual key blob size */ }; + #define PKEY_CLR2SECK2 _IOWR(PKEY_IOCTL_MAGIC, 0x12, struct pkey_clr2seck2) /* @@ -324,6 +340,7 @@ struct pkey_verifykey2 { enum pkey_key_size size; /* out: the key size */ __u32 flags; /* out: additional key info flags */ }; + #define PKEY_VERIFYKEY2 _IOWR(PKEY_IOCTL_MAGIC, 0x17, struct pkey_verifykey2) /* @@ -346,6 +363,7 @@ struct pkey_kblob2pkey2 { __u32 apqn_entries; /* in: # of apqn target list entries */ struct pkey_protkey protkey; /* out: the protected key */ }; + #define PKEY_KBLOB2PROTK2 _IOWR(PKEY_IOCTL_MAGIC, 0x1A, struct pkey_kblob2pkey2) /* @@ -382,6 +400,7 @@ struct pkey_apqns4key { __u32 apqn_entries; /* in: max # of apqn entries in the list */ /* out: # apqns stored into the list */ }; + #define PKEY_APQNS4K _IOWR(PKEY_IOCTL_MAGIC, 0x1B, struct pkey_apqns4key) /* @@ -421,6 +440,7 @@ struct pkey_apqns4keytype { __u32 apqn_entries; /* in: max # of apqn entries in the list */ /* out: # apqns stored into the list */ }; + #define PKEY_APQNS4KT _IOWR(PKEY_IOCTL_MAGIC, 0x1C, struct pkey_apqns4keytype) /* @@ -447,6 +467,7 @@ struct pkey_kblob2pkey3 { __u32 pkeylen; /* in/out: size of pkey buffer/actual len of pkey */ __u8 __user *pkey; /* in: pkey blob buffer space ptr */ }; + #define PKEY_KBLOB2PROTK3 _IOWR(PKEY_IOCTL_MAGIC, 0x1D, struct pkey_kblob2pkey3) #endif /* _UAPI_PKEY_H */ diff --git a/arch/s390/include/uapi/asm/posix_types.h b/arch/s390/include/uapi/asm/posix_types.h index 1913613e71b6..ad5ab940d192 100644 --- a/arch/s390/include/uapi/asm/posix_types.h +++ b/arch/s390/include/uapi/asm/posix_types.h @@ -26,17 +26,6 @@ typedef unsigned short __kernel_old_gid_t; #define __kernel_old_uid_t __kernel_old_uid_t #endif -#ifndef __s390x__ - -typedef unsigned long __kernel_ino_t; -typedef unsigned short __kernel_mode_t; -typedef unsigned short __kernel_ipc_pid_t; -typedef unsigned short __kernel_uid_t; -typedef unsigned short __kernel_gid_t; -typedef int __kernel_ptrdiff_t; - -#else /* __s390x__ */ - typedef unsigned int __kernel_ino_t; typedef unsigned int __kernel_mode_t; typedef int __kernel_ipc_pid_t; @@ -45,8 +34,6 @@ typedef unsigned int __kernel_gid_t; typedef long __kernel_ptrdiff_t; typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ -#endif /* __s390x__ */ - #define __kernel_ino_t __kernel_ino_t #define __kernel_mode_t __kernel_mode_t #define __kernel_ipc_pid_t __kernel_ipc_pid_t diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h index bb0826024bb9..ea29ba470e5a 100644 --- a/arch/s390/include/uapi/asm/ptrace.h +++ b/arch/s390/include/uapi/asm/ptrace.h @@ -14,94 +14,6 @@ * Offsets in the user_regs_struct. 
They are used for the ptrace * system call and in entry.S */ -#ifndef __s390x__ - -#define PT_PSWMASK 0x00 -#define PT_PSWADDR 0x04 -#define PT_GPR0 0x08 -#define PT_GPR1 0x0C -#define PT_GPR2 0x10 -#define PT_GPR3 0x14 -#define PT_GPR4 0x18 -#define PT_GPR5 0x1C -#define PT_GPR6 0x20 -#define PT_GPR7 0x24 -#define PT_GPR8 0x28 -#define PT_GPR9 0x2C -#define PT_GPR10 0x30 -#define PT_GPR11 0x34 -#define PT_GPR12 0x38 -#define PT_GPR13 0x3C -#define PT_GPR14 0x40 -#define PT_GPR15 0x44 -#define PT_ACR0 0x48 -#define PT_ACR1 0x4C -#define PT_ACR2 0x50 -#define PT_ACR3 0x54 -#define PT_ACR4 0x58 -#define PT_ACR5 0x5C -#define PT_ACR6 0x60 -#define PT_ACR7 0x64 -#define PT_ACR8 0x68 -#define PT_ACR9 0x6C -#define PT_ACR10 0x70 -#define PT_ACR11 0x74 -#define PT_ACR12 0x78 -#define PT_ACR13 0x7C -#define PT_ACR14 0x80 -#define PT_ACR15 0x84 -#define PT_ORIGGPR2 0x88 -#define PT_FPC 0x90 -/* - * A nasty fact of life that the ptrace api - * only supports passing of longs. - */ -#define PT_FPR0_HI 0x98 -#define PT_FPR0_LO 0x9C -#define PT_FPR1_HI 0xA0 -#define PT_FPR1_LO 0xA4 -#define PT_FPR2_HI 0xA8 -#define PT_FPR2_LO 0xAC -#define PT_FPR3_HI 0xB0 -#define PT_FPR3_LO 0xB4 -#define PT_FPR4_HI 0xB8 -#define PT_FPR4_LO 0xBC -#define PT_FPR5_HI 0xC0 -#define PT_FPR5_LO 0xC4 -#define PT_FPR6_HI 0xC8 -#define PT_FPR6_LO 0xCC -#define PT_FPR7_HI 0xD0 -#define PT_FPR7_LO 0xD4 -#define PT_FPR8_HI 0xD8 -#define PT_FPR8_LO 0XDC -#define PT_FPR9_HI 0xE0 -#define PT_FPR9_LO 0xE4 -#define PT_FPR10_HI 0xE8 -#define PT_FPR10_LO 0xEC -#define PT_FPR11_HI 0xF0 -#define PT_FPR11_LO 0xF4 -#define PT_FPR12_HI 0xF8 -#define PT_FPR12_LO 0xFC -#define PT_FPR13_HI 0x100 -#define PT_FPR13_LO 0x104 -#define PT_FPR14_HI 0x108 -#define PT_FPR14_LO 0x10C -#define PT_FPR15_HI 0x110 -#define PT_FPR15_LO 0x114 -#define PT_CR_9 0x118 -#define PT_CR_10 0x11C -#define PT_CR_11 0x120 -#define PT_IEEE_IP 0x13C -#define PT_LASTOFF PT_IEEE_IP -#define PT_ENDREGS 0x140-1 - -#define GPR_SIZE 4 -#define CR_SIZE 4 - -#define STACK_FRAME_OVERHEAD 96 /* size of minimum stack frame */ - -#else /* __s390x__ */ - #define PT_PSWMASK 0x00 #define PT_PSWADDR 0x08 #define PT_GPR0 0x10 @@ -166,38 +78,6 @@ #define STACK_FRAME_OVERHEAD 160 /* size of minimum stack frame */ -#endif /* __s390x__ */ - -#ifndef __s390x__ - -#define PSW_MASK_PER _AC(0x40000000, UL) -#define PSW_MASK_DAT _AC(0x04000000, UL) -#define PSW_MASK_IO _AC(0x02000000, UL) -#define PSW_MASK_EXT _AC(0x01000000, UL) -#define PSW_MASK_KEY _AC(0x00F00000, UL) -#define PSW_MASK_BASE _AC(0x00080000, UL) /* always one */ -#define PSW_MASK_MCHECK _AC(0x00040000, UL) -#define PSW_MASK_WAIT _AC(0x00020000, UL) -#define PSW_MASK_PSTATE _AC(0x00010000, UL) -#define PSW_MASK_ASC _AC(0x0000C000, UL) -#define PSW_MASK_CC _AC(0x00003000, UL) -#define PSW_MASK_PM _AC(0x00000F00, UL) -#define PSW_MASK_RI _AC(0x00000000, UL) -#define PSW_MASK_EA _AC(0x00000000, UL) -#define PSW_MASK_BA _AC(0x00000000, UL) - -#define PSW_MASK_USER _AC(0x0000FF00, UL) - -#define PSW_ADDR_AMODE _AC(0x80000000, UL) -#define PSW_ADDR_INSN _AC(0x7FFFFFFF, UL) - -#define PSW_ASC_PRIMARY _AC(0x00000000, UL) -#define PSW_ASC_ACCREG _AC(0x00004000, UL) -#define PSW_ASC_SECONDARY _AC(0x00008000, UL) -#define PSW_ASC_HOME _AC(0x0000C000, UL) - -#else /* __s390x__ */ - #define PSW_MASK_PER _AC(0x4000000000000000, UL) #define PSW_MASK_DAT _AC(0x0400000000000000, UL) #define PSW_MASK_IO _AC(0x0200000000000000, UL) @@ -224,8 +104,6 @@ #define PSW_ASC_SECONDARY _AC(0x0000800000000000, UL) #define PSW_ASC_HOME _AC(0x0000C00000000000, 
UL) -#endif /* __s390x__ */ - #define NUM_GPRS 16 #define NUM_FPRS 16 #define NUM_CRS 16 @@ -242,7 +120,8 @@ #define PTRACE_OLDSETOPTIONS 21 #define PTRACE_SYSEMU 31 #define PTRACE_SYSEMU_SINGLESTEP 32 -#ifndef __ASSEMBLY__ + +#ifndef __ASSEMBLER__ #include <linux/stddef.h> #include <linux/types.h> @@ -307,9 +186,7 @@ typedef struct { #define PER_EM_MASK 0xE8000000UL typedef struct { -#ifdef __s390x__ unsigned : 32; -#endif /* __s390x__ */ unsigned em_branching : 1; unsigned em_instruction_fetch : 1; /* @@ -450,6 +327,6 @@ struct user_regs_struct { unsigned long ieee_instruction_pointer; /* obsolete, always 0 */ }; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _UAPI_S390_PTRACE_H */ diff --git a/arch/s390/include/uapi/asm/schid.h b/arch/s390/include/uapi/asm/schid.h index a3e1cf168553..d804d1a5b1b3 100644 --- a/arch/s390/include/uapi/asm/schid.h +++ b/arch/s390/include/uapi/asm/schid.h @@ -4,7 +4,7 @@ #include <linux/types.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct subchannel_id { __u32 cssid : 8; @@ -15,6 +15,6 @@ struct subchannel_id { __u32 sch_no : 16; } __attribute__ ((packed, aligned(4))); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _UAPIASM_SCHID_H */ diff --git a/arch/s390/include/uapi/asm/sigcontext.h b/arch/s390/include/uapi/asm/sigcontext.h index 8b35033334c4..7c90b30c50fd 100644 --- a/arch/s390/include/uapi/asm/sigcontext.h +++ b/arch/s390/include/uapi/asm/sigcontext.h @@ -17,24 +17,12 @@ #define __NUM_VXRS_LOW 16 #define __NUM_VXRS_HIGH 16 -#ifndef __s390x__ - -/* Has to be at least _NSIG_WORDS from asm/signal.h */ -#define _SIGCONTEXT_NSIG 64 -#define _SIGCONTEXT_NSIG_BPW 32 -/* Size of stack frame allocated when calling signal handler. */ -#define __SIGNAL_FRAMESIZE 96 - -#else /* __s390x__ */ - /* Has to be at least _NSIG_WORDS from asm/signal.h */ #define _SIGCONTEXT_NSIG 64 #define _SIGCONTEXT_NSIG_BPW 64 /* Size of stack frame allocated when calling signal handler. 
*/ #define __SIGNAL_FRAMESIZE 160 -#endif /* __s390x__ */ - #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW) #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS) @@ -66,9 +54,6 @@ typedef struct typedef struct { -#ifndef __s390x__ - unsigned long gprs_high[__NUM_GPRS]; -#endif unsigned long long vxrs_low[__NUM_VXRS_LOW]; __vector128 vxrs_high[__NUM_VXRS_HIGH]; unsigned char __reserved[128]; diff --git a/arch/s390/include/uapi/asm/stat.h b/arch/s390/include/uapi/asm/stat.h index ac253d23606b..21599298c2f5 100644 --- a/arch/s390/include/uapi/asm/stat.h +++ b/arch/s390/include/uapi/asm/stat.h @@ -8,74 +8,6 @@ #ifndef _S390_STAT_H #define _S390_STAT_H -#ifndef __s390x__ -struct __old_kernel_stat { - unsigned short st_dev; - unsigned short st_ino; - unsigned short st_mode; - unsigned short st_nlink; - unsigned short st_uid; - unsigned short st_gid; - unsigned short st_rdev; - unsigned long st_size; - unsigned long st_atime; - unsigned long st_mtime; - unsigned long st_ctime; -}; - -struct stat { - unsigned short st_dev; - unsigned short __pad1; - unsigned long st_ino; - unsigned short st_mode; - unsigned short st_nlink; - unsigned short st_uid; - unsigned short st_gid; - unsigned short st_rdev; - unsigned short __pad2; - unsigned long st_size; - unsigned long st_blksize; - unsigned long st_blocks; - unsigned long st_atime; - unsigned long st_atime_nsec; - unsigned long st_mtime; - unsigned long st_mtime_nsec; - unsigned long st_ctime; - unsigned long st_ctime_nsec; - unsigned long __unused4; - unsigned long __unused5; -}; - -/* This matches struct stat64 in glibc2.1, hence the absolutely - * insane amounts of padding around dev_t's. - */ -struct stat64 { - unsigned long long st_dev; - unsigned int __pad1; -#define STAT64_HAS_BROKEN_ST_INO 1 - unsigned long __st_ino; - unsigned int st_mode; - unsigned int st_nlink; - unsigned long st_uid; - unsigned long st_gid; - unsigned long long st_rdev; - unsigned int __pad3; - long long st_size; - unsigned long st_blksize; - unsigned char __pad4[4]; - unsigned long __pad5; /* future possible st_blocks high bits */ - unsigned long st_blocks; /* Number 512-byte blocks allocated. 
*/ - unsigned long st_atime; - unsigned long st_atime_nsec; - unsigned long st_mtime; - unsigned long st_mtime_nsec; - unsigned long st_ctime; - unsigned long st_ctime_nsec; /* will be high 32 bits of ctime someday */ - unsigned long long st_ino; -}; - -#else /* __s390x__ */ - struct stat { unsigned long st_dev; unsigned long st_ino; @@ -97,8 +29,6 @@ struct stat { unsigned long __unused[3]; }; -#endif /* __s390x__ */ - #define STAT_HAVE_NSEC 1 #endif diff --git a/arch/s390/include/uapi/asm/types.h b/arch/s390/include/uapi/asm/types.h index 84457dbb26b4..4ab468c5032e 100644 --- a/arch/s390/include/uapi/asm/types.h +++ b/arch/s390/include/uapi/asm/types.h @@ -10,7 +10,7 @@ #include <asm-generic/int-ll64.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ typedef unsigned long addr_t; typedef __signed__ long saddr_t; @@ -25,6 +25,6 @@ typedef struct { }; } __attribute__((packed, aligned(4))) __vector128; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _UAPI_S390_TYPES_H */ diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index 01b5fe8b9db6..b0c5afe19db2 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h @@ -8,10 +8,6 @@ #ifndef _UAPI_ASM_S390_UNISTD_H_ #define _UAPI_ASM_S390_UNISTD_H_ -#ifdef __s390x__ #include <asm/unistd_64.h> -#else -#include <asm/unistd_32.h> -#endif #endif /* _UAPI_ASM_S390_UNISTD_H_ */ diff --git a/arch/s390/include/uapi/asm/uvdevice.h b/arch/s390/include/uapi/asm/uvdevice.h index b9c2f14a6af3..4947f26ad9fb 100644 --- a/arch/s390/include/uapi/asm/uvdevice.h +++ b/arch/s390/include/uapi/asm/uvdevice.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* - * Copyright IBM Corp. 2022 + * Copyright IBM Corp. 2022, 2024 * Author(s): Steffen Eiden <seiden@linux.ibm.com> */ #ifndef __S390_ASM_UVDEVICE_H @@ -52,7 +52,7 @@ struct uvio_uvdev_info { __u64 supp_uvio_cmds; /* * If bit `n` is set, the Ultravisor(UV) supports the UV-call - * corresponding to the IOCTL with nr `n` in the calling contextx (host + * corresponding to the IOCTL with nr `n` in the calling context (host * or guest). The value is only valid if the corresponding bit in * @supp_uvio_cmds is set as well. 
*/ @@ -71,6 +71,7 @@ struct uvio_uvdev_info { #define UVIO_ATT_ADDITIONAL_MAX_LEN 0x8000 #define UVIO_ADD_SECRET_MAX_LEN 0x100000 #define UVIO_LIST_SECRETS_LEN 0x1000 +#define UVIO_RETR_SECRET_MAX_LEN 0x2000 #define UVIO_DEVICE_NAME "uv" #define UVIO_TYPE_UVC 'u' @@ -81,22 +82,25 @@ enum UVIO_IOCTL_NR { UVIO_IOCTL_ADD_SECRET_NR, UVIO_IOCTL_LIST_SECRETS_NR, UVIO_IOCTL_LOCK_SECRETS_NR, + UVIO_IOCTL_RETR_SECRET_NR, /* must be the last entry */ UVIO_IOCTL_NUM_IOCTLS }; -#define UVIO_IOCTL(nr) _IOWR(UVIO_TYPE_UVC, nr, struct uvio_ioctl_cb) -#define UVIO_IOCTL_UVDEV_INFO UVIO_IOCTL(UVIO_IOCTL_UVDEV_INFO_NR) -#define UVIO_IOCTL_ATT UVIO_IOCTL(UVIO_IOCTL_ATT_NR) -#define UVIO_IOCTL_ADD_SECRET UVIO_IOCTL(UVIO_IOCTL_ADD_SECRET_NR) -#define UVIO_IOCTL_LIST_SECRETS UVIO_IOCTL(UVIO_IOCTL_LIST_SECRETS_NR) -#define UVIO_IOCTL_LOCK_SECRETS UVIO_IOCTL(UVIO_IOCTL_LOCK_SECRETS_NR) +#define UVIO_IOCTL(nr) _IOWR(UVIO_TYPE_UVC, nr, struct uvio_ioctl_cb) +#define UVIO_IOCTL_UVDEV_INFO UVIO_IOCTL(UVIO_IOCTL_UVDEV_INFO_NR) +#define UVIO_IOCTL_ATT UVIO_IOCTL(UVIO_IOCTL_ATT_NR) +#define UVIO_IOCTL_ADD_SECRET UVIO_IOCTL(UVIO_IOCTL_ADD_SECRET_NR) +#define UVIO_IOCTL_LIST_SECRETS UVIO_IOCTL(UVIO_IOCTL_LIST_SECRETS_NR) +#define UVIO_IOCTL_LOCK_SECRETS UVIO_IOCTL(UVIO_IOCTL_LOCK_SECRETS_NR) +#define UVIO_IOCTL_RETR_SECRET UVIO_IOCTL(UVIO_IOCTL_RETR_SECRET_NR) -#define UVIO_SUPP_CALL(nr) (1ULL << (nr)) -#define UVIO_SUPP_UDEV_INFO UVIO_SUPP_CALL(UVIO_IOCTL_UDEV_INFO_NR) -#define UVIO_SUPP_ATT UVIO_SUPP_CALL(UVIO_IOCTL_ATT_NR) -#define UVIO_SUPP_ADD_SECRET UVIO_SUPP_CALL(UVIO_IOCTL_ADD_SECRET_NR) -#define UVIO_SUPP_LIST_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LIST_SECRETS_NR) -#define UVIO_SUPP_LOCK_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LOCK_SECRETS_NR) +#define UVIO_SUPP_CALL(nr) (1ULL << (nr)) +#define UVIO_SUPP_UDEV_INFO UVIO_SUPP_CALL(UVIO_IOCTL_UDEV_INFO_NR) +#define UVIO_SUPP_ATT UVIO_SUPP_CALL(UVIO_IOCTL_ATT_NR) +#define UVIO_SUPP_ADD_SECRET UVIO_SUPP_CALL(UVIO_IOCTL_ADD_SECRET_NR) +#define UVIO_SUPP_LIST_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LIST_SECRETS_NR) +#define UVIO_SUPP_LOCK_SECRETS UVIO_SUPP_CALL(UVIO_IOCTL_LOCK_SECRETS_NR) +#define UVIO_SUPP_RETR_SECRET UVIO_SUPP_CALL(UVIO_IOCTL_RETR_SECRET_NR) #endif /* __S390_ASM_UVDEVICE_H */
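Editor's note: as a usage illustration for the uvdevice ioctl interface in the uvdevice.h hunks above, here is a minimal userspace sketch that issues UVIO_IOCTL_LIST_SECRETS. It assumes the layout of struct uvio_ioctl_cb (declared in asm/uvdevice.h but not part of the hunks shown on this page) and the conventional /dev/uv misc device node; treat it as a sketch under those assumptions, not a verified client.

/* Hypothetical client sketch: list UV secret metadata through the uv device.
 * Assumes struct uvio_ioctl_cb as declared in asm/uvdevice.h (not shown in
 * the hunks above) and that the device node is /dev/uv.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/uvdevice.h>

int main(void)
{
	struct uvio_ioctl_cb cb = { 0 };
	void *list;
	int fd, ret;

	/* The List Secrets argument buffer is one page (UVIO_LIST_SECRETS_LEN). */
	list = calloc(1, UVIO_LIST_SECRETS_LEN);
	if (!list)
		return 1;
	fd = open("/dev/" UVIO_DEVICE_NAME, O_RDONLY);
	if (fd < 0) {
		perror("open");
		free(list);
		return 1;
	}
	cb.argument_addr = (unsigned long)list;
	cb.argument_len = UVIO_LIST_SECRETS_LEN;
	ret = ioctl(fd, UVIO_IOCTL_LIST_SECRETS, &cb);
	if (ret)
		perror("UVIO_IOCTL_LIST_SECRETS");
	else
		printf("uv rc=0x%x rrc=0x%x\n", cb.uv_rc, cb.uv_rrc);
	close(fd);
	free(list);
	return ret ? 1 : 0;
}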

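Editor's note: the uv.h kernel-doc earlier on this page specifies the List Secrets pagination protocol (start at index 0, re-issue the UVC with next_secret_idx while rc is UVC_RC_MORE_DATA). A minimal in-kernel sketch of a caller that walks the complete list could look as follows; UVC_RC_EXECUTED and UVC_RC_MORE_DATA come from parts of uv.h not shown in this diff, and the walker itself is illustrative, not kernel code.

/* Illustrative sketch only: iterate over all UV secret metadata using the
 * pagination protocol documented at uv_list_secrets(). Assumes a kernel
 * context with asm/uv.h included; UVC_RC_EXECUTED and UVC_RC_MORE_DATA are
 * defined in portions of uv.h not shown in this diff.
 */
static int walk_uv_secrets(void (*cb)(const struct uv_secret_list_item *item))
{
	struct uv_secret_list *list;
	u16 rc, idx = 0;
	int i;

	/* uv_list_secrets() needs one page of writable memory. */
	list = (struct uv_secret_list *)__get_free_page(GFP_KERNEL);
	if (!list)
		return -ENOMEM;
	do {
		uv_list_secrets(list, idx, &rc, NULL);
		if (rc != UVC_RC_EXECUTED && rc != UVC_RC_MORE_DATA) {
			free_page((unsigned long)list);
			return -EIO;
		}
		for (i = 0; i < list->num_secr_stored; i++)
			cb(&list->secrets[i]);
		/* More entries than fit in one page: continue where the UV stopped. */
		idx = list->next_secret_idx;
	} while (rc == UVC_RC_MORE_DATA);
	free_page((unsigned long)list);
	return 0;
}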