path: root/arch/s390/include
author    Linus Torvalds <torvalds@linux-foundation.org> 2023-04-30 11:43:31 -0700
committer Linus Torvalds <torvalds@linux-foundation.org> 2023-04-30 11:43:31 -0700
commit    10de638d8ea57ebab4231ea077bed01d9bade775 (patch)
tree      f9de1b131e1a94c1cbe051e55cda1ba9b9418ee9 /arch/s390/include
parent    d55571c0084465f1f7e1e29f22bd910d366a6e1d (diff)
parent    2a405f6bb3a5b2baaa74dfc5aaa0e1b99145bd1b (diff)
Merge tag 's390-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Vasily Gorbik:

 - Add support for stackleak feature. Also allow specifying
   architecture-specific stackleak poison function to enable faster
   implementation. On s390, the mvc-based implementation helps decrease
   typical overhead from a factor of 3 to just 25%

 - Convert all assembler files to use SYM* style macros, deprecating the
   ENTRY() macro and other annotations. Select ARCH_USE_SYM_ANNOTATIONS

 - Improve KASLR to also randomize module and special amode31 code base
   load addresses

 - Rework decompressor memory tracking to support memory holes and
   improve error handling

 - Add support for protected virtualization AP binding

 - Add support for set_direct_map() calls

 - Implement set_memory_rox() and noexec module_alloc()

 - Remove obsolete overriding of mem*() functions for KASAN

 - Rework kexec/kdump to avoid using nodat_stack to call purgatory

 - Convert the rest of the s390 code to use flexible-array member
   instead of a zero-length array

 - Clean up uaccess inline asm

 - Enable ARCH_HAS_MEMBARRIER_SYNC_CORE

 - Convert to using CONFIG_FUNCTION_ALIGNMENT and enable
   DEBUG_FORCE_FUNCTION_ALIGN_64B

 - Resolve last_break in userspace fault reports

 - Simplify one-level sysctl registration

 - Clean up branch prediction handling

 - Rework CPU counter facility to retrieve available counter sets just
   once

 - Other various small fixes and improvements all over the code

* tag 's390-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (118 commits)
  s390/stackleak: provide fast __stackleak_poison() implementation
  stackleak: allow to specify arch specific stackleak poison function
  s390: select ARCH_USE_SYM_ANNOTATIONS
  s390/mm: use VM_FLUSH_RESET_PERMS in module_alloc()
  s390: wire up memfd_secret system call
  s390/mm: enable ARCH_HAS_SET_DIRECT_MAP
  s390/mm: use BIT macro to generate SET_MEMORY bit masks
  s390/relocate_kernel: adjust indentation
  s390/relocate_kernel: use SYM* macros instead of ENTRY(), etc.
  s390/entry: use SYM* macros instead of ENTRY(), etc.
  s390/purgatory: use SYM* macros instead of ENTRY(), etc.
  s390/kprobes: use SYM* macros instead of ENTRY(), etc.
  s390/reipl: use SYM* macros instead of ENTRY(), etc.
  s390/head64: use SYM* macros instead of ENTRY(), etc.
  s390/earlypgm: use SYM* macros instead of ENTRY(), etc.
  s390/mcount: use SYM* macros instead of ENTRY(), etc.
  s390/crc32le: use SYM* macros instead of ENTRY(), etc.
  s390/crc32be: use SYM* macros instead of ENTRY(), etc.
  s390/crypto,chacha: use SYM* macros instead of ENTRY(), etc.
  s390/amode31: use SYM* macros instead of ENTRY(), etc.
  ...
Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/ap.h           | 152
-rw-r--r--  arch/s390/include/asm/checksum.h     |  10
-rw-r--r--  arch/s390/include/asm/diag.h         |   2
-rw-r--r--  arch/s390/include/asm/entry-common.h |   5
-rw-r--r--  arch/s390/include/asm/fcx.h          |   2
-rw-r--r--  arch/s390/include/asm/kasan.h        |  31
-rw-r--r--  arch/s390/include/asm/linkage.h      |   2
-rw-r--r--  arch/s390/include/asm/mem_detect.h   | 117
-rw-r--r--  arch/s390/include/asm/nospec-insn.h  |   3
-rw-r--r--  arch/s390/include/asm/perf_event.h   |   2
-rw-r--r--  arch/s390/include/asm/pgtable.h      |   2
-rw-r--r--  arch/s390/include/asm/physmem_info.h | 171
-rw-r--r--  arch/s390/include/asm/processor.h    |  46
-rw-r--r--  arch/s390/include/asm/set_memory.h   |  36
-rw-r--r--  arch/s390/include/asm/setup.h        |  20
-rw-r--r--  arch/s390/include/asm/stacktrace.h   |  52
-rw-r--r--  arch/s390/include/asm/string.h       |  15
-rw-r--r--  arch/s390/include/asm/thread_info.h  |  10
18 files changed, 441 insertions, 237 deletions
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index c699f251a464..d5d967166bac 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -43,10 +43,11 @@ struct ap_queue_status {
unsigned int queue_empty : 1;
unsigned int replies_waiting : 1;
unsigned int queue_full : 1;
- unsigned int _pad1 : 4;
+ unsigned int : 3;
+ unsigned int async : 1;
unsigned int irq_enabled : 1;
unsigned int response_code : 8;
- unsigned int _pad2 : 16;
+ unsigned int : 16;
};
/*
@@ -86,6 +87,42 @@ static inline bool ap_instructions_available(void)
return reg1 != 0;
}
+/* TAPQ register GR2 response struct */
+struct ap_tapq_gr2 {
+ union {
+ unsigned long value;
+ struct {
+ unsigned int fac : 32; /* facility bits */
+ unsigned int apinfo : 32; /* ap type, ... */
+ };
+ struct {
+ unsigned int s : 1; /* APSC */
+ unsigned int m : 1; /* AP4KM */
+ unsigned int c : 1; /* AP4KC */
+ unsigned int mode : 3;
+ unsigned int n : 1; /* APXA */
+ unsigned int : 1;
+ unsigned int class : 8;
+ unsigned int bs : 2; /* SE bind/assoc */
+ unsigned int : 14;
+ unsigned int at : 8; /* ap type */
+ unsigned int nd : 8; /* nr of domains */
+ unsigned int : 4;
+ unsigned int ml : 4; /* apxl ml */
+ unsigned int : 4;
+ unsigned int qd : 4; /* queue depth */
+ };
+ };
+};
+
+/*
+ * Convenience defines to be used with the bs field from struct ap_tapq_gr2
+ */
+#define AP_BS_Q_USABLE 0
+#define AP_BS_Q_USABLE_NO_SECURE_KEY 1
+#define AP_BS_Q_AVAIL_FOR_BINDING 2
+#define AP_BS_Q_UNUSABLE 3
+
/**
* ap_tapq(): Test adjunct processor queue.
* @qid: The AP queue number
@@ -93,7 +130,7 @@ static inline bool ap_instructions_available(void)
*
* Returns AP queue status structure.
*/
-static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
+static inline struct ap_queue_status ap_tapq(ap_qid_t qid, struct ap_tapq_gr2 *info)
{
union ap_queue_status_reg reg1;
unsigned long reg2;
@@ -108,7 +145,7 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
: [qid] "d" (qid)
: "cc", "0", "1", "2");
if (info)
- *info = reg2;
+ info->value = reg2;
return reg1.status;
}
@@ -116,13 +153,12 @@ static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
* @tbit: Test facilities bit
- * @info: Pointer to queue descriptor
+ * @info: Ptr to tapq gr2 struct
*
* Returns AP queue status structure.
*/
-static inline struct ap_queue_status ap_test_queue(ap_qid_t qid,
- int tbit,
- unsigned long *info)
+static inline struct ap_queue_status ap_test_queue(ap_qid_t qid, int tbit,
+ struct ap_tapq_gr2 *info)
{
if (tbit)
qid |= 1UL << 23; /* set T bit*/
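
As an aside for this hunk: a minimal, hedged sketch of a caller using the reworked ap_test_queue() interface with the typed GR2 response. The probing policy is hypothetical; the types and AP_BS_* values are the ones added above.

    #include <linux/errno.h>
    #include <asm/ap.h>

    /* Hypothetical probe: test the queue, then decode the typed
     * GR2 response instead of open-coding bit masks. */
    static int probe_ap_queue(ap_qid_t qid)
    {
            struct ap_queue_status status;
            struct ap_tapq_gr2 info;

            status = ap_test_queue(qid, 1, &info);
            if (status.response_code)
                    return -EIO;
            switch (info.bs) {
            case AP_BS_Q_USABLE:
            case AP_BS_Q_USABLE_NO_SECURE_KEY:
                    return 0;
            default:
                    return -EBUSY;  /* needs binding or unusable */
            }
    }
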
@@ -132,14 +168,18 @@ static inline struct ap_queue_status ap_test_queue(ap_qid_t qid,
/**
* ap_pqap_rapq(): Reset adjunct processor queue.
* @qid: The AP queue number
+ * @fbit: if != 0 set F bit
*
* Returns AP queue status structure.
*/
-static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
+static inline struct ap_queue_status ap_rapq(ap_qid_t qid, int fbit)
{
unsigned long reg0 = qid | (1UL << 24); /* fc 1UL is RAPQ */
union ap_queue_status_reg reg1;
+ if (fbit)
+ reg0 |= 1UL << 22;
+
asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(RAPQ) */
@@ -153,14 +193,18 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
/**
* ap_pqap_zapq(): Reset and zeroize adjunct processor queue.
* @qid: The AP queue number
+ * @fbit: if != 0 set F bit
*
* Returns AP queue status structure.
*/
-static inline struct ap_queue_status ap_zapq(ap_qid_t qid)
+static inline struct ap_queue_status ap_zapq(ap_qid_t qid, int fbit)
{
unsigned long reg0 = qid | (2UL << 24); /* fc 2UL is ZAPQ */
union ap_queue_status_reg reg1;
+ if (fbit)
+ reg0 |= 1UL << 22;
+
asm volatile(
" lgr 0,%[reg0]\n" /* qid arg into gr0 */
" .insn rre,0xb2af0000,0,0\n" /* PQAP(ZAPQ) */
@@ -180,15 +224,16 @@ struct ap_config_info {
unsigned int apxa : 1; /* N bit */
unsigned int qact : 1; /* C bit */
unsigned int rc8a : 1; /* R bit */
- unsigned char _reserved1 : 4;
- unsigned char _reserved2[3];
- unsigned char Na; /* max # of APs - 1 */
- unsigned char Nd; /* max # of Domains - 1 */
- unsigned char _reserved3[10];
+ unsigned int : 4;
+ unsigned int apsb : 1; /* B bit */
+ unsigned int : 23;
+ unsigned char na; /* max # of APs - 1 */
+ unsigned char nd; /* max # of Domains - 1 */
+ unsigned char _reserved0[10];
unsigned int apm[8]; /* AP ID mask */
unsigned int aqm[8]; /* AP (usage) queue mask */
unsigned int adm[8]; /* AP (control) domain mask */
- unsigned char _reserved4[16];
+ unsigned char _reserved1[16];
} __aligned(8);
/**
@@ -318,6 +363,59 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
return reg1.status;
}
+/*
+ * ap_bapq(): SE bind AP queue.
+ * @qid: The AP queue number
+ *
+ * Returns AP queue status structure.
+ *
+ * Invoking this function in a non-SE environment
+ * may cause a specification exception.
+ */
+static inline struct ap_queue_status ap_bapq(ap_qid_t qid)
+{
+ unsigned long reg0 = qid | (7UL << 24); /* fc 7 is BAPQ */
+ union ap_queue_status_reg reg1;
+
+ asm volatile(
+ " lgr 0,%[reg0]\n" /* qid arg into gr0 */
+ " .insn rre,0xb2af0000,0,0\n" /* PQAP(BAPQ) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ : [reg1] "=&d" (reg1.value)
+ : [reg0] "d" (reg0)
+ : "cc", "0", "1");
+
+ return reg1.status;
+}
+
+/*
+ * ap_aapq(): SE associate AP queue.
+ * @qid: The AP queue number
+ * @sec_idx: The secret index
+ *
+ * Returns AP queue status structure.
+ *
+ * Invoking this function in a non-SE environment
+ * may cause a specification exception.
+ */
+static inline struct ap_queue_status ap_aapq(ap_qid_t qid, unsigned int sec_idx)
+{
+ unsigned long reg0 = qid | (8UL << 24); /* fc 8 is AAPQ */
+ unsigned long reg2 = sec_idx;
+ union ap_queue_status_reg reg1;
+
+ asm volatile(
+ " lgr 0,%[reg0]\n" /* qid arg into gr0 */
+ " lgr 2,%[reg2]\n" /* secret index into gr2 */
+ " .insn rre,0xb2af0000,0,0\n" /* PQAP(AAPQ) */
+ " lgr %[reg1],1\n" /* gr1 (status) into reg1 */
+ : [reg1] "=&d" (reg1.value)
+ : [reg0] "d" (reg0), [reg2] "d" (reg2)
+ : "cc", "0", "1", "2");
+
+ return reg1.status;
+}
+
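
A hedged sketch of how a secure-execution guest might combine the two new helpers; the error policy is illustrative, the calls are the ones added above:

    #include <linux/errno.h>
    #include <asm/ap.h>

    /* Hypothetical SE-guest flow: bind the queue, then associate the
     * secret with the given index. Only meaningful in an SE
     * environment; elsewhere these PQAP subfunctions may cause a
     * specification exception. */
    static int se_bind_and_associate(ap_qid_t qid, unsigned int sec_idx)
    {
            struct ap_queue_status status;

            status = ap_bapq(qid);
            if (status.response_code)
                    return -EIO;
            status = ap_aapq(qid, sec_idx);
            return status.response_code ? -EIO : 0;
    }
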
/**
* ap_nqap(): Send message to adjunct processor queue.
* @qid: The AP queue number
@@ -359,10 +457,11 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
* ap_dqap(): Receive message from adjunct processor queue.
* @qid: The AP queue number
* @psmid: Pointer to program supplied message identifier
- * @msg: The message text
- * @length: The message length
- * @reslength: Resitual length on return
- * @resgr0: input: gr0 value (only used if != 0), output: resitual gr0 content
+ * @msg: Pointer to message buffer
+ * @msglen: Message buffer size
+ * @length: Pointer to length of actually written bytes
+ * @reslength: Residual length on return
+ * @resgr0: input: gr0 value (only used if != 0), output: residual gr0 content
*
* Returns AP queue status structure.
* Condition code 1 on DQAP means the receive has taken place
@@ -386,8 +485,9 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
* *resgr0 is to be used instead of qid to further process this entry.
*/
static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
- unsigned long long *psmid,
- void *msg, size_t length,
+ unsigned long *psmid,
+ void *msg, size_t msglen,
+ size_t *length,
size_t *reslength,
unsigned long *resgr0)
{
@@ -399,7 +499,7 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
rp1.even = 0UL;
rp1.odd = 0UL;
rp2.even = (unsigned long)msg;
- rp2.odd = (unsigned long)length;
+ rp2.odd = (unsigned long)msglen;
asm volatile(
" lgr 0,%[reg0]\n" /* qid param into gr0 */
@@ -429,11 +529,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
if (resgr0)
*resgr0 = reg0;
} else {
- *psmid = (((unsigned long long)rp1.even) << 32) + rp1.odd;
+ *psmid = (rp1.even << 32) + rp1.odd;
if (resgr0)
*resgr0 = 0;
}
+ /* update *length with the nr of bytes stored into the msg buffer */
+ if (length)
+ *length = msglen - rp2.odd;
+
return reg1.status;
}
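
To make the new buffer-size/stored-length split concrete, a minimal hypothetical receive step (buffer handling and error policy are illustrative only):

    #include <linux/errno.h>
    #include <asm/ap.h>

    /* Hypothetical single receive: bufsize is the buffer capacity
     * passed as msglen; *length reports the bytes DQAP stored. */
    static int ap_receive_one(ap_qid_t qid, void *buf, size_t bufsize)
    {
            struct ap_queue_status status;
            unsigned long psmid, resgr0 = 0;
            size_t written, residual;

            status = ap_dqap(qid, &psmid, buf, bufsize,
                             &written, &residual, &resgr0);
            if (status.response_code)
                    return -EIO;
            return (int)written;
    }
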
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index d977a3a2f619..69837eec2ff5 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -12,13 +12,7 @@
#ifndef _S390_CHECKSUM_H
#define _S390_CHECKSUM_H
-#ifdef CONFIG_GENERIC_CSUM
-
-#include <asm-generic/checksum.h>
-
-#else /* CONFIG_GENERIC_CSUM */
-
-#include <linux/uaccess.h>
+#include <linux/kasan-checks.h>
#include <linux/in6.h>
/*
@@ -40,6 +34,7 @@ static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
.odd = (unsigned long) len,
};
+ kasan_check_read(buff, len);
asm volatile(
"0: cksm %[sum],%[rp]\n"
" jo 0b\n"
@@ -135,5 +130,4 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
return csum_fold((__force __wsum)(sum >> 32));
}
-#endif /* CONFIG_GENERIC_CSUM */
#endif /* _S390_CHECKSUM_H */
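
The kasan_check_read() added above is needed because the cksm loop runs in inline assembly, which KASAN cannot instrument. A hedged sketch of the general pattern; the helper is hypothetical and a plain C loop stands in for the assembly:

    #include <linux/kasan-checks.h>
    #include <linux/types.h>

    /* Report the read explicitly, since accesses performed by inline
     * assembly are invisible to KASAN instrumentation. */
    static u64 sum_bytes(const u8 *buf, size_t len)
    {
            u64 sum = 0;
            size_t i;

            kasan_check_read(buf, len);
            for (i = 0; i < len; i++)       /* stand-in for the asm */
                    sum += buf[i];
            return sum;
    }
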
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 674a939f16ee..902e0330dd91 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -90,7 +90,7 @@ struct diag8c {
u8 num_partitions;
u16 width;
u16 height;
- u8 data[0];
+ u8 data[];
} __packed __aligned(4);
extern int diag8c(struct diag8c *out, struct ccw_dev_id *devno);
diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
index 000de2b1e67a..fdd319a622b0 100644
--- a/arch/s390/include/asm/entry-common.h
+++ b/arch/s390/include/asm/entry-common.h
@@ -60,9 +60,4 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
-static inline bool on_thread_stack(void)
-{
- return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
-}
-
#endif
diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h
index b8a028a36173..29784b4b44f6 100644
--- a/arch/s390/include/asm/fcx.h
+++ b/arch/s390/include/asm/fcx.h
@@ -286,7 +286,7 @@ struct tccb_tcat {
*/
struct tccb {
struct tccb_tcah tcah;
- u8 tca[0];
+ u8 tca[];
} __attribute__ ((packed, aligned(8)));
struct tcw *tcw_get_intrg(struct tcw *tcw);
diff --git a/arch/s390/include/asm/kasan.h b/arch/s390/include/asm/kasan.h
index e5cfc81d5b61..0cffead0f2f2 100644
--- a/arch/s390/include/asm/kasan.h
+++ b/arch/s390/include/asm/kasan.h
@@ -2,7 +2,7 @@
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
-#include <asm/pgtable.h>
+#include <linux/const.h>
#ifdef CONFIG_KASAN
@@ -13,35 +13,6 @@
#define KASAN_SHADOW_START KASAN_SHADOW_OFFSET
#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
-extern void kasan_early_init(void);
-
-/*
- * Estimate kasan memory requirements, which it will reserve
- * at the very end of available physical memory. To estimate
- * that, we take into account that kasan would require
- * 1/8 of available physical memory (for shadow memory) +
- * creating page tables for the shadow memory region.
- * To keep page tables estimates simple take the double of
- * combined ptes size.
- *
- * physmem parameter has to be already adjusted if not entire physical memory
- * would be used (e.g. due to effect of "mem=" option).
- */
-static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem)
-{
- unsigned long kasan_needs;
- unsigned long pages;
- /* for shadow memory */
- kasan_needs = round_up(physmem / 8, PAGE_SIZE);
- /* for paging structures */
- pages = DIV_ROUND_UP(kasan_needs, PAGE_SIZE);
- kasan_needs += DIV_ROUND_UP(pages, _PAGE_ENTRIES) * _PAGE_TABLE_SIZE * 2;
-
- return kasan_needs;
-}
-#else
-static inline void kasan_early_init(void) { }
-static inline unsigned long kasan_estimate_memory_needs(unsigned long physmem) { return 0; }
#endif
#endif
diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h
index c76777b15fec..df3fb7d8227b 100644
--- a/arch/s390/include/asm/linkage.h
+++ b/arch/s390/include/asm/linkage.h
@@ -4,7 +4,7 @@
#include <linux/stringify.h>
-#define __ALIGN .align 16, 0x07
+#define __ALIGN .balign CONFIG_FUNCTION_ALIGNMENT, 0x07
#define __ALIGN_STR __stringify(__ALIGN)
#endif
diff --git a/arch/s390/include/asm/mem_detect.h b/arch/s390/include/asm/mem_detect.h
deleted file mode 100644
index f9e7354036d2..000000000000
--- a/arch/s390/include/asm/mem_detect.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_S390_MEM_DETECT_H
-#define _ASM_S390_MEM_DETECT_H
-
-#include <linux/types.h>
-
-enum mem_info_source {
- MEM_DETECT_NONE = 0,
- MEM_DETECT_SCLP_STOR_INFO,
- MEM_DETECT_DIAG260,
- MEM_DETECT_SCLP_READ_INFO,
- MEM_DETECT_BIN_SEARCH
-};
-
-struct mem_detect_block {
- u64 start;
- u64 end;
-};
-
-/*
- * Storage element id is defined as 1 byte (up to 256 storage elements).
- * In practise only storage element id 0 and 1 are used).
- * According to architecture one storage element could have as much as
- * 1020 subincrements. 255 mem_detect_blocks are embedded in mem_detect_info.
- * If more mem_detect_blocks are required, a block of memory from already
- * known mem_detect_block is taken (entries_extended points to it).
- */
-#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
-
-struct mem_detect_info {
- u32 count;
- u8 info_source;
- unsigned long usable;
- struct mem_detect_block entries[MEM_INLINED_ENTRIES];
- struct mem_detect_block *entries_extended;
-};
-extern struct mem_detect_info mem_detect;
-
-void add_mem_detect_block(u64 start, u64 end);
-
-static inline int __get_mem_detect_block(u32 n, unsigned long *start,
- unsigned long *end, bool respect_usable_limit)
-{
- if (n >= mem_detect.count) {
- *start = 0;
- *end = 0;
- return -1;
- }
-
- if (n < MEM_INLINED_ENTRIES) {
- *start = (unsigned long)mem_detect.entries[n].start;
- *end = (unsigned long)mem_detect.entries[n].end;
- } else {
- *start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start;
- *end = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].end;
- }
-
- if (respect_usable_limit && mem_detect.usable) {
- if (*start >= mem_detect.usable)
- return -1;
- if (*end > mem_detect.usable)
- *end = mem_detect.usable;
- }
- return 0;
-}
-
-/**
- * for_each_mem_detect_usable_block - early online memory range iterator
- * @i: an integer used as loop variable
- * @p_start: ptr to unsigned long for start address of the range
- * @p_end: ptr to unsigned long for end address of the range
- *
- * Walks over detected online memory ranges below usable limit.
- */
-#define for_each_mem_detect_usable_block(i, p_start, p_end) \
- for (i = 0; !__get_mem_detect_block(i, p_start, p_end, true); i++)
-
-/* Walks over all detected online memory ranges disregarding usable limit. */
-#define for_each_mem_detect_block(i, p_start, p_end) \
- for (i = 0; !__get_mem_detect_block(i, p_start, p_end, false); i++)
-
-static inline unsigned long get_mem_detect_usable_total(void)
-{
- unsigned long start, end, total = 0;
- int i;
-
- for_each_mem_detect_usable_block(i, &start, &end)
- total += end - start;
-
- return total;
-}
-
-static inline void get_mem_detect_reserved(unsigned long *start,
- unsigned long *size)
-{
- *start = (unsigned long)mem_detect.entries_extended;
- if (mem_detect.count > MEM_INLINED_ENTRIES)
- *size = (mem_detect.count - MEM_INLINED_ENTRIES) * sizeof(struct mem_detect_block);
- else
- *size = 0;
-}
-
-static inline unsigned long get_mem_detect_end(void)
-{
- unsigned long start;
- unsigned long end;
-
- if (mem_detect.usable)
- return mem_detect.usable;
- if (mem_detect.count) {
- __get_mem_detect_block(mem_detect.count - 1, &start, &end, false);
- return end;
- }
- return 0;
-}
-
-#endif
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
index 7e9e99523e95..7a946c42ad13 100644
--- a/arch/s390/include/asm/nospec-insn.h
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -2,6 +2,7 @@
#ifndef _ASM_S390_NOSPEC_ASM_H
#define _ASM_S390_NOSPEC_ASM_H
+#include <linux/linkage.h>
#include <asm/dwarf.h>
#ifdef __ASSEMBLY__
@@ -16,7 +17,7 @@
.macro __THUNK_PROLOG_NAME name
#ifdef CONFIG_EXPOLINE_EXTERN
.pushsection .text,"ax",@progbits
- .align 16,0x07
+ __ALIGN
#else
.pushsection .text.\name,"axG",@progbits,\name,comdat
#endif
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index b9da71632827..9917e2717b2b 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -60,7 +60,6 @@ struct perf_sf_sde_regs {
#define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \
PERF_CPUM_SF_DIAG_MODE)
-#define PERF_CPUM_SF_FULL_BLOCKS 0x0004 /* Process full SDBs only */
#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */
#define REG_NONE 0
@@ -71,7 +70,6 @@ struct perf_sf_sde_regs {
#define SAMPL_RATE(hwc) ((hwc)->event_base)
#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
-#define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS)
#define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
#define perf_arch_fetch_caller_regs(regs, __ip) do { \
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index c1f6b46ec555..6822a11c2c8a 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -34,7 +34,7 @@ enum {
PG_DIRECT_MAP_MAX
};
-extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
+extern atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
static inline void update_page_count(int level, long count)
{
diff --git a/arch/s390/include/asm/physmem_info.h b/arch/s390/include/asm/physmem_info.h
new file mode 100644
index 000000000000..8e9c582592b3
--- /dev/null
+++ b/arch/s390/include/asm/physmem_info.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_MEM_DETECT_H
+#define _ASM_S390_MEM_DETECT_H
+
+#include <linux/types.h>
+
+enum physmem_info_source {
+ MEM_DETECT_NONE = 0,
+ MEM_DETECT_SCLP_STOR_INFO,
+ MEM_DETECT_DIAG260,
+ MEM_DETECT_SCLP_READ_INFO,
+ MEM_DETECT_BIN_SEARCH
+};
+
+struct physmem_range {
+ u64 start;
+ u64 end;
+};
+
+enum reserved_range_type {
+ RR_DECOMPRESSOR,
+ RR_INITRD,
+ RR_VMLINUX,
+ RR_AMODE31,
+ RR_IPLREPORT,
+ RR_CERT_COMP_LIST,
+ RR_MEM_DETECT_EXTENDED,
+ RR_VMEM,
+ RR_MAX
+};
+
+struct reserved_range {
+ unsigned long start;
+ unsigned long end;
+ struct reserved_range *chain;
+};
+
+/*
+ * Storage element id is defined as 1 byte (up to 256 storage elements).
+ * In practice only storage element ids 0 and 1 are used.
+ * According to the architecture one storage element could have as many as
+ * 1020 subincrements. 255 physmem_ranges are embedded in physmem_info.
+ * If more physmem_ranges are required, a block of memory from already
+ * known physmem_range is taken (online_extended points to it).
+ */
+#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
+
+struct physmem_info {
+ u32 range_count;
+ u8 info_source;
+ unsigned long usable;
+ struct reserved_range reserved[RR_MAX];
+ struct physmem_range online[MEM_INLINED_ENTRIES];
+ struct physmem_range *online_extended;
+};
+
+extern struct physmem_info physmem_info;
+
+void add_physmem_online_range(u64 start, u64 end);
+
+static inline int __get_physmem_range(u32 n, unsigned long *start,
+ unsigned long *end, bool respect_usable_limit)
+{
+ if (n >= physmem_info.range_count) {
+ *start = 0;
+ *end = 0;
+ return -1;
+ }
+
+ if (n < MEM_INLINED_ENTRIES) {
+ *start = (unsigned long)physmem_info.online[n].start;
+ *end = (unsigned long)physmem_info.online[n].end;
+ } else {
+ *start = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].start;
+ *end = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].end;
+ }
+
+ if (respect_usable_limit && physmem_info.usable) {
+ if (*start >= physmem_info.usable)
+ return -1;
+ if (*end > physmem_info.usable)
+ *end = physmem_info.usable;
+ }
+ return 0;
+}
+
+/**
+ * for_each_physmem_usable_range - early online memory range iterator
+ * @i: an integer used as loop variable
+ * @p_start: ptr to unsigned long for start address of the range
+ * @p_end: ptr to unsigned long for end address of the range
+ *
+ * Walks over detected online memory ranges below usable limit.
+ */
+#define for_each_physmem_usable_range(i, p_start, p_end) \
+ for (i = 0; !__get_physmem_range(i, p_start, p_end, true); i++)
+
+/* Walks over all detected online memory ranges disregarding usable limit. */
+#define for_each_physmem_online_range(i, p_start, p_end) \
+ for (i = 0; !__get_physmem_range(i, p_start, p_end, false); i++)
+
+static inline const char *get_physmem_info_source(void)
+{
+ switch (physmem_info.info_source) {
+ case MEM_DETECT_SCLP_STOR_INFO:
+ return "sclp storage info";
+ case MEM_DETECT_DIAG260:
+ return "diag260";
+ case MEM_DETECT_SCLP_READ_INFO:
+ return "sclp read info";
+ case MEM_DETECT_BIN_SEARCH:
+ return "binary search";
+ }
+ return "none";
+}
+
+#define RR_TYPE_NAME(t) case RR_ ## t: return #t
+static inline const char *get_rr_type_name(enum reserved_range_type t)
+{
+ switch (t) {
+ RR_TYPE_NAME(DECOMPRESSOR);
+ RR_TYPE_NAME(INITRD);
+ RR_TYPE_NAME(VMLINUX);
+ RR_TYPE_NAME(AMODE31);
+ RR_TYPE_NAME(IPLREPORT);
+ RR_TYPE_NAME(CERT_COMP_LIST);
+ RR_TYPE_NAME(MEM_DETECT_EXTENDED);
+ RR_TYPE_NAME(VMEM);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+#define for_each_physmem_reserved_type_range(t, range, p_start, p_end) \
+ for (range = &physmem_info.reserved[t], *p_start = range->start, *p_end = range->end; \
+ range && range->end; range = range->chain, \
+ *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
+
+static inline struct reserved_range *__physmem_reserved_next(enum reserved_range_type *t,
+ struct reserved_range *range)
+{
+ if (!range) {
+ range = &physmem_info.reserved[*t];
+ if (range->end)
+ return range;
+ }
+ if (range->chain)
+ return range->chain;
+ while (++*t < RR_MAX) {
+ range = &physmem_info.reserved[*t];
+ if (range->end)
+ return range;
+ }
+ return NULL;
+}
+
+#define for_each_physmem_reserved_range(t, range, p_start, p_end) \
+ for (t = 0, range = __physmem_reserved_next(&t, NULL), \
+ *p_start = range ? range->start : 0, *p_end = range ? range->end : 0; \
+ range; range = __physmem_reserved_next(&t, range), \
+ *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
+
+static inline unsigned long get_physmem_reserved(enum reserved_range_type type,
+ unsigned long *addr, unsigned long *size)
+{
+ *addr = physmem_info.reserved[type].start;
+ *size = physmem_info.reserved[type].end - physmem_info.reserved[type].start;
+ return *size;
+}
+
+#endif
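
For orientation, a short hedged sketch of early boot code consuming the new physmem_info interface; both helpers are hypothetical, the iterators and accessors are the ones defined above:

    #include <linux/printk.h>
    #include <asm/physmem_info.h>

    /* Sum up detected online memory below the usable limit */
    static unsigned long usable_online_total(void)
    {
            unsigned long start, end, total = 0;
            int i;

            for_each_physmem_usable_range(i, &start, &end)
                    total += end - start;
            return total;
    }

    /* Print every populated reserved range with its type name */
    static void dump_reserved_ranges(void)
    {
            struct reserved_range *range;
            enum reserved_range_type t;
            unsigned long start, end;

            for_each_physmem_reserved_range(t, range, &start, &end)
                    pr_info("%s: 0x%lx-0x%lx\n",
                            get_rr_type_name(t), start, end);
    }
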
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index e98d9650764b..dc17896a001a 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -99,7 +99,6 @@ void cpu_detect_mhz_feature(void);
extern const struct seq_operations cpuinfo_op;
extern void execve_tail(void);
-extern void __bpon(void);
unsigned long vdso_size(void);
/*
@@ -119,6 +118,41 @@ unsigned long vdso_size(void);
#define HAVE_ARCH_PICK_MMAP_LAYOUT
+#define __stackleak_poison __stackleak_poison
+static __always_inline void __stackleak_poison(unsigned long erase_low,
+ unsigned long erase_high,
+ unsigned long poison)
+{
+ unsigned long tmp, count;
+
+ count = erase_high - erase_low;
+ if (!count)
+ return;
+ asm volatile(
+ " cghi %[count],8\n"
+ " je 2f\n"
+ " aghi %[count],-(8+1)\n"
+ " srlg %[tmp],%[count],8\n"
+ " ltgr %[tmp],%[tmp]\n"
+ " jz 1f\n"
+ "0: stg %[poison],0(%[addr])\n"
+ " mvc 8(256-8,%[addr]),0(%[addr])\n"
+ " la %[addr],256(%[addr])\n"
+ " brctg %[tmp],0b\n"
+ "1: stg %[poison],0(%[addr])\n"
+ " larl %[tmp],3f\n"
+ " ex %[count],0(%[tmp])\n"
+ " j 4f\n"
+ "2: stg %[poison],0(%[addr])\n"
+ " j 4f\n"
+ "3: mvc 8(1,%[addr]),0(%[addr])\n"
+ "4:\n"
+ : [addr] "+&a" (erase_low), [count] "+&d" (count), [tmp] "=&a" (tmp)
+ : [poison] "d" (poison)
+ : "memory", "cc"
+ );
+}
+
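
For readability, a plain-C model of the semantics the assembly above implements, assuming an 8-byte aligned range whose size is a multiple of 8; the real version replicates the first poison word with stg+mvc in 256-byte chunks instead of storing word by word:

    /* Model only: fill [erase_low, erase_high) with the poison word */
    static inline void stackleak_poison_model(unsigned long erase_low,
                                              unsigned long erase_high,
                                              unsigned long poison)
    {
            unsigned long *p = (unsigned long *)erase_low;

            while ((unsigned long)p < erase_high)
                    *p++ = poison;
    }
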
/*
* Thread structure
*/
@@ -227,6 +261,13 @@ static __always_inline unsigned long __current_stack_pointer(void)
return sp;
}
+static __always_inline bool on_thread_stack(void)
+{
+ unsigned long ksp = S390_lowcore.kernel_stack;
+
+ return !((ksp ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
+}
+
static __always_inline unsigned short stap(void)
{
unsigned short cpu_address;
@@ -329,9 +370,6 @@ static __always_inline void __noreturn disabled_wait(void)
#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
-extern int s390_isolate_bp(void);
-extern int s390_isolate_bp_guest(void);
-
static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
{
return arch_irqs_disabled_flags(regs->psw.mask);
diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h
index 950d87bd997a..7a3eefd7a242 100644
--- a/arch/s390/include/asm/set_memory.h
+++ b/arch/s390/include/asm/set_memory.h
@@ -6,11 +6,23 @@
extern struct mutex cpa_mutex;
-#define SET_MEMORY_RO 1UL
-#define SET_MEMORY_RW 2UL
-#define SET_MEMORY_NX 4UL
-#define SET_MEMORY_X 8UL
-#define SET_MEMORY_4K 16UL
+enum {
+ _SET_MEMORY_RO_BIT,
+ _SET_MEMORY_RW_BIT,
+ _SET_MEMORY_NX_BIT,
+ _SET_MEMORY_X_BIT,
+ _SET_MEMORY_4K_BIT,
+ _SET_MEMORY_INV_BIT,
+ _SET_MEMORY_DEF_BIT,
+};
+
+#define SET_MEMORY_RO BIT(_SET_MEMORY_RO_BIT)
+#define SET_MEMORY_RW BIT(_SET_MEMORY_RW_BIT)
+#define SET_MEMORY_NX BIT(_SET_MEMORY_NX_BIT)
+#define SET_MEMORY_X BIT(_SET_MEMORY_X_BIT)
+#define SET_MEMORY_4K BIT(_SET_MEMORY_4K_BIT)
+#define SET_MEMORY_INV BIT(_SET_MEMORY_INV_BIT)
+#define SET_MEMORY_DEF BIT(_SET_MEMORY_DEF_BIT)
int __set_memory(unsigned long addr, int numpages, unsigned long flags);
@@ -34,9 +46,23 @@ static inline int set_memory_x(unsigned long addr, int numpages)
return __set_memory(addr, numpages, SET_MEMORY_X);
}
+#define set_memory_rox set_memory_rox
+static inline int set_memory_rox(unsigned long addr, int numpages)
+{
+ return __set_memory(addr, numpages, SET_MEMORY_RO | SET_MEMORY_X);
+}
+
+static inline int set_memory_rwnx(unsigned long addr, int numpages)
+{
+ return __set_memory(addr, numpages, SET_MEMORY_RW | SET_MEMORY_NX);
+}
+
static inline int set_memory_4k(unsigned long addr, int numpages)
{
return __set_memory(addr, numpages, SET_MEMORY_4K);
}
+int set_direct_map_invalid_noflush(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+
#endif
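
A small hedged example of the new helpers; the callers are hypothetical:

    #include <asm/set_memory.h>

    /* Make generated code read-only and executable in one call ... */
    static int seal_code(unsigned long addr, int numpages)
    {
            return set_memory_rox(addr, numpages);
    }

    /* ... and revert it to writable, non-executable afterwards */
    static int unseal_code(unsigned long addr, int numpages)
    {
            return set_memory_rwnx(addr, numpages);
    }
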
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 3a1f8825bc7d..f191255c60db 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -74,10 +74,6 @@ extern unsigned int zlib_dfltcc_support;
extern int noexec_disabled;
extern unsigned long ident_map_size;
-extern unsigned long pgalloc_pos;
-extern unsigned long pgalloc_end;
-extern unsigned long pgalloc_low;
-extern unsigned long __amode31_base;
/* The Write Back bit position in the physaddr is given by the SLPC PCI */
extern unsigned long mio_wb_bit_mask;
@@ -150,13 +146,13 @@ static inline unsigned long kaslr_offset(void)
return __kaslr_offset;
}
-extern int is_full_image;
-
-struct initrd_data {
- unsigned long start;
- unsigned long size;
-};
-extern struct initrd_data initrd_data;
+extern int __kaslr_enabled;
+static inline int kaslr_enabled(void)
+{
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return __kaslr_enabled;
+ return 0;
+}
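
A brief sketch of the intended use: callers test kaslr_enabled() so the compiler can discard the dependent code entirely when CONFIG_RANDOMIZE_BASE is off (the caller below is hypothetical):

    #include <asm/setup.h>

    static unsigned long effective_kernel_offset(void)
    {
            return kaslr_enabled() ? kaslr_offset() : 0;
    }
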
struct oldmem_data {
unsigned long start;
@@ -164,7 +160,7 @@ struct oldmem_data {
};
extern struct oldmem_data oldmem_data;
-static inline u32 gen_lpswe(unsigned long addr)
+static __always_inline u32 gen_lpswe(unsigned long addr)
{
BUILD_BUG_ON(addr > 0xfff);
return 0xb2b20000 | addr;
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 1802be5abb5d..78f7b729b65f 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -189,17 +189,53 @@ static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
(rettype)r2; \
})
-#define call_on_stack_noreturn(fn, stack) \
+/*
+ * Use call_nodat() to call a function with DAT disabled.
+ * Proper sign and zero extension of function arguments is done.
+ * Usage:
+ *
+ * rc = call_nodat(nr, rettype, fn, t1, a1, t2, a2, ...)
+ *
+ * - nr specifies the number of function arguments of fn.
+ * - fn is the function to be called, where fn is a physical address.
+ * - rettype is the return type of fn.
+ * - t1, a1, ... are pairs, where t1 must match the type of the first
+ * argument of fn, t2 the second, etc. a1 is the corresponding
+ * first function argument (not name), etc.
+ *
+ * fn() is called with standard C function call ABI, with the exception
+ * that no useful stackframe or stackpointer is passed via register 15.
+ * Therefore the called function must not use r15 to access the stack.
+ */
+#define call_nodat(nr, rettype, fn, ...) \
({ \
- void (*__fn)(void) = fn; \
+ rettype (*__fn)(CALL_PARM_##nr(__VA_ARGS__)) = (fn); \
+ /* aligned since psw_leave must not cross page boundary */ \
+ psw_t __aligned(16) psw_leave; \
+ psw_t psw_enter; \
+ CALL_LARGS_##nr(__VA_ARGS__); \
+ CALL_REGS_##nr; \
\
+ CALL_TYPECHECK_##nr(__VA_ARGS__); \
+ psw_enter.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT; \
+ psw_enter.addr = (unsigned long)__fn; \
asm volatile( \
- " la 15,0(%[_stack])\n" \
- " xc %[_bc](8,15),%[_bc](15)\n" \
- " brasl 14,%[_fn]\n" \
- ::[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
- [_stack] "a" (stack), [_fn] "X" (__fn)); \
- BUG(); \
+ " epsw 0,1\n" \
+ " risbg 1,0,0,31,32\n" \
+ " larl 7,1f\n" \
+ " stg 1,%[psw_leave]\n" \
+ " stg 7,8+%[psw_leave]\n" \
+ " la 7,%[psw_leave]\n" \
+ " lra 7,0(7)\n" \
+ " larl 1,0f\n" \
+ " lra 14,0(1)\n" \
+ " lpswe %[psw_enter]\n" \
+ "0: lpswe 0(7)\n" \
+ "1:\n" \
+ : CALL_FMT_##nr, [psw_leave] "=Q" (psw_leave) \
+ : [psw_enter] "Q" (psw_enter) \
+ : "7", CALL_CLOBBER_##nr); \
+ (rettype)r2; \
})
#endif /* _ASM_S390_STACKTRACE_H */
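
Following the usage comment above, a hedged example of invoking a function through call_nodat(); the wrapper is hypothetical, fn must be a physical address, and the callee must not use r15 to access the stack:

    /* Call fn(arg) with DAT disabled; fn is a physical address */
    static unsigned long run_without_dat(unsigned long (*fn)(unsigned long),
                                         unsigned long arg)
    {
            return call_nodat(1, unsigned long, fn, unsigned long, arg);
    }
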
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 3fae93ddb322..351685de53d2 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -55,18 +55,6 @@ char *strstr(const char *s1, const char *s2);
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
-extern void *__memcpy(void *dest, const void *src, size_t n);
-extern void *__memset(void *s, int c, size_t n);
-extern void *__memmove(void *dest, const void *src, size_t n);
-
-/*
- * For files that are not instrumented (e.g. mm/slub.c) we
- * should use not instrumented version of mem* functions.
- */
-
-#define memcpy(dst, src, len) __memcpy(dst, src, len)
-#define memmove(dst, src, len) __memmove(dst, src, len)
-#define memset(s, c, n) __memset(s, c, n)
#define strlen(s) __strlen(s)
#define __no_sanitize_prefix_strfunc(x) __##x
@@ -79,6 +67,9 @@ extern void *__memmove(void *dest, const void *src, size_t n);
#define __no_sanitize_prefix_strfunc(x) x
#endif /* defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) */
+void *__memcpy(void *dest, const void *src, size_t n);
+void *__memset(void *s, int c, size_t n);
+void *__memmove(void *dest, const void *src, size_t n);
void *__memset16(uint16_t *s, uint16_t v, size_t count);
void *__memset32(uint32_t *s, uint32_t v, size_t count);
void *__memset64(uint64_t *s, uint64_t v, size_t count);
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index b2ffcb4fe000..c7c97921ed8d 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -9,6 +9,9 @@
#define _ASM_THREAD_INFO_H
#include <linux/bits.h>
+#ifndef ASM_OFFSETS_C
+#include <asm/asm-offsets.h>
+#endif
/*
* General size of kernel stacks
@@ -21,13 +24,12 @@
#define BOOT_STACK_SIZE (PAGE_SIZE << 2)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+#define STACK_INIT_OFFSET (THREAD_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+
#ifndef __ASSEMBLY__
#include <asm/lowcore.h>
#include <asm/page.h>
-#define STACK_INIT_OFFSET \
- (THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs))
-
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
@@ -70,7 +72,6 @@ void arch_setup_new_exec(void);
#define TIF_PATCH_PENDING 5 /* pending live patching update */
#define TIF_PGSTE 6 /* New mm's will use 4K page tables */
#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
-#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
#define TIF_PER_TRAP 10 /* Need to handle PER trap on exit to usermode */
@@ -94,7 +95,6 @@ void arch_setup_new_exec(void);
#define _TIF_UPROBE BIT(TIF_UPROBE)
#define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE)
#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
-#define _TIF_ISOLATE_BP BIT(TIF_ISOLATE_BP)
#define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST)
#define _TIF_PER_TRAP BIT(TIF_PER_TRAP)