Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                    |  19
-rw-r--r--  arch/s390/boot/boot.h                |   8
-rw-r--r--  arch/s390/boot/decompressor.c        |   4
-rw-r--r--  arch/s390/boot/physmem_info.c        |   4
-rw-r--r--  arch/s390/boot/startup.c             |  13
-rw-r--r--  arch/s390/configs/debug_defconfig    |  14
-rw-r--r--  arch/s390/configs/defconfig          |  14
-rw-r--r--  arch/s390/include/asm/bitops.h       |  88
-rw-r--r--  arch/s390/include/asm/hugetlb.h      |   2
-rw-r--r--  arch/s390/include/asm/pgalloc.h      |  30
-rw-r--r--  arch/s390/include/asm/thread_info.h  |  50
-rw-r--r--  arch/s390/kernel/asm-offsets.c       |   1
-rw-r--r--  arch/s390/kernel/debug.c             |  12
-rw-r--r--  arch/s390/kernel/diag/diag324.c      |   4
-rw-r--r--  arch/s390/kernel/early.c             |   3
-rw-r--r--  arch/s390/kernel/hiperdispatch.c     |   2
-rw-r--r--  arch/s390/kernel/process.c           |   2
-rw-r--r--  arch/s390/kernel/topology.c          |  20
-rw-r--r--  arch/s390/kernel/uv.c                |  12
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S       |  10
-rw-r--r--  arch/s390/mm/gmap.c                  |   2
-rw-r--r--  arch/s390/mm/hugetlbpage.c           |   2
-rw-r--r--  arch/s390/mm/mmap.c                  |  10
-rw-r--r--  arch/s390/mm/pgalloc.c               |  25
-rw-r--r--  arch/s390/net/Makefile               |   2
-rw-r--r--  arch/s390/net/bpf_jit_comp.c         | 148
-rw-r--r--  arch/s390/net/bpf_timed_may_goto.S   |  45
27 files changed, 288 insertions(+), 258 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index bf680c26a33c..c4145672ca34 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -49,6 +49,13 @@ config KASAN_SHADOW_OFFSET
depends on KASAN
default 0x1C000000000000
+config CC_HAS_BUILTIN_FFS
+ def_bool !(CC_IS_GCC && GCC_VERSION < 160000)
+ help
+ GCC versions before 16.0.0 generate library calls to ffs()
+ for __builtin_ffs() even when __has_builtin(__builtin_ffs)
+ is true.
+
config CC_ASM_FLAG_OUTPUT_BROKEN
def_bool CC_IS_GCC && GCC_VERSION < 140200
help
@@ -167,8 +174,6 @@ config S390
select GENERIC_GETTIMEOFDAY
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
- select GENERIC_VDSO_DATA_STORE
- select GENERIC_VDSO_TIME_NS
select GENERIC_IOREMAP if PCI
select HAVE_ALIGNED_STRUCT_PAGE
select HAVE_ARCH_AUDITSYSCALL
@@ -199,6 +204,7 @@ config S390
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_EBPF_JIT if HAVE_MARCH_Z196_FEATURES
select HAVE_EFFICIENT_UNALIGNED_ACCESS
+ select HAVE_GENERIC_TIF_BITS
select HAVE_GUP_FAST
select HAVE_FENTRY
select HAVE_FTRACE_GRAPH_FUNC
@@ -547,15 +553,11 @@ config NODES_SHIFT
depends on NUMA
default "1"
-config SCHED_SMT
- def_bool n
-
-config SCHED_MC
- def_bool n
-
config SCHED_TOPOLOGY
def_bool y
prompt "Topology scheduler support"
+ select ARCH_SUPPORTS_SCHED_SMT
+ select ARCH_SUPPORTS_SCHED_MC
select SCHED_SMT
select SCHED_MC
help
@@ -710,7 +712,6 @@ menu "Memory setup"
config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_VMEMMAP_ENABLE
- select SPARSEMEM_VMEMMAP
config ARCH_SPARSEMEM_DEFAULT
def_bool y
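
The help text for CC_HAS_BUILTIN_FFS above describes a pure code-generation issue; a minimal userspace sketch of the symptom it guards against (illustration only, not part of the patch):

	/*
	 * With GCC < 16 on s390, the call below is emitted as an out-of-line
	 * call to ffs() even though __has_builtin(__builtin_ffs) evaluates
	 * to 1, which is why the Kconfig symbol keys off the compiler
	 * version instead of __has_builtin().
	 */
	#include <stdio.h>

	static int ffs_via_builtin(int x)
	{
		return __builtin_ffs(x);
	}

	int main(void)
	{
		printf("%d\n", ffs_via_builtin(0x90));	/* prints 5 */
		return 0;
	}
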
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index c0152db285f0..37d5b097ede5 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -10,6 +10,7 @@
#include <linux/printk.h>
#include <asm/physmem_info.h>
+#include <asm/stacktrace.h>
struct vmlinux_info {
unsigned long entry;
@@ -89,6 +90,13 @@ void __noreturn jump_to_kernel(psw_t *psw);
#define boot_info(fmt, ...) boot_printk(KERN_INFO boot_fmt(fmt), ##__VA_ARGS__)
#define boot_debug(fmt, ...) boot_printk(KERN_DEBUG boot_fmt(fmt), ##__VA_ARGS__)
+#define boot_panic(...) do { \
+ boot_emerg(__VA_ARGS__); \
+ print_stacktrace(current_frame_address()); \
+ boot_emerg(" -- System halted\n"); \
+ disabled_wait(); \
+} while (0)
+
extern struct machine_info machine;
extern int boot_console_loglevel;
extern bool boot_ignore_loglevel;
diff --git a/arch/s390/boot/decompressor.c b/arch/s390/boot/decompressor.c
index 03500b9d9fb9..8d1bc25a6bf4 100644
--- a/arch/s390/boot/decompressor.c
+++ b/arch/s390/boot/decompressor.c
@@ -68,9 +68,7 @@ static void decompress_error(char *m)
{
if (bootdebug)
boot_rb_dump();
- boot_emerg("Decompression error: %s\n", m);
- boot_emerg(" -- System halted\n");
- disabled_wait();
+ boot_panic("Decompression error: %s\n", m);
}
unsigned long mem_safe_offset(void)
diff --git a/arch/s390/boot/physmem_info.c b/arch/s390/boot/physmem_info.c
index 45e3d057cfaa..1f2ca5435838 100644
--- a/arch/s390/boot/physmem_info.c
+++ b/arch/s390/boot/physmem_info.c
@@ -228,9 +228,7 @@ static void die_oom(unsigned long size, unsigned long align, unsigned long min,
boot_emerg("Usable online memory total: %lu Reserved: %lu Free: %lu\n",
total_mem, total_reserved_mem,
total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
- print_stacktrace(current_frame_address());
- boot_emerg(" -- System halted\n");
- disabled_wait();
+ boot_panic("Oom\n");
}
static void _physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 93684a775716..3fbd25b9498f 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -44,13 +44,6 @@ u64 __bootdata_preserved(clock_comparator_max) = -1UL;
u64 __bootdata_preserved(stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);
-void error(char *x)
-{
- boot_emerg("%s\n", x);
- boot_emerg(" -- System halted\n");
- disabled_wait();
-}
-
static char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static void detect_machine_type(void)
@@ -220,10 +213,10 @@ static void rescue_initrd(unsigned long min, unsigned long max)
static void copy_bootdata(void)
{
if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
- error(".boot.data section size mismatch");
+ boot_panic(".boot.data section size mismatch\n");
memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
- error(".boot.preserved.data section size mismatch");
+ boot_panic(".boot.preserved.data section size mismatch\n");
memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}
@@ -237,7 +230,7 @@ static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
for (reloc = (int *)__vmlinux_relocs_64_start; reloc < (int *)__vmlinux_relocs_64_end; reloc++) {
loc = (long)*reloc + phys_offset;
if (loc < min_addr || loc > max_addr)
- error("64-bit relocation outside of kernel!\n");
+ boot_panic("64-bit relocation outside of kernel!\n");
*(u64 *)loc += offset;
}
}
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 5e616bc988ac..b31c1df90257 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -118,10 +118,17 @@ CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
+CONFIG_TLS=m
+CONFIG_TLS_DEVICE=y
+CONFIG_TLS_TOE=y
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
+CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
+CONFIG_DIBS=y
+CONFIG_DIBS_LO=y
+CONFIG_SMC=m
CONFIG_SMC_DIAG=m
-CONFIG_SMC_LO=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -542,6 +549,7 @@ CONFIG_NLMON=m
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_SF=y
# CONFIG_NET_VENDOR_META is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
@@ -658,9 +666,6 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_BTRFS_DEBUG=y
CONFIG_BTRFS_ASSERT=y
CONFIG_NILFS2_FS=m
-CONFIG_BCACHEFS_FS=y
-CONFIG_BCACHEFS_QUOTA=y
-CONFIG_BCACHEFS_POSIX_ACL=y
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
@@ -761,7 +766,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
-CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 094599cdaf4d..161dad7ef211 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -109,10 +109,17 @@ CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
+CONFIG_TLS=m
+CONFIG_TLS_DEVICE=y
+CONFIG_TLS_TOE=y
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
+CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
+CONFIG_DIBS=y
+CONFIG_DIBS_LO=y
+CONFIG_SMC=m
CONFIG_SMC_DIAG=m
-CONFIG_SMC_LO=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -532,6 +539,7 @@ CONFIG_NLMON=m
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_SF=y
# CONFIG_NET_VENDOR_META is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
@@ -645,9 +653,6 @@ CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
-CONFIG_BCACHEFS_FS=m
-CONFIG_BCACHEFS_QUOTA=y
-CONFIG_BCACHEFS_POSIX_ACL=y
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
@@ -745,7 +750,6 @@ CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
-CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARIA=m
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index a5ca0a947691..ec945fb60c02 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -122,6 +122,8 @@ static inline bool test_bit_inv(unsigned long nr,
return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
+#ifndef CONFIG_CC_HAS_BUILTIN_FFS
+
/**
* __flogr - find leftmost one
* @word - The word to search
@@ -130,11 +132,12 @@ static inline bool test_bit_inv(unsigned long nr,
* where the most significant bit has bit number 0.
* If no bit is set this function returns 64.
*/
-static inline unsigned char __flogr(unsigned long word)
+static __always_inline __attribute_const__ unsigned long __flogr(unsigned long word)
{
- if (__builtin_constant_p(word)) {
- unsigned long bit = 0;
+ unsigned long bit;
+ if (__builtin_constant_p(word)) {
+ bit = 0;
if (!word)
return 64;
if (!(word & 0xffffffff00000000UL)) {
@@ -163,86 +166,49 @@ static inline unsigned char __flogr(unsigned long word)
}
return bit;
} else {
- union register_pair rp;
+ union register_pair rp __uninitialized;
rp.even = word;
- asm volatile(
- " flogr %[rp],%[rp]\n"
- : [rp] "+d" (rp.pair) : : "cc");
- return rp.even;
+ asm("flogr %[rp],%[rp]"
+ : [rp] "+d" (rp.pair) : : "cc");
+ bit = rp.even;
+ /*
+ * The result of the flogr instruction is a value in the range
+ * of 0..64. Let the compiler know that the AND operation can
+ * be optimized away.
+ */
+ __assume(bit <= 64);
+ return bit & 127;
}
}
/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline unsigned long __ffs(unsigned long word)
-{
- return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
-}
-
-/**
* ffs - find first bit set
* @word: the word to search
*
* This is defined the same way as the libc and
* compiler builtin ffs routines (man ffs).
*/
-static inline int ffs(int word)
+static __always_inline __flatten __attribute_const__ int ffs(int word)
{
- unsigned long mask = 2 * BITS_PER_LONG - 1;
unsigned int val = (unsigned int)word;
- return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
-}
-
-/**
- * __fls - find last (most-significant) set bit in a long word
- * @word: the word to search
- *
- * Undefined if no set bit exists, so code should check against 0 first.
- */
-static inline unsigned long __fls(unsigned long word)
-{
- return __flogr(word) ^ (BITS_PER_LONG - 1);
+ return BITS_PER_LONG - __flogr(-val & val);
}
-/**
- * fls64 - find last set bit in a 64-bit word
- * @word: the word to search
- *
- * This is defined in a similar way as the libc and compiler builtin
- * ffsll, but returns the position of the most significant set bit.
- *
- * fls64(value) returns 0 if value is 0 or the position of the last
- * set bit if value is nonzero. The last (most significant) bit is
- * at position 64.
- */
-static inline int fls64(unsigned long word)
-{
- unsigned long mask = 2 * BITS_PER_LONG - 1;
+#else /* CONFIG_CC_HAS_BUILTIN_FFS */
- return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
-}
+#include <asm-generic/bitops/builtin-ffs.h>
-/**
- * fls - find last (most-significant) bit set
- * @word: the word to search
- *
- * This is defined the same way as ffs.
- * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
- */
-static inline int fls(unsigned int word)
-{
- return fls64(word);
-}
+#endif /* CONFIG_CC_HAS_BUILTIN_FFS */
+#include <asm-generic/bitops/builtin-__ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/builtin-__fls.h>
+#include <asm-generic/bitops/builtin-fls.h>
+#include <asm-generic/bitops/fls64.h>
#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>
-#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
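
A quick way to convince yourself of the new ffs() formula above: -x & x isolates the lowest set bit, flogr counts leading zeros (64 for a zero input), so BITS_PER_LONG - flogr(-x & x) yields the 1-based bit position and 0 for x == 0. A small userspace sketch with a portable stand-in for the flogr instruction (illustration only):

	#include <assert.h>

	#define BITS_PER_LONG 64

	/* Portable stand-in for flogr: leading zeros of a 64-bit word, 64 for 0. */
	static unsigned long flogr(unsigned long word)
	{
		unsigned long bit = 0;

		if (!word)
			return 64;
		while (!(word & (1UL << 63))) {
			word <<= 1;
			bit++;
		}
		return bit;
	}

	static int my_ffs(int word)
	{
		unsigned int val = (unsigned int)word;

		return BITS_PER_LONG - flogr(-val & val);
	}

	int main(void)
	{
		assert(my_ffs(0) == 0);
		assert(my_ffs(1) == 1);
		assert(my_ffs(0x80000000) == 32);
		return 0;
	}
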
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 931fcc413598..69131736daaa 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -39,7 +39,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
}
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
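
The `.f` suffix here (and in the gmap, hugetlbpage, and uv hunks below) follows from the folio/page flags word being wrapped in a small struct on current kernels; roughly (field naming per the hunks, the typedef name is an assumption):

	/* Sketch of the type change the &folio->flags.f accesses adapt to: */
	typedef struct {
		unsigned long f;	/* the actual page/folio flags word */
	} memdesc_flags_t;		/* name assumed; see include/linux/mm_types.h */

	/* old: clear_bit(PG_arch_1, &folio->flags);
	 * new: clear_bit(PG_arch_1, &folio->flags.f); */
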
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 5345398df653..a16e65072371 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -19,12 +19,16 @@
#define CRST_ALLOC_ORDER 2
-unsigned long *crst_table_alloc(struct mm_struct *);
+unsigned long *crst_table_alloc_noprof(struct mm_struct *);
+#define crst_table_alloc(...) alloc_hooks(crst_table_alloc_noprof(__VA_ARGS__))
void crst_table_free(struct mm_struct *, unsigned long *);
-unsigned long *page_table_alloc(struct mm_struct *);
-struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm);
+unsigned long *page_table_alloc_noprof(struct mm_struct *);
+#define page_table_alloc(...) alloc_hooks(page_table_alloc_noprof(__VA_ARGS__))
void page_table_free(struct mm_struct *, unsigned long *);
+
+struct ptdesc *page_table_alloc_pgste_noprof(struct mm_struct *mm);
+#define page_table_alloc_pgste(...) alloc_hooks(page_table_alloc_pgste_noprof(__VA_ARGS__))
void page_table_free_pgste(struct ptdesc *ptdesc);
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
@@ -48,9 +52,9 @@ static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long
return addr;
}
-static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline p4d_t *p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long address)
{
- unsigned long *table = crst_table_alloc(mm);
+ unsigned long *table = crst_table_alloc_noprof(mm);
if (!table)
return NULL;
@@ -59,6 +63,7 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
return (p4d_t *) table;
}
+#define p4d_alloc_one(...) alloc_hooks(p4d_alloc_one_noprof(__VA_ARGS__))
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
@@ -69,9 +74,9 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
crst_table_free(mm, (unsigned long *) p4d);
}
-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pud_t *pud_alloc_one_noprof(struct mm_struct *mm, unsigned long address)
{
- unsigned long *table = crst_table_alloc(mm);
+ unsigned long *table = crst_table_alloc_noprof(mm);
if (!table)
return NULL;
@@ -80,6 +85,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
return (pud_t *) table;
}
+#define pud_alloc_one(...) alloc_hooks(pud_alloc_one_noprof(__VA_ARGS__))
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
@@ -90,9 +96,9 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
crst_table_free(mm, (unsigned long *) pud);
}
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
+static inline pmd_t *pmd_alloc_one_noprof(struct mm_struct *mm, unsigned long vmaddr)
{
- unsigned long *table = crst_table_alloc(mm);
+ unsigned long *table = crst_table_alloc_noprof(mm);
if (!table)
return NULL;
@@ -103,6 +109,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
}
return (pmd_t *) table;
}
+#define pmd_alloc_one(...) alloc_hooks(pmd_alloc_one_noprof(__VA_ARGS__))
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
@@ -127,9 +134,9 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
set_pud(pud, __pud(_REGION3_ENTRY | __pa(pmd)));
}
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+static inline pgd_t *pgd_alloc_noprof(struct mm_struct *mm)
{
- unsigned long *table = crst_table_alloc(mm);
+ unsigned long *table = crst_table_alloc_noprof(mm);
if (!table)
return NULL;
@@ -137,6 +144,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
return (pgd_t *) table;
}
+#define pgd_alloc(...) alloc_hooks(pgd_alloc_noprof(__VA_ARGS__))
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
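
The `_noprof` suffix plus `alloc_hooks()` wrapper used throughout this header is the memory-allocation-profiling convention: the real allocator keeps the `_noprof` name and the public name becomes a macro that attributes the allocation to its call site. A sketch of the shape only; the real definition lives in include/linux/alloc_tag.h and also handles the runtime-disabled case:

	#ifdef CONFIG_MEM_ALLOC_PROFILING
	#define alloc_hooks(_do_alloc)					\
	({								\
		DEFINE_ALLOC_TAG(_alloc_tag);	/* one tag per call site */ \
		alloc_hooks_tag(&_alloc_tag, _do_alloc);		\
	})
	#else
	#define alloc_hooks(_do_alloc)	(_do_alloc)
	#endif

	/* Callers keep using the unsuffixed names, e.g.:
	 *	pgd = pgd_alloc(mm);	// expands to alloc_hooks(pgd_alloc_noprof(mm))
	 */
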
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index f6ed2c8192c8..7878e9bfbf07 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -56,49 +56,31 @@ void arch_setup_new_exec(void);
/*
* thread information flags bit numbers
+ *
+ * Tell the generic TIF infrastructure which special bits s390 supports
*/
-#define TIF_NOTIFY_RESUME 0 /* callback before returning to user */
-#define TIF_SIGPENDING 1 /* signal pending */
-#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
-#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling needed */
-#define TIF_UPROBE 4 /* breakpointed or single-stepping */
-#define TIF_PATCH_PENDING 5 /* pending live patching update */
-#define TIF_ASCE_PRIMARY 6 /* primary asce is kernel asce */
-#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
-#define TIF_GUARDED_STORAGE 8 /* load guarded storage control block */
-#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
-#define TIF_PER_TRAP 10 /* Need to handle PER trap on exit to usermode */
-#define TIF_31BIT 16 /* 32bit process */
-#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
-#define TIF_SINGLE_STEP 19 /* This task is single stepped */
-#define TIF_BLOCK_STEP 20 /* This task is block stepped */
-#define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */
-#define TIF_SYSCALL_TRACE 24 /* syscall trace active */
-#define TIF_SYSCALL_AUDIT 25 /* syscall auditing active */
-#define TIF_SECCOMP 26 /* secure computing */
-#define TIF_SYSCALL_TRACEPOINT 27 /* syscall tracepoint instrumentation */
+#define HAVE_TIF_NEED_RESCHED_LAZY
+#define HAVE_TIF_RESTORE_SIGMASK
+
+#include <asm-generic/thread_info_tif.h>
+
+/* Architecture specific bits */
+#define TIF_ASCE_PRIMARY 16 /* primary asce is kernel asce */
+#define TIF_GUARDED_STORAGE 17 /* load guarded storage control block */
+#define TIF_ISOLATE_BP_GUEST 18 /* Run KVM guests with isolated BP */
+#define TIF_PER_TRAP 19 /* Need to handle PER trap on exit to usermode */
+#define TIF_31BIT 20 /* 32bit process */
+#define TIF_SINGLE_STEP 21 /* This task is single stepped */
+#define TIF_BLOCK_STEP 22 /* This task is block stepped */
+#define TIF_UPROBE_SINGLESTEP 23 /* This task is uprobe single stepped */
-#define _TIF_NOTIFY_RESUME BIT(TIF_NOTIFY_RESUME)
-#define _TIF_SIGPENDING BIT(TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED BIT(TIF_NEED_RESCHED)
-#define _TIF_NEED_RESCHED_LAZY BIT(TIF_NEED_RESCHED_LAZY)
-#define _TIF_UPROBE BIT(TIF_UPROBE)
-#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
#define _TIF_ASCE_PRIMARY BIT(TIF_ASCE_PRIMARY)
-#define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL)
#define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE)
#define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST)
#define _TIF_PER_TRAP BIT(TIF_PER_TRAP)
#define _TIF_31BIT BIT(TIF_31BIT)
-#define _TIF_MEMDIE BIT(TIF_MEMDIE)
-#define _TIF_RESTORE_SIGMASK BIT(TIF_RESTORE_SIGMASK)
#define _TIF_SINGLE_STEP BIT(TIF_SINGLE_STEP)
#define _TIF_BLOCK_STEP BIT(TIF_BLOCK_STEP)
#define _TIF_UPROBE_SINGLESTEP BIT(TIF_UPROBE_SINGLESTEP)
-#define _TIF_SYSCALL_TRACE BIT(TIF_SYSCALL_TRACE)
-#define _TIF_SYSCALL_AUDIT BIT(TIF_SYSCALL_AUDIT)
-#define _TIF_SECCOMP BIT(TIF_SECCOMP)
-#define _TIF_SYSCALL_TRACEPOINT BIT(TIF_SYSCALL_TRACEPOINT)
#endif /* _ASM_THREAD_INFO_H */
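
For reference, the renumbered arch-specific flags all live above the range used by <asm-generic/thread_info_tif.h>; the resulting masks follow directly from the definitions above:

	/* BIT(n) is 1UL << n */
	_TIF_ASCE_PRIMARY	== 0x00010000	/* BIT(16) */
	_TIF_GUARDED_STORAGE	== 0x00020000	/* BIT(17) */
	_TIF_ISOLATE_BP_GUEST	== 0x00040000	/* BIT(18) */
	_TIF_PER_TRAP		== 0x00080000	/* BIT(19) */
	_TIF_31BIT		== 0x00100000	/* BIT(20) */
	_TIF_SINGLE_STEP	== 0x00200000	/* BIT(21) */
	_TIF_BLOCK_STEP		== 0x00400000	/* BIT(22) */
	_TIF_UPROBE_SINGLESTEP	== 0x00800000	/* BIT(23) */
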
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 95ecad9c7d7d..a8915663e917 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -4,6 +4,7 @@
* This code generates raw asm output which is post-processed to extract
* and format the required data.
*/
+#define COMPILE_OFFSETS
#include <linux/kbuild.h>
#include <linux/sched.h>
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index c62100dc62c8..6a26f202441d 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1416,18 +1416,12 @@ static inline char *debug_get_user_string(const char __user *user_buf,
{
char *buffer;
- buffer = kmalloc(user_len + 1, GFP_KERNEL);
- if (!buffer)
- return ERR_PTR(-ENOMEM);
- if (copy_from_user(buffer, user_buf, user_len) != 0) {
- kfree(buffer);
- return ERR_PTR(-EFAULT);
- }
+ buffer = memdup_user_nul(user_buf, user_len);
+ if (IS_ERR(buffer))
+ return buffer;
/* got the string, now strip linefeed. */
if (buffer[user_len - 1] == '\n')
buffer[user_len - 1] = 0;
- else
- buffer[user_len] = 0;
return buffer;
}
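
memdup_user_nul() allocates user_len + 1 bytes, copies from userspace, NUL-terminates, and returns an ERR_PTR on failure, which is what makes the removed branches redundant; roughly the open-coded equivalent (illustration only):

	char *buffer = kmalloc(user_len + 1, GFP_KERNEL);

	if (!buffer)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len)) {
		kfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	buffer[user_len] = '\0';	/* always terminated, hence no "else" branch */
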
diff --git a/arch/s390/kernel/diag/diag324.c b/arch/s390/kernel/diag/diag324.c
index 7fa4c0b7eb6c..f0a8b4841fb9 100644
--- a/arch/s390/kernel/diag/diag324.c
+++ b/arch/s390/kernel/diag/diag324.c
@@ -116,7 +116,7 @@ static void pibwork_handler(struct work_struct *work)
mutex_lock(&pibmutex);
timedout = ktime_add_ns(data->expire, PIBWORK_DELAY);
if (ktime_before(ktime_get(), timedout)) {
- mod_delayed_work(system_wq, &pibwork, nsecs_to_jiffies(PIBWORK_DELAY));
+ mod_delayed_work(system_percpu_wq, &pibwork, nsecs_to_jiffies(PIBWORK_DELAY));
goto out;
}
vfree(data->pib);
@@ -174,7 +174,7 @@ long diag324_pibbuf(unsigned long arg)
pib_update(data);
data->sequence++;
data->expire = ktime_add_ns(ktime_get(), tod_to_ns(data->pib->intv));
- mod_delayed_work(system_wq, &pibwork, nsecs_to_jiffies(PIBWORK_DELAY));
+ mod_delayed_work(system_percpu_wq, &pibwork, nsecs_to_jiffies(PIBWORK_DELAY));
first = false;
}
rc = data->rc;
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 9adfbdd377dc..544e5403dd91 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <asm/asm-extable.h>
#include <linux/memblock.h>
+#include <linux/kasan.h>
#include <asm/access-regs.h>
#include <asm/asm-offsets.h>
#include <asm/machine.h>
@@ -65,7 +66,7 @@ static void __init kasan_early_init(void)
{
#ifdef CONFIG_KASAN
init_task.kasan_depth = 0;
- pr_info("KernelAddressSanitizer initialized\n");
+ kasan_init_generic();
#endif
}
diff --git a/arch/s390/kernel/hiperdispatch.c b/arch/s390/kernel/hiperdispatch.c
index e7b66d046e8d..2507bc3f7757 100644
--- a/arch/s390/kernel/hiperdispatch.c
+++ b/arch/s390/kernel/hiperdispatch.c
@@ -191,7 +191,7 @@ int hd_enable_hiperdispatch(void)
return 0;
if (hd_online_cores <= hd_entitled_cores)
return 0;
- mod_delayed_work(system_wq, &hd_capacity_work, HD_DELAY_INTERVAL * hd_delay_factor);
+ mod_delayed_work(system_dfl_wq, &hd_capacity_work, HD_DELAY_INTERVAL * hd_delay_factor);
hd_update_capacities();
return 1;
}
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index f55f09cda6f8..b107dbca4ed7 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -106,7 +106,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
- unsigned long clone_flags = args->flags;
+ u64 clone_flags = args->flags;
unsigned long new_stackp = args->stack;
unsigned long tls = args->tls;
struct fake_frame
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 46569b8e47dd..1594c80e9bc4 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -509,33 +509,27 @@ int topology_cpu_init(struct cpu *cpu)
return rc;
}
-static const struct cpumask *cpu_thread_mask(int cpu)
-{
- return &cpu_topology[cpu].thread_mask;
-}
-
-
const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &cpu_topology[cpu].core_mask;
}
-static const struct cpumask *cpu_book_mask(int cpu)
+static const struct cpumask *tl_book_mask(struct sched_domain_topology_level *tl, int cpu)
{
return &cpu_topology[cpu].book_mask;
}
-static const struct cpumask *cpu_drawer_mask(int cpu)
+static const struct cpumask *tl_drawer_mask(struct sched_domain_topology_level *tl, int cpu)
{
return &cpu_topology[cpu].drawer_mask;
}
static struct sched_domain_topology_level s390_topology[] = {
- SDTL_INIT(cpu_thread_mask, cpu_smt_flags, SMT),
- SDTL_INIT(cpu_coregroup_mask, cpu_core_flags, MC),
- SDTL_INIT(cpu_book_mask, NULL, BOOK),
- SDTL_INIT(cpu_drawer_mask, NULL, DRAWER),
- SDTL_INIT(cpu_cpu_mask, NULL, PKG),
+ SDTL_INIT(tl_smt_mask, cpu_smt_flags, SMT),
+ SDTL_INIT(tl_mc_mask, cpu_core_flags, MC),
+ SDTL_INIT(tl_book_mask, NULL, BOOK),
+ SDTL_INIT(tl_drawer_mask, NULL, DRAWER),
+ SDTL_INIT(tl_pkg_mask, NULL, PKG),
{ NULL, },
};
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 47f574cd1728..93b2a01bae40 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -144,7 +144,7 @@ int uv_destroy_folio(struct folio *folio)
folio_get(folio);
rc = uv_destroy(folio_to_phys(folio));
if (!rc)
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
folio_put(folio);
return rc;
}
@@ -193,7 +193,7 @@ int uv_convert_from_secure_folio(struct folio *folio)
folio_get(folio);
rc = uv_convert_from_secure(folio_to_phys(folio));
if (!rc)
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
folio_put(folio);
return rc;
}
@@ -289,7 +289,7 @@ static int __make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
expected = expected_folio_refs(folio) + 1;
if (!folio_ref_freeze(folio, expected))
return -EBUSY;
- set_bit(PG_arch_1, &folio->flags);
+ set_bit(PG_arch_1, &folio->flags.f);
/*
* If the UVC does not succeed or fail immediately, we don't want to
* loop for long, or we might get stall notifications.
@@ -483,18 +483,18 @@ int arch_make_folio_accessible(struct folio *folio)
* convert_to_secure.
* As secure pages are never large folios, both variants can co-exists.
*/
- if (!test_bit(PG_arch_1, &folio->flags))
+ if (!test_bit(PG_arch_1, &folio->flags.f))
return 0;
rc = uv_pin_shared(folio_to_phys(folio));
if (!rc) {
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
return 0;
}
rc = uv_convert_from_secure(folio_to_phys(folio));
if (!rc) {
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
return 0;
}
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 1c606dfa595d..feecf1a6ddb4 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -209,6 +209,11 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_end = . ;
+ /* Debugging sections. */
+ STABS_DEBUG
+ DWARF_DEBUG
+ ELF_DETAILS
+
/*
* uncompressed image info used by the decompressor
* it should match struct vmlinux_info
@@ -239,11 +244,6 @@ SECTIONS
#endif
} :NONE
- /* Debugging sections. */
- STABS_DEBUG
- DWARF_DEBUG
- ELF_DETAILS
-
/*
* Make sure that the .got.plt is either completely empty or it
* contains only the three reserved double words.
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index c7defe4ed1f6..8ff6bba107e8 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2272,7 +2272,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
start = pmd_val(*pmd) & HPAGE_MASK;
end = start + HPAGE_SIZE;
__storage_key_init_range(start, end);
- set_bit(PG_arch_1, &folio->flags);
+ set_bit(PG_arch_1, &folio->flags.f);
cond_resched();
return 0;
}
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index e88c02c9e642..72e8fa136af5 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -155,7 +155,7 @@ static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
paddr = rste & PMD_MASK;
}
- if (!test_and_set_bit(PG_arch_1, &folio->flags))
+ if (!test_and_set_bit(PG_arch_1, &folio->flags.f))
__storage_key_init_range(paddr, paddr + size);
}
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 40a526d28184..197c1d9497a7 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -27,7 +27,7 @@ static unsigned long stack_maxrandom_size(void)
return STACK_RND_MASK << PAGE_SHIFT;
}
-static inline int mmap_is_legacy(struct rlimit *rlim_stack)
+static inline int mmap_is_legacy(const struct rlimit *rlim_stack)
{
if (current->personality & ADDR_COMPAT_LAYOUT)
return 1;
@@ -47,7 +47,7 @@ static unsigned long mmap_base_legacy(unsigned long rnd)
}
static inline unsigned long mmap_base(unsigned long rnd,
- struct rlimit *rlim_stack)
+ const struct rlimit *rlim_stack)
{
unsigned long gap = rlim_stack->rlim_cur;
unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
@@ -169,7 +169,7 @@ check_asce_limit:
* This function, called very early during the creation of a new
* process VM image, sets up which VM layout function to use:
*/
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
{
unsigned long random_factor = 0UL;
@@ -182,10 +182,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
*/
if (mmap_is_legacy(rlim_stack)) {
mm->mmap_base = mmap_base_legacy(random_factor);
- clear_bit(MMF_TOPDOWN, &mm->flags);
+ mm_flags_clear(MMF_TOPDOWN, mm);
} else {
mm->mmap_base = mmap_base(random_factor, rlim_stack);
- set_bit(MMF_TOPDOWN, &mm->flags);
+ mm_flags_set(MMF_TOPDOWN, mm);
}
}
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index d2f6f1f6d2fc..76d92069799f 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -14,14 +14,18 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
-unsigned long *crst_table_alloc(struct mm_struct *mm)
+unsigned long *crst_table_alloc_noprof(struct mm_struct *mm)
{
- struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
+ gfp_t gfp = GFP_KERNEL_ACCOUNT;
+ struct ptdesc *ptdesc;
unsigned long *table;
+ if (mm == &init_mm)
+ gfp &= ~__GFP_ACCOUNT;
+ ptdesc = pagetable_alloc_noprof(gfp, CRST_ALLOC_ORDER);
if (!ptdesc)
return NULL;
- table = ptdesc_to_virt(ptdesc);
+ table = ptdesc_address(ptdesc);
__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
return table;
}
@@ -112,14 +116,14 @@ err_p4d:
#ifdef CONFIG_PGSTE
-struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm)
+struct ptdesc *page_table_alloc_pgste_noprof(struct mm_struct *mm)
{
struct ptdesc *ptdesc;
u64 *table;
- ptdesc = pagetable_alloc(GFP_KERNEL, 0);
+ ptdesc = pagetable_alloc_noprof(GFP_KERNEL_ACCOUNT, 0);
if (ptdesc) {
- table = (u64 *)ptdesc_to_virt(ptdesc);
+ table = (u64 *)ptdesc_address(ptdesc);
__arch_set_page_dat(table, 1);
memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
@@ -134,19 +138,22 @@ void page_table_free_pgste(struct ptdesc *ptdesc)
#endif /* CONFIG_PGSTE */
-unsigned long *page_table_alloc(struct mm_struct *mm)
+unsigned long *page_table_alloc_noprof(struct mm_struct *mm)
{
+ gfp_t gfp = GFP_KERNEL_ACCOUNT;
struct ptdesc *ptdesc;
unsigned long *table;
- ptdesc = pagetable_alloc(GFP_KERNEL, 0);
+ if (mm == &init_mm)
+ gfp &= ~__GFP_ACCOUNT;
+ ptdesc = pagetable_alloc_noprof(gfp, 0);
if (!ptdesc)
return NULL;
if (!pagetable_pte_ctor(mm, ptdesc)) {
pagetable_free(ptdesc);
return NULL;
}
- table = ptdesc_to_virt(ptdesc);
+ table = ptdesc_address(ptdesc);
__arch_set_page_dat(table, 1);
memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
diff --git a/arch/s390/net/Makefile b/arch/s390/net/Makefile
index 8cab6deb0403..9275cf63192a 100644
--- a/arch/s390/net/Makefile
+++ b/arch/s390/net/Makefile
@@ -2,5 +2,5 @@
#
# Arch-specific network modules
#
-obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o bpf_timed_may_goto.o
obj-$(CONFIG_HAVE_PNETID) += pnet.o
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index bb17efe29d65..cf461d76e9da 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -675,20 +675,6 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp)
} while (0)
/*
- * Call r1 either directly or via __s390_indirect_jump_r1 thunk
- */
-static void call_r1(struct bpf_jit *jit)
-{
- if (nospec_uses_trampoline())
- /* brasl %r14,__s390_indirect_jump_r1 */
- EMIT6_PCREL_RILB_PTR(0xc0050000, REG_14,
- __s390_indirect_jump_r1);
- else
- /* basr %r14,%r1 */
- EMIT2(0x0d00, REG_14, REG_1);
-}
-
-/*
* Function epilogue
*/
static void bpf_jit_epilogue(struct bpf_jit *jit)
@@ -1790,20 +1776,21 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
REG_SET_SEEN(BPF_REG_5);
jit->seen |= SEEN_FUNC;
+
/*
* Copy the tail call counter to where the callee expects it.
- *
- * Note 1: The callee can increment the tail call counter, but
- * we do not load it back, since the x86 JIT does not do this
- * either.
- *
- * Note 2: We assume that the verifier does not let us call the
- * main program, which clears the tail call counter on entry.
*/
- /* mvc tail_call_cnt(4,%r15),frame_off+tail_call_cnt(%r15) */
- _EMIT6(0xd203f000 | offsetof(struct prog_frame, tail_call_cnt),
- 0xf000 | (jit->frame_off +
- offsetof(struct prog_frame, tail_call_cnt)));
+
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ /*
+ * mvc tail_call_cnt(4,%r15),
+ * frame_off+tail_call_cnt(%r15)
+ */
+ _EMIT6(0xd203f000 | offsetof(struct prog_frame,
+ tail_call_cnt),
+ 0xf000 | (jit->frame_off +
+ offsetof(struct prog_frame,
+ tail_call_cnt)));
/* Sign-extend the kfunc arguments. */
if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
@@ -1819,12 +1806,38 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
}
}
- /* lgrl %w1,func */
- EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func));
- /* %r1() */
- call_r1(jit);
- /* lgr %b0,%r2: load return value into %b0 */
- EMIT4(0xb9040000, BPF_REG_0, REG_2);
+ if ((void *)func == arch_bpf_timed_may_goto) {
+ /*
+ * arch_bpf_timed_may_goto() has a special ABI: the
+ * parameters are in BPF_REG_AX and BPF_REG_10; the
+ * return value is in BPF_REG_AX; and all GPRs except
+ * REG_W0, REG_W1, and BPF_REG_AX are callee-saved.
+ */
+
+ /* brasl %r0,func */
+ EMIT6_PCREL_RILB_PTR(0xc0050000, REG_0, (void *)func);
+ } else {
+ /* brasl %r14,func */
+ EMIT6_PCREL_RILB_PTR(0xc0050000, REG_14, (void *)func);
+ /* lgr %b0,%r2: load return value into %b0 */
+ EMIT4(0xb9040000, BPF_REG_0, REG_2);
+ }
+
+ /*
+ * Copy the potentially updated tail call counter back.
+ */
+
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ /*
+ * mvc frame_off+tail_call_cnt(%r15),
+ * tail_call_cnt(4,%r15)
+ */
+ _EMIT6(0xd203f000 | (jit->frame_off +
+ offsetof(struct prog_frame,
+ tail_call_cnt)),
+ 0xf000 | offsetof(struct prog_frame,
+ tail_call_cnt));
+
break;
}
case BPF_JMP | BPF_TAIL_CALL: {
@@ -2517,14 +2530,12 @@ static int invoke_bpf_prog(struct bpf_tramp_jit *tjit,
* goto skip;
*/
- /* %r1 = __bpf_prog_enter */
- load_imm64(jit, REG_1, (u64)bpf_trampoline_enter(p));
/* %r2 = p */
load_imm64(jit, REG_2, (u64)p);
/* la %r3,run_ctx_off(%r15) */
EMIT4_DISP(0x41000000, REG_3, REG_15, tjit->run_ctx_off);
- /* %r1() */
- call_r1(jit);
+ /* brasl %r14,__bpf_prog_enter */
+ EMIT6_PCREL_RILB_PTR(0xc0050000, REG_14, bpf_trampoline_enter(p));
/* ltgr %r7,%r2 */
EMIT4(0xb9020000, REG_7, REG_2);
/* brcl 8,skip */
@@ -2535,15 +2546,13 @@ static int invoke_bpf_prog(struct bpf_tramp_jit *tjit,
* retval = bpf_func(args, p->insnsi);
*/
- /* %r1 = p->bpf_func */
- load_imm64(jit, REG_1, (u64)p->bpf_func);
/* la %r2,bpf_args_off(%r15) */
EMIT4_DISP(0x41000000, REG_2, REG_15, tjit->bpf_args_off);
/* %r3 = p->insnsi */
if (!p->jited)
load_imm64(jit, REG_3, (u64)p->insnsi);
- /* %r1() */
- call_r1(jit);
+ /* brasl %r14,p->bpf_func */
+ EMIT6_PCREL_RILB_PTR(0xc0050000, REG_14, p->bpf_func);
/* stg %r2,retval_off(%r15) */
if (save_ret) {
if (sign_extend(jit, REG_2, m->ret_size, m->ret_flags))
@@ -2560,16 +2569,14 @@ static int invoke_bpf_prog(struct bpf_tramp_jit *tjit,
* __bpf_prog_exit(p, start, &run_ctx);
*/
- /* %r1 = __bpf_prog_exit */
- load_imm64(jit, REG_1, (u64)bpf_trampoline_exit(p));
/* %r2 = p */
load_imm64(jit, REG_2, (u64)p);
/* lgr %r3,%r7 */
EMIT4(0xb9040000, REG_3, REG_7);
/* la %r4,run_ctx_off(%r15) */
EMIT4_DISP(0x41000000, REG_4, REG_15, tjit->run_ctx_off);
- /* %r1() */
- call_r1(jit);
+ /* brasl %r14,__bpf_prog_exit */
+ EMIT6_PCREL_RILB_PTR(0xc0050000, REG_14, bpf_trampoline_exit(p));
return 0;
}
@@ -2729,9 +2736,6 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
/* lgr %r8,%r0 */
EMIT4(0xb9040000, REG_8, REG_0);
- } else {
- /* %r8 = func_addr + S390X_PATCH_SIZE */
- load_imm64(jit, REG_8, (u64)func_addr + S390X_PATCH_SIZE);
}
/*
@@ -2757,12 +2761,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
* __bpf_tramp_enter(im);
*/
- /* %r1 = __bpf_tramp_enter */
- load_imm64(jit, REG_1, (u64)__bpf_tramp_enter);
/* %r2 = im */
load_imm64(jit, REG_2, (u64)im);
- /* %r1() */
- call_r1(jit);
+ /* brasl %r14,__bpf_tramp_enter */
+ EMIT6_PCREL_RILB_PTR(0xc0050000, REG_14, __bpf_tramp_enter);
}
for (i = 0; i < fentry->nr_links; i++)
@@ -2815,13 +2817,25 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
/* mvc tail_call_cnt(4,%r15),tccnt_off(%r15) */
_EMIT6(0xd203f000 | offsetof(struct prog_frame, tail_call_cnt),
0xf000 | tjit->tccnt_off);
- /* lgr %r1,%r8 */
- EMIT4(0xb9040000, REG_1, REG_8);
- /* %r1() */
- call_r1(jit);
+ if (flags & BPF_TRAMP_F_ORIG_STACK) {
+ if (nospec_uses_trampoline())
+ /* brasl %r14,__s390_indirect_jump_r8 */
+ EMIT6_PCREL_RILB_PTR(0xc0050000, REG_14,
+ __s390_indirect_jump_r8);
+ else
+ /* basr %r14,%r8 */
+ EMIT2(0x0d00, REG_14, REG_8);
+ } else {
+ /* brasl %r14,func_addr+S390X_PATCH_SIZE */
+ EMIT6_PCREL_RILB_PTR(0xc0050000, REG_14,
+ func_addr + S390X_PATCH_SIZE);
+ }
/* stg %r2,retval_off(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
tjit->retval_off);
+ /* mvc tccnt_off(%r15),tail_call_cnt(4,%r15) */
+ _EMIT6(0xd203f000 | tjit->tccnt_off,
+ 0xf000 | offsetof(struct prog_frame, tail_call_cnt));
im->ip_after_call = jit->prg_buf + jit->prg;
@@ -2846,12 +2860,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
* __bpf_tramp_exit(im);
*/
- /* %r1 = __bpf_tramp_exit */
- load_imm64(jit, REG_1, (u64)__bpf_tramp_exit);
/* %r2 = im */
load_imm64(jit, REG_2, (u64)im);
- /* %r1() */
- call_r1(jit);
+ /* brasl %r14,__bpf_tramp_exit */
+ EMIT6_PCREL_RILB_PTR(0xc0050000, REG_14, __bpf_tramp_exit);
}
/* lmg %r2,%rN,reg_args_off(%r15) */
@@ -2860,7 +2872,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
REG_2 + (nr_reg_args - 1), REG_15,
tjit->reg_args_off);
/* lgr %r1,%r8 */
- if (!(flags & BPF_TRAMP_F_SKIP_FRAME))
+ if (!(flags & BPF_TRAMP_F_SKIP_FRAME) &&
+ (flags & BPF_TRAMP_F_ORIG_STACK))
EMIT4(0xb9040000, REG_1, REG_8);
/* lmg %r7,%r8,r7_r8_off(%r15) */
EMIT6_DISP_LH(0xeb000000, 0x0004, REG_7, REG_8, REG_15,
@@ -2879,9 +2892,12 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size);
if (flags & BPF_TRAMP_F_SKIP_FRAME)
EMIT_JUMP_REG(14);
- else
+ else if (flags & BPF_TRAMP_F_ORIG_STACK)
EMIT_JUMP_REG(1);
-
+ else
+ /* brcl 0xf,func_addr+S390X_PATCH_SIZE */
+ EMIT6_PCREL_RILC_PTR(0xc0040000, 0xf,
+ func_addr + S390X_PATCH_SIZE);
return 0;
}
@@ -2951,6 +2967,11 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
case BPF_STX | BPF_ATOMIC | BPF_DW:
if (bpf_atomic_is_load_store(insn))
return false;
+ break;
+ case BPF_LDX | BPF_MEMSX | BPF_B:
+ case BPF_LDX | BPF_MEMSX | BPF_H:
+ case BPF_LDX | BPF_MEMSX | BPF_W:
+ return false;
}
return true;
}
@@ -2989,3 +3010,8 @@ void arch_bpf_stack_walk(bool (*consume_fn)(void *, u64, u64, u64),
prev_addr = addr;
}
}
+
+bool bpf_jit_supports_timed_may_goto(void)
+{
+ return true;
+}
diff --git a/arch/s390/net/bpf_timed_may_goto.S b/arch/s390/net/bpf_timed_may_goto.S
new file mode 100644
index 000000000000..06f567a460d7
--- /dev/null
+++ b/arch/s390/net/bpf_timed_may_goto.S
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/export.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/nospec-insn.h>
+
+#define R2_OFF 0
+#define R5_OFF (R2_OFF + (5 - 2 + 1) * 8)
+#define R14_OFF (R5_OFF + 8)
+#define RETADDR_OFF (R14_OFF + 8)
+#define R15_OFF (RETADDR_OFF + 8)
+#define BACKCHAIN_OFF (R15_OFF + 8)
+#define FRAME_SIZE (BACKCHAIN_OFF + 8)
+#define FRAME_OFF (STACK_FRAME_OVERHEAD - FRAME_SIZE)
+#if (FRAME_OFF + BACKCHAIN_OFF) != __SF_BACKCHAIN
+#error Stack frame layout calculation is broken
+#endif
+
+ GEN_BR_THUNK %r1
+
+SYM_FUNC_START(arch_bpf_timed_may_goto)
+ /*
+ * This function has a special ABI: the parameters are in %r12 and
+ * %r13; the return value is in %r12; all GPRs except %r0, %r1, and
+ * %r12 are callee-saved; and the return address is in %r0.
+ */
+ stmg %r2,%r5,FRAME_OFF+R2_OFF(%r15)
+ stg %r14,FRAME_OFF+R14_OFF(%r15)
+ stg %r0,FRAME_OFF+RETADDR_OFF(%r15)
+ stg %r15,FRAME_OFF+R15_OFF(%r15)
+ lgr %r1,%r15
+ lay %r15,-FRAME_SIZE(%r15)
+ stg %r1,__SF_BACKCHAIN(%r15)
+
+ lay %r2,0(%r12,%r13)
+ brasl %r14,bpf_check_timed_may_goto
+ lgr %r12,%r2
+
+ lg %r15,FRAME_SIZE+FRAME_OFF+R15_OFF(%r15)
+ lmg %r2,%r5,FRAME_OFF+R2_OFF(%r15)
+ lg %r14,FRAME_OFF+R14_OFF(%r15)
+ lg %r1,FRAME_OFF+RETADDR_OFF(%r15)
+ BR_EX %r1
+SYM_FUNC_END(arch_bpf_timed_may_goto)
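
For reference, the layout macros at the top of this new file resolve to the following byte offsets (plain arithmetic from the definitions; FRAME_OFF additionally depends on STACK_FRAME_OVERHEAD, 160 bytes on s390x, so the build-time check effectively requires __SF_BACKCHAIN == 152):

	R2_OFF        =  0	/* stmg %r2,%r5 save area */
	R5_OFF        = 32
	R14_OFF       = 40
	RETADDR_OFF   = 48	/* saved %r0, the BPF-side return address */
	R15_OFF       = 56
	BACKCHAIN_OFF = 64
	FRAME_SIZE    = 72
	FRAME_OFF     = STACK_FRAME_OVERHEAD - 72	/* 88 with a 160-byte frame */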